gerrit.simantics Code Review - simantics/platform.git/commitdiff
Sharing org.simantics.acorn for everyone to use 28/28/1
author jsimomaa <jani.simomaa@gmail.com>
Wed, 17 Aug 2016 05:59:14 +0000 (08:59 +0300)
committer jsimomaa <jani.simomaa@gmail.com>
Wed, 17 Aug 2016 05:59:14 +0000 (08:59 +0300)
Change-Id: I3e110dd01c31dd0e0885887cfa66e42c3849bc2e

54 files changed:
bundles/org.simantics.acorn/.classpath [new file with mode: 0644]
bundles/org.simantics.acorn/.project [new file with mode: 0644]
bundles/org.simantics.acorn/.settings/org.eclipse.jdt.core.prefs [new file with mode: 0644]
bundles/org.simantics.acorn/.svn/wc.db [new file with mode: 0644]
bundles/org.simantics.acorn/META-INF/MANIFEST.MF [new file with mode: 0644]
bundles/org.simantics.acorn/OSGI-INF/component.xml [new file with mode: 0644]
bundles/org.simantics.acorn/OSGI-INF/org.simantics.acorn.AcornDriver.xml [new file with mode: 0644]
bundles/org.simantics.acorn/build.properties [new file with mode: 0644]
bundles/org.simantics.acorn/log4j.properties [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDatabaseManager.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDriver.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/AcornManagement.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/AcornSessionManagerImpl.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/ExternalizableExample.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/FileIO.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/GraphClientImpl2.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/HeadState.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/InvalidHeadStateException.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/MainProgram.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/MainState.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/Persistable.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/UndoClusterSupport.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/backup/AcornBackupProvider.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterBig.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterImpl.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterSmall.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/AcornDatabase.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Activator.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/BijectionMap.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Change.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange2.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterStream.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterSupport2.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor2.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase2.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/DebugPolicy.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/UndoClusterUpdateProcessor.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/AccessTime.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/CachingClusterSupport.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ChangeSetInfo.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterChangeSet.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterLRU.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterStreamChunk.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterUpdateOperation.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterUpdateState.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/FileInfo.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRU.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRUObject.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/db/javacore/HeadState.java [new file with mode: 0644]

diff --git a/bundles/org.simantics.acorn/.classpath b/bundles/org.simantics.acorn/.classpath
new file mode 100644 (file)
index 0000000..22f3064
--- /dev/null
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+       <classpathentry kind="src" path="src"/>
+       <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8"/>
+       <classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
+       <classpathentry kind="output" path="bin"/>
+</classpath>
diff --git a/bundles/org.simantics.acorn/.project b/bundles/org.simantics.acorn/.project
new file mode 100644 (file)
index 0000000..9726c0b
--- /dev/null
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+       <name>org.simantics.acorn</name>
+       <comment></comment>
+       <projects>
+       </projects>
+       <buildSpec>
+               <buildCommand>
+                       <name>org.eclipse.jdt.core.javabuilder</name>
+                       <arguments>
+                       </arguments>
+               </buildCommand>
+               <buildCommand>
+                       <name>org.eclipse.pde.ManifestBuilder</name>
+                       <arguments>
+                       </arguments>
+               </buildCommand>
+               <buildCommand>
+                       <name>org.eclipse.pde.SchemaBuilder</name>
+                       <arguments>
+                       </arguments>
+               </buildCommand>
+               <buildCommand>
+                       <name>org.eclipse.pde.ds.core.builder</name>
+                       <arguments>
+                       </arguments>
+               </buildCommand>
+       </buildSpec>
+       <natures>
+               <nature>org.eclipse.pde.PluginNature</nature>
+               <nature>org.eclipse.jdt.core.javanature</nature>
+       </natures>
+</projectDescription>
diff --git a/bundles/org.simantics.acorn/.settings/org.eclipse.jdt.core.prefs b/bundles/org.simantics.acorn/.settings/org.eclipse.jdt.core.prefs
new file mode 100644 (file)
index 0000000..0c68a61
--- /dev/null
@@ -0,0 +1,7 @@
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8
+org.eclipse.jdt.core.compiler.compliance=1.8
+org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
+org.eclipse.jdt.core.compiler.source=1.8
diff --git a/bundles/org.simantics.acorn/.svn/wc.db b/bundles/org.simantics.acorn/.svn/wc.db
new file mode 100644 (file)
index 0000000..9defa90
Binary files /dev/null and b/bundles/org.simantics.acorn/.svn/wc.db differ
diff --git a/bundles/org.simantics.acorn/META-INF/MANIFEST.MF b/bundles/org.simantics.acorn/META-INF/MANIFEST.MF
new file mode 100644 (file)
index 0000000..24f576b
--- /dev/null
@@ -0,0 +1,22 @@
+Manifest-Version: 1.0
+Bundle-ManifestVersion: 2
+Bundle-Name: Acorn Database for Simantics
+Bundle-SymbolicName: org.simantics.acorn
+Bundle-Version: 1.1.2.qualifier
+Bundle-Vendor: Semantum Oy
+Require-Bundle: org.apache.log4j;visibility:=reexport,
+ org.simantics.db;bundle-version="0.8.0";visibility:=reexport,
+ org.simantics.db.common;bundle-version="0.8.0";visibility:=reexport,
+ gnu.trove3;bundle-version="3.0.0",
+ gnu.trove2;bundle-version="2.0.4",
+ org.simantics.db.impl;bundle-version="0.8.0",
+ org.simantics.db.server;bundle-version="1.0.0",
+ org.simantics.compressions;bundle-version="1.0.0",
+ org.simantics.backup,
+ org.eclipse.core.runtime;bundle-version="3.11.1",
+ org.simantics.fastlz;bundle-version="1.2.1",
+ org.simantics.db.procore
+Bundle-RequiredExecutionEnvironment: JavaSE-1.8
+Bundle-ActivationPolicy: lazy
+Bundle-Activator: org.simantics.acorn.internal.Activator
+Service-Component: OSGI-INF/component.xml, OSGI-INF/org.simantics.acorn.AcornDriver.xml
diff --git a/bundles/org.simantics.acorn/OSGI-INF/component.xml b/bundles/org.simantics.acorn/OSGI-INF/component.xml
new file mode 100644 (file)
index 0000000..75dd81f
--- /dev/null
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<scr:component xmlns:scr="http://www.osgi.org/xmlns/scr/v1.1.0" name="org.simantics.acorn.backupProvider">
+    <implementation class="org.simantics.acorn.backup.AcornBackupProvider"/>
+    <service>
+        <provide interface="org.simantics.backup.IBackupProvider"/>
+    </service>
+</scr:component>
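
For orientation, a minimal consumer-side sketch (hypothetical class and method, assuming an OSGi BundleContext is available from the caller's activator) of looking up the IBackupProvider service that this component declaration publishes:

    import org.osgi.framework.BundleContext;
    import org.osgi.framework.ServiceReference;
    import org.simantics.backup.IBackupProvider;

    public class BackupProviderLookup {
        // Resolves the IBackupProvider instance that the SCR runtime registers
        // from component.xml above; returns null if the component is not active.
        public static IBackupProvider lookup(BundleContext context) {
            ServiceReference<IBackupProvider> ref = context.getServiceReference(IBackupProvider.class);
            return ref != null ? context.getService(ref) : null;
        }
    }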
diff --git a/bundles/org.simantics.acorn/OSGI-INF/org.simantics.acorn.AcornDriver.xml b/bundles/org.simantics.acorn/OSGI-INF/org.simantics.acorn.AcornDriver.xml
new file mode 100644 (file)
index 0000000..f1a97d1
--- /dev/null
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<scr:component xmlns:scr="http://www.osgi.org/xmlns/scr/v1.1.0" name="AcornDriver">
+   <implementation class="org.simantics.acorn.AcornDriver"/>
+   <service>
+      <provide interface="org.simantics.db.Driver"/>
+   </service>
+</scr:component>
diff --git a/bundles/org.simantics.acorn/build.properties b/bundles/org.simantics.acorn/build.properties
new file mode 100644 (file)
index 0000000..40374cc
--- /dev/null
@@ -0,0 +1,17 @@
+###############################################################################
+# Copyright (c) 2007, 2010 Association for Decentralized Information Management
+# in Industry THTH ry.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Eclipse Public License v1.0
+# which accompanies this distribution, and is available at
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Contributors:
+#     VTT Technical Research Centre of Finland - initial API and implementation
+###############################################################################
+output.. = bin/
+bin.includes = META-INF/,\
+               .,\
+               log4j.properties,\
+               OSGI-INF/
+source.. = src/
diff --git a/bundles/org.simantics.acorn/log4j.properties b/bundles/org.simantics.acorn/log4j.properties
new file mode 100644 (file)
index 0000000..6fecb6d
--- /dev/null
@@ -0,0 +1,63 @@
+###############################################################################
+# Copyright (c) 2007, 2010 Association for Decentralized Information Management
+# in Industry THTH ry.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Eclipse Public License v1.0
+# which accompanies this distribution, and is available at
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Contributors:
+#     VTT Technical Research Centre of Finland - initial API and implementation
+###############################################################################
+# For the general syntax of property based configuration files see the
+# documentation of org.apache.log4j.PropertyConfigurator.
+
+# The root category uses the appender called rolling. If no priority is
+# specified, the root category assumes the default priority for root
+# which is DEBUG in log4j. The root category is the only category that
+# has a default priority. All other categories need not be assigned a
+# priority in which case they inherit their priority from the
+# hierarchy.
+
+# This will provide console output on log4j configuration loading
+#log4j.debug=true
+
+log4j.rootCategory=warn, stdout
+#log4j.rootCategory=warn
+
+# BEGIN APPENDER: CONSOLE APPENDER (stdout)
+#  first:  type of appender (fully qualified class name)
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+
+#  second: Any configuration information needed for that appender.
+#    Many appenders require a layout.
+log4j.appender.stdout.layout=org.apache.log4j.TTCCLayout
+# log4j.appender.stdout.layout=org.apache.log4j.SimpleLayout
+
+# Possible information overload?
+# log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+#  additionally, some layouts can take additional information --
+#    like the ConversionPattern for the PatternLayout.
+# log4j.appender.stdout.layout.ConversionPattern=%d %-5p %-17c{2} (%30F:%L) %3x - %m%n
+# END APPENDER: CONSOLE APPENDER (stdout)
+
+# BEGIN APPENDER: ROLLING FILE APPENDER (rolling)
+#log4j.appender.rolling=com.tools.logging.PluginFileAppender
+#log4j.appender.rolling=org.apache.log4j.FileAppender
+log4j.appender.rolling=org.apache.log4j.RollingFileAppender
+log4j.appender.rolling.File=procore.log
+log4j.appender.rolling.append=true
+log4j.appender.rolling.MaxFileSize=8192KB
+# Keep one backup file
+log4j.appender.rolling.MaxBackupIndex=1
+log4j.appender.rolling.layout=org.apache.log4j.PatternLayout
+#log4j.appender.rolling.layout.ConversionPattern=%p %t %c - %m%n
+log4j.appender.rolling.layout.ConversionPattern=%-6r [%15.15t] %-5p %30.30c - %m%n
+# END APPENDER: ROLLING FILE APPENDER (rolling)
+
+# BEGIN APPENDER: PLUG-IN LOG APPENDER (plugin)
+log4j.appender.plugin=com.tools.logging.PluginLogAppender
+log4j.appender.plugin.layout=org.apache.log4j.PatternLayout
+#log4j.appender.plugin.layout.ConversionPattern=%p %t %c - %m%n
+log4j.appender.plugin.layout.ConversionPattern=%-6r [%15.15t] %-5p %30.30c - %m%n
+# END APPENDER: PLUG-IN LOG APPENDER (plugin)
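
With this configuration in place, code in the bundle logs through the plain log4j 1.x API (re-exported by the required org.apache.log4j bundle); a minimal sketch with a hypothetical class name:

    import org.apache.log4j.Logger;

    public class LoggingExample {
        private static final Logger LOG = Logger.getLogger(LoggingExample.class);

        public static void main(String[] args) {
            LOG.debug("suppressed: the root category level is 'warn'");
            LOG.warn("written to the console by the stdout ConsoleAppender");
        }
    }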
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDatabaseManager.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDatabaseManager.java
new file mode 100644 (file)
index 0000000..db2c167
--- /dev/null
@@ -0,0 +1,40 @@
+package org.simantics.acorn;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.simantics.acorn.internal.AcornDatabase;
+import org.simantics.db.Database;
+import org.simantics.db.server.ProCoreException;
+
+/**
+ * @author Tuukka Lehtonen
+ */
+public class AcornDatabaseManager {
+
+    private static Map<String, Database> dbs = new HashMap<String, Database>();
+
+    public static synchronized Database getDatabase(Path folder) throws ProCoreException {
+        Path canonical;
+        try {
+            if (!Files.exists(folder))
+                Files.createDirectories(folder);
+            canonical = folder.toRealPath();
+        } catch (IOException e) {
+            throw new ProCoreException("Could not get canonical path.", e);
+        }
+
+        String canonicalPath = canonical.toString();
+        Database db = dbs.get(canonicalPath);
+        if (null != db)
+            return db;
+
+        db = new AcornDatabase(canonical);
+        dbs.put(canonicalPath, db);
+        return db;
+    }
+
+}
\ No newline at end of file
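
AcornDatabaseManager canonicalizes the folder (creating it on demand) and caches one Database per canonical path, so repeated lookups of the same directory share a single instance. A minimal usage sketch, with a hypothetical folder name:

    import java.nio.file.Path;
    import java.nio.file.Paths;

    import org.simantics.acorn.AcornDatabaseManager;
    import org.simantics.db.Database;
    import org.simantics.db.server.ProCoreException;

    public class DatabaseCacheExample {
        public static void main(String[] args) throws ProCoreException {
            Path folder = Paths.get("acorn-db"); // hypothetical database folder
            Database a = AcornDatabaseManager.getDatabase(folder);
            Database b = AcornDatabaseManager.getDatabase(Paths.get("acorn-db"));
            // Both calls resolve to the same canonical path, so one instance is shared.
            System.out.println("same instance: " + (a == b));
        }
    }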
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDriver.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDriver.java
new file mode 100644 (file)
index 0000000..0e6d52b
--- /dev/null
@@ -0,0 +1,108 @@
+package org.simantics.acorn;
+
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Properties;
+
+import org.simantics.db.DatabaseUserAgent;
+import org.simantics.db.Driver;
+import org.simantics.db.ServerI;
+import org.simantics.db.ServerReference;
+import org.simantics.db.Session;
+import org.simantics.db.SessionReference;
+import org.simantics.db.exception.DatabaseException;
+
+public class AcornDriver implements Driver {
+
+    public static final String AcornDriverName = "acorn";
+
+    @Override
+    public String getName() {
+        return AcornDriverName;
+    }
+
+    @Override
+    public DatabaseUserAgent getDatabaseUserAgent(String address) throws DatabaseException {
+        Path dbFolder = Paths.get(address);
+        return AcornDatabaseManager.getDatabase(dbFolder).getUserAgent();
+    }
+
+    @Override
+    public void setDatabaseUserAgent(String address, DatabaseUserAgent dbUserAgent) throws DatabaseException {
+        Path dbFolder = Paths.get(address);
+        AcornDatabaseManager.getDatabase(dbFolder).setUserAgent(dbUserAgent);
+    }
+
+    @Override
+    public Session getSession(String address, Properties properties) throws DatabaseException {
+        Path dbFolder = Paths.get(address);
+        Session session = AcornSessionManagerImpl.getInstance().createSession(new SessionReference() {
+            
+            @Override
+            public ServerReference getServerReference() {
+                return new ServerReference() {
+                    
+                    @Override
+                    public Path getDBFolder() {
+                        return dbFolder;
+                    }
+                };
+            }
+
+            @Override
+            public long getSessionId() {
+                return 0L;
+            }
+        }, null);
+        if (!properties.containsKey("clientId"))
+            properties.put("clientId", dbFolder.toFile().getAbsolutePath());
+        session.registerService(Properties.class, properties);
+        Session s = session.peekService(Session.class);
+        if (null == s)
+            session.registerService(Session.class, session);
+        return session;
+    }
+
+    @Override
+    public ServerI getServer(String address, Properties properties) throws DatabaseException {
+        return new ServerI() {
+            
+            @Override
+            public void stop() throws DatabaseException {
+                // nop
+            }
+            
+            @Override
+            public void start() throws DatabaseException {
+                // nop
+            }
+            
+            @Override
+            public boolean isActive() throws DatabaseException {
+                return true;
+            }
+            
+            @Override
+            public String getAddress() throws DatabaseException {
+                return address;
+            }
+            
+            @Override
+            public String executeAndDisconnect(String command) throws DatabaseException {
+                return "";
+            }
+            
+            @Override
+            public String execute(String command) throws DatabaseException {
+                return "";
+            }
+        };
+    }
+
+    @Override
+    public Management getManagement(String address, Properties properties) throws DatabaseException {
+        Path dbFolder = Paths.get(address);
+        return new AcornManagement(dbFolder, properties);
+    }
+
+}
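
At runtime the driver is normally discovered as an org.simantics.db.Driver OSGi service (see OSGI-INF above), but its contract can be sketched with direct instantiation, which is assumed here purely for illustration; the address is interpreted as a database folder path, created on demand (hypothetical address):

    import java.util.Properties;

    import org.simantics.acorn.AcornDriver;
    import org.simantics.db.Driver;
    import org.simantics.db.Session;
    import org.simantics.db.exception.DatabaseException;

    public class AcornDriverUsage {
        public static void main(String[] args) throws DatabaseException {
            Driver driver = new AcornDriver();
            Session session = driver.getSession("acorn-db", new Properties());
            System.out.println("connected through driver: " + driver.getName());
        }
    }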
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornManagement.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornManagement.java
new file mode 100644 (file)
index 0000000..561a039
--- /dev/null
@@ -0,0 +1,51 @@
+package org.simantics.acorn;
+
+import java.nio.file.Path;
+import java.util.Properties;
+
+import org.simantics.db.Database;
+import org.simantics.db.Driver.Management;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.server.ProCoreException;
+
+public class AcornManagement implements Management {
+
+    private final Database db;
+    private final Properties properties;
+
+    AcornManagement(Path dbFolder, Properties properties) throws ProCoreException {
+        db = AcornDatabaseManager.getDatabase(dbFolder);
+        this.properties = properties;
+    }
+
+    @Override
+    public boolean exist() throws DatabaseException {
+        return db.isFolderOk();
+    }
+
+    @Override
+    public void delete() throws DatabaseException {
+        db.deleteFiles();
+        if (exist())
+            throw new DatabaseException("Failed to delete database. folder=" + db.getFolder());
+    }
+
+    @Override
+    public void create() throws DatabaseException {
+        db.initFolder(properties);
+        if (!exist())
+            throw new DatabaseException("Failed to create Acorn database. folder=" + db.getFolder());
+    }
+
+    @Override
+    public void purge() throws DatabaseException {
+        db.purgeDatabase();
+    }
+
+    @Override
+    public void shutdown() throws DatabaseException {
+        db.tryToStop();
+        db.disconnect();
+    }
+
+}
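
A sketch of the management lifecycle as exposed through Driver.getManagement, using a hypothetical address; note that create() and delete() verify the outcome via exist() and throw on failure:

    import java.util.Properties;

    import org.simantics.acorn.AcornDriver;
    import org.simantics.db.Driver;
    import org.simantics.db.Driver.Management;
    import org.simantics.db.exception.DatabaseException;

    public class ManagementExample {
        public static void main(String[] args) throws DatabaseException {
            Driver driver = new AcornDriver();
            Management mgmt = driver.getManagement("acorn-db", new Properties());
            if (!mgmt.exist())
                mgmt.create();  // initializes the folder; throws if it did not appear
            mgmt.shutdown();    // stops and disconnects the underlying Database
        }
    }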
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornSessionManagerImpl.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornSessionManagerImpl.java
new file mode 100644 (file)
index 0000000..1a1e160
--- /dev/null
@@ -0,0 +1,125 @@
+package org.simantics.acorn;
+
+import java.nio.file.Path;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.simantics.db.Database;
+import org.simantics.db.Session;
+import org.simantics.db.SessionErrorHandler;
+import org.simantics.db.SessionManager;
+import org.simantics.db.SessionReference;
+import org.simantics.db.authentication.UserAuthenticationAgent;
+import org.simantics.db.common.utils.Logger;
+import org.simantics.db.event.SessionEvent;
+import org.simantics.db.event.SessionListener;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.service.LifecycleSupport;
+import org.simantics.utils.datastructures.ListenerList;
+
+import fi.vtt.simantics.procore.internal.SessionImplDb;
+import fi.vtt.simantics.procore.internal.SessionImplSocket;
+
+public class AcornSessionManagerImpl implements SessionManager {
+
+    private static AcornSessionManagerImpl INSTANCE;
+    
+    private ConcurrentHashMap<SessionImplSocket, SessionImplSocket> sessionMap = new ConcurrentHashMap<SessionImplSocket, SessionImplSocket>();
+    private ListenerList<SessionListener> sessionListeners = new ListenerList<SessionListener>(SessionListener.class);
+    private SessionErrorHandler errorHandler;
+
+       private Database database;
+
+    private AcornSessionManagerImpl() {}
+    
+    void finish() {
+        sessionMap = null;
+        sessionListeners = null;
+    }
+
+    @Override
+    public void addSessionListener(SessionListener listener) {
+        sessionListeners.add(listener);
+    }
+
+    @Override
+    public Session createSession(SessionReference sessionReference, UserAuthenticationAgent authAgent)
+    throws DatabaseException {
+        SessionImplDb sessionImpl = new SessionImplDb(this, authAgent);
+        boolean ok = false;
+        try {
+            Path dbFolder = sessionReference.getServerReference().getDBFolder();
+            database = AcornDatabaseManager.getDatabase(dbFolder);
+            Database.Session dbSession = database.newSession(sessionImpl);
+            sessionImpl.connect(sessionReference, dbSession);
+            sessionMap.put(sessionImpl, sessionImpl);
+            fireSessionOpened(sessionImpl);
+            ok = true;
+        } catch (Throwable e) {
+            Logger.defaultLogError("Connection failed. See exception for details.", e);
+            try {
+                fireSessionClosed(sessionImpl, e);
+                sessionMap.remove(sessionImpl);
+                sessionImpl = null;
+            } catch (Throwable t) {
+                // Ignored: cleanup failure is secondary to the original connection error.
+            }
+            throw new DatabaseException(e);
+        } finally {
+            if (!ok && null != sessionImpl)
+                sessionImpl.getService(LifecycleSupport.class).close();
+        }
+        return sessionImpl;
+    }
+
+    @Override
+    public void removeSessionListener(SessionListener listener) {
+        sessionListeners.remove(listener);
+    }
+
+    private void fireSessionOpened(SessionImplSocket session) {
+        SessionEvent se = new SessionEvent(session, null);
+        for (SessionListener listener : sessionListeners.getListeners()) {
+               listener.sessionOpened(se);
+        }
+    }
+
+    private void fireSessionClosed(SessionImplSocket session, Throwable cause) {
+        SessionEvent se = new SessionEvent(session, cause);
+        for (SessionListener listener : sessionListeners.getListeners()) {
+               listener.sessionClosed(se);
+        }
+    }
+
+    @Override
+    public void shutdown(Session s, Throwable cause) {
+        SessionImplSocket sis = sessionMap.get(s);
+        if (null == sis)
+            return;
+        try {
+            fireSessionClosed(sis, cause);
+        } finally {
+            sessionMap.remove(s);
+        }
+    }
+
+    @Override
+    public SessionErrorHandler getErrorHandler() {
+        return errorHandler;
+    }
+
+    @Override
+    public void setErrorHandler(SessionErrorHandler errorHandler) {
+        this.errorHandler = errorHandler;
+    }
+
+    public synchronized static AcornSessionManagerImpl getInstance() {
+        if (INSTANCE == null)
+            INSTANCE = new AcornSessionManagerImpl();
+        return INSTANCE;
+    }
+
+    @Override
+    public Database getDatabase() {
+        return database;
+    }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java
new file mode 100644 (file)
index 0000000..5b8e5ab
--- /dev/null
@@ -0,0 +1,584 @@
+package org.simantics.acorn;
+
+import java.io.IOException;
+import java.math.BigInteger;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.simantics.acorn.cluster.ClusterImpl;
+import org.simantics.acorn.internal.ClusterSupport2;
+import org.simantics.acorn.lru.ChangeSetInfo;
+import org.simantics.acorn.lru.ClusterInfo;
+import org.simantics.acorn.lru.ClusterLRU;
+import org.simantics.acorn.lru.ClusterStreamChunk;
+import org.simantics.acorn.lru.FileInfo;
+import org.simantics.acorn.lru.LRU;
+import org.simantics.db.ClusterCreator;
+import org.simantics.db.ServiceLocator;
+import org.simantics.db.Database.Session.ClusterIds;
+import org.simantics.db.Database.Session.ResourceSegment;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.procore.cluster.ClusterTraits;
+import org.simantics.db.server.ProCoreException;
+import org.simantics.db.service.ClusterSetsSupport;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.threads.logger.ITask;
+import org.simantics.utils.threads.logger.ThreadLogger;
+
+public class ClusterManager {
+
+       private ArrayList<String> currentChanges = new ArrayList<String>();
+
+       public final Path dbFolder;
+       public Path lastSessionDirectory;
+       public Path workingDirectory;
+
+       public LRU<String, ClusterStreamChunk> streamLRU;
+       public LRU<Long, ChangeSetInfo> csLRU;
+       public ClusterLRU clusterLRU;
+       public LRU<String, FileInfo> fileLRU;
+
+       public MainState mainState;
+       public HeadState state;
+
+       private long lastSnapshot = System.nanoTime();
+
+       final public ClusterSupport2 support = new ClusterSupport2(this);
+
+       /*
+        * Public interface
+        * 
+        */
+
+       public ClusterManager(Path dbFolder) {
+               this.dbFolder = dbFolder;
+       }
+
+       public ArrayList<String> getChanges(long changeSetId) {
+               ChangeSetInfo info = csLRU.getWithoutMutex(changeSetId);
+               info.acquireMutex();
+               try {
+                       info.makeResident();
+                       return info.getCSSIds();
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       info.releaseMutex();
+               }
+       }
+
+       public ClusterBase getClusterByClusterKey(int clusterKey) throws DatabaseException {
+               return clusterLRU.getClusterByClusterKey(clusterKey);
+       }
+       
+       public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) throws DatabaseException {
+               return clusterLRU.getClusterByClusterUIDOrMake(clusterUID);
+       }
+
+       public ClusterImpl getClusterByClusterUIDOrMakeProxy(ClusterUID clusterUID) throws DatabaseException {
+               return clusterLRU.getClusterByClusterUIDOrMakeProxy(clusterUID);
+       }
+
+       public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) {
+               return clusterLRU.getClusterKeyByClusterUIDOrMake(clusterUID);
+       }
+
+       public int getClusterKeyByClusterUIDOrMakeWithoutMutex(ClusterUID clusterUID) {
+               return clusterLRU.getClusterKeyByClusterUIDOrMakeWithoutMutex(clusterUID);
+       }
+
+       public int getClusterKeyByUID(long id1, long id2) throws DatabaseException {
+               return clusterLRU.getClusterKeyByUIDWithoutMutex(id1, id2);
+       }
+       
+       public <T extends ClusterI> T getClusterProxyByResourceKey(int resourceKey) throws DatabaseException {
+               return clusterLRU.getClusterProxyByResourceKey(resourceKey);
+       }
+
+       public ClusterUID getClusterUIDByResourceKey(int resourceKey) throws DatabaseException {
+               return clusterLRU.getClusterUIDByResourceKey(resourceKey);
+       }
+
+       public ClusterUID getClusterUIDByResourceKeyWithoutMutex(int resourceKey) throws DatabaseException {
+               return clusterLRU.getClusterUIDByResourceKeyWithoutMutex(resourceKey);
+       }
+
+       /*
+        * Private implementation
+        * 
+        */
+
+       private static long countFiles(Path directory) throws IOException {
+               try (DirectoryStream<Path> ds = Files.newDirectoryStream(directory)) {
+                       int count = 0;
+                       for (@SuppressWarnings("unused") Path p : ds)
+                               ++count;
+                       return count;
+               }
+       }
+
+       public synchronized boolean makeSnapshot(ServiceLocator locator, boolean force) throws IOException {
+
+               // Limit autosave frequency: at most one snapshot per 10 seconds (10*10^9 ns)
+               if(!force && System.nanoTime() - lastSnapshot < 10*1000000000L) {
+//                 System.err.println("lastSnapshot too early");
+                   return false;
+               }
+
+               // Cluster files are always there 
+               // Nothing has been written => no need to do anything
+               long amountOfFiles = countFiles(workingDirectory);
+               if(!force && amountOfFiles < 3) {
+//                 System.err.println("amountOfFiles < 3");
+                   return false;
+               }
+
+               System.err.println("makeSnapshot");
+
+               // Schedule writing of all data to disk
+               refreshHeadState();
+
+               // Wait for all files to be written
+               clusterLRU.shutdown();
+               fileLRU.shutdown();
+               streamLRU.shutdown();
+               csLRU.shutdown();
+               
+               persistHeadState();
+               
+               mainState.save(dbFolder);
+
+               ClusterSetsSupport cssi = locator.getService(ClusterSetsSupport.class); 
+               cssi.save();
+
+               amountOfFiles = countFiles(workingDirectory);
+               
+               System.err.println(" -finished: amount of files is " + amountOfFiles);
+
+               workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
+               if (!Files.exists(workingDirectory)) {
+                   Files.createDirectories(workingDirectory);
+               }
+
+               cssi.updateReadAndWriteDirectories(lastSessionDirectory, workingDirectory);
+
+               clusterLRU.setWriteDir(workingDirectory);
+               fileLRU.setWriteDir(workingDirectory);
+               streamLRU.setWriteDir(workingDirectory);
+               csLRU.setWriteDir(workingDirectory);
+
+               clusterLRU.resume();
+               fileLRU.resume();
+               streamLRU.resume();
+               csLRU.resume();
+
+               lastSnapshot = System.nanoTime();
+               
+               return true;
+               
+       }
+       
+       public void refreshHeadState() throws IOException {
+
+               state.clusters.clear();
+               state.files.clear();
+               state.stream.clear();
+               state.cs.clear();
+
+               clusterLRU.persist(state.clusters);
+               fileLRU.persist(state.files);
+               streamLRU.persist(state.stream);
+               csLRU.persist(state.cs);
+
+       }
+       
+       public void persistHeadState() throws IOException {
+
+               // Sync current working directory
+               Files.walk(workingDirectory, 1).filter(Files::isRegularFile).forEach(FileIO::uncheckedSyncPath);
+               state.save(workingDirectory);
+               mainState.headDir++;
+       }
+
+       
+//     public void save() throws IOException {
+//
+//             refreshHeadState();
+//             
+//             clusterLRU.shutdown();
+//             fileLRU.shutdown();
+//             streamLRU.shutdown();
+//             csLRU.shutdown();
+//
+//             persistHeadState();
+//
+//             mainState.save(getBaseDirectory());
+
+//             try {
+//                     ThreadLogVisualizer visualizer = new ThreadLogVisualizer();
+//                     visualizer.read(new DataInputStream(new FileInputStream(
+//                                     ThreadLogger.LOG_FILE)));
+//                     visualizer.visualize3(new PrintStream(ThreadLogger.LOG_FILE
+//                                     + ".svg"));
+//             } catch (FileNotFoundException e) {
+//                     // TODO Auto-generated catch block
+//                     e.printStackTrace();
+//             }
+
+               // System.err.println("-- load statistics --");
+               // for(Pair<ClusterUID, Integer> entry :
+               // CollectionUtils.valueSortedEntries(histogram)) {
+               // System.err.println(" " + entry.second + " " + entry.first);
+               // }
+
+//     }
+       
+       private void acquireAll() {
+               clusterLRU.acquireMutex();
+               fileLRU.acquireMutex();
+               streamLRU.acquireMutex();
+               csLRU.acquireMutex();
+       }
+       
+       private void releaseAll() {
+               csLRU.releaseMutex();
+               streamLRU.releaseMutex();
+               fileLRU.releaseMutex();
+               clusterLRU.releaseMutex();
+       }
+
+       public void load() throws IOException {
+
+               // Main state
+               mainState = MainState.load(dbFolder);
+
+               lastSessionDirectory = dbFolder.resolve(Integer.toString(mainState.headDir - 1));
+               
+               // Head State
+               try {
+            state = HeadState.load(lastSessionDirectory);
+        } catch (InvalidHeadStateException e) {
+            // For backwards compatibility only!
+            Throwable cause = e.getCause();
+            if (cause != null) {
+                try {
+                    org.simantics.db.javacore.HeadState oldState = org.simantics.db.javacore.HeadState.load(lastSessionDirectory);
+                    
+                    HeadState newState = new HeadState();
+                    newState.clusters = oldState.clusters;
+                    newState.cs = oldState.cs;
+                    newState.files = oldState.files;
+                    newState.stream = oldState.stream;
+                    newState.headChangeSetId = oldState.headChangeSetId;
+                    newState.reservedIds = oldState.reservedIds;
+                    newState.transactionId = oldState.transactionId;
+                    state = newState;
+                } catch (InvalidHeadStateException e1) {
+                    throw new IOException("Could not load HeadState due to corruption", e1);
+                }
+            } else {
+                // This should never happen as MainState.load() checks the integrity
+                // of head.state files and rolls back in cases of corruption until a
+                // consistent state is found (could be case 0 - initial db state)
+                // IF this does happen something is completely wrong
+                throw new IOException("Could not load HeadState due to corruption", e);
+            }
+        }
+
+               workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
+               Files.createDirectories(workingDirectory);
+
+               csLRU = new LRU<Long, ChangeSetInfo>("Change Set", workingDirectory);
+               streamLRU = new LRU<String, ClusterStreamChunk>("Cluster Stream", workingDirectory);
+               clusterLRU = new ClusterLRU(this, "Cluster", workingDirectory);
+               fileLRU = new LRU<String, FileInfo>("External Value", workingDirectory);
+
+               acquireAll();
+               
+               // Clusters
+               for (String clusterKey : state.clusters) {
+                       String[] parts1 = clusterKey.split("#");
+                       String[] parts = parts1[0].split("\\.");
+                       long first = new BigInteger(parts[0], 16).longValue();
+                       long second = new BigInteger(parts[1], 16).longValue();
+                       ClusterUID uuid = ClusterUID.make(first, second);
+                       Path readDir = dbFolder.resolve(parts1[1]);
+                       int offset = Integer.parseInt(parts1[2]);
+                       int length = Integer.parseInt(parts1[3]);
+                       clusterLRU.map(new ClusterInfo(this, clusterLRU, readDir, uuid, offset, length));
+               }
+               // Files
+               for (String fileKey : state.files) {
+//                     System.err.println("loadFile: " + fileKey);
+                       String[] parts = fileKey.split("#");
+                       Path readDir = dbFolder.resolve(parts[1]);
+                       int offset = Integer.parseInt(parts[2]);
+                       int length = Integer.parseInt(parts[3]);
+                       FileInfo info = new FileInfo(fileLRU, readDir, parts[0], offset, length);
+                       fileLRU.map(info);
+               }
+               // Update chunks
+               for (String fileKey : state.stream) {
+//                     System.err.println("loadStream: " + fileKey);
+                       String[] parts = fileKey.split("#");
+                       Path readDir = dbFolder.resolve(parts[1]);
+                       int offset = Integer.parseInt(parts[2]);
+                       int length = Integer.parseInt(parts[3]);
+                       ClusterStreamChunk info = new ClusterStreamChunk(this,
+                                       streamLRU, readDir, parts[0], offset, length);
+                       streamLRU.map(info);
+               }
+               // Change sets
+               for (String fileKey : state.cs) {
+                       String[] parts = fileKey.split("#");
+                       Path readDir = dbFolder.resolve(parts[1]);
+                       Long revisionId = Long.parseLong(parts[0]);
+                       int offset = Integer.parseInt(parts[2]);
+                       int length = Integer.parseInt(parts[3]);
+                       ChangeSetInfo info = new ChangeSetInfo(csLRU, readDir, revisionId, offset, length);
+                       csLRU.map(info);
+               }
+               
+               releaseAll();
+
+       }
+
+       public <T> T clone(ClusterUID uid, ClusterCreator creator)
+                       throws DatabaseException {
+               
+               clusterLRU.ensureUpdates(uid);
+               
+               ClusterInfo info = clusterLRU.getWithoutMutex(uid);
+               return info.clone(uid, creator);
+
+       }
+
+       //private int loadCounter = 0;
+
+       public static void startLog(String msg) {
+               tasks.put(msg, ThreadLogger.getInstance().begin(msg));
+       }
+
+       public static void endLog(String msg) {
+               ITask task = tasks.get(msg);
+               if (task != null)
+                       task.finish();
+       }
+
+       static Map<String, ITask> tasks = new HashMap<String, ITask>();
+
+       public void update(ClusterUID uid, ClusterImpl clu) {
+
+               ClusterInfo info = clusterLRU.getWithoutMutex(uid);
+               info.acquireMutex();
+               try {
+                       info.update(clu);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       info.releaseMutex();
+               }
+               
+       }
+
+       public long getClusterIdOrCreate(ClusterUID clusterUID) {
+               return 1;
+       }
+
+       public int getResourceKey(ClusterUID uid, int index) {
+               return clusterLRU.getResourceKey(uid, index);
+       }
+
+       public int getResourceKeyWitoutMutex(ClusterUID uid, int index) {
+               return clusterLRU.getResourceKeyWithoutMutex(uid, index);
+       }
+
+       public ClusterIds getClusterIds() throws ProCoreException {
+
+               clusterLRU.acquireMutex();
+
+               try {
+
+                       Collection<ClusterInfo> infos = clusterLRU.values();
+                       final int status = infos.size();
+                       final long[] firsts = new long[status];
+                       final long[] seconds = new long[status];
+
+                       int index = 0;
+                       for (ClusterInfo info : infos) {
+                               firsts[index] = 0;
+                               seconds[index] = info.getKey().second;
+                               index++;
+                       }
+
+                       return new ClusterIds() {
+
+                               @Override
+                               public int getStatus() {
+                                       return status;
+                               }
+
+                               @Override
+                               public long[] getFirst() {
+                                       return firsts;
+                               }
+
+                               @Override
+                               public long[] getSecond() {
+                                       return seconds;
+                               }
+
+                       };
+
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       clusterLRU.releaseMutex();
+               }
+
+       }
+
+       public void addIntoCurrentChangeSet(String ccs) {
+               
+               csLRU.acquireMutex();
+
+               try {
+                       
+                       currentChanges.add(ccs);
+                       
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       
+                       csLRU.releaseMutex();
+                       
+               }
+
+       }
+
+       public void commitChangeSet(long changeSetId, byte[] data) {
+               csLRU.acquireMutex();
+               try {
+                       ArrayList<String> csids = new ArrayList<String>(currentChanges);
+                       currentChanges = new ArrayList<String>();
+                       new ChangeSetInfo(csLRU, changeSetId, data, csids);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       csLRU.releaseMutex();
+               }
+       }
+
+       public byte[] getMetadata(long changeSetId) {
+               
+               ChangeSetInfo info = csLRU.getWithoutMutex(changeSetId);
+               if (info == null) return null;
+               info.acquireMutex();
+               try {
+                       return info.getMetadataBytes();
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       info.releaseMutex();
+               }
+               
+       }
+
+       public byte[] getResourceFile(final byte[] clusterUID,
+                       final int resourceIndex) throws ProCoreException {
+
+               ClusterUID uid = ClusterUID.make(clusterUID, 0);
+               String key = uid.toString() + "_" + resourceIndex;
+               FileInfo info = fileLRU.getWithoutMutex(key);
+               if(info == null) return null;
+               info.acquireMutex();
+               try {
+                       return info.getResourceFile();
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       info.releaseMutex();
+               }
+
+       }
+
+       public ResourceSegment getResourceSegment(final byte[] clusterUID,
+                       final int resourceIndex, final long segmentOffset, short segmentSize)
+                       throws ProCoreException {
+
+               ClusterUID uid = ClusterUID.make(clusterUID, 0);
+
+               String key = uid.toString() + "_" + resourceIndex;
+               FileInfo info = fileLRU.getWithoutMutex(key);
+               if(info == null) return null;
+               info.acquireMutex();
+               try {
+                       return info.getResourceSegment(clusterUID, resourceIndex, segmentOffset, segmentSize);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       info.releaseMutex();
+               }
+
+       }
+
+       public void modiFileEx(ClusterUID uid, int resourceKey, long offset,
+                       long size, byte[] bytes, long pos, ClusterSupport support) {
+
+               try {
+
+                       String key = uid.toString()
+                                       + "_"
+                                       + ClusterTraits
+                                                       .getResourceIndexFromResourceKey(resourceKey);
+
+                       FileInfo info = null;
+
+                       fileLRU.acquireMutex();
+
+                       try {
+
+                               info = fileLRU.get(key);
+                               if (info == null)
+                                       info = new FileInfo(fileLRU, key, (int) (offset + size));
+                               
+                               
+                       } catch (Throwable t) {
+                               throw new IllegalStateException(t);
+                       } finally {
+                               
+                               fileLRU.releaseMutex();
+                               
+                       }
+                       
+                       info.acquireMutex();
+                       try {
+                               info.updateData(bytes, offset, pos, size);
+                       } catch (Throwable t) {
+                               throw new IllegalStateException(t);
+                       } finally {
+                               info.releaseMutex();
+                       }
+
+               } catch (DatabaseException e) {
+                       e.printStackTrace();
+               }
+
+       }
+
+    public void shutdown() {
+        clusterLRU.shutdown();
+        fileLRU.shutdown();
+        streamLRU.shutdown();
+        csLRU.shutdown();
+    }
+
+}
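
load() rebuilds every LRU entry from head-state keys of the form <id>#<directory>#<offset>#<length>, where the cluster id part additionally encodes the ClusterUID as two hex halves separated by '.'. A standalone sketch of that parsing, using a hypothetical key value:

    import java.math.BigInteger;

    public class HeadStateKeyExample {
        public static void main(String[] args) {
            // Hypothetical cluster key: "<first-hex>.<second-hex>#<dir>#<offset>#<length>"
            String clusterKey = "a1.b2#17#0#4096";
            String[] parts1 = clusterKey.split("#");
            String[] uid = parts1[0].split("\\.");
            long first = new BigInteger(uid[0], 16).longValue();
            long second = new BigInteger(uid[1], 16).longValue();
            int offset = Integer.parseInt(parts1[2]);
            int length = Integer.parseInt(parts1[3]);
            System.out.printf("uid=%x.%x dir=%s offset=%d length=%d%n",
                    first, second, parts1[1], offset, length);
        }
    }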
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/ExternalizableExample.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/ExternalizableExample.java
new file mode 100644 (file)
index 0000000..8d0bac2
--- /dev/null
@@ -0,0 +1,43 @@
+package org.simantics.acorn;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+
+public class ExternalizableExample implements Externalizable {
+
+    public int first;
+    private long second;
+    
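+    // Required by java.io.Externalizable: deserialization instantiates the
+    // class through a public no-arg constructor before calling readExternal.
+    public ExternalizableExample() {
+    }
+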
+    public ExternalizableExample(int first, long second) {
+        this.first = first;
+        this.second = second;
+    }
+    
+    @Override
+    public void writeExternal(ObjectOutput out) throws IOException {
+        out.writeInt(first);
+        out.writeLong(second);
+    }
+
+    @Override
+    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        // Read the fields back in the exact order writeExternal wrote them.
+        this.first = in.readInt();
+        this.second = in.readLong();
+    }
+
+    
+    public static void main(String[] args) {
+        Externalizable test = new ExternalizableExample(123, 3456);
+        
+        try (ObjectOutputStream stream = new ObjectOutputStream(Files.newOutputStream(Paths.get("C:/Users/Jani Simomaa/Desktop/test"), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING))) {
+            stream.writeObject(test);
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+    }
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/FileIO.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/FileIO.java
new file mode 100644 (file)
index 0000000..aa71732
--- /dev/null
@@ -0,0 +1,142 @@
+package org.simantics.acorn;
+
+import java.io.BufferedOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.channels.SeekableByteChannel;
+import java.nio.file.Files;
+import java.nio.file.OpenOption;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+import java.nio.file.attribute.FileAttribute;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.simantics.databoard.file.RuntimeIOException;
+
+public class FileIO {
+       
+    private static final FileAttribute<?>[] NO_ATTRIBUTES = new FileAttribute[0];
+    
+    private static final Set<OpenOption> CREATE_OPTIONS = new HashSet<>(2);
+    private static final Set<OpenOption> APPEND_OPTIONS = new HashSet<>(1);
+    
+    static {
+        CREATE_OPTIONS.add(StandardOpenOption.WRITE);
+        CREATE_OPTIONS.add(StandardOpenOption.CREATE);
+        
+        APPEND_OPTIONS.add(StandardOpenOption.APPEND);
+    }
+    
+       private Path path;
+       private int writePosition = 0;
+
+       private FileIO(Path path) {
+               this.path = path;
+       }
+       
+       private static Map<Path, FileIO> map = new HashMap<Path, FileIO>();
+       
+       public static FileIO get(Path path) {
+               synchronized(map) {
+                       FileIO existing = map.get(path);
+                       if(existing == null) {
+                               existing = new FileIO(path);
+                               map.put(path, existing);
+                       }
+                       return existing;
+               }
+       }
+       
+       //private static final boolean TRACE_SWAP = false;
+       private static final boolean TRACE_PERF = false;
+
+       public synchronized int saveBytes(byte[] bytes, int length, boolean overwrite) throws IOException {
+               if(overwrite) writePosition = 0;
+               int result = writePosition;
+               long start = System.nanoTime();
+               Set<OpenOption> options = writePosition == 0 ? CREATE_OPTIONS : APPEND_OPTIONS;
+               
+               ByteBuffer bb = ByteBuffer.wrap(bytes, 0, length);
+               try (FileChannel fc = FileChannel.open(path, options, NO_ATTRIBUTES)) {
+            fc.write(bb);
+               }
+               
+        writePosition += length;
+               if(TRACE_PERF) {
+                       long duration = System.nanoTime()-start;
+                       double ds = 1e-9*duration;
+                       System.err.println("Wrote " + bytes.length + " bytes @ " + 1e-6*bytes.length / ds + "MB/s");
+               }
+               return result;
+       }
+
+    public synchronized byte[] readBytes(int offset, int length) throws IOException {
+        long start = System.nanoTime();
+        try (SeekableByteChannel channel = Files.newByteChannel(path)) {
+            channel.position(offset);
+            ByteBuffer buf = ByteBuffer.allocate(length);
+            int read = 0;
+            while (read < length) {
+                read += channel.read(buf);
+            }
+            byte[] result = buf.array();
+            if (result.length != length)
+                System.err.println("readBytes: expected " + length + " bytes but got " + result.length);
+            if (TRACE_PERF) {
+                long duration = System.nanoTime() - start;
+                double ds = 1e-9 * duration;
+                System.err.println("Read " + result.length + " bytes @ " + 1e-6 * result.length / ds + "MB/s");
+            }
+            return result;
+        }
+    }
+
+       public static void syncPath(Path f) throws IOException {
+               // "rw" mode (without 's') appears sufficient here according to a unit test on Windows; getFD().sync() forces the flush
+               try (RandomAccessFile raf = new RandomAccessFile(f.toFile(), "rw")) {
+                       raf.getFD().sync();
+               }
+       }
+
+       static void uncheckedSyncPath(Path f) {
+               try {
+                       syncPath(f);
+               } catch (IOException e) {
+                       throw new RuntimeIOException(e);
+               }
+       }
+
+       public static void main(String[] args) throws Exception {
+
+               byte[] buf = new byte[1024*1024];
+               
+               long s = System.nanoTime();
+               
+               Path test = Paths.get("e:/work/test.dat");
+               OutputStream fs = Files.newOutputStream(test);
+               OutputStream os = new BufferedOutputStream(fs, 128*1024);
+               
+               for(int i=0;i<40;i++) {
+                       os.write(buf);
+               }
+               
+               os.flush();
+               //fs.getFD().sync();
+               os.close();
+               
+               syncPath(test);
+               
+               long duration = System.nanoTime()-s;
+               System.err.println("Took " + 1e-6*duration + "ms.");
+               
+               
+       }
+       
+}
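
FileIO appends by default and returns the offset at which each payload was written, matching the offset/length bookkeeping seen in ClusterManager above; a round-trip usage sketch with a hypothetical file name:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Paths;

    import org.simantics.acorn.FileIO;

    public class FileIOUsage {
        public static void main(String[] args) throws IOException {
            FileIO io = FileIO.get(Paths.get("example.dat")); // hypothetical file
            byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
            int first = io.saveBytes(payload, payload.length, true);   // start of file
            int second = io.saveBytes(payload, payload.length, false); // appended
            byte[] back = io.readBytes(second, payload.length);
            System.out.println(new String(back, StandardCharsets.UTF_8)
                    + " at offsets " + first + " and " + second);
        }
    }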
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/GraphClientImpl2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/GraphClientImpl2.java
new file mode 100644 (file)
index 0000000..2796e3e
--- /dev/null
@@ -0,0 +1,708 @@
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.internal.ClusterUpdateProcessorBase;
+import org.simantics.acorn.internal.UndoClusterUpdateProcessor;
+import org.simantics.acorn.lru.ClusterInfo;
+import org.simantics.acorn.lru.ClusterStreamChunk;
+import org.simantics.acorn.lru.ClusterUpdateOperation;
+import org.simantics.acorn.lru.ClusterChangeSet.Entry;
+import org.simantics.db.ClusterCreator;
+import org.simantics.db.Database;
+import org.simantics.db.ServiceLocator;
+import org.simantics.db.common.utils.Logger;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.exception.SDBException;
+import org.simantics.db.server.ProCoreException;
+import org.simantics.db.service.ClusterSetsSupport;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.datastructures.Pair;
+import org.simantics.utils.logging.TimeLogger;
+
+import gnu.trove.map.hash.TLongObjectHashMap;
+
+public class GraphClientImpl2 implements Database.Session {
+       
+       public static final boolean DEBUG = false;
+
+       public final ClusterManager clusters;
+       
+       private TransactionManager transactionManager = new TransactionManager();
+       private ExecutorService executor = Executors.newSingleThreadExecutor(new ClientThreadFactory("Core Main Program", false));
+       private ExecutorService saver = Executors.newSingleThreadExecutor(new ClientThreadFactory("Core Snapshot Saver", true));
+
+       private static GraphClientImpl2 INSTANCE;
+       private Path dbFolder;
+       private final Database database;
+       private ServiceLocator locator;
+       private MainProgram mainProgram;
+
+       static class ClientThreadFactory implements ThreadFactory {
+               
+               final String name;
+               final boolean daemon;
+               
+               public ClientThreadFactory(String name, boolean daemon) {
+                       this.name = name;
+                       this.daemon = daemon;
+               }
+               
+               @Override
+               public Thread newThread(Runnable r) {
+                       Thread thread = new Thread(r, name);
+                       thread.setDaemon(daemon);
+                       return thread;
+               }
+       }
+
+       public GraphClientImpl2(Database database, Path dbFolder, ServiceLocator locator) throws IOException {
+           this.database = database;
+           this.dbFolder = dbFolder;
+           this.locator = locator;
+           this.clusters = new ClusterManager(dbFolder);
+           load();
+           ClusterSetsSupport cssi = locator.getService(ClusterSetsSupport.class); 
+           cssi.updateReadAndWriteDirectories(clusters.lastSessionDirectory, clusters.workingDirectory);
+           mainProgram = new MainProgram(this, clusters);
+           executor.execute(mainProgram);
+           INSTANCE = this;
+       }
+
+       public Path getDbFolder() {
+           return dbFolder;
+       }
+
+       public void tryMakeSnapshot() throws IOException {
+               
+           if (isClosing)
+               return;
+           
+               saver.execute(new Runnable() {
+
+                       @Override
+                       public void run() {
+                               Transaction tr = null;
+                               try {
+                                       // First take a write transaction
+                                       tr = askWriteTransaction(-1);
+                                       // Then make sure that MainProgram is idling
+                                       mainProgram.mutex.acquire();
+                                       try {
+                                               synchronized(mainProgram) {
+                                                       if(mainProgram.operations.isEmpty()) {
+                                                               makeSnapshot(false);
+                                                       } else {
+                                                               // MainProgram is becoming busy again - delay snapshotting
+                                                               return;
+                                                       }
+                                               }
+                                       } finally {
+                                               mainProgram.mutex.release();
+                                       }
+                               } catch (IOException e) {
+                                       Logger.defaultLogError(e);
+                               } catch (ProCoreException e) {
+                                       Logger.defaultLogError(e);
+                               } catch (InterruptedException e) {
+                                       Logger.defaultLogError(e);
+                               } finally {
+                                       try {
+                                               if(tr != null)
+                                                       endTransaction(tr.getTransactionId());
+                                       } catch (ProCoreException e) {
+                                               Logger.defaultLogError(e);
+                                       }
+                               }
+                       }
+                       
+               });
+       }
+       
+    public void makeSnapshot(boolean force) throws IOException {
+        clusters.makeSnapshot(locator, force);
+    }
+       
+       public <T> T clone(ClusterUID uid, ClusterCreator creator) throws DatabaseException {
+               return clusters.clone(uid, creator);
+       }
+
+       public void load() throws IOException {
+               clusters.load();
+       }
+       
+       @Override
+       public Database getDatabase() {
+               return database;
+       }
+
+       private boolean closed = false;
+       private boolean isClosing = false;
+       
+       @Override
+       public void close() throws ProCoreException {
+           System.err.println("Closing " + this + " and mainProgram " + mainProgram);
+               if(!closed && !isClosing) {
+                   isClosing = true;
+                       try {
+                               makeSnapshot(true);
+                               
+                               mainProgram.close();
+                               clusters.shutdown();
+                               executor.shutdown();
+                               saver.shutdown();
+                               boolean executorTerminated = executor.awaitTermination(500, TimeUnit.MILLISECONDS);
+                               boolean saverTerminated = saver.awaitTermination(500, TimeUnit.MILLISECONDS);
+                               
+                               System.err.println("executorTerminated=" + executorTerminated + ", saverTerminated=" + saverTerminated);
+                               
+                               INSTANCE = null;
+                               mainProgram = null;
+                               executor = null;
+                               saver = null;
+                               
+                       } catch (IOException | InterruptedException e) {
+                               throw new ProCoreException(e);
+                       }
+               }
+               closed = true;
+               //impl.close();
+       }
+
+       @Override
+       public void open() throws ProCoreException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public boolean isClosed() throws ProCoreException {
+               return closed;
+       }
+       
+       @Override
+       public void acceptCommit(long transactionId, long changeSetId, byte[] metadata) throws ProCoreException {
+               
+               clusters.state.headChangeSetId++;
+
+               long committedChangeSetId = changeSetId + 1;
+               
+               clusters.commitChangeSet(committedChangeSetId, metadata);
+               
+               clusters.state.transactionId = transactionId;
+               
+               mainProgram.committed();
+               
+               TimeLogger.log("Accepted commit");
+               
+       }
+
+       @Override
+       public long cancelCommit(long transactionId, long changeSetId,
+                       byte[] metadata, OnChangeSetUpdate onChangeSetUpdate)
+                       throws ProCoreException {
+           System.err.println("GraphClientImpl2.cancelCommit() called! This is experimental and may cause havoc!");
+           try {
+            undo(new long[] {changeSetId}, onChangeSetUpdate);
+        } catch (SDBException e) {
+            e.printStackTrace();
+            throw new ProCoreException(e);
+        }
+           clusters.state.headChangeSetId++;
+           return clusters.state.headChangeSetId;
+       }
+
+       @Override
+       public Transaction askReadTransaction() throws ProCoreException {
+               return transactionManager.askReadTransaction();
+       }
+
+       enum TransactionState {
+               IDLE,WRITE,READ
+       }
+       
+       class TransactionRequest {
+               public TransactionState state;
+               public Semaphore semaphore;
+               public TransactionRequest(TransactionState state, Semaphore semaphore) {
+                       this.state = state;
+                       this.semaphore = semaphore;
+               }
+       }
+
+       class TransactionManager {
+
+               private TransactionState currentTransactionState = TransactionState.IDLE;
+               
+               private int reads = 0;
+               
+               LinkedList<TransactionRequest> requests = new LinkedList<TransactionRequest>();
+               
+               TLongObjectHashMap<TransactionRequest> requestMap = new TLongObjectHashMap<TransactionRequest>();
+               
+               private synchronized Transaction makeTransaction(TransactionRequest req) {
+                       
+                       final int csId = clusters.state.headChangeSetId;
+                       final long trId = clusters.state.transactionId+1;
+                       requestMap.put(trId, req);
+                       return new Transaction() {
+                               
+                               @Override
+                               public long getTransactionId() {
+                                       return trId;
+                               }
+                               
+                               @Override
+                               public long getHeadChangeSetId() {
+                                       return csId;
+                               }
+                       };
+               }
+               
+               /*
+                * This method cannot be synchronized since it waits and must support multiple entries
+                * by query thread(s) and internal transactions such as snapshot saver
+                */
+               public Transaction askReadTransaction() throws ProCoreException {
+               
+                       Semaphore semaphore = new Semaphore(0);
+                       
+                       TransactionRequest req = queue(TransactionState.READ, semaphore);
+                       
+                       try {
+                               semaphore.acquire();
+                       } catch (InterruptedException e) {
+                               throw new ProCoreException(e);
+                       }
+                       
+                       return makeTransaction(req);
+
+               }
+
+               private synchronized void dispatch() {
+                       TransactionRequest r = requests.removeFirst();
+                       if(r.state == TransactionState.READ) reads++;
+                       r.semaphore.release();
+               }
+               
+               private synchronized void processRequests() {
+                       
+                       while(true) {
+
+                               if(requests.isEmpty()) return;
+                               TransactionRequest req = requests.peek();
+
+                               if(currentTransactionState == TransactionState.IDLE) {
+                               
+                                       // Accept anything while IDLE
+                                       currentTransactionState = req.state;
+                                       dispatch();
+                                       
+                               } else if (currentTransactionState == TransactionState.READ) {
+                                       
+                                       if(req.state == currentTransactionState) {
+
+                                               // Allow other reads
+                                               dispatch();
+
+                                       } else {
+                                               
+                                               // Wait
+                                               return;
+                                               
+                                       }
+                                       
+                               }  else if (currentTransactionState == TransactionState.WRITE) {
+
+                                       // Wait
+                                       return;
+                                       
+                               }
+                               
+                       }
+                       
+               }
+               
+               private synchronized TransactionRequest queue(TransactionState state, Semaphore semaphore) {
+                       TransactionRequest req = new TransactionRequest(state, semaphore); 
+                       requests.addLast(req);
+                       processRequests();
+                       return req;
+               }
+               
+               /*
+                * This method cannot be synchronized since it waits and must support multiple entries
+                * by query thread(s) and internal transactions such as snapshot saver
+                */
+               public Transaction askWriteTransaction()
+                               throws ProCoreException {
+                       
+                       Semaphore semaphore = new Semaphore(0);
+                       
+                       TransactionRequest req = queue(TransactionState.WRITE, semaphore);
+                       
+                       try {
+                               semaphore.acquire();
+                       } catch (InterruptedException e) {
+                               throw new ProCoreException(e);
+                       }
+                       
+                       mainProgram.startTransaction(clusters.state.headChangeSetId+1);
+                       
+                       return makeTransaction(req);
+                       
+               }
+               
+               public synchronized long endTransaction(long transactionId) throws ProCoreException {
+                       
+                       TransactionRequest req = requestMap.remove(transactionId);
+                       if(req.state == TransactionState.WRITE) {
+                               currentTransactionState = TransactionState.IDLE;
+                               processRequests();
+                       } else {
+                               reads--;
+                               if(reads == 0) {
+                                       currentTransactionState = TransactionState.IDLE;
+                                       processRequests();
+                               }
+                       }
+                       return clusters.state.transactionId;
+               }
+
+       }
+       
+       @Override
+       public Transaction askWriteTransaction(final long transactionId)
+                       throws ProCoreException {
+               return transactionManager.askWriteTransaction();
+       }
+
+       @Override
+       public long endTransaction(long transactionId) throws ProCoreException {
+               return transactionManager.endTransaction(transactionId);
+       }
+
+       @Override
+       public String execute(String command) throws ProCoreException {
+               // This is called only by WriteGraphImpl.commitAccessorChanges
+               // We can ignore this in Acorn
+               return "";
+       }
+
+       @Override
+       public byte[] getChangeSetMetadata(long changeSetId)
+                       throws ProCoreException {
+               return clusters.getMetadata(changeSetId);
+       }
+
+       @Override
+       public ChangeSetData getChangeSetData(long minChangeSetId,
+                       long maxChangeSetId, OnChangeSetUpdate onChangeSetupate)
+                       throws ProCoreException {
+               
+               new Exception("GetChangeSetDataFunction " + minChangeSetId + " " + maxChangeSetId).printStackTrace();
+               return null;
+               
+       }
+
+       @Override
+       public ChangeSetIds getChangeSetIds() throws ProCoreException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public Cluster getCluster(byte[] clusterId) throws ProCoreException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterChanges getClusterChanges(long changeSetId, byte[] clusterId)
+                       throws ProCoreException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterIds getClusterIds() throws ProCoreException {
+               return clusters.getClusterIds();
+       }
+
+       @Override
+       public Information getInformation() throws ProCoreException {
+               return new Information() {
+
+                       @Override
+                       public String getServerId() {
+                               return "server";
+                       }
+
+                       @Override
+                       public String getProtocolId() {
+                               return "";
+                       }
+
+                       @Override
+                       public String getDatabaseId() {
+                               return "database";
+                       }
+
+                       @Override
+                       public long getFirstChangeSetId() {
+                               return 0;
+                       }
+                       
+               };
+       }
+
+       @Override
+       public Refresh getRefresh(long changeSetId) throws ProCoreException {
+               
+               final ClusterIds ids = getClusterIds();
+               
+               return new Refresh() {
+
+                       @Override
+                       public long getHeadChangeSetId() {
+                               return clusters.state.headChangeSetId;
+                       }
+
+                       @Override
+                       public long[] getFirst() {
+                               return ids.getFirst();
+                       }
+
+                       @Override
+                       public long[] getSecond() {
+                               return ids.getSecond();
+                       }
+                       
+               };
+               
+       }
+
+       public byte[] getResourceFile(final byte[] clusterUID, final int resourceIndex) throws ProCoreException {
+               return clusters.getResourceFile(clusterUID, resourceIndex);
+       }
+
+       @Override
+       public ResourceSegment getResourceSegment(final byte[] clusterUID,
+                       final int resourceIndex, final long segmentOffset, short segmentSize) throws ProCoreException {
+               
+               return clusters.getResourceSegment(clusterUID, resourceIndex, segmentOffset, segmentSize);
+
+       }
+
+       @Override
+       public long reserveIds(int count) throws ProCoreException {
+               return clusters.state.reservedIds++;
+       }
+
+       @Override
+       public void updateCluster(byte[] operations) throws ProCoreException {
+
+               ClusterUpdateOperation operation = new ClusterUpdateOperation(clusters, operations);
+               ClusterInfo info = clusters.clusterLRU.getOrCreate(operation.uid, true);
+               if(info == null) throw new IllegalStateException();
+               info.acquireMutex();
+               try {
+                       info.scheduleUpdate();
+                       mainProgram.schedule(operation);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       info.releaseMutex();
+               }
+
+       }
+
+       private UndoClusterUpdateProcessor getUndoCSS(String ccsId) throws DatabaseException {
+
+               String[] ss = ccsId.split("\\.");
+               String chunkKey = ss[0];
+               int chunkOffset = Integer.parseInt(ss[1]);
+               ClusterStreamChunk chunk = clusters.streamLRU.getWithoutMutex(chunkKey);
+               if(chunk == null) throw new IllegalStateException("Cluster Stream Chunk " + chunkKey + " was not found.");
+               chunk.acquireMutex();
+               try {
+                       return chunk.getUndoProcessor(clusters, chunkOffset, ccsId);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       chunk.releaseMutex();
+               }
+               
+       }
+       
+       private void performUndo(String ccsId, ArrayList<Pair<ClusterUID, byte[]>> clusterChanges, UndoClusterSupport support) throws ProCoreException, DatabaseException {
+
+               UndoClusterUpdateProcessor proc = getUndoCSS(ccsId);
+
+               int clusterKey = clusters.getClusterKeyByClusterUIDOrMakeWithoutMutex(proc.getClusterUID());
+
+               clusters.clusterLRU.acquireMutex();
+               try {
+
+                       ClusterChange cs = new ClusterChange(clusterChanges, proc.getClusterUID());
+                       for(int i=0;i<proc.entries.size();i++) {
+                               
+                               Entry e = proc.entries.get(proc.entries.size() - 1 - i);
+                               e.process(clusters, cs, clusterKey);
+                               
+                       }
+                       
+                       cs.flush();
+
+               } finally {
+                       clusters.clusterLRU.releaseMutex();
+               }
+               
+       }
+       
+       @Override
+       public boolean undo(long[] changeSetIds, OnChangeSetUpdate onChangeSetUpdate) throws SDBException {
+               
+               final ArrayList<Pair<ClusterUID, byte[]>> clusterChanges = new ArrayList<Pair<ClusterUID, byte[]>>();
+               
+               UndoClusterSupport support = new UndoClusterSupport(clusters);
+               
+               final int changeSetId = clusters.state.headChangeSetId;
+               
+               if(ClusterUpdateProcessorBase.DEBUG)
+                       System.err.println(" === BEGIN UNDO ===");
+               
+               for(int i=0;i<changeSetIds.length;i++) {
+                       final long id = changeSetIds[changeSetIds.length-1-i];
+                       ArrayList<String> ccss = clusters.getChanges(id);
+                       for(int j=0;j<ccss.size();j++) {
+                               try {
+                                       if(ClusterUpdateProcessorBase.DEBUG)
+                                               System.err.println("performUndo " + ccss.get(ccss.size()-j-1));
+                                       performUndo(ccss.get(ccss.size()-j-1), clusterChanges, support);
+                               } catch (DatabaseException e) {
+                                       e.printStackTrace();
+                               }
+                       }
+               }
+
+               if(ClusterUpdateProcessorBase.DEBUG)
+                       System.err.println(" === END UNDO ===");
+
+               for(int i=0;i<clusterChanges.size();i++) {
+                       
+                       final int changeSetIndex = i;
+                       
+                       final Pair<ClusterUID, byte[]> pair = clusterChanges.get(i);
+                       
+                       final ClusterUID cuid = pair.first;
+                       final byte[] data = pair.second;
+
+                       onChangeSetUpdate.onChangeSetUpdate(new ChangeSetUpdate() {
+
+                               @Override
+                               public long getChangeSetId() {
+                                       return changeSetId;
+                               }
+
+                               @Override
+                               public int getChangeSetIndex() {
+                                       return 0;
+                               }
+
+                               @Override
+                               public int getNumberOfClusterChangeSets() {
+                                       return clusterChanges.size();
+                               }
+
+                               @Override
+                               public int getIndexOfClusterChangeSet() {
+                                       return changeSetIndex;
+                               }
+
+                               @Override
+                               public byte[] getClusterId() {
+                                       return cuid.asBytes();
+                               }
+
+                               @Override
+                               public boolean getNewCluster() {
+                                       return false;
+                               }
+
+                               @Override
+                               public byte[] getData() {
+                                       return data;
+                               }
+
+                       });
+
+               }
+
+               
+               return false;
+               
+       }
+       
+       public static GraphClientImpl2 getInstance() {
+           return INSTANCE;
+       }
+       
+       public ServiceLocator getServiceLocator() {
+           return locator;
+       }
+
+    @Override
+    public boolean refreshEnabled() {
+        return false;
+    }
+
+}
+
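
GraphClientImpl2.TransactionManager implements a FIFO reader-writer admission
policy: while IDLE any request is granted, while READ further reads are granted
but a write must wait behind them, and while WRITE everything queues. A condensed
sketch of the same policy (all names here are assumptions for illustration, not
the committed API):

    import java.util.LinkedList;
    import java.util.concurrent.Semaphore;

    class AdmissionPolicySketch {

        enum State { IDLE, READ, WRITE }

        static class Request {
            final State state;
            final Semaphore granted = new Semaphore(0);
            Request(State state) { this.state = state; }
        }

        private State current = State.IDLE;
        private int reads = 0;
        private final LinkedList<Request> queue = new LinkedList<>();

        // Blocks until granted, like askReadTransaction/askWriteTransaction.
        void acquire(Request r) throws InterruptedException {
            synchronized (this) {
                queue.addLast(r);
                pump();
            }
            r.granted.acquire();
        }

        // Like endTransaction: a write always idles; the last read idles.
        synchronized void release(State state) {
            if (state == State.WRITE || --reads == 0) {
                current = State.IDLE;
                pump();
            }
        }

        private synchronized void pump() {
            while (!queue.isEmpty()) {
                Request head = queue.peek();
                if (current == State.IDLE) {
                    current = head.state;           // accept anything while IDLE
                    grant();
                } else if (current == State.READ && head.state == State.READ) {
                    grant();                        // allow concurrent readers
                } else {
                    return;                         // writer active, or a write waits for readers
                }
            }
        }

        private void grant() {
            Request r = queue.removeFirst();
            if (r.state == State.READ) reads++;
            r.granted.release();
        }
    }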
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/HeadState.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/HeadState.java
new file mode 100644 (file)
index 0000000..56ef481
--- /dev/null
@@ -0,0 +1,105 @@
+package org.simantics.acorn;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import org.simantics.databoard.Bindings;
+import org.simantics.databoard.binding.mutable.MutableVariant;
+import org.simantics.databoard.serialization.Serializer;
+
+public class HeadState {
+
+    public int headChangeSetId = 0;
+    public long transactionId = 1;
+    public long reservedIds = 3;
+
+    public ArrayList<String> clusters = new ArrayList<>();
+    public ArrayList<String> files = new ArrayList<>();
+    public ArrayList<String> stream = new ArrayList<>();
+    public ArrayList<String> cs = new ArrayList<>();
+//    public ArrayList<String> ccs = new ArrayList<String>();
+
+    public static HeadState load(Path directory) throws InvalidHeadStateException {
+        Path f = directory.resolve("head.state");
+        
+        try {
+            byte[] bytes = Files.readAllBytes(f);
+            MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
+            int digestLength = sha1.getDigestLength();
+            
+            sha1.update(bytes, digestLength, bytes.length - digestLength);
+            byte[] newChecksum = sha1.digest();
+            if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) {
+                throw new InvalidHeadStateException(
+                        "Checksum " + Arrays.toString(newChecksum) + " does not match expected "
+                                + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + f.toAbsolutePath());
+            }
+            
+            HeadState object = (HeadState) org.simantics.databoard.Files.readFile(new ByteArrayInputStream(bytes, digestLength, bytes.length - digestLength), Bindings.getBindingUnchecked(HeadState.class));
+            return object;
+            
+        } catch (IOException i) {
+            return new HeadState();
+//            throw new InvalidHeadStateException(i);
+        } catch (NoSuchAlgorithmException e) {
+            throw new Error("SHA-1 Algorithm not found", e);
+        } catch (Throwable t) {
+            throw new InvalidHeadStateException(t);
+        }
+    }
+    
+    public void save(Path directory) throws IOException {
+        Path f = directory.resolve("head.state");
+        try {
+            MutableVariant v = new MutableVariant(Bindings.getBindingUnchecked(HeadState.class), this);
+            Serializer s = Bindings.getSerializerUnchecked(Bindings.VARIANT);
+            // Serialize to memory first so the SHA-1 checksum can be computed over
+            // the payload and written ahead of it.
+            byte[] bytes = s.serialize(v);
+            
+            MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
+            sha1.update(bytes);
+            byte[] checksum = sha1.digest();
+            
+            try (OutputStream out = Files.newOutputStream(f)) {
+                out.write(checksum);
+                out.write(bytes);
+            }
+            FileIO.syncPath(f);
+        } catch (NoSuchAlgorithmException e) {
+            throw new Error("SHA-1 digest not found, should not happen", e);
+        }
+    }
+
+    public static void validateHeadStateIntegrity(Path headState) throws InvalidHeadStateException, IOException {
+        try {
+            byte[] bytes = Files.readAllBytes(headState);
+            MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
+            int digestLength = sha1.getDigestLength();
+            sha1.update(bytes, digestLength, bytes.length - digestLength);
+            byte[] newChecksum = sha1.digest();
+            if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) {
+                throw new InvalidHeadStateException(
+                        "Checksum " + Arrays.toString(newChecksum) + " does not match expected "
+                                + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + headState.toAbsolutePath());
+            }
+        } catch (NoSuchAlgorithmException e) {
+            throw new Error("SHA-1 digest not found, should not happen", e);
+        }
+    }
+}
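
On disk, head.state is framed as a 20-byte SHA-1 digest of the payload followed
by the payload itself; both load() and validateHeadStateIntegrity() re-derive the
digest over the tail and compare it to the prefix. A standalone verification
sketch of that framing (class name assumed):

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.security.MessageDigest;
    import java.util.Arrays;

    public class HeadStateCheck {
        public static boolean isValid(Path headState) throws Exception {
            byte[] bytes = Files.readAllBytes(headState);
            MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
            int n = sha1.getDigestLength();              // 20 bytes for SHA-1
            sha1.update(bytes, n, bytes.length - n);     // digest of the payload
            return Arrays.equals(sha1.digest(),
                    Arrays.copyOfRange(bytes, 0, n));    // compare with the prefix
        }
    }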
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/InvalidHeadStateException.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/InvalidHeadStateException.java
new file mode 100644 (file)
index 0000000..2c342b7
--- /dev/null
@@ -0,0 +1,27 @@
+package org.simantics.acorn;
+
+public class InvalidHeadStateException extends Exception {
+
+    private static final long serialVersionUID = -7291859180968235955L;
+
+    public InvalidHeadStateException() {
+        super();
+    }
+
+    public InvalidHeadStateException(String message, Throwable cause, boolean enableSuppression,
+            boolean writableStackTrace) {
+        super(message, cause, enableSuppression, writableStackTrace);
+    }
+
+    public InvalidHeadStateException(String message, Throwable cause) {
+        super(message, cause);
+    }
+
+    public InvalidHeadStateException(String message) {
+        super(message);
+    }
+
+    public InvalidHeadStateException(Throwable cause) {
+        super(cause);
+    }
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/MainProgram.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/MainProgram.java
new file mode 100644 (file)
index 0000000..f39a498
--- /dev/null
@@ -0,0 +1,342 @@
+package org.simantics.acorn;
+
+import java.io.Closeable;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+
+import org.simantics.acorn.lru.ClusterStreamChunk;
+import org.simantics.acorn.lru.ClusterUpdateOperation;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.logging.TimeLogger;
+
+public class MainProgram implements Runnable, Closeable {
+
+       private static final int CLUSTER_THREADS = 4;
+       private static final int CHUNK_CACHE_SIZE = 100;
+
+       private final GraphClientImpl2 client;
+       private final ClusterManager clusters;
+       private final ExecutorService[] clusterUpdateThreads;
+       private final List<ClusterUpdateOperation>[] updateSchedules;
+       
+       private int residentOperationBytes = 0;
+       private long currentChangeSetId = -1;
+       private int nextChunkId = 0;
+       private boolean alive = true;
+       private Semaphore deathBarrier = new Semaphore(0);
+
+       final Semaphore mutex = new Semaphore(1);
+       final LinkedList<ClusterStreamChunk> operations = new LinkedList<>();
+
+       static class ClusterThreadFactory implements ThreadFactory {
+
+               final String name;
+               final boolean daemon;
+
+               public ClusterThreadFactory(String name, boolean daemon) {
+                       this.name = name;
+                       this.daemon = daemon;
+               }
+
+               @Override
+               public Thread newThread(Runnable r) {
+                       Thread thread = new Thread(r, name);
+                       thread.setDaemon(daemon);
+                       return thread;
+               }
+       }
+
+       public MainProgram(GraphClientImpl2 client, ClusterManager clusters) {
+
+               this.client = client;
+               this.clusters = clusters;
+               this.clusterUpdateThreads = new ExecutorService[CLUSTER_THREADS];
+               this.updateSchedules = new ArrayList[CLUSTER_THREADS];
+               for(int i=0;i<clusterUpdateThreads.length;i++) {
+                       clusterUpdateThreads[i] = Executors.newSingleThreadExecutor(new ClusterThreadFactory("Cluster Updater " + (i+1), false));
+                       updateSchedules[i] = new ArrayList<ClusterUpdateOperation>();
+               }
+       }
+
+       public void startTransaction(long id) {
+               currentChangeSetId = id;
+               nextChunkId = 0;
+       }
+
+       private static Comparator<ClusterUID> clusterComparator = new Comparator<ClusterUID>() {
+
+               @Override
+               public int compare(ClusterUID o1, ClusterUID o2) {
+                       return Long.compare(o1.second, o2.second);
+               }
+       };
+
+       @Override
+       public void run() {
+               try {
+
+                       mutex.acquire();
+                       main:
+                       while(alive) {
+
+                               TreeMap<ClusterUID, List<ClusterUpdateOperation>> updates = new TreeMap<ClusterUID, List<ClusterUpdateOperation>>(clusterComparator);
+
+                               synchronized(MainProgram.this) {
+
+                                       while(!operations.isEmpty() && updates.size() < 100) {
+
+                                               ClusterStreamChunk chunk = operations.pollFirst();
+
+                                               for(int i=chunk.nextToProcess;i<chunk.operations.size();i++) {
+                                                       ClusterUpdateOperation o = chunk.operations.get(i);
+                                                       ClusterUID uid = o.uid;
+                                                       List<ClusterUpdateOperation> ops = updates.get(uid);
+                                                       if(ops == null) {
+                                                               ops = new ArrayList<ClusterUpdateOperation>();
+                                                               updates.put(uid, ops);
+                                                       }
+                                                       ops.add(o);
+                                               }
+
+                                               chunk.nextToProcess = chunk.operations.size();
+
+                                               if(!chunk.isCommitted()) {
+                                                       assert(operations.isEmpty());
+                                                       operations.add(chunk);
+                                                       break;
+                                               }
+
+                                       }
+
+                                       if(updates.isEmpty()) {
+                                               try {
+                                                       long start = System.nanoTime();
+                                                       mutex.release();
+                                                       MainProgram.this.wait(5000);
+                                                       mutex.acquire();
+                                                       if (!alive)
+                                                               break main;
+                                                       long duration = System.nanoTime()-start;
+                                                       if(duration > 4000000000L) {
+
+                                                               // Was this a time-out or a new stream request?
+                                                               if(operations.isEmpty()) {
+
+                                                                       /*
+                                                                        * We are idling here.
+                                                                        * Flush all caches gradually
+                                                                        */
+
+                                                                       // Write pending cs to disk
+                                                                       boolean written = clusters.csLRU.swapForced();
+                                                                       while(written) {
+                                                                               if(!updates.isEmpty()) break;
+                                                                               written = clusters.csLRU.swapForced();
+                                                                       }
+                                                                       // Write pending chunks to disk
+                                                                       written = clusters.streamLRU.swapForced();
+                                                                       while(written) {
+                                                                               if(!updates.isEmpty()) break;
+                                                                               written = clusters.streamLRU.swapForced();
+                                                                       }
+                                                                       // Write pending files to disk
+                                                                       written = clusters.fileLRU.swapForced();
+                                                                       while(written) {
+                                                                               if(!updates.isEmpty()) break;
+                                                                               written = clusters.fileLRU.swapForced();
+                                                                       }
+                                                                       // Write pending clusters to disk
+                                                                       written = clusters.clusterLRU.swapForced();
+                                                                       while(written) {
+                                                                               if(!updates.isEmpty()) break;
+                                                                               written = clusters.clusterLRU.swapForced();
+                                                                       }
+
+                                                                       client.tryMakeSnapshot();
+                                                               }
+                                                       }
+                                               } catch (InterruptedException e) {
+                                                       e.printStackTrace();
+                                               }
+
+                                       }
+
+                               }
+
+//                             long sss = System.nanoTime();
+
+                               for(int i=0;i<CLUSTER_THREADS;i++)
+                                       updateSchedules[i].clear();
+
+                               final Semaphore s = new Semaphore(0);
+
+                               for(Map.Entry<ClusterUID, List<ClusterUpdateOperation>> entry : updates.entrySet()) {
+                                       ClusterUID key = entry.getKey();
+                                       int hash = key.hashCode() & (clusterUpdateThreads.length-1);
+                                       updateSchedules[hash].addAll(entry.getValue());
+                               }
+
+                               //                              final AtomicLong elapsed = new AtomicLong(0);
+                               int acquireAmount = 0;
+                               for(int i=0;i<CLUSTER_THREADS;i++) {
+                                       final List<ClusterUpdateOperation> ops = updateSchedules[i];
+                                       if (!ops.isEmpty()) {
+                                               acquireAmount++;
+                                               clusterUpdateThreads[i].execute(() -> {
+
+                                                       //long st = System.nanoTime();
+                                                       for(ClusterUpdateOperation op : ops) {
+                                                               op.run();
+                                                       }
+                                                       s.release();
+                                                       //                                                      long duration = System.nanoTime()-st;
+                                                       //                                                      elapsed.addAndGet(duration);
+                                                       //                                                      double dur = 1e-9*duration;
+                                                       //                                                      if(dur > 0.05)
+                                                       //                                                              System.err.println("duration=" + dur + "s. " + ops.size());
+                                               });
+                                       }
+                               }
+
+                               s.acquire(acquireAmount);
+
+                               /*
+                                * Here we are actively processing updates from client.
+                                * Maintain necessary caching here.
+                                */
+
+                               clusters.streamLRU.acquireMutex();
+                               try {
+                                       swapChunks();
+                               } catch (Throwable t) {
+                                       throw new IllegalStateException(t);
+                               } finally {
+                                       clusters.streamLRU.releaseMutex();
+                               }
+                               clusters.csLRU.acquireMutex();
+                               try {
+                                       swapCS();
+                               } catch (Throwable t) {
+                                       throw new IllegalStateException(t);
+                               } finally {
+                                       clusters.csLRU.releaseMutex();
+                               }
+
+                               TimeLogger.log("Performed updates");
+
+                       }
+
+               } catch (Throwable t) {
+                       t.printStackTrace();
+               } finally {
+                       deathBarrier.release();
+               }
+
+       }
+
+       /*
+        * Mutex for streamLRU is assumed here
+        * 
+        */
+       private void swapChunks() {
+
+               // Cache chunks during update operations
+               boolean written = clusters.streamLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE);
+               while(written) {
+                       written = clusters.streamLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE);
+               }
+
+       }
+
+       private void swapCS() {
+
+               // Cache chunks during update operations
+               boolean written = clusters.csLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE);
+               while(written) {
+                       written = clusters.csLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE);
+               }
+
+       }
+
+       public synchronized void committed() {
+
+               ClusterStreamChunk last = operations.isEmpty() ? null : operations.getLast();
+        if (!alive) {
+            System.err.println("Trying to commit operation after MainProgram is closed! Operation is " + last);
+//          return;
+        }
+               if(last != null) last.commit();
+
+       }
+
+       public synchronized void schedule(ClusterUpdateOperation operation) {
+           if (!alive) {
+               System.err.println("Trying to schedule operation after MainProgram is closed! Operation is " + operation);
+//             return;
+           }
+               clusters.streamLRU.acquireMutex();
+
+               try {
+
+                       ClusterStreamChunk last = operations.isEmpty() ? null : operations.getLast();
+                       if(last == null || last.isCommitted()) {
+                               String id = "" + currentChangeSetId + "-" + nextChunkId++;
+                               last = new ClusterStreamChunk(clusters, clusters.streamLRU, id);
+                               operations.add(last);
+                       }
+
+                       String chunkId = last.getKey();
+                       int chunkOffset = last.operations.size();
+                       operation.scheduled(chunkId + "." + chunkOffset);
+
+                       last.addOperation(operation);
+
+                       swapChunks();
+
+                       notifyAll();
+
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+
+                       clusters.streamLRU.releaseMutex();
+
+               }
+
+       }
+
+    @Override
+    public void close() {
+        alive = false;
+        synchronized (this) {
+            notifyAll();
+        }
+        try {
+            deathBarrier.acquire();
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+        }
+
+        for (ExecutorService executor : clusterUpdateThreads)
+            executor.shutdown();
+
+        for (int i = 0; i < clusterUpdateThreads.length; i++) {
+            try {
+                ExecutorService executor  = clusterUpdateThreads[i];
+                executor.awaitTermination(500, TimeUnit.MILLISECONDS);
+                clusterUpdateThreads[i] = null;
+            } catch (InterruptedException e) {
+                e.printStackTrace();
+            }
+        }
+    }
+
+}
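
MainProgram.run() drains committed ClusterStreamChunks, groups the pending
operations per ClusterUID, and shards each cluster's list onto one of
CLUSTER_THREADS single-threaded executors via hashCode() & (N-1), so updates to
one cluster always execute in order on the same thread; the mask partitions
correctly only because N is a power of two. A reduced sketch of that sharding
step (the String keys and all names are illustrative):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Semaphore;

    public class ShardSketch {
        public static void main(String[] args) throws Exception {
            final int N = 4; // must stay a power of two for the mask below
            ExecutorService[] shards = new ExecutorService[N];
            for (int i = 0; i < N; i++)
                shards[i] = Executors.newSingleThreadExecutor();

            // Operations grouped per cluster key, as MainProgram groups per ClusterUID.
            Map<String, List<Runnable>> updates = new TreeMap<>();
            updates.computeIfAbsent("cluster-A", k -> new ArrayList<>()).add(() -> System.out.println("A1"));
            updates.computeIfAbsent("cluster-B", k -> new ArrayList<>()).add(() -> System.out.println("B1"));

            Semaphore done = new Semaphore(0);
            int submitted = 0;
            for (Map.Entry<String, List<Runnable>> e : updates.entrySet()) {
                int shard = e.getKey().hashCode() & (N - 1); // same key -> same thread
                submitted++;
                shards[shard].execute(() -> {
                    e.getValue().forEach(Runnable::run);
                    done.release();
                });
            }
            done.acquire(submitted); // join, as run() does with its Semaphore

            for (ExecutorService s : shards)
                s.shutdown();
        }
    }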
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/MainState.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/MainState.java
new file mode 100644 (file)
index 0000000..7733528
--- /dev/null
@@ -0,0 +1,135 @@
+package org.simantics.acorn;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.simantics.databoard.file.RuntimeIOException;
+import org.simantics.utils.FileUtils;
+
+public class MainState implements Serializable {
+
+    private static final long serialVersionUID = 6237383147637270225L;
+
+    public int headDir = 0;
+
+    public MainState() {
+    }
+
+    public MainState(int headDir) {
+        this.headDir = headDir;
+    }
+
+    public static MainState load(Path directory) throws IOException {
+        Files.createDirectories(directory);
+        Path f = directory.resolve("main.state");
+        try {
+            MainState state = null;
+            try (ObjectInputStream in = new ObjectInputStream(new BufferedInputStream(Files.newInputStream(f)))) {
+                state = (MainState) in.readObject();
+            }
+            while (true) {
+                Path last = directory.resolve(Integer.toString(state.headDir - 1));
+                try {
+                    Path headState = last.resolve("head.state");
+                    HeadState.validateHeadStateIntegrity(headState);
+                    break;
+                } catch (InvalidHeadStateException e) {
+                    e.printStackTrace();
+                    state.headDir--;
+                    uncheckedDeleteAll(last);
+                }
+            }
+            return state;
+        } catch(IOException i) {
+            return new MainState( findNewHeadState(directory) );
+        } catch(ClassNotFoundException c) {
+            throw new Error("MainState class not found", c);
+        } finally {
+            if (Files.exists(f)) {
+                Files.delete(f);
+            }
+        }
+    }
+
+    public void save(Path directory) throws IOException {
+        Path f = directory.resolve("main.state");
+        try (ObjectOutputStream out = new ObjectOutputStream(new BufferedOutputStream(Files.newOutputStream(f)))) {
+            out.writeObject(this);
+        }
+        FileIO.syncPath(f);
+    }
+
+    private static boolean isInteger(Path p) {
+        try {
+            Integer.parseInt(p.getFileName().toString());
+            return true;
+        } catch (NumberFormatException e) {
+            return false;
+        }
+    }
+
+    /**
+     * TODO: this does two things at once and should be split: it both searches
+     * for the newest valid head.state and cleans up invalid head directories.
+     *
+     * @param directory
+     * @return
+     * @throws IOException
+     */
+    private static int findNewHeadState(Path directory) throws IOException {
+        try (Stream<Path> s = Files.walk(directory, 1)) {
+            List<Path> reverseSortedPaths = s
+            .filter(p -> !p.equals(directory) && isInteger(p) && Files.isDirectory(p))
+            .sorted((p1, p2) -> {
+                int p1Name = Integer.parseInt(p1.getFileName().toString()); 
+                int p2Name = Integer.parseInt(p2.getFileName().toString());
+                return Integer.compare(p2Name, p1Name);
+            }).collect(Collectors.toList());
+
+            int largest = -1;
+            for (Path last : reverseSortedPaths) {
+                Path headState = last.resolve("head.state");
+                if (Files.exists(headState)) {
+                    try {
+                        HeadState.validateHeadStateIntegrity(headState);
+                        largest = safeParseInt(-1, last.getFileName().toString());
+                        break;
+                    } catch (IOException | InvalidHeadStateException e) {
+                        e.printStackTrace();
+                        uncheckedDeleteAll(last);
+                    }
+                } else {
+                    uncheckedDeleteAll(last);
+                }
+            }
+            // +1 because we want to return the next head version to use,
+            // not the latest existing version.
+            return largest + 1;
+        }
+    }
+
+    private static int safeParseInt(int defaultValue, String s) {
+        try {
+            return Integer.parseInt(s);
+        } catch (NumberFormatException e) {
+            return defaultValue;
+        }
+    }
+
+    private static void uncheckedDeleteAll(Path path) {
+        try {
+            FileUtils.deleteAll(path.toFile());
+        } catch (IOException e) {
+            throw new RuntimeIOException(e);
+        }
+    }
+
+}
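
MainState recovery walks the immediate child directories whose names are
integers, in descending numeric order, keeps the newest one whose head.state
validates, deletes the invalid ones, and returns that index plus one as the next
head version. A simplified sketch of the descending numeric scan (validation and
cleanup elided; names assumed):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Comparator;
    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    public class HeadDirScanSketch {
        static List<Integer> numericDirsDescending(Path root) throws IOException {
            try (Stream<Path> s = Files.walk(root, 1)) {
                return s.filter(p -> !p.equals(root) && Files.isDirectory(p))
                        .map(p -> p.getFileName().toString())
                        .filter(n -> n.matches("\\d+"))     // numeric names only
                        .map(Integer::parseInt)
                        .sorted(Comparator.reverseOrder())  // newest head first
                        .collect(Collectors.toList());
            }
        }
    }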
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/Persistable.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/Persistable.java
new file mode 100644 (file)
index 0000000..0d209b2
--- /dev/null
@@ -0,0 +1,11 @@
+package org.simantics.acorn;
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+public interface Persistable {
+       
+       void toFile(Path path) throws IOException;
+       void fromFile(byte[] data);
+       
+}
\ No newline at end of file
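
Persistable defines an asymmetric round trip: serialization targets a file path,
while deserialization receives raw bytes the caller has already read. A minimal
hypothetical implementor (not part of the commit):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    import org.simantics.acorn.Persistable;

    public class BlobState implements Persistable {

        private byte[] data = new byte[0];

        @Override
        public void toFile(Path path) throws IOException {
            Files.write(path, data); // persist the full byte image
        }

        @Override
        public void fromFile(byte[] data) {
            this.data = data.clone(); // restore from bytes read by the caller
        }
    }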
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/UndoClusterSupport.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/UndoClusterSupport.java
new file mode 100644 (file)
index 0000000..1e7352c
--- /dev/null
@@ -0,0 +1,170 @@
+package org.simantics.acorn;
+
+import java.io.InputStream;
+
+import org.simantics.db.Session;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.impl.IClusterTable;
+import org.simantics.db.service.ClusterUID;
+
+public class UndoClusterSupport implements ClusterSupport {
+
+       final ClusterManager impl;
+       
+       public UndoClusterSupport(ClusterManager impl) {
+               this.impl = impl;
+       }
+       
+       @Override
+       public int createClusterKeyByClusterUID(ClusterUID clusterUID,
+                       long clusterId) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterId(long clusterId) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterKey(int clusterKey) {
+		try {
+			return impl.getClusterByClusterKey(clusterKey);
+		} catch (DatabaseException e) {
+			e.printStackTrace();
+			return null;
+		}
+	}
+
+       @Override
+       public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByResourceKey(int resourceKey) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public long getClusterIdOrCreate(ClusterUID clusterUID) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void addStatement(Object cluster) {
+       }
+
+       @Override
+       public void cancelStatement(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void removeStatement(Object cluster) {
+       }
+
+       @Override
+       public void cancelValue(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void removeValue(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void setValue(Object cluster, long clusterId, byte[] bytes,
+                       int length) {
+       }
+
+       @Override
+       public void modiValue(Object cluster, long clusterId, long voffset,
+                       int length, byte[] bytes, int offset) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void setImmutable(Object cluster, boolean immutable) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void setDeleted(Object cluster, boolean deleted) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void createResource(Object cluster, short resourceIndex,
+                       long clusterId) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void addStatementIndex(Object cluster, int resourceKey,
+                       ClusterUID clusterUID, byte op) {
+       }
+
+       @Override
+       public void setStreamOff(boolean setOff) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public boolean getStreamOff() {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public InputStream getValueStreamEx(int resourceIndex, long clusterId)
+                       throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public byte[] getValueEx(int resourceIndex, long clusterId)
+                       throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public byte[] getValueEx(int resourceIndex, long clusterId, long voffset,
+                       int length) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public long getValueSizeEx(int resourceIndex, long clusterId)
+                       throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public int wait4RequestsLess(int limit) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public Session getSession() {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public IClusterTable getClusterTable() {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public int getClusterKeyByClusterUIDOrMake(long id1, long id2) {
+               throw new UnsupportedOperationException();
+       }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/backup/AcornBackupProvider.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/backup/AcornBackupProvider.java
new file mode 100644 (file)
index 0000000..5ea0799
--- /dev/null
@@ -0,0 +1,322 @@
+package org.simantics.acorn.backup;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.LinkOption;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.StandardCopyOption;
+import java.nio.file.StandardOpenOption;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.Arrays;
+import java.util.concurrent.Future;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.simantics.acorn.GraphClientImpl2;
+import org.simantics.backup.BackupException;
+import org.simantics.backup.IBackupProvider;
+import org.simantics.db.server.ProCoreException;
+import org.simantics.utils.FileUtils;
+
+/**
+ * @author Jani
+ *
+ * TODO: replace the {@link GraphClientImpl2#getInstance()} invocations with a cleaner mechanism
+ */
+public class AcornBackupProvider implements IBackupProvider {
+
+    private static final String IDENTIFIER = "AcornBackupProvider";
+    private long trId = -1;
+    private final Semaphore lock = new Semaphore(1);
+
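+    // The backup metadata file lives beside the database folder and records the
+    // number of the newest revision folder included in the previous backup.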
+    private static Path getAcornMetadataFile(Path dbFolder) {
+        return dbFolder.getParent().resolve(IDENTIFIER);
+    }
+    
+    @Override
+    public void lock() throws BackupException {
+        try {
+            if (trId != -1)
+                throw new IllegalStateException(this + " backup provider is already locked");
+            trId = GraphClientImpl2.getInstance().askWriteTransaction(-1)
+                    .getTransactionId();
+        } catch (ProCoreException e) {
+            throw new BackupException(e);
+        }
+    }
+
+    @Override
+    public Future<BackupException> backup(Path targetPath, int revision) throws BackupException {
+        boolean releaseLock = true;
+        try {
+            lock.acquire();
+
+            GraphClientImpl2 client = GraphClientImpl2.getInstance();
+            client.makeSnapshot(true);
+
+            Path dbDir = client.getDbFolder();
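+            // newestFolder: last complete revision folder after the snapshot;
+            // latestFolder: last folder already backed up (-2 = none, forcing a full copy).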
+            int newestFolder = client.clusters.mainState.headDir - 1;
+            int latestFolder = -2;
+            Path acornMetadataFile = getAcornMetadataFile(dbDir);
+            if (Files.exists(acornMetadataFile)) {
+                try (BufferedReader br = Files.newBufferedReader(acornMetadataFile)) {
+                    latestFolder = Integer.parseInt( br.readLine() );
+                }
+            }
+
+            AcornBackupRunnable r = new AcornBackupRunnable(
+                    lock, targetPath, revision, dbDir, latestFolder, newestFolder);
+            new Thread(r, "Acorn backup thread").start();
+
+            releaseLock = false;
+            return r;
+        } catch (InterruptedException e) {
+            releaseLock = false;
+            throw new BackupException("Failed to lock Acorn for backup.", e);
+        } catch (NumberFormatException e) {
+            throw new BackupException("Failed to read Acorn head state file.", e);
+        } catch (IOException e) {
+            throw new BackupException("I/O problem during Acorn backup.", e);
+        } finally {
+            if (releaseLock)
+                lock.release();
+        }
+    }
+
+    @Override
+    public void unlock() throws BackupException {
+        try {
+            if (trId == -1)
+                throw new BackupException(this + " backup provider is not locked");
+            GraphClientImpl2.getInstance().endTransaction(trId);
+            trId = -1;
+        } catch (ProCoreException e) {
+            throw new BackupException(e);
+        }
+    }
+
+    @Override
+    public void restore(Path fromPath, int revision) {
+        try {
+            // 1. Resolve initial backup restore target.
+            // This can be DB directory directly or a temporary directory that
+            // will replace the DB directory.
+            Path dbRoot = GraphClientImpl2.getInstance().getDbFolder();
+            Path restorePath = dbRoot;
+            if (!Files.exists(dbRoot, LinkOption.NOFOLLOW_LINKS)) {
+                Files.createDirectories(dbRoot);
+            } else {
+                Path dbRootParent = dbRoot.getParent();
+                restorePath = dbRootParent == null ? Files.createTempDirectory("restore")
+                        : Files.createTempDirectory(dbRootParent, "restore");
+            }
+
+            // 2. Restore the backup.
+            Files.walkFileTree(fromPath, new RestoreCopyVisitor(restorePath, revision));
+
+            // 3. Override existing DB root with restored temporary copy if necessary.
+            if (dbRoot != restorePath) {
+                FileUtils.deleteAll(dbRoot.toFile());
+                Files.move(restorePath, dbRoot);
+            }
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+    }
+
+    private class RestoreCopyVisitor extends SimpleFileVisitor<Path> {
+
+        private final Path toPath;
+        private final int revision;
+        private Path currentSubFolder;
+
+        public RestoreCopyVisitor(Path toPath, int revision) {
+            this.toPath = toPath;
+            this.revision = revision;
+        }
+
+        @Override
+        public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
+            Path dirName = dir.getFileName();
+            if (dirName.toString().equals(IDENTIFIER)) {
+                currentSubFolder = dir;
+                return FileVisitResult.CONTINUE;
+            } else if (dir.getParent().getFileName().toString().equals(IDENTIFIER)) {
+                Path targetPath = toPath.resolve(dirName);
+                if (!Files.exists(targetPath)) {
+                    Files.createDirectory(targetPath);
+                }
+                return FileVisitResult.CONTINUE;
+            } else if (dirName.toString().chars().allMatch(Character::isDigit)) {
+                int dirNameInt = Integer.parseInt(dirName.toString());
+                if (dirNameInt <= revision) {
+                    return FileVisitResult.CONTINUE;
+                } else {
+                    return FileVisitResult.SKIP_SUBTREE;
+                }
+            } else {
+                return FileVisitResult.CONTINUE;
+            }
+        }
+
+        @Override
+        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+            if (file.getFileName().toString().endsWith(".tar.gz"))
+                return FileVisitResult.CONTINUE;
+            System.out.println("Restore " + file + " to " + toPath.resolve(currentSubFolder.relativize(file)));
+            Files.copy(file, toPath.resolve(currentSubFolder.relativize(file)), StandardCopyOption.REPLACE_EXISTING);
+            return FileVisitResult.CONTINUE;
+        }
+    }
+
+    private static class AcornBackupRunnable implements Runnable, Future<BackupException> {
+
+        private final Semaphore lock;
+        private final Path targetPath;
+        private final int revision;
+        private final Path baseDir;
+        private final int latestFolder;
+        private final int newestFolder;
+
+        private boolean done = false;
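+        // Released once when run() finishes; get() acquires and immediately
+        // re-releases so any number of waiters can observe completion.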
+        private final Semaphore completion = new Semaphore(0);
+        private BackupException exception = null;
+
+        public AcornBackupRunnable(Semaphore lock, Path targetPath, int revision,
+                Path baseDir, int latestFolder, int newestFolder) {
+            this.lock = lock;
+            this.targetPath = targetPath;
+            this.revision = revision;
+            this.baseDir = baseDir;
+            this.latestFolder = latestFolder;
+            this.newestFolder = newestFolder;
+        }
+
+        @Override
+        public void run() {
+            try {
+                doBackup();
+                writeHeadstateFile();
+            } catch (IOException e) {
+                exception = new BackupException("Acorn backup failed", e);
+                rollback();
+            } finally {
+                done = true;
+                lock.release();
+                completion.release();
+            }
+        }
+
+        private void doBackup() throws IOException {
+            Path target = targetPath.resolve(String.valueOf(revision)).resolve(IDENTIFIER);
+            if (!Files.exists(target))
+                Files.createDirectories(target);
+            Files.walkFileTree(baseDir,
+                    new BackupCopyVisitor(baseDir, target));
+        }
+
+        private void writeHeadstateFile() throws IOException {
+            Path acornMetadataFile = getAcornMetadataFile(baseDir);
+            if (!Files.exists(acornMetadataFile)) {
+                Files.createFile(acornMetadataFile);
+            }
+            Files.write(acornMetadataFile,
+                    Arrays.asList(Integer.toString(newestFolder)),
+                    StandardOpenOption.WRITE,
+                    StandardOpenOption.TRUNCATE_EXISTING,
+                    StandardOpenOption.CREATE);
+        }
+
+        private void rollback() {
+            // TODO
+        }
+
+        private class BackupCopyVisitor extends SimpleFileVisitor<Path> {
+
+            private Path fromPath;
+            private Path toPath;
+
+            public BackupCopyVisitor(Path fromPath, Path toPath) {
+                this.fromPath = fromPath;
+                this.toPath = toPath;
+            }
+
+            @Override
+            public FileVisitResult preVisitDirectory(Path dir,
+                    BasicFileAttributes attrs) throws IOException {
+                Path dirName = dir.getFileName();
+                if (dir.equals(fromPath)) {
+                    Path targetPath = toPath.resolve(fromPath.relativize(dir));
+                    if (!Files.exists(targetPath)) {
+                        Files.createDirectory(targetPath);
+                    }
+                    return FileVisitResult.CONTINUE;
+                } else {
+                    int dirNameInt = Integer.parseInt(dirName.toString());
+                    if (latestFolder < dirNameInt && dirNameInt <= newestFolder) {
+                        Path targetPath = toPath.resolve(fromPath
+                                .relativize(dir));
+                        if (!Files.exists(targetPath)) {
+                            Files.createDirectory(targetPath);
+                        }
+                        return FileVisitResult.CONTINUE;
+                    }
+                    return FileVisitResult.SKIP_SUBTREE;
+                }
+            }
+
+            @Override
+            public FileVisitResult visitFile(Path file,
+                    BasicFileAttributes attrs) throws IOException {
+                System.out.println("Backup " + file + " to "
+                        + toPath.resolve(fromPath.relativize(file)));
+                Files.copy(file, toPath.resolve(fromPath.relativize(file)),
+                        StandardCopyOption.REPLACE_EXISTING);
+                return FileVisitResult.CONTINUE;
+            }
+        }
+
+        @Override
+        public boolean cancel(boolean mayInterruptIfRunning) {
+            return false;
+        }
+
+        @Override
+        public boolean isCancelled() {
+            return false;
+        }
+
+        @Override
+        public boolean isDone() {
+            return done;
+        }
+
+        @Override
+        public BackupException get() throws InterruptedException {
+            completion.acquire();
+            completion.release();
+            return exception;
+        }
+
+        @Override
+        public BackupException get(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException {
+            if (completion.tryAcquire(timeout, unit))
+                completion.release();
+            else
+                throw new TimeoutException("Acorn backup completion waiting timed out.");
+            return exception;
+        }
+
+    }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterBig.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterBig.java
new file mode 100644 (file)
index 0000000..f623d58
--- /dev/null
@@ -0,0 +1,1104 @@
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn.cluster;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.internal.ClusterStream;
+import org.simantics.acorn.internal.ClusterSupport2;
+import org.simantics.acorn.internal.DebugPolicy;
+import org.simantics.db.Resource;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.exception.ExternalValueException;
+import org.simantics.db.exception.ValidationException;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.impl.ClusterI.PredicateProcedure;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.impl.ClusterTraitsBase;
+import org.simantics.db.impl.ForEachObjectContextProcedure;
+import org.simantics.db.impl.ForEachObjectProcedure;
+import org.simantics.db.impl.ForPossibleRelatedValueContextProcedure;
+import org.simantics.db.impl.ForPossibleRelatedValueProcedure;
+import org.simantics.db.impl.IClusterTable;
+import org.simantics.db.impl.Table;
+import org.simantics.db.impl.TableHeader;
+import org.simantics.db.impl.graph.ReadGraphImpl;
+import org.simantics.db.impl.query.QueryProcessor;
+import org.simantics.db.procedure.AsyncContextMultiProcedure;
+import org.simantics.db.procedure.AsyncMultiProcedure;
+import org.simantics.db.procore.cluster.ClusterMap;
+import org.simantics.db.procore.cluster.ClusterPrintDebugInfo;
+import org.simantics.db.procore.cluster.ClusterTraits;
+import org.simantics.db.procore.cluster.CompleteTable;
+import org.simantics.db.procore.cluster.FlatTable;
+import org.simantics.db.procore.cluster.ForeignTable;
+import org.simantics.db.procore.cluster.ObjectTable;
+import org.simantics.db.procore.cluster.PredicateTable;
+import org.simantics.db.procore.cluster.ResourceTable;
+import org.simantics.db.procore.cluster.ValueTable;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.datastructures.Callback;
+
+final public class ClusterBig extends ClusterImpl {
+    private static final int TABLE_HEADER_SIZE = TableHeader.HEADER_SIZE + TableHeader.EXTRA_SIZE;
+    private static final int RESOURCE_TABLE_OFFSET = 0;
+    private static final int PREDICATE_TABLE_OFFSET = RESOURCE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int OBJECT_TABLE_OFFSET = PREDICATE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int VALUE_TABLE_OFFSET = OBJECT_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int FLAT_TABLE_OFFSET = VALUE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int COMPLETE_TABLE_OFFSET = FLAT_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int FOREIGN_TABLE_OFFSET = COMPLETE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int INT_HEADER_SIZE = FOREIGN_TABLE_OFFSET + TABLE_HEADER_SIZE;
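+    // headerTable packs one TABLE_HEADER_SIZE-int header per table, at the offsets defined above.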
+    private final int clusterBits;
+    final private ResourceTable resourceTable;
+    //final private ResourceTable movedResourceTable;
+    final private PredicateTable predicateTable;
+    final private ObjectTable objectTable;
+    final private ValueTable valueTable;
+    final private FlatTable flatTable;
+    final private ForeignTable foreignTable;
+    final private CompleteTable completeTable;
+    final private ClusterMap clusterMap;
+    final private int[] headerTable;
+    final private ClusterSupport2 clusterSupport;
+    
+    public ClusterBig(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport2 support) {
+        super(clusterTable, clusterUID, clusterKey, support);
+        if(DebugPolicy.REPORT_CLUSTER_EVENTS)
+            new Exception(getClusterUID().toString()).printStackTrace();
+        this.headerTable = new int[INT_HEADER_SIZE];
+        this.resourceTable = new ResourceTable(this, headerTable, RESOURCE_TABLE_OFFSET);
+        this.foreignTable = new ForeignTable(this, headerTable, FOREIGN_TABLE_OFFSET);
+        this.predicateTable = new PredicateTable(this, headerTable, PREDICATE_TABLE_OFFSET);
+        this.objectTable = new ObjectTable(this, headerTable, OBJECT_TABLE_OFFSET);
+        this.valueTable = new ValueTable(this, headerTable, VALUE_TABLE_OFFSET);
+        this.completeTable = new CompleteTable(this, headerTable, COMPLETE_TABLE_OFFSET);
+        this.flatTable = null;
+        this.clusterMap = new ClusterMap(foreignTable, flatTable);
+        this.clusterSupport = support;
+        this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey);
+        this.importance = 0;
+//        clusterTable.setDirtySizeInBytes(true);
+    }
+    protected ClusterBig(IClusterTable clusterTable, long[] longs, int[] ints, byte[] bytes, ClusterSupport2 support, int clusterKey)
+    throws DatabaseException {
+        super(clusterTable, checkValidity(0, longs, ints, bytes), clusterKey, support);
+        if(DebugPolicy.REPORT_CLUSTER_EVENTS)
+            new Exception(getClusterUID().toString()).printStackTrace();
+        if (ints.length < INT_HEADER_SIZE)
+            throw new IllegalArgumentException("Too small integer table for cluster.");
+        this.headerTable = ints;
+        this.resourceTable = new ResourceTable(this, ints, RESOURCE_TABLE_OFFSET, longs);
+        this.foreignTable = new ForeignTable(this, headerTable, FOREIGN_TABLE_OFFSET, longs);
+        this.predicateTable = new PredicateTable(this, ints, PREDICATE_TABLE_OFFSET, ints);
+        this.objectTable = new ObjectTable(this, ints, OBJECT_TABLE_OFFSET, ints);
+        this.valueTable = new ValueTable(this, ints, VALUE_TABLE_OFFSET, bytes);
+        this.flatTable = null;
+        this.completeTable = new CompleteTable(this, headerTable, COMPLETE_TABLE_OFFSET, ints);
+        this.clusterMap = new ClusterMap(foreignTable, flatTable);
+        this.clusterSupport = support;
+        this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey);
+    }
+    void analyse() {
+        System.out.println("Cluster " + clusterId);
+        System.out.println("-size:" + getUsedSpace());
+        System.out.println(" -rt:" + (resourceTable.getTableCapacity() * 8 + 8));
+        System.out.println(" -ft:" + foreignTable.getTableCapacity() * 8);
+        System.out.println(" -pt:" + predicateTable.getTableCapacity() * 4);
+        System.out.println(" -ot:" + objectTable.getTableCapacity() * 4);
+        System.out.println(" -ct:" + completeTable.getTableCapacity() * 4);
+        System.out.println(" -vt:" + valueTable.getTableCapacity());
+
+        System.out.println("-resourceTable:");
+        System.out.println(" -resourceCount=" + resourceTable.getResourceCount());
+        System.out.println(" -size=" + resourceTable.getTableSize());
+        System.out.println(" -capacity=" + resourceTable.getTableCapacity());
+        System.out.println(" -count=" + resourceTable.getTableCount());
+        System.out.println(" -size=" + resourceTable.getTableSize());
+        //resourceTable.analyse();
+    }
+    public void checkDirectReference(int dr)
+    throws DatabaseException {
+        if (!ClusterTraits.statementIndexIsDirect(dr))
+            throw new ValidationException("Reference is not direct. Reference=" + dr);
+        if (ClusterTraits.isFlat(dr))
+            throw new ValidationException("Reference is flat. Reference=" + dr);
+        if (ClusterTraits.isLocal(dr)) {
+            if (dr < 1 || dr > resourceTable.getUsedSize())
+                throw new ValidationException("Illegal local reference. Reference=" + dr);
+        } else {
+            int fi = ClusterTraits.getForeignIndexFromReference(dr);
+            int ri = ClusterTraits.getResourceIndexFromForeignReference(dr);
+            if (fi < 1 || fi > foreignTable.getUsedSize())
+                throw new ValidationException("Illegal foreign reference. Reference=" + dr + " foreign index=" + fi);
+            if (ri < 1 || ri > ClusterTraits.getMaxNumberOfResources())
+                throw new ValidationException("Illegal foreign reference. Reference=" + dr + " resource index=" + ri);
+        }
+    }
+    public void checkPredicateIndex(int pi)
+    throws DatabaseException {
+        predicateTable.checkPredicateSetIndex(this, pi);
+    }
+    public void checkObjectSetReference(int or)
+    throws DatabaseException {
+        if (ClusterTraits.statementIndexIsDirect(or))
+            throw new ValidationException("Illegal object set reference. Reference=" + or);
+        int oi = ClusterTraits.statementIndexGet(or);
+        this.objectTable.checkObjectSetIndex(this, oi);
+    }
+
+    public void checkValueInit()
+    throws DatabaseException {
+        valueTable.checkValueInit();
+    }
+    public void checkValue(int capacity, int index)
+    throws DatabaseException {
+        valueTable.checkValue(capacity, index);
+    }
+    public void checkValueFini()
+    throws DatabaseException {
+        valueTable.checkValueFini();
+    }
+    public void checkForeingIndex(int fi)
+    throws DatabaseException {
+        if (fi<1 || fi > foreignTable.getUsedSize())
+            throw new ValidationException("Illegal foreign index=" + fi);
+    }
+    public void checkCompleteSetReference(int cr)
+    throws DatabaseException {
+        if (!ClusterTraits.completeReferenceIsMultiple(cr))
+            throw new ValidationException("Illegal complete set reference. Reference=" + cr);
+        int ci = cr;
+        this.completeTable.checkCompleteSetIndex(this, ci);
+    }
+    public void check()
+    throws DatabaseException {
+        this.completeTable.check(this);
+        this.objectTable.check(this);
+        // Must be after object table check.
+        this.predicateTable.check(this);
+        this.resourceTable.check(this);
+    }
+    @Override
+    public CompleteTypeEnum getCompleteType(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        final int resourceRef = getLocalReference(resourceKey);
+        int completeRef = resourceTable.getCompleteObjectRef(resourceRef);
+        CompleteTypeEnum ct = ClusterTraits.completeReferenceGetType(completeRef);
+        if (DEBUG)
+            System.out.println("Cluster.getCompleteType rk=" + resourceKey + " ct=" + ct);
+        int i = ct.getValue();
+        switch (i) {
+            case 0: return CompleteTypeEnum.NotComplete;
+            case 1: return CompleteTypeEnum.InstanceOf;
+            case 2: return CompleteTypeEnum.Inherits;
+            case 3: return CompleteTypeEnum.SubrelationOf;
+            default: throw new DatabaseException("Illegal complete type enumeration.");
+        }
+    }
+
+    @Override
+    public int getCompleteObjectKey(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        final int resourceRef = getLocalReference(resourceKey);
+        int completeRef = resourceTable.getCompleteObjectRef(resourceRef);
+        int clusterIndex;
+        int resourceIndex = ClusterTraits.completeReferenceGetResourceIndex(completeRef);
+        
+        ClusterI.CompleteTypeEnum completeType = ClusterTraits.completeReferenceGetType(completeRef);
+        if (completeType == ClusterI.CompleteTypeEnum.NotComplete)
+            throw new DatabaseException("Resource has multiple complete objects. Resource key=" + resourceKey + ".");
+        
+        if (ClusterTraits.completeReferenceIsLocal(completeRef)) {
+            clusterIndex = clusterKey;
+        } else {
+            int foreignIndex = ClusterTraits.completeReferenceGetForeignIndex(completeRef);
+//            System.err.println("completeRef=" + completeRef + " foreignIndex=" + foreignIndex );
+            ClusterUID clusterUID = foreignTable.getResourceUID(foreignIndex).asCID();
+            ClusterI c = support.getClusterByClusterUIDOrMake(clusterUID);
+            clusterIndex = c.getClusterKey();
+        }
+        int key = ClusterTraits.createResourceKey(clusterIndex, resourceIndex);
+        if (DEBUG)
+            System.out.println("Cluster.complete object rk=" + resourceKey + " ck=" + key);
+        return key;
+    }
+
+    @Override
+    public boolean isComplete(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        final int resourceRef = getLocalReference(resourceKey);
+        int completeRef = resourceTable.getCompleteObjectRef(resourceRef);
+        ClusterI.CompleteTypeEnum completeType = ClusterTraits.completeReferenceGetType(completeRef);
+        boolean complete = completeType != ClusterI.CompleteTypeEnum.NotComplete;
+        if (DEBUG)
+            System.out.println("Cluster.key=" + resourceKey + " isComplete=" + complete);
+        return complete;
+    }
+
+    public int getSingleObject(int resourceKey, int predicateKey, int objectIndex, ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("Cluster.getSingleObject: rk=" + resourceKey + " pk=" + predicateKey);
+        if (0 == objectIndex) {
+            final int resourceIndex = getLocalReference(resourceKey);
+            final int pRef = getInternalReferenceOrZero(predicateKey, support);
+            final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        }
+        return objectTable.getSingleObject(objectIndex, support, this);
+    }
+
+    public void forObjects(int resourceKey, int predicateKey, int objectIndex, QueryProcessor processor, ReadGraphImpl graph, AsyncMultiProcedure<Resource> procedure,
+            ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("Cluster.forObjects1: rk=" + resourceKey + " pk=" + predicateKey);
+        if (0 == objectIndex) {
+            final int resourceIndex = getLocalReference(resourceKey);
+            final int pRef = getInternalReferenceOrZero(predicateKey, support);
+            final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+            return;
+        }
+        objectTable.foreachObject(graph, objectIndex, procedure, this);
+    }
+    public <C> void forObjects(int resourceKey, int predicateKey, int objectIndex, QueryProcessor processor, ReadGraphImpl graph, C context, AsyncContextMultiProcedure<C, Resource> procedure,
+            ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("Cluster.forObjects1: rk=" + resourceKey + " pk=" + predicateKey);
+        if (0 == objectIndex) {
+            final int resourceIndex = getLocalReference(resourceKey);
+            final int pRef = getInternalReferenceOrZero(predicateKey, support);
+            final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+            resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+            return;
+        }
+        objectTable.foreachObject(graph, objectIndex, context, procedure, this);
+    }
+    @Override
+    public <Context> boolean forObjects(int resourceKey, int predicateKey, int objectIndex, ObjectProcedure<Context> procedure,
+            Context context, ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("Cluster.forObjects2: rk=" + resourceKey + " pk=" + predicateKey);
+        if (0 == objectIndex) {
+            final int resourceIndex = getLocalReference(resourceKey);
+            final int pRef = getInternalReferenceOrZero(predicateKey, support);
+            final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+            return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+        }
+        return objectTable.foreachObject(objectIndex, procedure, context, support, this);
+    }
+
+    @Override
+    public int getSingleObject(int resourceKey, int predicateKey, ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("Cluster.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey);
+        final int resourceIndex = getLocalReference(resourceKey);
+        final int pRef = getInternalReferenceOrZero(predicateKey, support);
+        final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+        if (0 == predicateIndex)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+        return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+    }
+    
+    @Override
+    public <T> int getSingleObject(int resourceKey, ForPossibleRelatedValueProcedure<T> procedure, ClusterSupport support) throws DatabaseException {
+        final int predicateKey = procedure.predicateKey;
+        if (DEBUG)
+            System.out.println("Cluster.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey);
+        final int resourceIndex = getLocalReference(resourceKey);
+        final int pRef = getInternalReferenceOrZero(predicateKey, support);
+        final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+        if (0 == predicateIndex)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+        return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+    }
+
+    @Override
+    public <C, T> int getSingleObject(int resourceKey, ForPossibleRelatedValueContextProcedure<C, T> procedure, ClusterSupport support) throws DatabaseException {
+        final int predicateKey = procedure.predicateKey;
+        if (DEBUG)
+            System.out.println("Cluster.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey);
+        final int resourceIndex = getLocalReference(resourceKey);
+        final int pRef = getInternalReferenceOrZero(predicateKey, support);
+        final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+        if (0 == predicateIndex)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+        return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+    }
+    
+    @Override
+    public void forObjects(ReadGraphImpl graph, int resourceKey,
+            int predicateKey, AsyncMultiProcedure<Resource> procedure)
+            throws DatabaseException {
+        
+       throw new UnsupportedOperationException();
+       
+//        SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//        ClusterSupport support = session.clusterTranslator;
+//        
+//        if (DEBUG)
+//            System.out.println("Cluster.forObjects3: rk=" + resourceKey + " pk=" + predicateKey);
+//        final int resourceIndex = getLocalReference(resourceKey);
+//        final int pRef = getInternalReferenceOrZero(predicateKey, support);
+//        final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+//        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) {
+//            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+//        if (0 == predicateIndex) {
+//            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+//        forObjects(resourceKey, predicateKey, objectIndex, graph.processor, graph, procedure, support);
+        
+    }
+    
+    @Override
+    public void forObjects(ReadGraphImpl graph, int resourceKey, ForEachObjectProcedure procedure) throws DatabaseException {
+       
+       throw new UnsupportedOperationException();
+       
+//        SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//        ClusterSupport support = session.clusterTranslator;
+//        final int predicateKey = procedure.predicateKey;
+//        if (DEBUG)
+//            System.out.println("Cluster.forObjects3: rk=" + resourceKey + " pk=" + predicateKey);
+//        final int resourceIndex = getLocalReference(resourceKey);
+//        final int pRef = getInternalReferenceOrZero(predicateKey, support);
+//        final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+//        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) {
+//            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+//        if (0 == predicateIndex) {
+//            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+//        forObjects(resourceKey, predicateKey, objectIndex, graph.processor, graph, procedure, support);
+        
+    }
+    @Override
+    public <C> void forObjects(ReadGraphImpl graph, int resourceKey, C context,
+            ForEachObjectContextProcedure<C> procedure) throws DatabaseException {
+        
+       throw new UnsupportedOperationException();
+
+//     SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//        ClusterSupport support = session.clusterTranslator;
+//        
+//        final int predicateKey = procedure.predicateKey;
+//        
+//        if (DEBUG)
+//            System.out.println("Cluster.forObjects3: rk=" + resourceKey + " pk=" + predicateKey);
+//        final int resourceIndex = getLocalReference(resourceKey);
+//        final int pRef = getInternalReferenceOrZero(predicateKey, support);
+//        final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+//        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) {
+//            resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+//        if (0 == predicateIndex) {
+//            resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+//        forObjects(resourceKey, predicateKey, objectIndex, graph.processor, graph, context, procedure, support);
+        
+    }
+
+    @Override
+    public <Context> boolean forObjects(int resourceKey, int predicateKey,
+            ObjectProcedure<Context> procedure, Context context, ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("Cluster.forObjects4: rk=" + resourceKey + " pk=" + predicateKey);
+        final int resourceIndex = getLocalReference(resourceKey);
+        final int pRef = getInternalReferenceOrZero(predicateKey, support);
+        final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType)
+            return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+        if (0 == predicateIndex)
+            return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+        return forObjects(resourceKey, predicateKey, objectIndex, procedure, context, support);
+    }
+    @Override
+    public <Context> boolean forPredicates(int resourceKey,
+            PredicateProcedure<Context> procedure, Context context, ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("Cluster.forPredicates: rk=" + resourceKey);
+        final int resourceIndex = getLocalReference(resourceKey);
+        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+        if (0 == predicateIndex)
+            return resourceTable.foreachPredicate(resourceIndex,
+                    procedure, context, support, this, completeTable);
+        else {
+            boolean broken = resourceTable.foreachPredicate(resourceIndex,
+                    procedure, context, support, this, completeTable);
+            if (broken)
+                return true;
+        }
+        return predicateTable.foreachPredicate(predicateIndex, procedure, context, support, this);
+    }
+    @Override
+    public ClusterI addRelation(int sResourceKey, ClusterUID puid, int pResourceKey, ClusterUID ouid, int oResourceKey, ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("add rk=" + sResourceKey + " pk=" + pResourceKey + " ok=" + oResourceKey);
+        int sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION);
+        int pri = getReferenceOrCreateIfForeign(pResourceKey, puid, support, ClusterStream.NULL_OPERATION);
+        int ori = getReferenceOrCreateIfForeign(oResourceKey, ouid, support, ClusterStream.NULL_OPERATION);
+        ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+        boolean ret = addRelationInternal(sri, pri, ori, completeType);
+//      check();
+        if (ret) {
+            support.addStatement(this);
+            return this;
+        } else {
+            support.cancelStatement(this);
+            return null;
+        }
+    }
+    @Override
+    public ClusterI addRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("add rk=" + sResourceKey + " pk=" + pResourceKey + " ok=" + oResourceKey);
+        int sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION);
+        int pri = getReferenceOrCreateIfForeign(pResourceKey, support, ClusterStream.NULL_OPERATION);
+        int ori = getReferenceOrCreateIfForeign(oResourceKey, support, ClusterStream.NULL_OPERATION);
+        ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+        boolean ret = addRelationInternal(sri, pri, ori, completeType);
+//      check();
+        if (ret) {
+            support.addStatement(this);
+            return this;
+        } else {
+            support.cancelStatement(this);
+            return null;
+        }
+    }
+    @Override
+    public boolean removeRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support)
+    throws DatabaseException {
+//        check();
+        int sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.REMOVE_OPERATION);
+        int pri = getInternalReferenceOrZeroAnd(pResourceKey, support, ClusterStream.NULL_OPERATION);
+        int ori = getInternalReferenceOrZeroAnd(oResourceKey, support, ClusterStream.NULL_OPERATION);
+        boolean ret = false;
+        if (0 != pri && 0 != ori) {
+            ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+            ret = removeRelationInternal(sri, pri, ori, completeType, support);
+        }
+        if (ret)
+            support.removeStatement(this);
+        else
+            support.cancelStatement(this);
+//        check();
+        return ret;
+    }
+    @Override
+    public void denyRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support)
+    throws DatabaseException {
+        int sri = checkResourceKeyIsOursAndGetResourceIndexIf(sResourceKey, support);
+        ResourceIndexAndId p = checkResourceKeyAndGetResourceIndexIf(pResourceKey, support);
+        ResourceIndexAndId o = checkResourceKeyAndGetResourceIndexIf(oResourceKey, support);
+        if (0 == sri || 0 == p.index || 0 == o.index)
+            return;
+//        check();
+        ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+        boolean ret = removeRelationInternal(sri, p.reference, o.reference, completeType, support);
+        if (ret) {
+            support.addStatementIndex(this, sResourceKey, getClusterUID(), ClusterChange.REMOVE_OPERATION);
+            support.addStatementIndex(this, pResourceKey, p.clusterUID, ClusterStream.NULL_OPERATION);
+            support.addStatementIndex(this, oResourceKey, o.clusterUID, ClusterStream.NULL_OPERATION);
+            support.removeStatement(this);
+        }
+//        check();
+        return;
+    }
+    @Override
+    public InputStream getValueStream(int rResourceId, ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterBig.getValue " + rResourceId);
+        int resourceIndex = getLocalReference(rResourceId);
+        try {
+            byte[] buffer = resourceTable.getValue(valueTable, resourceIndex);
+            if(buffer == null) return null;
+            return new ByteArrayInputStream(buffer);
+        } catch (ExternalValueException e) {
+            return support.getValueStreamEx(resourceIndex, clusterUID.second);
+        }
+    }
+    @Override
+    public byte[] getValue(int rResourceId, ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterBig.getValue " + rResourceId);
+        int resourceIndex = getLocalReference(rResourceId);
+        try {
+            return resourceTable.getValue(valueTable, resourceIndex);
+        } catch (ExternalValueException e) {
+               return clusterSupport.impl.getResourceFile(clusterUID.asBytes(), resourceIndex);
+//            return support.getValueEx(resourceIndex, clusterUID.second);
+        }
+    }
+    @Override
+    public boolean hasValue(int rResourceId, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReference(rResourceId);
+        return resourceTable.hasValue(resourceIndex);
+    }
+    @Override
+    public boolean removeValue(int rResourceId, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterChange.DELETE_OPERATION);
+        support.removeValue(this);
+        return resourceTable.removeValue(valueTable, resourceIndex);
+    }
+    
+    @Override
+    public ClusterI setValue(int rResourceId, byte[] value, int length, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.SET_OPERATION);
+        support.setValue(this, getClusterId(), value, length);
+        resourceTable.setValue(valueTable, resourceIndex, value, length);
+        return this;
+    }
+    @Override
+    public ClusterI modiValueEx(int rResourceId, long voffset, int length, byte[] value, int offset, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.MODI_OPERATION);
+        support.modiValue(this, getClusterId(), voffset, length, value, offset);
+        resourceTable.setValueEx(valueTable, resourceIndex);
+        return this;
+    }
+    @Override
+    public byte[] readValueEx(int rResourceId, long voffset, int length, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReference(rResourceId);
+        boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex);
+        if (!isExternal)
+            throw new DatabaseException("ClusterI.readValue supported only for external value. Resource key=" + rResourceId);
+        return support.getValueEx(resourceIndex, getClusterId(), voffset, length);
+    }
+    @Override
+    public long getValueSizeEx(int resourceKey, ClusterSupport support)
+    throws DatabaseException, ExternalValueException {
+        int resourceIndex = getLocalReference(resourceKey);
+        boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex);
+        if (!isExternal)
+            throw new ExternalValueException("ClusterI.getSize supported only for external value. Resource key=" + resourceKey);
+        return support.getValueSizeEx(resourceIndex, getClusterId());
+    }
+    public boolean isValueEx(int resourceKey)
+    throws DatabaseException {
+        int resourceIndex = getLocalReference(resourceKey);
+        return resourceTable.isValueEx(valueTable, resourceIndex);
+    }
+    @Override
+    public void setValueEx(int resourceKey)
+    throws DatabaseException {
+        int resourceIndex = getLocalReference(resourceKey);
+        resourceTable.setValueEx(valueTable, resourceIndex);
+    }
+    @Override
+    public int createResource(ClusterSupport support)
+    throws DatabaseException {
+        short resourceIndex = resourceTable.createResource();
+
+        if(DebugPolicy.REPORT_RESOURCE_ID_ALLOCATION)
+            System.out.println("[RID_ALLOCATION]: ClusterBig[" + clusterId + "] allocates " + resourceIndex);
+
+        support.createResource(this, resourceIndex, clusterId);
+        return ClusterTraits.createResourceKey(clusterKey, resourceIndex);
+    }
+    @Override
+    public boolean hasResource(int resourceKey, ClusterSupport support) {
+        int clusterKey = ClusterTraitsBase.getClusterKeyFromResourceKeyNoThrow(resourceKey);
+        if (this.clusterKey != clusterKey) // foreign resource
+            return false;
+        int resourceIndex;
+        try {
+            resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        } catch (DatabaseException e) {
+            return false;
+        }
+        if (resourceIndex > 0 && resourceIndex <= resourceTable.getTableCount())
+            return true;
+        else
+            return false;
+    }
+    @Override
+    public int getNumberOfResources(ClusterSupport support) {
+        return resourceTable.getUsedSize();
+    }
+    @Override
+    public long getUsedSpace() {
+        long rt = resourceTable.getTableCapacity() * 8 + 8; // (8 = cluster id)
+        long ft = foreignTable.getTableCapacity() * 8;
+        long pt = predicateTable.getTableCapacity() * 4;
+        long ot = objectTable.getTableCapacity() * 4;
+        long ct = completeTable.getTableCapacity() * 4;
+        long vt = valueTable.getTableCapacity() * 1;
+        long cm = clusterMap.getUsedSpace();
+        
+        return rt + ft + pt + ot + ct + vt + cm;
+//        System.out.println("resource table " + rt);
+//        System.out.println("foreign table (non flat cluster table) " + ft);
+//        System.out.println("predicate table " + pt);
+//        long pt2 = getRealSizeOfPredicateTable() * 4;
+//        System.out.println("predicate table real size " + pt2);
+//        System.out.println("object table " + ot);
+//        long ot2 = getRealSizeOfObjectTable() * 4;
+//        System.out.println("object table real size " + ot2);
+//        System.out.println("value table " + vt);
+    }
+    int getRealSizeOfPredicateTable() throws DatabaseException {
+        SizeOfPredicateTable proc = new SizeOfPredicateTable(resourceTable, predicateTable);
+        resourceTable.foreachResource(proc, 0, null, null);
+        return proc.getSize();
+    }
+    int getRealSizeOfObjectTable() throws DatabaseException {
+        SizeOfObjectTable proc = new SizeOfObjectTable(resourceTable, predicateTable, objectTable);
+        resourceTable.foreachResource(proc, 0, null, null);
+        return proc.getSize();
+    }
+    @Override
+    public boolean isEmpty() {
+        return resourceTable.getTableCount() == 0;
+    }
+    @Override
+    public void printDebugInfo(String message, ClusterSupport support)
+    throws DatabaseException {
+        predicateTable.printDebugInfo();
+        objectTable.printDebugInfo();
+        ClusterPrintDebugInfo proc = new ClusterPrintDebugInfo(this
+                , resourceTable, predicateTable, support, objectTable);
+        resourceTable.foreachResource(proc, 0, null, null);
+    }
+    private int getInternalReferenceOrZero(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) { // foreign resource
+            ClusterI foreignCluster = support.getClusterByClusterKey(clusterKey);
+            ClusterUID clusterUID = foreignCluster.getClusterUID();
+            int foreignResourceIndex = clusterMap.getForeignReferenceOrZero(resourceIndex, clusterUID);
+            return foreignResourceIndex;
+        }
+        return resourceIndex;
+    }
+    private int getInternalReferenceOrZeroAnd(int resourceKey, ClusterSupport support, byte op)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) { // foreign resource
+            ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey);
+            int foreignResourceIndex = clusterMap.getForeignReferenceOrZero(resourceIndex, clusterUID);
+            support.addStatementIndex(this, resourceKey, clusterUID, op);
+            return foreignResourceIndex;
+        }
+        support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+        return resourceIndex;
+    }
+    private short getLocalReference(int resourceKey) throws DatabaseException {
+        return ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+    }
+    private int getLocalReferenceAnd(int resourceKey, ClusterSupport support, byte op)
+    throws DatabaseException {
+        int resourceIndex = getLocalReference(resourceKey);
+        support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+        return resourceIndex;
+    }
+    private int checkResourceKeyIsOursAndGetResourceIndexIf(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        int clusterShortId = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterShortId)
+            return 0;
+        int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        return resourceIndex;
+    }
+    private int getReferenceOrCreateIfForeign(int resourceKey, ClusterUID clusterUID, ClusterSupport support, byte op)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) {
+            support.addStatementIndex(this, resourceKey, clusterUID, op);
+            return clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID);
+        }
+        support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+        return resourceIndex;
+    }
+    private int getReferenceOrCreateIfForeign(int resourceKey, ClusterSupport support, byte op)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) {
+            ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey);
+            support.addStatementIndex(this, resourceKey, clusterUID, op);
+            return clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID);
+        }
+        support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+        return resourceIndex;
+    }
+    private class ResourceIndexAndId {
+        ResourceIndexAndId(int reference, int index, ClusterUID clusterUID) {
+            this.reference = reference;
+            this.index = index;
+            this.clusterUID = clusterUID;
+        }
+        public final int reference;
+        public final int index;
+        public final ClusterUID clusterUID;
+    }
+    private ResourceIndexAndId checkResourceKeyAndGetResourceIndexIf(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) { // foreign resource
+            ClusterI foreignCluster = support.getClusterByClusterKey(clusterKey);
+            ClusterUID clusterUID = foreignCluster.getClusterUID();
+            int ref = clusterMap.getForeignReferenceOrCreateByResourceIndex(resourceIndex, clusterUID);
+            return new ResourceIndexAndId(ref, resourceIndex, clusterUID);
+        }
+        return new ResourceIndexAndId(resourceIndex, resourceIndex, getClusterUID());
+    }
+    
+    @Override
+    final public int execute(int resourceIndex) throws DatabaseException {
+        int key;
+        if(resourceIndex > 0) {
+            key = clusterBits | resourceIndex;
+        } else {
+            ClusterUID clusterUID = clusterMap.getResourceUID(resourceIndex).asCID();
+            ClusterI cluster = clusterSupport.getClusterByClusterUIDOrMake(clusterUID);
+            int foreignResourceIndex = clusterMap.getForeignResourceIndex(resourceIndex);
+            key = ClusterTraits.createResourceKey(cluster.getClusterKey(), foreignResourceIndex);
+        }
+        if (DEBUG)
+            System.out.println("Cluster.execute key=" + key);
+        return key;
+    }
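+    // Illustrative reading of the key packing above (example values assumed,
+    // not authoritative): for a local, i.e. positive, index the key is simply
+    // the cluster's key bits OR'd with the index, e.g. clusterBits = 0x45000
+    // and resourceIndex = 7 give key = 0x45007. Negative indexes denote
+    // foreign resources and are resolved through clusterMap instead.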
+    
+    private boolean addRelationInternal(int sReference, int pReference, int oReference, ClusterI.CompleteTypeEnum completeType)
+    throws DatabaseException {
+        int predicateIndex = resourceTable.addStatement(sReference, pReference,
+                oReference, predicateTable, objectTable, completeType, completeTable);
+        if (0 == predicateIndex)
+            return true; // added to resourceTable
+        else if (0 > predicateIndex)
+            return false; // old complete statement
+        int newPredicateIndex = predicateTable.addPredicate(predicateIndex,
+                pReference, oReference, objectTable);
+        if (0 == newPredicateIndex)
+            return false;
+        if (predicateIndex != newPredicateIndex)
+            resourceTable.setPredicateIndex(sReference, newPredicateIndex);
+        return true;
+    }
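+    // Contract of resourceTable.addStatement as inferred from the branches
+    // above (hedged): 0 means the statement fit into the resource table
+    // itself, a negative value means an identical complete statement already
+    // existed, and a positive value is the predicate-table index, which may
+    // move when addPredicate has to grow the predicate set.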
+    private boolean removeRelationInternal(int sResourceIndex, int pResourceIndex,
+            int oResourceIndex, ClusterI.CompleteTypeEnum completeType, ClusterSupport support)
+    throws DatabaseException {
+        int predicateIndex = resourceTable.getPredicateIndex(sResourceIndex);
+        if (0 == predicateIndex || ClusterI.CompleteTypeEnum.NotComplete != completeType)
+            return resourceTable.removeStatementFromCache(sResourceIndex,
+                    pResourceIndex, oResourceIndex, completeType, completeTable);
+        PredicateTable.Status ret = predicateTable.removePredicate(predicateIndex, pResourceIndex, oResourceIndex, objectTable);
+        switch (ret) {
+            case NothingRemoved:
+                return false;
+            case PredicateRemoved: {
+                if (0 == predicateTable.getPredicateSetSize(predicateIndex))
+                    resourceTable.setPredicateIndex(sResourceIndex, 0);
+                // intentional fall-through to the default case
+            }
+            default:
+                break;
+        }
+        resourceTable.removeStatement(sResourceIndex,
+                pResourceIndex, oResourceIndex,
+                completeType, completeTable,
+                predicateTable, objectTable, this, support);
+        return true;
+    }
+    @Override
+    public void load() {
+        throw new Error("Not supported.");
+    }
+
+    @Override
+    public void load(Callback<DatabaseException> r) {
+        throw new Error("Not supported.");
+    }
+
+    public int makeResourceKey(int resourceIndex) throws DatabaseException {
+        int key = 0;
+        if (resourceIndex > 0) // local resource
+            key = ClusterTraits.createResourceKey(clusterKey, resourceIndex);
+        else {
+            ClusterUID clusterUID = clusterMap.getResourceUID(resourceIndex).asCID();
+            int clusterKey = clusterSupport.getClusterKeyByClusterUIDOrMake(clusterUID);
+            int foreignResourceIndex = clusterMap.getForeignResourceIndex(resourceIndex);
+            key = ClusterTraits.createResourceKey(clusterKey, foreignResourceIndex);
+        }
+        if (0 == key)
+            throw new DatabaseException("Failed to make resource key from " + resourceIndex);
+        return key;
+    }
+    @Override
+    public ClusterBig toBig(ClusterSupport support) throws DatabaseException {
+        throw new Error("Not implemented");
+    }
+    @Override
+    public void load(ClusterSupport session, Runnable callback) {
+        throw new Error("Not implemented");
+    }
+    @Override
+    public ClusterI getClusterByResourceKey(int resourceKey,
+            ClusterSupport support) {
+        throw new Error("Not implemented");
+    }
+    @Override
+    public void increaseReferenceCount(int amount) {
+        throw new Error("Not implemented");
+    }
+    @Override
+    public void decreaseReferenceCount(int amount) {
+        throw new Error("Not implemented");
+    }
+    @Override
+    public int getReferenceCount() {
+        throw new Error("Not implemented");
+    }
+    @Override
+    public void releaseMemory() {
+    }
+    @Override
+    public void compact() {
+        clusterMap.compact();
+    }
+    public boolean contains(int resourceKey) {
+        return ClusterTraitsBase.isCluster(clusterBits, resourceKey);
+    }
+    @Override
+    public ClusterTypeEnum getType() {
+        return ClusterTypeEnum.BIG;
+    }
+    @Override
+    public boolean getImmutable() {
+        int status = resourceTable.getClusterStatus();
+        return (status & ClusterStatus.ImmutableMaskSet) == ClusterStatus.ImmutableMaskSet;
+    }
+    @Override
+    public void setImmutable(boolean immutable, ClusterSupport support) {
+        int status = resourceTable.getClusterStatus();
+        if (immutable)
+            status |= ClusterStatus.ImmutableMaskSet;
+        else
+            status &= ClusterStatus.ImmutableMaskClear;
+        resourceTable.setClusterStatus(status);
+        support.setImmutable(this, immutable);
+    }
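+    // A minimal sketch of the status-flag pattern above, assuming
+    // ImmutableMaskSet == 0x1 and ImmutableMaskClear == ~0x1 (the actual
+    // values live in ClusterStatus):
+    //     int status = 0;
+    //     status |= ClusterStatus.ImmutableMaskSet;   // set   -> status == 1
+    //     status &= ClusterStatus.ImmutableMaskClear; // clear -> status == 0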
+    
+    @Override
+    public ClusterTables store() throws IOException {
+
+        ClusterTables result = new ClusterTables();
+
+        int[] currentHeader = Arrays.copyOf(headerTable, INT_HEADER_SIZE);
+
+        int byteSize = valueTable.getTableSize();
+        byte[] byteBytes = new byte[byteSize];
+        valueTable.store(byteBytes, 0);
+
+        result.bytes = byteBytes;
+
+        int longSize = LONG_HEADER_SIZE + resourceTable.getTableSize() + foreignTable.getTableSize();
+        long[] longBytes = new long[longSize];
+
+        longBytes[0] = 0;
+        longBytes[1] = LONG_HEADER_VERSION;
+        longBytes[2] = 0;
+        longBytes[3] = clusterUID.second;
+
+        int longPos = resourceTable.store(longBytes, LONG_HEADER_SIZE);
+        foreignTable.store(longBytes, longPos);
+
+        result.longs = longBytes;
+
+        int intSize = INT_HEADER_SIZE + predicateTable.getTableSize() + objectTable.getTableSize() + completeTable.getTableSize();
+        int[] intBytes = new int[intSize];
+        int intPos = INT_HEADER_SIZE;
+        intPos = predicateTable.store(intBytes, intPos);
+        intPos = objectTable.store(intBytes, intPos);
+        intPos = completeTable.store(intBytes, intPos);
+        // write header
+        for (int i = 0; i < INT_HEADER_SIZE; i++)
+            intBytes[i] = headerTable[i];
+
+        result.ints = intBytes;
+
+        // restore the header that was copied aside before storing
+        for (int i = 0; i < INT_HEADER_SIZE; i++)
+            headerTable[i] = currentHeader[i];
+
+        return result;
+    }
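+    // Summary of the snapshot layout written above (a reading of the code,
+    // not a specification): store() serializes the cluster into three
+    // parallel arrays. bytes holds the raw value table; longs holds a
+    // LONG_HEADER_SIZE header (type, version, cluster UID) followed by the
+    // resource and foreign tables; ints holds an INT_HEADER_SIZE header
+    // followed by the predicate, object and complete tables. The header is
+    // copied aside first and restored afterwards, so the in-memory cluster
+    // is left unchanged by the snapshot.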
+    
+    @Override
+    protected int getResourceTableCount() {
+       return resourceTable.getTableCount();
+    }
+    @Override
+    public boolean getDeleted() {
+        int status = resourceTable.getClusterStatus();
+        return (status & ClusterStatus.DeletedMaskSet) == ClusterStatus.DeletedMaskSet;
+    }
+    @Override
+    public void setDeleted(boolean deleted, ClusterSupport support) {
+        int status = resourceTable.getClusterStatus();
+        if (deleted)
+            status |= ClusterStatus.DeletedMaskSet;
+        else
+            status &= ClusterStatus.DeletedMaskClear;
+        resourceTable.setClusterStatus(status);
+        support.setDeleted(this, deleted);
+    }
+    @Override
+    public Table<?> getPredicateTable() {
+        return predicateTable;
+    }
+    @Override
+    public Table<?> getForeignTable() {
+        return foreignTable;
+    }
+    @Override
+    public Table<?> getCompleteTable() {
+        return completeTable;
+    }
+    @Override
+    public Table<?> getValueTable() {
+        return valueTable;
+    }
+    @Override
+    public Table<?> getObjectTable() {
+        return objectTable;
+    }
+}
+
+class SizeOfPredicateTable implements ClusterI.ObjectProcedure<Integer> {
+    private final ResourceTable mrResourceTable;
+    private final PredicateTable mrPredicateTable;
+    private int size = 0;
+    SizeOfPredicateTable(ResourceTable resourceTable
+            , PredicateTable predicateTable) {
+        mrResourceTable = resourceTable;
+        mrPredicateTable = predicateTable;
+    }
+    @Override
+    public boolean execute(Integer i, int resourceRef) {
+        int predicateIndex = mrResourceTable.getPredicateIndex(resourceRef);
+        if (0 == predicateIndex)
+            return false; // continue loop
+        size += mrPredicateTable.getPredicateSetSize(predicateIndex);
+        return false; // continue loop
+    }
+    
+    public int getSize() {
+        return size;
+    }
+    
+}
+
+class SizeOfObjectTable implements ClusterI.ObjectProcedure<Integer> {
+    private final ResourceTable mrResourceTable;
+    private final PredicateTable mrPredicateTable;
+    private final ObjectTable mrObjectTable;
+    private int size = 0;
+    SizeOfObjectTable(ResourceTable resourceTable
+            , PredicateTable predicateTable, ObjectTable objectTable) {
+        mrResourceTable = resourceTable;
+        mrPredicateTable = predicateTable;
+        mrObjectTable = objectTable;
+    }
+
+    @Override
+    public boolean execute(Integer i, int resourceRef) {
+        int predicateIndex = mrResourceTable.getPredicateIndex(resourceRef);
+        if (0 == predicateIndex)
+            return false; // continue loop
+        ClusterI.PredicateProcedure<Object> procedure = new PredicateProcedure<Object>() {
+            @Override
+            public boolean execute(Object context, int pRef, int oIndex) {
+                if (ClusterTraits.statementIndexIsDirect(oIndex))
+                    return false; // no table space reserved, continue looping
+                int objectIndex;
+                try {
+                    objectIndex = ClusterTraits.statementIndexGet(oIndex);
+                    size += mrObjectTable.getObjectSetSize(objectIndex);
+                } catch (DatabaseException e) {
+                    e.printStackTrace();
+                }
+                return false; // continue looping
+            }
+        };
+        try {
+            mrPredicateTable.foreachPredicate(predicateIndex, procedure, null, null, null);
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+        }
+        return false; // continue loop
+    }
+    
+    public int getSize() {
+        return size;
+    }
+    
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterImpl.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterImpl.java
new file mode 100644 (file)
index 0000000..353d938
--- /dev/null
@@ -0,0 +1,226 @@
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn.cluster;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.UUID;
+
+import org.simantics.acorn.internal.Change;
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.internal.ClusterSupport2;
+import org.simantics.db.common.utils.Logger;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.exception.InvalidClusterException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.impl.IClusterTable;
+import org.simantics.db.impl.Modifier;
+import org.simantics.db.service.ClusterCollectorPolicy.CollectorCluster;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.db.service.ClusteringSupport.Id;
+import org.simantics.utils.strings.AlphanumComparator;
+
+public abstract class ClusterImpl extends ClusterBase implements Modifier, CollectorCluster {
+    protected static final int LONG_HEADER_SIZE = 7;
+    protected static final long LONG_HEADER_VERSION = 1;
+    protected static ClusterUID checkValidity(long type, long[] longs, int[] ints, byte[] bytes)
+    throws InvalidClusterException {
+        if (longs.length < LONG_HEADER_SIZE)
+            throw new InvalidClusterException("Header size mismatch. Expected=" + ClusterImpl.LONG_HEADER_SIZE + ", got=" + longs.length);
+        if (longs[0] != type)
+            throw new InvalidClusterException("Type mismatch. Expected=" + type + ", got=" + longs[0] + " " + ClusterUID.make(longs[2], longs[3]));
+        if (longs[1] != ClusterImpl.LONG_HEADER_VERSION)
+            throw new InvalidClusterException("Version mismatch. Expected=" + ClusterImpl.LONG_HEADER_VERSION + ", got=" + longs[1]);
+        return ClusterUID.make(longs[2], longs[3]);
+    }
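+    // An illustrative (not authoritative) view of the long header checked
+    // above: longs[0] is the cluster type tag (0 selects ClusterBig, -1
+    // ClusterSmall, per make() below), longs[1] is LONG_HEADER_VERSION, and
+    // longs[2]/longs[3] are the two halves of the ClusterUID. A small
+    // cluster header would thus start { -1L, 1L, uidFirst, uidSecond, ... }.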
+    protected static Id getUniqueId(long[] longs) {
+        return new IdImpl(new UUID(longs[3], longs[4]));
+    }
+    static final boolean DEBUG = false;
+    final public IClusterTable clusterTable;
+    // This can be null iff the cluster has been converted to big
+    public Change change = new Change();
+    public ClusterChange cc;
+    public byte[] foreignLookup;
+    
+    private boolean dirtySizeInBytes = true;
+    private long sizeInBytes = 0;
+    
+    protected ClusterImpl() {
+        clusterTable = null;
+    }
+    
+    public ClusterImpl(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport support) {
+        super(support, clusterUID, clusterKey);
+        this.clusterTable = clusterTable;
+    }
+    
+    public static ClusterImpl dummy() {
+       return new ClusterSmall();
+    }
+    
+    public static ClusterImpl make(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport2 support) {
+        return new ClusterSmall(clusterUID, clusterKey, support, clusterTable);
+    }
+    public static ClusterSmall proxy(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, long clusterId, ClusterSupport2 support) {
+        if (DEBUG)
+            new Exception("Cluster proxy for " + clusterUID).printStackTrace();
+        return new ClusterSmall(null, clusterUID, clusterKey, support);
+    }
+    public static ClusterImpl make(IClusterTable clusterTable, long[] longs, int[] ints, byte[] bytes, ClusterSupport2 support, int clusterKey)
+    throws DatabaseException {
+        if (longs[0] == 0)
+            return new ClusterBig(clusterTable, longs, ints, bytes, support, clusterKey);
+        else
+            return new ClusterSmall(clusterTable, longs, ints, bytes, support, clusterKey);
+    }
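+    // Hedged usage sketch (names as in this file): loading a persisted
+    // cluster dispatches on the type slot of the long header,
+    //     ClusterImpl c = ClusterImpl.make(table, longs, ints, bytes, support, key);
+    // where longs[0] == 0 yields a ClusterBig and anything else a
+    // ClusterSmall, each constructor re-validating the header via
+    // checkValidity().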
+
+//    public boolean virtual = false;
+    
+    @Override
+    public boolean hasVirtual() {
+       return false;
+//        return clusterTable.hasVirtual(clusterKey);
+    }
+
+    @Override
+    public void markVirtual() {
+//        clusterTable.markVirtual(clusterKey);
+//        virtual = true;
+    }
+    
+    @Override
+    public boolean isWriteOnly() {
+        return false;
+    }
+    @Override
+    public boolean isLoaded() {
+        return true;
+    }
+    
+    @Override
+    public void resized() {
+        dirtySizeInBytes = true;
+//        if(clusterTable != null)
+//             clusterTable.setDirtySizeInBytes(true);
+    }
+    
+    public long getCachedSize() {
+        if(dirtySizeInBytes) {
+            try {
+                sizeInBytes = getUsedSpace();
+                //System.err.println("recomputed size of cluster " + getClusterId() + " => " + sizeInBytes);
+            } catch (DatabaseException e) {
+                Logger.defaultLogError(e);
+            }
+            dirtySizeInBytes = false;
+        }
+        return sizeInBytes;
+    }
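+    // resized() only flips the dirty flag; the potentially expensive
+    // getUsedSpace() walk is deferred until the next getCachedSize() call.
+    // A plain lazily recomputed dirty-flag cache.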
+
+    protected void calculateModifiedId() {
+//        setModifiedId(new IdImpl(UUID.randomUUID()));
+    }
+    
+    public static class ClusterTables {
+       public byte[] bytes;
+       public int[] ints;
+       public long[] longs;
+    }
+    
+    public byte[] storeBytes() throws IOException {
+       throw new UnsupportedOperationException();
+    }
+
+    public ClusterTables store() throws IOException {
+       throw new UnsupportedOperationException();
+    }
+    
+    protected abstract int getResourceTableCount();
+    
+    public String dump(final ClusterSupport support) {
+        StringBuilder sb = new StringBuilder();
+        for (int i = 1; i < getResourceTableCount(); i++) {
+            sb.append(i).append('\n');
+            final int resourceKey = i;
+            final ArrayList<String> stms = new ArrayList<String>();
+            try {
+                byte[] value = getValue(i, support);
+                if (value != null)
+                    sb.append(" bytes: ").append(Arrays.toString(value)).append('\n');
+
+                forPredicates(i, new PredicateProcedure<Integer>() {
+
+                    @Override
+                    public boolean execute(Integer c, final int predicateKey, int objectIndex) {
+                        try {
+                            forObjects(resourceKey, predicateKey, objectIndex, new ObjectProcedure<Integer>() {
+
+                                @Override
+                                public boolean execute(Integer context, int objectKey) throws DatabaseException {
+                                    ClusterUID puid = support.getClusterByResourceKey(predicateKey).getClusterUID();
+                                    ClusterUID ouid = support.getClusterByResourceKey(objectKey).getClusterUID();
+                                    stms.add(" " + puid + " " + (predicateKey & 0xFFF) + " " + ouid + " " + (objectKey & 0xFFF));
+                                    return false;
+                                }
+
+                            }, 0, support);
+                        } catch (DatabaseException e) {
+                            e.printStackTrace();
+                        }
+                        return false;
+                    }
+
+                }, 0, support);
+
+                Collections.sort(stms, AlphanumComparator.COMPARATOR);
+
+                for (String s : stms) {
+                    sb.append(s);
+                    sb.append('\n');
+                }
+
+            } catch (DatabaseException e) {
+                e.printStackTrace();
+            }
+        }
+        return sb.toString();
+    }
+    
+    public abstract boolean isValueEx(int resourceIndex) throws DatabaseException;
+
+    public abstract ClusterI addRelation(int resourceKey, ClusterUID puid, int predicateKey, ClusterUID ouid, int objectKey, ClusterSupport support) throws DatabaseException;
+    
+    @Override
+    public IClusterTable getClusterTable() {
+        return clusterTable;
+    }
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterSmall.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterSmall.java
new file mode 100644 (file)
index 0000000..726071d
--- /dev/null
@@ -0,0 +1,1304 @@
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn.cluster;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.internal.ClusterStream;
+import org.simantics.acorn.internal.ClusterSupport2;
+import org.simantics.acorn.internal.DebugPolicy;
+import org.simantics.db.Resource;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.exception.ExternalValueException;
+import org.simantics.db.exception.ValidationException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.impl.ClusterTraitsBase;
+import org.simantics.db.impl.ForEachObjectContextProcedure;
+import org.simantics.db.impl.ForEachObjectProcedure;
+import org.simantics.db.impl.ForPossibleRelatedValueContextProcedure;
+import org.simantics.db.impl.ForPossibleRelatedValueProcedure;
+import org.simantics.db.impl.IClusterTable;
+import org.simantics.db.impl.Table;
+import org.simantics.db.impl.TableHeader;
+import org.simantics.db.impl.graph.ReadGraphImpl;
+import org.simantics.db.procedure.AsyncContextMultiProcedure;
+import org.simantics.db.procedure.AsyncMultiProcedure;
+import org.simantics.db.procore.cluster.ClusterMapSmall;
+import org.simantics.db.procore.cluster.ClusterTraits;
+import org.simantics.db.procore.cluster.ClusterTraitsSmall;
+import org.simantics.db.procore.cluster.CompleteTableSmall;
+import org.simantics.db.procore.cluster.ForeignTableSmall;
+import org.simantics.db.procore.cluster.ObjectTable;
+import org.simantics.db.procore.cluster.OutOfSpaceException;
+import org.simantics.db.procore.cluster.PredicateTable;
+import org.simantics.db.procore.cluster.ResourceTableSmall;
+import org.simantics.db.procore.cluster.ValueTableSmall;
+import org.simantics.db.service.Bytes;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.db.service.ResourceUID;
+import org.simantics.utils.datastructures.Callback;
+
+import gnu.trove.map.hash.TIntShortHashMap;
+import gnu.trove.procedure.TIntProcedure;
+import gnu.trove.set.hash.TIntHashSet;
+
+final public class ClusterSmall extends ClusterImpl {
+    private static final int TABLE_HEADER_SIZE = TableHeader.HEADER_SIZE + TableHeader.EXTRA_SIZE;
+    private static final int RESOURCE_TABLE_OFFSET = 0;
+    private static final int PREDICATE_TABLE_OFFSET = RESOURCE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int OBJECT_TABLE_OFFSET = PREDICATE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int VALUE_TABLE_OFFSET = OBJECT_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int FLAT_TABLE_OFFSET = VALUE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int COMPLETE_TABLE_OFFSET = FLAT_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int FOREIGN_TABLE_OFFSET = COMPLETE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int INT_HEADER_SIZE = FOREIGN_TABLE_OFFSET + TABLE_HEADER_SIZE;
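+    // Derived reading of the constants above (hedged): every table reserves
+    // TABLE_HEADER_SIZE ints of header, so the offsets chain linearly. With
+    // TABLE_HEADER_SIZE = H: RESOURCE=0, PREDICATE=H, OBJECT=2H, VALUE=3H,
+    // FLAT=4H, COMPLETE=5H, FOREIGN=6H and INT_HEADER_SIZE=7H.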
+    private final int clusterBits;
+    private final ResourceTableSmall resourceTable;
+    private final PredicateTable predicateTable;
+    private final ObjectTable objectTable;
+    private final ValueTableSmall valueTable;
+    private final ForeignTableSmall foreignTable;
+    private final CompleteTableSmall completeTable;
+    private final ClusterMapSmall clusterMap;
+    private final int[] headerTable;
+    public final ClusterSupport2 clusterSupport;
+    private boolean proxy;
+    private boolean deleted = false;
+    
+    protected ClusterSmall() {
+        this.proxy = true;
+        this.headerTable = null;
+        this.resourceTable = null;
+        this.foreignTable = null;
+        this.predicateTable = null;
+        this.objectTable = null;
+        this.valueTable = null;
+        this.completeTable = null;
+        this.clusterMap = null;
+        this.clusterSupport = null;
+        this.clusterBits = 0;
+        this.importance = 0;
+    }
+    
+    public ClusterSmall(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport2 support) {
+        super(clusterTable, clusterUID, clusterKey, support);
+        if(DebugPolicy.REPORT_CLUSTER_EVENTS)
+            new Exception(clusterUID.toString()).printStackTrace();
+        this.proxy = true;
+        this.headerTable = null;
+        this.resourceTable = null;
+        this.foreignTable = null;
+        this.predicateTable = null;
+        this.objectTable = null;
+        this.valueTable = null;
+        this.completeTable = null;
+        this.clusterMap = null;
+        this.clusterSupport = support;
+        this.clusterBits = 0;
+        this.importance = 0;
+//        new Exception("ClusterSmall " + clusterKey).printStackTrace();
+    }
+    ClusterSmall(ClusterUID clusterUID, int clusterKey, ClusterSupport2 support, IClusterTable clusterTable) {
+        super(clusterTable, clusterUID, clusterKey, support);
+        if(DebugPolicy.REPORT_CLUSTER_EVENTS)
+            new Exception(clusterUID.toString()).printStackTrace();
+        this.proxy = false;
+        this.clusterSupport = support;
+        this.headerTable = new int[INT_HEADER_SIZE];
+        this.resourceTable = new ResourceTableSmall(this, headerTable, RESOURCE_TABLE_OFFSET);
+        this.foreignTable = new ForeignTableSmall(this, headerTable, FOREIGN_TABLE_OFFSET);
+        this.predicateTable = new PredicateTable(this, headerTable, PREDICATE_TABLE_OFFSET);
+        this.objectTable = new ObjectTable(this, headerTable, OBJECT_TABLE_OFFSET);
+        this.valueTable = new ValueTableSmall(this, headerTable, VALUE_TABLE_OFFSET);
+        this.completeTable = new CompleteTableSmall(this, headerTable, COMPLETE_TABLE_OFFSET);
+        this.clusterMap = new ClusterMapSmall(this, foreignTable);
+        this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey);
+        this.importance = 0;
+//        new Exception("ClusterSmall " + clusterKey).printStackTrace();
+    }
+    protected ClusterSmall(IClusterTable clusterTable, long[] longs, int[] ints, byte[] bytes, ClusterSupport2 support, int clusterKey)
+    throws DatabaseException {
+        super(clusterTable, checkValidity(-1, longs, ints, bytes), clusterKey, support);
+        this.proxy = false;
+        this.clusterSupport = support;
+        if (ints.length < INT_HEADER_SIZE)
+            throw new IllegalArgumentException("Too small integer table for cluster.");
+        this.headerTable = ints;
+        if(DebugPolicy.REPORT_CLUSTER_EVENTS) new Exception(Long.toString(clusterId)).printStackTrace();
+        this.resourceTable = new ResourceTableSmall(this, ints, RESOURCE_TABLE_OFFSET, longs);
+        this.foreignTable = new ForeignTableSmall(this, headerTable, FOREIGN_TABLE_OFFSET, longs);
+        this.predicateTable = new PredicateTable(this, ints, PREDICATE_TABLE_OFFSET, ints);
+        this.objectTable = new ObjectTable(this, ints, OBJECT_TABLE_OFFSET, ints);
+        this.valueTable = new ValueTableSmall(this, ints, VALUE_TABLE_OFFSET, bytes);
+        this.completeTable = new CompleteTableSmall(this, headerTable, COMPLETE_TABLE_OFFSET, ints);
+        this.clusterMap = new ClusterMapSmall(this, foreignTable);
+        this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey);
+//        if(clusterTable != null) {
+//             this.importance = clusterTable.timeCounter();
+//             clusterTable.markImmutable(this, getImmutable());
+//        }
+//        new Exception("ClusterSmall " + clusterKey).printStackTrace();
+    }
+    void analyse() {
+        System.out.println("Cluster " + clusterId);
+        System.out.println("-size:" + getUsedSpace());
+        System.out.println(" -rt:" + (resourceTable.getTableCapacity() * 8 + 8));
+        System.out.println(" -ft:" + foreignTable.getTableCapacity() * 8);
+        System.out.println(" -pt:" + predicateTable.getTableCapacity() * 4);
+        System.out.println(" -ot:" + objectTable.getTableCapacity() * 4);
+        System.out.println(" -ct:" + completeTable.getTableCapacity() * 4);
+        System.out.println(" -vt:" + valueTable.getTableCapacity());
+
+        System.out.println("-resourceTable:");
+        System.out.println(" -resourceCount=" + resourceTable.getResourceCount());
+        System.out.println(" -size=" + resourceTable.getTableSize());
+        System.out.println(" -capacity=" + resourceTable.getTableCapacity());
+        System.out.println(" -count=" + resourceTable.getTableCount());
+        System.out.println(" -size=" + resourceTable.getTableSize());
+        //resourceTable.analyse();
+    }
+    public void checkDirectReference(int dr)
+    throws DatabaseException {
+        if (!ClusterTraits.statementIndexIsDirect(dr))
+            throw new ValidationException("Reference is not direct. Reference=" + dr);
+        if (ClusterTraits.isFlat(dr))
+            throw new ValidationException("Reference is flat. Reference=" + dr);
+        if (ClusterTraits.isLocal(dr)) {
+            if (dr < 1 || dr > resourceTable.getUsedSize())
+                throw new ValidationException("Illegal local reference. Reference=" + dr);
+        } else {
+            int fi = ClusterTraits.getForeignIndexFromReference(dr);
+            int ri = ClusterTraits.getResourceIndexFromForeignReference(dr);
+            if (fi < 1 || fi > foreignTable.getUsedSize())
+                throw new ValidationException("Illegal foreign reference. Reference=" + dr + " foreign index=" + fi);
+            if (ri < 1 || ri > ClusterTraits.getMaxNumberOfResources())
+                throw new ValidationException("Illegal foreign reference. Reference=" + dr + " resource index=" + ri);
+        }
+    }
+    public void checkPredicateIndex(int pi)
+    throws DatabaseException {
+        //        predicateTable.checkPredicateSetIndex(this, pi);
+    }
+    public void checkObjectSetReference(int or)
+    throws DatabaseException {
+        if (ClusterTraits.statementIndexIsDirect(or))
+            throw new ValidationException("Illegal object set reference. Reference=" + or);
+        int oi = ClusterTraits.statementIndexGet(or);
+        this.objectTable.checkObjectSetIndex(this, oi);
+    }
+
+    public void checkValueInit()
+    throws DatabaseException {
+        valueTable.checkValueInit();
+    }
+    public void checkValue(int capacity, int index)
+    throws DatabaseException {
+        valueTable.checkValue(capacity, index);
+    }
+    public void checkValueFini()
+    throws DatabaseException {
+        valueTable.checkValueFini();
+    }
+    public void checkForeingIndex(int fi)
+    throws DatabaseException {
+        if (fi<1 || fi > foreignTable.getUsedSize())
+            throw new ValidationException("Illegal foreign index=" + fi);
+    }
+    public void checkCompleteSetReference(int cr)
+    throws DatabaseException {
+        if (!ClusterTraits.completeReferenceIsMultiple(cr))
+            throw new ValidationException("Illegal complete set reference. Reference=" + cr);
+        int ci = cr;
+        this.completeTable.checkCompleteSetIndex(this, ci);
+    }
+    public void check()
+    throws DatabaseException {
+//        this.completeTable.check(this);
+//        this.objectTable.check(this);
+//        // Must be after object table check.
+//        this.predicateTable.check(this);
+//        this.resourceTable.check(this);
+    }
+    @Override
+    public CompleteTypeEnum getCompleteType(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        final int resourceRef = getLocalReference(resourceKey);
+        CompleteTypeEnum ct = resourceTable.getCompleteType(resourceRef);
+        if (DEBUG)
+            System.out.println("ClusterSmall.getCompleteType rk=" + resourceKey + " ct=" + ct);
+        return ct;
+    }
+
+    @Override
+    public int getCompleteObjectKey(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        final int resourceIndexOld = getLocalReference(resourceKey);
+        short completeRef = resourceTable.getCompleteObjectRef(resourceIndexOld);
+        int clusterIndex;
+        int resourceIndex;
+        if (0 == completeRef)
+            throw new DatabaseException("Resource's complete object refernce is null. Resource key=" + resourceKey + ".");
+        ClusterI.CompleteTypeEnum completeType = resourceTable.getCompleteType(resourceIndexOld);
+        if (completeType == ClusterI.CompleteTypeEnum.NotComplete)
+            throw new DatabaseException("Resource has multiple complete objects. Resource key=" + resourceKey + ".");
+        if (ClusterTraitsSmall.resourceRefIsLocal(completeRef)) {
+            clusterIndex = clusterKey;
+            resourceIndex = completeRef;
+        } else { // Resource has one complete statement.
+            ResourceUID resourceUID = clusterMap.getForeignResourceUID(completeRef);
+            ClusterUID uid = resourceUID.asCID();
+            clusterIndex = clusterSupport.getClusterKeyByUID(0, uid.second);
+            //ClusterI c = clusterTable.getClusterByClusterUIDOrMakeProxy(uid);
+            //clusterIndex = c.getClusterKey();
+            //assert(clusterIndex == clusterTable.getClusterByClusterUIDOrMakeProxy(uid).getClusterKey());
+            resourceIndex = resourceUID.getIndex();
+        }
+        int key = ClusterTraits.createResourceKey(clusterIndex, resourceIndex);
+        if (DEBUG)
+            System.out.println("ClusterSmall.complete object rk=" + resourceKey + " ck=" + key);
+        return key;
+    }
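+    // Hedged note on the branch above: a completeRef that is local (per
+    // ClusterTraitsSmall.resourceRefIsLocal) is an index within this cluster;
+    // otherwise it indexes the foreign table, whose ResourceUID yields both
+    // the owning cluster's UID and the resource index inside that cluster.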
+
+    @Override
+    public boolean isComplete(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        final int resourceRef = getLocalReference(resourceKey);
+        final ClusterI.CompleteTypeEnum completeType = resourceTable.getCompleteType(resourceRef);
+        boolean complete = completeType != ClusterI.CompleteTypeEnum.NotComplete;
+        if (DEBUG)
+            System.out.println("ClusterSmall.key=" + resourceKey + " isComplete=" + complete);
+        return complete;
+    }
+    public int getSingleObject(int resourceKey, int predicateKey, int objectIndex, ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.getSingleObject: rk=" + resourceKey + " pk=" + predicateKey);
+        if (0 == objectIndex) {
+            final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+            final short pRef = getInternalReferenceOrZero2(predicateKey, support);
+            final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        }
+        return objectTable.getSingleObject(objectIndex, support, this);
+    }
+
+    public void forObjects(ReadGraphImpl graph, int resourceKey, int predicateKey, int objectIndex, AsyncMultiProcedure<Resource> procedure,
+            ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.forObjects1: rk=" + resourceKey + " pk=" + predicateKey);
+        if (0 == objectIndex) {
+            final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+            final int pRef = getInternalReferenceOrZero2(predicateKey, support);
+            final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+            return;
+        }
+        objectTable.foreachObject(graph, objectIndex, procedure, this);
+    }
+
+    public <C> void forObjects(ReadGraphImpl graph, int resourceKey, int predicateKey, int objectIndex, C context, AsyncContextMultiProcedure<C, Resource> procedure,
+            ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.forObjects1: rk=" + resourceKey + " pk=" + predicateKey);
+        if (0 == objectIndex) {
+            final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+            final int pRef = getInternalReferenceOrZero2(predicateKey, support);
+            final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+            resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+            return;
+        }
+        objectTable.foreachObject(graph, objectIndex, context, procedure, this);
+    }
+    
+    @Override
+    public <Context> boolean forObjects(int resourceKey, int predicateKey, int objectIndex, ObjectProcedure<Context> procedure,
+            Context context, ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.forObjects2: rk=" + resourceKey + " pk=" + predicateKey);
+        if (0 == objectIndex) {
+            final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+            final short pRef = getInternalReferenceOrZero2(predicateKey, support);
+            final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+            return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+        }
+        return objectTable.foreachObject(objectIndex, procedure, context, support, this);
+    }
+
+    @Override
+    public int getSingleObject(int resourceKey, int predicateKey, ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey);
+        final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+        final short pRef = getInternalReferenceOrZero2(predicateKey, support);
+        final int completeType = ClusterTraitsBase.getCompleteTypeIntFromResourceKey(predicateKey);
+        final ClusterI.CompleteTypeEnum pCompleteType = CompleteTypeEnum.make(completeType);
+        if (completeType > 0)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+        if (0 == predicateIndex) // All relevant data is in resource table.
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+        return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+    }
+
+    @Override
+    public <T> int getSingleObject(int resourceKey, ForPossibleRelatedValueProcedure<T> procedure, ClusterSupport support) throws DatabaseException {
+        final short resourceIndex = (short)ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+        final int predicateKey = procedure.predicateKey;
+        int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey);
+        short pRef = 0;
+        if(procedure.clusterKey[0] == clusterKey) {
+            pRef = (short)procedure.predicateReference[0];
+        } else {
+            pRef = getInternalReferenceOrZero2(predicateKey, support);
+            procedure.clusterKey[0] = clusterKey;
+            procedure.predicateReference[0] = pRef;
+        }
+        
+        final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType;
+        if (CompleteTypeEnum.NotComplete != pCompleteType)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+        if (0 == predicateIndex) // All relevant data is in resource table.
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+        return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+    }
+
+    @Override
+    public <C, T> int getSingleObject(int resourceKey, ForPossibleRelatedValueContextProcedure<C, T> procedure, ClusterSupport support) throws DatabaseException {
+        final short resourceIndex = (short)ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+        final int predicateKey = procedure.predicateKey;
+        int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey);
+        short pRef = 0;
+        if(procedure.clusterKey[0] == clusterKey) {
+            pRef = (short)procedure.predicateReference[0];
+        } else {
+            pRef = getInternalReferenceOrZero2(predicateKey, support);
+            procedure.clusterKey[0] = clusterKey;
+            procedure.predicateReference[0] = pRef;
+        }
+        final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType;
+        if (CompleteTypeEnum.NotComplete != pCompleteType)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+        if (0 == predicateIndex) // All relevant data is in resource table.
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+        return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+    }
+
+    @Override
+    public void forObjects(ReadGraphImpl graph, int resourceKey,
+            int predicateKey, AsyncMultiProcedure<Resource> procedure) throws DatabaseException {
+       
+       throw new UnsupportedOperationException();
+       
+//        SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//        ClusterSupport support = session.clusterTranslator;
+//        if (DEBUG)
+//            System.out.println("ClusterSmall.forObjects3: rk=" + resourceKey + " pk=" + predicateKey);
+//        final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+//        final int pRef = getInternalReferenceOrZero2(predicateKey, support);
+//        final int completeType = ClusterTraitsBase.getCompleteTypeIntFromResourceKey(predicateKey);
+//        final ClusterI.CompleteTypeEnum pCompleteType = CompleteTypeEnum.make(completeType);
+//        if (completeType > 0) {
+//            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+//        if (0 == predicateIndex) {
+//            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+//        forObjects(graph, resourceKey, predicateKey, objectIndex, procedure, support);
+    }
+
+    public void forObjects(ReadGraphImpl graph, int resourceKey, ForEachObjectProcedure procedure) throws DatabaseException {
+       
+       throw new UnsupportedOperationException();
+
+//        final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+//        final int predicateKey = procedure.predicateKey;
+//        int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey);
+//        int pRef = 0;
+//        if(procedure.clusterKey[0] == clusterKey) {
+//            pRef = procedure.predicateReference[0];
+//        } else {
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            pRef = getInternalReferenceOrZero2(predicateKey, support);
+//            procedure.clusterKey[0] = clusterKey;
+//            procedure.predicateReference[0] = pRef;
+//        }
+//        final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType;
+//        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) {
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+//        if (0 == predicateIndex) {
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        int hashBase = predicateIndex + predicateTable.offset;
+//        if (predicateTable.table[hashBase-1] < 0) {
+//            int objectIndex = TableIntArraySet2.get(predicateTable.table, hashBase, pRef & 0xFFFF);
+//            //int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            forObjects(graph, resourceKey, predicateKey, objectIndex, procedure, support);
+//        } else {
+//            procedure.finished(graph);
+////            graph.dec();
+//        }
+    }
+
+    public <C> void forObjects(ReadGraphImpl graph, int resourceKey, C context, ForEachObjectContextProcedure<C> procedure) throws DatabaseException {
+       
+       throw new UnsupportedOperationException();
+
+//        final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+//        final int predicateKey = procedure.predicateKey;
+//        int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey);
+//        int pRef = 0;
+//        if(procedure.clusterKey[0] == clusterKey) {
+//            pRef = procedure.predicateReference[0];
+//        } else {
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            pRef = getInternalReferenceOrZero2(predicateKey, support);
+//            procedure.clusterKey[0] = clusterKey;
+//            procedure.predicateReference[0] = pRef;
+//        }
+//        
+//        final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType;
+//        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) {
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+//        if (0 == predicateIndex) {
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        int hashBase = predicateIndex + predicateTable.offset;
+//        if(predicateTable.table[hashBase-1] < 0) {
+//            int objectIndex = TableIntArraySet2.get(predicateTable.table, hashBase, pRef & 0xFFFF);
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            forObjects(graph, resourceKey, predicateKey, objectIndex, context, procedure, support);
+//        } else {
+//            int objectIndex = TableIntSet2.get(predicateTable.table, hashBase, pRef & 0xFFFF);
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            forObjects(graph, resourceKey, predicateKey, objectIndex, context, procedure, support);
+//        }
+    }
+    @Override
+    public <Context> boolean forObjects(int resourceKey, int predicateKey,
+            ObjectProcedure<Context> procedure, Context context, ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.forObjects4: rk=" + resourceKey + " pk=" + predicateKey);
+        final short resourceIndex = (short)ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+        final short pRef = getInternalReferenceOrZero2(predicateKey, support);
+        final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+        // PredicateType is complete i.e. all relevant data is in resource table.
+        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) { 
+            if (DEBUG)
+                System.out.println("ClusterSmall.forObjects: complete type was " + pCompleteType + " cluster=" + getClusterUID());
+            return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+        }
+        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+        if (0 == predicateIndex) { // All relevant data is in resource table.
+            if (DEBUG)
+                System.out.println("ClusterSmall.forObjects: no predicate table " + pCompleteType);
+            return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+        }
+        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+        return forObjects(resourceKey, predicateKey, objectIndex, procedure, context, support);
+    }
+    @Override
+    public <Context> boolean forPredicates(int resourceKey,
+            PredicateProcedure<Context> procedure, Context context, ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.forPredicates: rk=" + resourceKey );
+        final int resourceIndex = getLocalReference(resourceKey);
+        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+        if (0 == predicateIndex)
+            return resourceTable.foreachPredicate(resourceIndex,
+                    procedure, context, support, this, completeTable);
+        else {
+            boolean broken = resourceTable.foreachPredicate(resourceIndex,
+                    procedure, context, support, this, completeTable);
+            if (broken)
+                return true;
+        }
+        return predicateTable.foreachPredicate(predicateIndex,
+                procedure, context, support, this);
+    }
+    
+    @Override
+    public ClusterI addRelation(int sResourceKey, ClusterUID puid, int pResourceKey, ClusterUID ouid, int oResourceKey, ClusterSupport support) throws DatabaseException {
+
+        if(proxy) {
+               throw new UnsupportedOperationException();
+//            ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey);
+//            return cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support);
+        }
+
+        //        check();
+        boolean ret;
+        try {
+            short sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION);
+            short pri = getReferenceOrCreateIfForeign(pResourceKey, puid, support, ClusterStream.NULL_OPERATION);
+            short ori = getReferenceOrCreateIfForeign(oResourceKey, ouid, support, ClusterStream.NULL_OPERATION);
+            ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+            ret = addRelationInternal(sri, pri, ori, completeType);
+            calculateModifiedId();
+        } catch (OutOfSpaceException e) {
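+            // Out of space in this small cluster: convert it to a ClusterBig
+            // and replay the same relation there. The change stream is switched
+            // off around the conversion, presumably so that the conversion
+            // itself is not recorded as ordinary statement traffic.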
+            boolean streamOff = support.getStreamOff();
+            if (!streamOff) {
+                support.cancelStatement(this);
+                support.setStreamOff(true);
+            }
+            ClusterI cluster = toBig(clusterSupport);
+            if (!streamOff)
+                support.setStreamOff(false);
+            ClusterI cluster2 = cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support);
+            if (cluster != cluster2)
+                throw new DatabaseException("Internal error. Contact application support.");
+            return cluster;
+        }
+//        check();
+        if (ret) {
+            support.addStatement(this);
+            return this;
+        } else {
+            support.cancelStatement(this);
+            return null;
+        }
+       
+    }
+
+    @Override
+    public ClusterI addRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support) throws DatabaseException {
+       
+        if (DEBUG)
+            System.out.println("add rk=" + sResourceKey + " pk=" + pResourceKey + " ok=" + oResourceKey);
+        
+        if(proxy) {
+               throw new UnsupportedOperationException();
+//            ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey);
+//            return cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support);
+        }
+
+        //        check();
+        boolean ret;
+        try {
+            short sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION);
+            short pri = getReferenceOrCreateIfForeign(pResourceKey, support, ClusterStream.NULL_OPERATION);
+            short ori = getReferenceOrCreateIfForeign(oResourceKey, support, ClusterStream.NULL_OPERATION);
+            ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+            ret = addRelationInternal(sri, pri, ori, completeType);
+            calculateModifiedId();
+        } catch (OutOfSpaceException e) {
+            boolean streamOff = support.getStreamOff();
+            if (!streamOff) {
+                support.cancelStatement(this);
+                support.setStreamOff(true);
+            }
+            ClusterI cluster = toBig(clusterSupport);
+            if (!streamOff)
+                support.setStreamOff(false);
+            ClusterI cluster2 = cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support);
+            if (cluster != cluster2)
+                throw new DatabaseException("Internal error. Contact application support.");
+            return cluster;
+        }
+//        check();
+        if (ret) {
+            support.addStatement(this);
+            return this;
+        } else {
+            support.cancelStatement(this);
+            return null;
+        }
+    }
+    @Override
+    public boolean removeRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support)
+    throws DatabaseException {
+        //        check();
+        short sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.REMOVE_OPERATION);
+        short pri = getInternalReferenceOrZeroAnd(pResourceKey, support, ClusterStream.NULL_OPERATION);
+        short ori = getInternalReferenceOrZeroAnd(oResourceKey, support, ClusterStream.NULL_OPERATION);
+        boolean ret = false;
+        if (0 != pri && 0 != ori) {
+            ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+            ret = removeRelationInternal(sri, pri, ori, completeType, support);
+            calculateModifiedId();
+        }
+        if (ret)
+            support.removeStatement(this);
+        else
+            support.cancelStatement(this);
+        //        check();
+        return ret;
+    }
+    @Override
+    public void denyRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support)
+    throws DatabaseException {
+        short s = checkResourceKeyIsOursAndGetResourceIndexIf(sResourceKey, support);
+        ResourceReferenceAndCluster p = checkResourceKeyAndGetResourceIndexIf(pResourceKey, support);
+        ResourceReferenceAndCluster o = checkResourceKeyAndGetResourceIndexIf(oResourceKey, support);
+        if (0 == s || 0 == p.reference || 0 == o.reference)
+            return;
+        //        check();
+        ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+        boolean ret = removeRelationInternal(s, p.reference, o.reference, completeType, support);
+        if (ret) {
+            support.addStatementIndex(this, sResourceKey, getClusterUID(), ClusterChange.REMOVE_OPERATION);
+            support.addStatementIndex(this, pResourceKey, p.clusterUID, ClusterStream.NULL_OPERATION);
+            support.addStatementIndex(this, oResourceKey, o.clusterUID, ClusterStream.NULL_OPERATION);
+            support.removeStatement(this);
+        }
+        calculateModifiedId();
+        //        check();
+        return;
+    }
+    @Override
+    public InputStream getValueStream(int resourceKey, ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.getValue " + resourceKey);
+        int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+        try {
+            byte[] buffer = resourceTable.getValue(valueTable, resourceIndex);
+            if(buffer == null) return null;
+            return new ByteArrayInputStream(buffer);
+        } catch (ExternalValueException e) {
+            return support.getValueStreamEx(resourceIndex, clusterUID.second);
+        }
+    }
+    @Override
+    public byte[] getValue(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.getValue " + resourceKey);
+        int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+        try {
+            return resourceTable.getValue(valueTable, resourceIndex);
+        } catch (ExternalValueException e) {
+               return clusterSupport.impl.getResourceFile(clusterUID.asBytes(), resourceIndex);
+            //return support.getValueEx(resourceIndex, clusterUID.second);
+        }
+    }
+    @Override
+    public boolean hasValue(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReference(resourceKey);
+        return resourceTable.hasValue(resourceIndex);
+    }
+    @Override
+    public boolean removeValue(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReferenceAnd(resourceKey, support, ClusterChange.DELETE_OPERATION);
+        support.removeValue(this);
+        calculateModifiedId();
+        return resourceTable.removeValue(valueTable, resourceIndex);
+    }
+    @Override
+    public ClusterI setValue(int rResourceId, byte[] value, int length, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.SET_OPERATION);
+        support.setValue(this, getClusterId(), value, length);
+        try {
+            resourceTable.setValue(valueTable, resourceIndex, value, length);
+            calculateModifiedId();
+            return this;
+        } catch (OutOfSpaceException e) {
+            boolean streamOff = support.getStreamOff();
+            if (!streamOff)
+                support.setStreamOff(true);
+            ClusterI cluster = toBig(support);
+            cluster.setValue(rResourceId, value, length, support);
+            if (!streamOff)
+                support.setStreamOff(false);
+            return cluster;
+        }
+    }
+    @Override
+    public ClusterI modiValueEx(int rResourceId, long voffset, int length, byte[] value, int offset, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.MODI_OPERATION);
+        support.modiValue(this, getClusterId(), voffset, length, value, offset);
+        resourceTable.setValueEx(valueTable, resourceIndex);
+        calculateModifiedId();
+        return this;
+    }
+    @Override
+    public byte[] readValueEx(int rResourceId, long voffset, int length, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReference(rResourceId);
+        boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex);
+        if (!isExternal)
+            throw new DatabaseException("ClusterI.readValue supported only for external value. Resource key=" + rResourceId);
+        return support.getValueEx(resourceIndex, getClusterId(), voffset, length);
+    }
+    @Override
+    public boolean isValueEx(int resourceKey) throws DatabaseException {
+        int resourceIndex = getLocalReference(resourceKey);
+       return resourceTable.isValueEx(valueTable, resourceIndex);
+    }
+    @Override
+    public long getValueSizeEx(int rResourceId, ClusterSupport support)
+    throws DatabaseException, ExternalValueException {
+        int resourceIndex = getLocalReference(rResourceId);
+        boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex);
+        if (!isExternal)
+            throw new ExternalValueException("ClusterI.getValueSizeEx is supported only for external values. Resource key=" + rResourceId);
+        return support.getValueSizeEx(resourceIndex, getClusterId());
+    }
+    @Override
+    public void setValueEx(int rResourceId)
+    throws DatabaseException {
+        int resourceIndex = getLocalReference(rResourceId);
+        resourceTable.setValueEx(valueTable, resourceIndex);
+    }
+    @Override
+    public int createResource(ClusterSupport support)
+    throws DatabaseException {
+
+        if(proxy) {
+               throw new UnsupportedOperationException();
+//             ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey);
+//            return cluster.createResource(support);
+        }
+        
+        short resourceIndex = resourceTable.createResource();
+        calculateModifiedId();
+        if(DebugPolicy.REPORT_RESOURCE_ID_ALLOCATION)
+            System.out.println("[RID_ALLOCATION]: ClusterSmall[" + clusterId + "] allocates " + resourceIndex);
+        support.createResource(this, resourceIndex, getClusterId());
+        return ClusterTraits.createResourceKey(clusterKey, resourceIndex);
+    }
+    @Override
+    public boolean hasResource(int resourceKey, ClusterSupport support) {
+        int clusterKey = ClusterTraitsBase.getClusterKeyFromResourceKeyNoThrow(resourceKey);
+        if (this.clusterKey != clusterKey) // foreign resource
+            return false;
+        int resourceIndex;
+        try {
+            resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        } catch (DatabaseException e) {
+            return false;
+        }
+        return resourceIndex > 0 && resourceIndex <= resourceTable.getTableCount();
+    }
+    @Override
+    public int getNumberOfResources(ClusterSupport support)
+    throws DatabaseException  {
+        
+        if(proxy) {
+               throw new UnsupportedOperationException();
+//            ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey);
+//            return cluster.getNumberOfResources(support);
+        }
+        
+        return resourceTable.getUsedSize();
+    }
+
+    public int getNumberOfResources() {
+        
+        if(proxy) throw new IllegalStateException();
+        
+        return resourceTable.getUsedSize();
+        
+    }
+
+    @Override
+    public long getUsedSpace() {
+        if(isEmpty()) return 0;
+        long rt = resourceTable.getTableCapacity() * 8 + 8; // (8 = cluster id)
+        long ft = foreignTable.getTableCapacity() * 8;
+        long pt = predicateTable.getTableCapacity() * 4;
+        long ot = objectTable.getTableCapacity() * 4;
+        long ct = completeTable.getTableCapacity() * 4;
+        long vt = valueTable.getTableCapacity() * 1;
+        long cm = clusterMap.getUsedSpace();
+        return rt + ft + pt + ot + ct + vt + cm;
+    }
+    @Override
+    public boolean isEmpty() {
+        if(resourceTable == null) return true;
+        return resourceTable.getTableCount() == 0;
+    }
+    @Override
+    public void printDebugInfo(String message, ClusterSupport support)
+    throws DatabaseException {
+        throw new DatabaseException("Not implemented!");
+    }
+    private short getInternalReferenceOrZero2(int resourceKey, ClusterSupport support) throws DatabaseException {
+        int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+        if (!ClusterTraitsBase.isCluster(clusterBits, resourceKey)) {
+            return clusterMap.getForeignReferenceOrZero(resourceKey);
+        } else {
+            return (short)resourceIndex;
+        }
+    }
+    private short getInternalReferenceOrZeroAnd(int resourceKey, ClusterSupport support, byte op)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) { // foreign resource
+            ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey);
+            short foreignRef = clusterMap.getForeignReferenceOrZero(resourceKey);
+            support.addStatementIndex(this, resourceKey, clusterUID, op);
+            return foreignRef;
+        }
+        support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+        return (short)resourceIndex;
+    }
+    private final short getLocalReference(int resourceKey) throws DatabaseException {
+        return ClusterTraits.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+    }
+    private final short getLocalReferenceAnd(int resourceKey, ClusterSupport support, byte op)
+    throws DatabaseException {
+        short resourceIndex = getLocalReference(resourceKey);
+        support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+        return resourceIndex;
+    }
+    private short checkResourceKeyIsOursAndGetResourceIndexIf(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        int clusterShortId = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterShortId)
+            return 0;
+        int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        return (short)resourceIndex;
+    }
+    private short getReferenceOrCreateIfForeign(int resourceKey, ClusterUID clusterUID, ClusterSupport support, byte op)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        short resourceIndex = (short)ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) {
+            support.addStatementIndex(this, resourceKey, clusterUID, op);
+            short ref = clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID);
+            return ref;
+        }
+        support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+        return resourceIndex;
+    }
+    private short getReferenceOrCreateIfForeign(int resourceKey, ClusterSupport support, byte op)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        short resourceIndex = (short)ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) {
+            ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey);
+            support.addStatementIndex(this, resourceKey, clusterUID, op);
+            short ref = clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID);
+            return ref;
+        }
+        support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+        return resourceIndex;
+    }
+    private class ResourceReferenceAndCluster {
+        ResourceReferenceAndCluster(short reference, ClusterUID clusterUID) {
+            this.reference = reference;
+            this.clusterUID = clusterUID;
+        }
+        public final short reference;
+        public final ClusterUID clusterUID;
+    }
+    private ResourceReferenceAndCluster checkResourceKeyAndGetResourceIndexIf(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        short resourceIndex = (short)ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) { // foreign resource
+            ClusterI foreignCluster = support.getClusterByClusterKey(clusterKey);
+            ClusterUID clusterUID = foreignCluster.getClusterUID();
+            short ref = clusterMap.getForeignReferenceOrZero(resourceKey);
+            return new ResourceReferenceAndCluster(ref, clusterUID);
+        }
+        return new ResourceReferenceAndCluster(resourceIndex, getClusterUID());
+    }
+
+    static long fTime = 0;
+    
+    @Override
+    final public int execute(int resourceReference) throws DatabaseException {
+        short resourceRef = (short)resourceReference;
+        int key;
+        if (ClusterTraitsSmall.resourceRefIsLocal(resourceRef)) {
+            key = clusterBits | resourceRef;
+        } else {
+            short foreignIndex = ClusterTraitsSmall.resourceRefGetForeignIndex((short)resourceRef);
+            //long start = System.nanoTime();
+            ResourceUID resourceUID = foreignTable.getResourceUID(foreignIndex);
+            int clusterKey = clusterSupport.getClusterKeyByClusterUIDOrMake(resourceUID.asCID());
+//            ClusterBase cluster = clusterSupport.getClusterByClusterUIDOrMake(resourceUID.asCID());
+            key = ClusterTraitsBase.createResourceKey(clusterKey, resourceUID.getIndex());
+            //fTime += System.nanoTime() - start;
+            //System.err.println("fTime: " + 1e-9*fTime);
+        }
+        if (DEBUG)
+            System.out.println("ClusterSmall.execute key=" + key);
+        return key;
+    }
+
+    private boolean addRelationInternal(short sReference, short pReference, short oReference, ClusterI.CompleteTypeEnum completeType)
+    throws DatabaseException {
+        int predicateIndex = resourceTable.addStatement(sReference, pReference, oReference, predicateTable, objectTable, completeType, completeTable);
+        if (0 == predicateIndex)
+            return true; // added to resourceTable
+        else if (0 > predicateIndex)
+            return false; // old complete statement
+        int newPredicateIndex = predicateTable.addPredicate(predicateIndex, 0xFFFF & pReference, 0xFFFF & oReference, objectTable);
+        if (0 == newPredicateIndex)
+            return false;
+        if (predicateIndex != newPredicateIndex)
+            resourceTable.setPredicateIndex(sReference, newPredicateIndex);
+        return true;
+    }
+    private boolean removeRelationInternal(int sResourceIndex, short pResourceIndex,
+            short oResourceIndex, ClusterI.CompleteTypeEnum completeType, ClusterSupport support)
+    throws DatabaseException {
+        int predicateIndex = resourceTable.getPredicateIndex(sResourceIndex);
+        if (0 == predicateIndex || ClusterI.CompleteTypeEnum.NotComplete != completeType)
+            return resourceTable.removeStatementFromCache(sResourceIndex,
+                    pResourceIndex, oResourceIndex, completeType, completeTable);
+        PredicateTable.Status ret = predicateTable.removePredicate(predicateIndex, 0xFFFF & pResourceIndex, 0xFFFF & oResourceIndex, objectTable);
+        switch (ret) {
+        case NothingRemoved:
+            return false;
+        case PredicateRemoved: {
+            if (0 == predicateTable.getPredicateSetSize(predicateIndex))
+                resourceTable.setPredicateIndex(sResourceIndex, 0);
+            // intentional fall-through to the default case
+        } default:
+            break;
+        }
+        resourceTable.removeStatement(sResourceIndex,
+                pResourceIndex, oResourceIndex,
+                completeType, completeTable,
+                predicateTable, objectTable, support);
+        return true;
+    }
+    @Override
+    public void load() {
+        throw new Error("Not supported.");
+    }
+
+    @Override
+    public void load(Callback<DatabaseException> r) {
+        throw new Error("Not supported.");
+    }
+
+    public boolean contains(int resourceKey) {
+        return ClusterTraitsBase.isCluster(clusterBits, resourceKey);
+    }
+    @Override
+    public void load(final ClusterSupport support, final Runnable callback) {
+
+       throw new UnsupportedOperationException();
+
+//     try {
+//            clusterTable.load2(clusterId, clusterKey);
+//            callback.run();
+//        } catch (DatabaseException e) {
+//            e.printStackTrace();
+//        }
+        
+    }
+    @Override
+    public ClusterI getClusterByResourceKey(int resourceKey,
+            ClusterSupport support) {
+        throw new Error();
+    }
+    @Override
+    public void increaseReferenceCount(int amount) {
+        throw new Error();
+    }
+    @Override
+    public void decreaseReferenceCount(int amount) {
+        throw new Error();
+    }
+    @Override
+    public int getReferenceCount() {
+        throw new Error();
+    }
+    @Override
+    public void releaseMemory() {
+    }
+    @Override
+    public void compact() {
+        clusterMap.compact();
+    }
+    @Override
+    public boolean isLoaded() {
+        return !proxy;
+    }
+
+//    public ClusterImpl tryLoad(SessionImplSocket sessionImpl) {
+//
+//     throw new UnsupportedOperationException();
+//        assert(Constants.ReservedClusterId != clusterId);
+//
+//        return clusterTable.tryLoad(clusterId, clusterKey);
+//        
+//    }
+    
+
+    @Override
+    public ClusterBig toBig(ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG) {
+            System.out.println("DEBUG: toBig cluster=" + clusterId);
+            new Exception().printStackTrace();
+        }
+        ClusterBig big = new ClusterBig(clusterSupport, getClusterUID(), clusterKey, (ClusterSupport2)support);
+        big.cc = this.cc;
+//        if(big.cc != null)
+//             big.cc.clusterImpl = this;
+        resourceTable.toBig(big, support, this);
+        big.foreignLookup = this.foreignLookup;
+        big.change = this.change;
+        this.cc = null;
+        this.foreignLookup = null;
+        this.change = null;
+        return big;
+    }
+    
+    @Override
+    public ClusterTypeEnum getType() {
+        return ClusterTypeEnum.SMALL;
+    }
+    @Override
+    public boolean getImmutable() {
+        int status = resourceTable.getClusterStatus();
+        return (status & ClusterStatus.ImmutableMaskSet) == ClusterStatus.ImmutableMaskSet;
+    }
+    @Override
+    public void setImmutable(boolean immutable, ClusterSupport support) {
+        if(resourceTable != null) {
+            int status = resourceTable.getClusterStatus();
+            if (immutable)
+                status |= ClusterStatus.ImmutableMaskSet;
+            else
+                status &= ClusterStatus.ImmutableMaskClear;
+            resourceTable.setClusterStatus(status);
+        }
+        support.setImmutable(this, immutable);
+    }
+    
+    @Override
+    public String toString() {
+        try {
+            final TIntHashSet set = new TIntHashSet();
+            TIntShortHashMap map = foreignTable.getResourceHashMap();
+            map.forEachKey(new TIntProcedure() {
+                @Override
+                public boolean execute(int value) {
+                    set.add(value & 0xfffff000);
+                    return true;
+                }
+            });
+            return "ClusterSmall[" + getClusterUID() + " - " + getClusterId() + " - " + getNumberOfResources() + " - " + foreignTable.getResourceHashMap().size() + " - " + set.size() + "]";
+        } catch (DatabaseException e) {
+            return "ClusterSmall[" + getNumberOfResources() + "]";
+        }
+    }
+    
+    // Memory map
+    // bytes (b) | headers(i) | predicateTable (i) | objectTable (i) | completeTable (i) | resourceTable (l) | foreignTable (l)
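+    // As laid out by storeBytes() below:
+    //   3 x 4-byte LE sizes (byteSize, intSize, longSize)
+    //   | valueTable bytes
+    //   | int header | predicateTable | objectTable | completeTable
+    //   | long header (-1, version, 0, clusterUID.second) | resourceTable | foreignTable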
+
+    @Override
+    public byte[] storeBytes() throws IOException {
+
+        int byteSize = valueTable.getTableSize();
+        int longSize = LONG_HEADER_SIZE + resourceTable.getTableSize() + foreignTable.getTableSize();
+        int intSize = INT_HEADER_SIZE + predicateTable.getTableSize() + objectTable.getTableSize() + completeTable.getTableSize();
+
+        byte[] raw = new byte[12 + byteSize + 8*longSize + 4*intSize];
+
+        int[] currentHeader = Arrays.copyOf(headerTable, INT_HEADER_SIZE);
+
+        Bytes.writeLE(raw, 0, byteSize);
+        Bytes.writeLE(raw, 4, intSize);
+        Bytes.writeLE(raw, 8, longSize);
+
+        int rawPos = valueTable.storeBytes(raw, 0, 12);
+
+        int intBase = rawPos;
+
+        rawPos += 4*INT_HEADER_SIZE;
+        rawPos = predicateTable.storeBytes(raw, (rawPos-intBase)>>2, rawPos);
+        rawPos = objectTable.storeBytes(raw, (rawPos-intBase)>>2, rawPos);
+        rawPos = completeTable.storeBytes(raw, (rawPos-intBase)>>2, rawPos);
+
+        int longBase = rawPos;
+
+        rawPos += 8*LONG_HEADER_SIZE;
+        rawPos = resourceTable.storeBytes(raw, (rawPos-longBase)>>3, rawPos);
+        rawPos = foreignTable.storeBytes(raw, (rawPos-longBase)>>3, rawPos);
+
+        Bytes.writeLE8(raw, longBase, -1);
+        Bytes.writeLE8(raw, longBase+8, LONG_HEADER_VERSION);
+        Bytes.writeLE8(raw, longBase+16, 0);
+        Bytes.writeLE8(raw, longBase+24, clusterUID.second);
+
+        // write header
+        for (int i = 0; i < INT_HEADER_SIZE; i++) {
+            int v = headerTable[i];
+            Bytes.writeLE(raw, intBase, v);
+            intBase += 4;
+        }
+
+        for (int i = 0; i < INT_HEADER_SIZE; i++)
+            headerTable[i] = currentHeader[i];
+
+        return raw;
+    }
+
+    @Override
+    public ClusterTables store() throws IOException {
+
+        ClusterTables result = new ClusterTables();
+
+        int[] currentHeader = Arrays.copyOf(headerTable, INT_HEADER_SIZE);
+
+        int byteSize = valueTable.getTableSize();
+        byte[] byteBytes = new byte[byteSize];
+        valueTable.store(byteBytes, 0);
+
+        result.bytes = byteBytes;
+
+        int longSize = LONG_HEADER_SIZE + resourceTable.getTableSize() + foreignTable.getTableSize();
+        long[] longBytes = new long[longSize];
+
+        longBytes[0] = -1;
+        longBytes[1] = LONG_HEADER_VERSION;
+        longBytes[2] = 0;
+        longBytes[3] = clusterUID.second;
+
+        int longPos = resourceTable.store(longBytes, LONG_HEADER_SIZE);
+        foreignTable.store(longBytes, longPos);
+
+        result.longs = longBytes;
+
+        int intSize = INT_HEADER_SIZE + predicateTable.getTableSize() + objectTable.getTableSize() + completeTable.getTableSize();
+        int[] intBytes = new int[intSize];
+        int intPos = INT_HEADER_SIZE;
+        intPos = predicateTable.store(intBytes, intPos);
+        intPos = objectTable.store(intBytes, intPos);
+        intPos = completeTable.store(intBytes, intPos);
+        // write header
+        for (int i = 0; i < INT_HEADER_SIZE; i++) {
+            int v = headerTable[i];
+            intBytes[i] = v;
+            //Bytes.writeLE(intBytes, i<<2, v);
+        }
+
+        result.ints = intBytes;
+
+        for (int i = 0; i < INT_HEADER_SIZE; i++)
+            headerTable[i] = currentHeader[i];
+
+        return result;
+
+    }
+    
+    @Override
+    protected int getResourceTableCount() {
+       return resourceTable.getTableCount();
+    }
+    
+    @Override
+    public boolean getDeleted() {
+        if (deleted) return true;
+        int status = resourceTable.getClusterStatus();
+        return (status & ClusterStatus.DeletedMaskSet) == ClusterStatus.DeletedMaskSet;
+    }
+    @Override
+    public void setDeleted(boolean set, ClusterSupport support) {
+        deleted = set;
+        if(resourceTable != null) {
+            int status = resourceTable.getClusterStatus();
+            if (set)
+                status |= ClusterStatus.DeletedMaskSet;
+            else
+                status &= ClusterStatus.DeletedMaskClear;
+            resourceTable.setClusterStatus(status);
+        }
+        if (null != support)
+            support.setDeleted(this, set);
+    }
+
+    @Override
+    public Table<?> getPredicateTable() {
+        return predicateTable;
+    }
+
+    @Override
+    public Table getForeignTable() {
+        return foreignTable;
+    }
+
+    @Override
+    public int makeResourceKey(int pRef) throws DatabaseException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Table<?> getCompleteTable() {
+        return completeTable;
+    }
+
+    @Override
+    public Table<?> getValueTable() {
+        return valueTable;
+    }
+
+    @Override
+    public Table<?> getObjectTable() {
+        return objectTable;
+    }
+    
+}
+
+class ClusterStatus {
+    public static final int ImmutableMaskClear = 0xFFFFFFFE;
+    public static final int ImmutableMaskSet = 0x00000001;
+    public static final int DeletedMaskClear = 0xFFFFFFFD;
+    public static final int DeletedMaskSet = 0x00000002;
+}
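For reference, a minimal sketch (not part of the patch) of how the ClusterStatus bit masks above compose; the status value stands in for the word returned by resourceTable.getClusterStatus():

    int status = 0;
    status |= ClusterStatus.ImmutableMaskSet;   // set bit 0 -> immutable
    status |= ClusterStatus.DeletedMaskSet;     // set bit 1 -> deleted
    boolean immutable = (status & ClusterStatus.ImmutableMaskSet) != 0; // true
    boolean deleted   = (status & ClusterStatus.DeletedMaskSet) != 0;   // true
    status &= ClusterStatus.ImmutableMaskClear; // clears bit 0 only; the deleted bit stays set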
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/AcornDatabase.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/AcornDatabase.java
new file mode 100644 (file)
index 0000000..63c73dd
--- /dev/null
@@ -0,0 +1,229 @@
+package org.simantics.acorn.internal;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.DirectoryStream;
+import java.nio.file.FileVisitOption;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.EnumSet;
+import java.util.Properties;
+
+import org.simantics.acorn.GraphClientImpl2;
+import org.simantics.db.Database;
+import org.simantics.db.DatabaseUserAgent;
+import org.simantics.db.ServiceLocator;
+import org.simantics.db.common.utils.Logger;
+import org.simantics.db.server.ProCoreException;
+
+/**
+ * @author Tuukka Lehtonen
+ */
+public class AcornDatabase implements Database {
+
+    private final Path folder;
+
+    private DatabaseUserAgent userAgent;
+
+    public AcornDatabase(Path folder) {
+        this.folder = folder;
+    }
+
+    @Override
+    public DatabaseUserAgent getUserAgent() {
+        return userAgent;
+    }
+
+    @Override
+    public void setUserAgent(DatabaseUserAgent dbUserAgent) {
+        userAgent = dbUserAgent;
+    }
+
+    @Override
+    public Status getStatus() {
+        return Status.Local;
+    }
+
+    @Override
+    public File getFolder() {
+        return folder.toFile();
+    }
+
+    @Override
+    public boolean isFolderOk() {
+        return isFolderOk(folder.toFile());
+    }
+
+    @Override
+    public boolean isFolderOk(File aFolder) {
+        if (!aFolder.isDirectory())
+            return false;
+        return true;
+    }
+
+    @Override
+    public boolean isFolderEmpty() {
+        return isFolderEmpty(folder.toFile());
+    }
+
+    @Override
+    public boolean isFolderEmpty(File aFolder) {
+        Path path = aFolder.toPath();
+        if (!Files.isDirectory(path))
+            return false;
+        try (DirectoryStream<Path> folderStream = Files.newDirectoryStream(path)) {
+            return !folderStream.iterator().hasNext();
+        } catch (IOException e) {
+            Logger.defaultLogError("Failed to open folder stream. folder=" + path, e);
+            return false;
+        }
+    }
+
+    @Override
+    public void initFolder(Properties properties) throws ProCoreException {
+        try {
+            Files.createDirectories(folder);
+        } catch (IOException e) {
+            throw new ProCoreException(e);
+        }
+    }
+
+    @Override
+    public void deleteFiles() throws ProCoreException {
+        // TODO: somehow check that the acorn client is not active.
+        deleteTree(folder);
+    }
+
+    @Override
+    public void start() throws ProCoreException {
+    }
+
+    @Override
+    public boolean isRunning() throws ProCoreException {
+        return true;
+    }
+
+    @Override
+    public boolean tryToStop() throws ProCoreException {
+        return true;
+//        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void connect() throws ProCoreException {
+    }
+
+    @Override
+    public boolean isConnected() throws ProCoreException {
+        return true;
+    }
+
+    @Override
+    public String execute(String command) throws ProCoreException {
+        throw new UnsupportedOperationException("execute(" + command + ")");
+    }
+
+    @Override
+    public void disconnect() throws ProCoreException {
+    }
+
+    @Override
+    public void clone(File to, int revision, boolean saveHistory) throws ProCoreException {
+        // TODO: implement
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Path createFromChangeSets(int revision) throws ProCoreException {
+        // TODO: implement
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void deleteGuard() throws ProCoreException {
+        // TODO: implement
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Path dumpChangeSets() throws ProCoreException {
+        // TODO: implement
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void purgeDatabase() throws ProCoreException {
+        // TODO: implement
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public long serverGetTailChangeSetId() throws ProCoreException {
+        // "We have it all"
+        // But after purging we don't so beware.
+        // TODO: beware for purge
+        return 1;
+    }
+
+    @Override
+    public Session newSession(ServiceLocator locator) throws ProCoreException {
+        try {
+            return new GraphClientImpl2(this, folder, locator);
+        } catch (IOException e) {
+            throw new ProCoreException(e);
+        }
+    }
+
+    @Override
+    public Journal getJournal() throws ProCoreException {
+        // TODO: implement
+        throw new UnsupportedOperationException();
+    }
+
+    private static void deleteTree(Path path) throws ProCoreException {
+        if (!Files.exists(path))
+            return;
+
+        class Visitor extends SimpleFileVisitor<Path> {
+            @Override
+            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+                try {
+                    Files.delete(file);
+                } catch (IOException ioe) {
+                    ioe.printStackTrace();
+                    throw ioe;
+                }
+                return FileVisitResult.CONTINUE;
+            }
+            @Override
+            public FileVisitResult postVisitDirectory(Path dir, IOException e) throws IOException {
+                if (e == null) {
+                    try {
+                        Files.delete(dir);
+                    } catch (IOException ioe) {
+                        ioe.printStackTrace();
+                        throw ioe;
+                    }
+                    return FileVisitResult.CONTINUE;
+                }
+                throw e;
+            }
+        }
+        try {
+            Visitor v = new Visitor();
+            EnumSet<FileVisitOption> opts = EnumSet.noneOf(FileVisitOption.class);
+            Files.walkFileTree(path, opts, Integer.MAX_VALUE, v);
+        } catch (IOException e) {
+            throw new ProCoreException("Could not delete " + path, e);
+        }
+    }
+
+    @Override
+    public String getCompression() {
+        return "LZ4";
+    }
+
+}
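A minimal lifecycle sketch (not part of the patch) for AcornDatabase above. The locator parameter is a hypothetical platform-provided ServiceLocator, and Session is assumed to resolve to the nested Database.Session type returned by newSession:

    static void open(ServiceLocator locator) throws ProCoreException {
        AcornDatabase db = new AcornDatabase(java.nio.file.Paths.get("acorn-db"));
        db.initFolder(new java.util.Properties());  // creates the directory tree if missing
        if (db.isFolderOk()) {
            Database.Session session = db.newSession(locator); // backed by GraphClientImpl2
            // ... use the session ...
        }
    }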
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Activator.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Activator.java
new file mode 100644 (file)
index 0000000..b6cb59b
--- /dev/null
@@ -0,0 +1,62 @@
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn.internal;
+
+import org.eclipse.core.runtime.Plugin;
+import org.osgi.framework.BundleContext;
+
+/**
+ * @author Antti Villberg
+ */
+public class Activator extends Plugin {
+
+    // The plug-in ID
+    public static final String BUNDLE_ID = "org.simantics.acorn"; //$NON-NLS-1$
+    // The shared instance
+    private static Activator plugin;
+
+    /**
+     * The constructor
+     */
+    public Activator() {
+    }
+
+    /*
+     * (non-Javadoc)
+     * @see org.eclipse.core.runtime.Plugin#start(org.osgi.framework.BundleContext)
+     */
+    @Override
+    public void start(BundleContext context) throws Exception {
+        super.start(context);
+        plugin = this;
+    }
+
+    /*
+     * (non-Javadoc)
+     * @see org.eclipse.core.runtime.Plugin#stop(org.osgi.framework.BundleContext)
+     */
+    @Override
+    public void stop(BundleContext context) throws Exception {
+        plugin = null;
+        super.stop(context);
+    }
+
+    /**
+     * Returns the shared instance
+     *
+     * @return the shared instance
+     */
+    public static Activator getDefault() {
+        return plugin;
+    }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/BijectionMap.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/BijectionMap.java
new file mode 100644 (file)
index 0000000..3de77d2
--- /dev/null
@@ -0,0 +1,119 @@
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+/*
+ * Created on Jan 21, 2005
+ * 
+ * Copyright Toni Kalajainen
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.simantics.acorn.internal;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * A bijection map is a map with no distinct keys and values, only 1:1
+ * pairings between values. The two sides of a pairing are called the
+ * left and the right value.
+ *
+ * Each value can occur at most once on its side.
+ *
+ * @author Toni Kalajainen
+ */
+public class BijectionMap<L, R> {
+
+    /** The keys of tableLeft are left-side-values and
+     * values are right-side-values */
+    private final Map<L, R> tableLeft = new HashMap<L, R>();
+    /** The keys of tableRight are right-side-values and
+     * values on it are left-side-values */
+    private final Map<R, L> tableRight = new HashMap<R, L>();
+
+    public boolean containsLeft(L leftValue)
+    {
+        return tableLeft.containsKey(leftValue);
+    }
+
+    public boolean containsRight(R rightValue)
+    {
+        return tableRight.containsKey(rightValue);
+    }
+
+    public void map(L leftValue, R rightValue)
+    {
+        // Remove possible old mappings from both sides so that the
+        // map remains a bijection even when both values were mapped before
+        R oldRight = tableLeft.remove(leftValue);
+        if (oldRight != null)
+            tableRight.remove(oldRight);
+        L oldLeft = tableRight.remove(rightValue);
+        if (oldLeft != null)
+            tableLeft.remove(oldLeft);
+
+        tableLeft.put(leftValue, rightValue);
+        tableRight.put(rightValue, leftValue);
+    }
+
+    public int size()
+    {
+        return tableLeft.size();
+    }
+
+    public L getLeft(R rightValue) {
+        return tableRight.get(rightValue);
+    }
+
+    public R getRight(L leftValue) {
+        return tableLeft.get(leftValue);
+    }
+
+    public R removeWithLeft(L leftValue) {
+        R rightValue = tableLeft.remove(leftValue);
+        if (rightValue!=null)
+            tableRight.remove(rightValue);
+        return rightValue;
+    }
+
+    public L removeWithRight(R rightValue) {
+        L leftValue = tableRight.remove(rightValue);
+        if (leftValue!=null)
+            tableLeft.remove(leftValue);
+        return leftValue;
+    }
+
+    public Set<L> getLeftSet() {
+        return tableLeft.keySet();
+    }
+
+    public Set<R> getRightSet() {
+        return tableRight.keySet();
+    }
+
+    public void clear() {
+        tableLeft.clear();
+        tableRight.clear();
+    }
+}
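A small usage sketch (not part of the patch) of the 1:1 semantics documented above, including the eviction behaviour when a value is re-mapped:

    BijectionMap<String, Integer> m = new BijectionMap<String, Integer>();
    m.map("a", 1);
    m.map("b", 2);
    m.map("a", 2);        // re-mapping evicts both old pairs (a,1) and (b,2)
    m.getRight("a");      // -> 2
    m.getLeft(2);         // -> "a"
    m.containsLeft("b");  // -> false: "b" lost its partner
    m.size();             // -> 1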
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Change.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Change.java
new file mode 100644 (file)
index 0000000..305e315
--- /dev/null
@@ -0,0 +1,70 @@
+package org.simantics.acorn.internal;
+
+import org.simantics.db.service.ClusterUID;
+
+final public class Change {
+    
+    byte op0;
+    int key0;
+    int key1;
+    int key2;
+    ClusterUID clusterUID1;
+    ClusterUID clusterUID2;
+    byte[] lookup1;
+    byte[] lookup2;
+    byte lookIndex1;
+    byte lookIndex2;
+    int lastArg = 0;
+
+    @Override
+    public String toString() {
+        return "Change " + (key0&0xffff) + " " + (key1&0xffff) + " " + (key2&0xffff) + " " + clusterUID2 + " " + clusterUID2;
+    }
+    
+    public final void init() {
+        lastArg = 0;
+    }
+
+    public final void initValue() {
+        lastArg = 0;
+    }
+
+    final void addStatementIndex0(int key, byte op) {
+        assert (op != 0);
+        key0 = key;
+        op0 = op;
+    }
+
+    final void addStatementIndex1(int key, ClusterUID clusterUID, byte lookIndex, byte[] lookup) {
+        key1 = key;
+        clusterUID1 = clusterUID;
+        lookIndex1 = lookIndex;
+        lookup1 = lookup;
+//        if(lookIndex > 0)
+//            System.err.println("statementIndex1 " + pos + " " + lookIndex);
+    }
+
+    final void addStatementIndex2(int key, ClusterUID clusterUID, byte lookIndex, byte[] lookup) {
+        key2 = key;
+        clusterUID2 = clusterUID;
+        lookIndex2 = lookIndex;
+        lookup2 = lookup;
+    }
+
+    final public void addStatementIndex(int key, ClusterUID clusterUID, byte op) {
+
+        // new Exception("lastArg=" + lastArg).printStackTrace();
+
+        assert (lastArg < 3);
+
+        if (0 == lastArg)
+            addStatementIndex0(key, op);
+        else if (1 == lastArg)
+            addStatementIndex1(key, clusterUID, (byte)0, null);
+        else if (2 == lastArg)
+            addStatementIndex2(key, clusterUID, (byte)0, null);
+
+        lastArg++;
+
+    }
+}
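A hedged sketch (not part of the patch) of how the three statement-index slots of Change are filled for a single ADD statement; the keys and cluster UIDs below are hypothetical placeholders, and same-package access is assumed:

    int subjectKey = 0x1001, predicateKey = 0x2001, objectKey = 0x3001; // hypothetical keys
    ClusterUID predicateUID = ClusterUID.Null, objectUID = ClusterUID.Null; // placeholders only
    Change c = new Change();
    c.init();                                                           // lastArg = 0
    c.addStatementIndex(subjectKey, null, ClusterChange.ADD_OPERATION); // slot 0: key + op (UID ignored)
    c.addStatementIndex(predicateKey, predicateUID, ClusterStream.NULL_OPERATION); // slot 1: predicate
    c.addStatementIndex(objectKey, objectUID, ClusterStream.NULL_OPERATION);       // slot 2: object
    // lastArg is now 3; a fourth call would trip the assertion.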
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange.java
new file mode 100644 (file)
index 0000000..b1fbb5d
--- /dev/null
@@ -0,0 +1,735 @@
+package org.simantics.acorn.internal;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.simantics.acorn.internal.ClusterStream.ClusterEnum;
+import org.simantics.acorn.internal.ClusterStream.Data;
+import org.simantics.acorn.internal.ClusterStream.DebugInfo;
+import org.simantics.acorn.internal.ClusterStream.OpEnum;
+import org.simantics.acorn.internal.ClusterStream.StmEnum;
+import org.simantics.compressions.Compressions;
+import org.simantics.db.exception.RuntimeDatabaseException;
+import org.simantics.db.impl.ClusterTraitsBase;
+import org.simantics.db.procore.cluster.ClusterTraits;
+import org.simantics.db.procore.cluster.ClusterTraitsSmall;
+import org.simantics.db.service.Bytes;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.datastructures.Pair;
+
+import gnu.trove.map.hash.TIntByteHashMap;
+import gnu.trove.map.hash.TLongIntHashMap;
+
+
+public final class ClusterChange {
+       
+    public static final int VERSION = 1;
+    public static final byte ADD_OPERATION = 2;
+    public static final byte REMOVE_OPERATION = 3;
+    public static final byte DELETE_OPERATION = 5;
+
+    public static final boolean DEBUG = false;
+    public static final boolean DEBUG_STAT = false;
+    public static final boolean DEBUG_CCS = false;
+
+    private static DebugInfo sum = new DebugInfo();
+
+    public final TIntByteHashMap foreignTable = new TIntByteHashMap();
+    private final DebugInfo info;
+//    private final GraphSession graphSession;
+    public final ClusterUID clusterUID;
+    private final int SIZE_OFFSET;
+//    private final int HEADER_SIZE;
+    // How much of the buffer may be filled before the stream is flushed to the server. The bigger the better.
+    public static final int MAX_FIXED_BYTES = (1<<15) + (1<<14);
+    private static final int MAX_FIXED_OPERATION_SIZE = 17 + 16;
+    private static final int MAX_FIXED_OPERATION_SIZE_AND_ROOM_FOR_ERROR = MAX_FIXED_OPERATION_SIZE + 36;
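+    // Note: MAX_FIXED_BYTES = (1<<15) + (1<<14) = 49152 bytes (48 KiB).
+    // checkBufferSpace(Change) flushes once fewer than
+    // MAX_FIXED_OPERATION_SIZE_AND_ROOM_FOR_ERROR (= 33 + 36 = 69) bytes remain,
+    // so a single encoded operation can never overrun the buffer.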
+    private int nextSize = MAX_FIXED_BYTES;
+    int byteIndex = 0;
+    private byte[] bytes = null; // Operation data.
+//    private final byte[] header;
+    private boolean flushed = false;
+    private ArrayList<Pair<ClusterUID, byte[]>> stream;
+
+//    public ClusterImpl clusterImpl;
+
+    public ClusterChange( ArrayList<Pair<ClusterUID, byte[]>>  stream, ClusterUID clusterUID) {
+        this.clusterUID = clusterUID;
+        long[] longs = new long[ClusterUID.getLongLength()];
+        clusterUID.toLong(longs, 0);
+        this.stream = stream;
+//        this.graphSession = clusterStream.graphSession;
+        info = new DebugInfo();
+//        HEADER_SIZE = 8 + longs.length * 8;
+//        header = new byte[HEADER_SIZE];
+        SIZE_OFFSET = 0;
+//        Bytes.writeLE(header, SIZE_OFFSET + 0, 0); // Correct byte vector size is set with setHeaderVectorSize() later.
+//        Bytes.writeLE(header, SIZE_OFFSET + 4, VERSION);
+//        for (int i=0, offset=8; i<longs.length; ++i, offset+=8)
+//            Bytes.writeLE(header, offset, longs[i]);
+        //initBuffer();
+//        this.clusterStream = clusterStream;
+//        this.clusterChange2 = new ClusterChange2(clusterUID, clusterImpl);
+//        clusterStream.changes.add(this);
+    }
+
+//    private void setHeaderVectorSize(int size) {
+//        if (size < 0)
+//            throw new RuntimeDatabaseException("Change set size can't be negative.");
+//        int len = size + HEADER_SIZE - SIZE_OFFSET - 4;
+//        Bytes.writeLE(header, SIZE_OFFSET, len);
+//    }
+    @Override
+    public String toString() {
+        return super.toString() + " cluster=" + clusterUID + " off=" + byteIndex;
+    }
+    final public void initBuffer() {
+        flushed = false;
+        if (null == bytes || bytes.length < nextSize) {
+            bytes = new byte[nextSize];
+            nextSize = MAX_FIXED_BYTES;
+        }
+        byteIndex = 0;
+    }
+    private final void clear() {
+//     if(clusterImpl != null && clusterImpl.change != null)
+//             clusterImpl.change.init();
+        foreignTable.clear();
+        //initBuffer();
+        bytes = null;
+        byteIndex = 0;
+        if (DEBUG_STAT)
+            info.clear();
+    }
+    private final void checkInitialization() {
+//        if (0 == byteIndex)
+//            clusterStream.changes.addChange(this);
+    }
+    private final void printlnd(String s) {
+        System.out.println("DEBUG: ClusterChange " + clusterUID + ": " + s);
+    }
+    public final void createResource(short index) {
+        checkInitialization();
+        if (DEBUG)
+            printlnd("New ri=" + index + " offset=" + byteIndex);
+        if (index > ClusterTraits.getMaxNumberOfResources())
+            throw new RuntimeDatabaseException("Illegal resource index=" + index + ".");
+        checkBufferSpace(null);
+        bytes[byteIndex++] = (byte)52;
+        bytes[byteIndex++] = (byte)index;
+        bytes[byteIndex++] = (byte)(index>>>8);
+    }
+    void flushCollect(Change c) {
+       throw new UnsupportedOperationException();
+//        flushInternal(graphSession, clusterUID);
+//        if (DEBUG)
+//            printlnd("Cluster change data was flushed.");
+//        if (null != c) {
+//            if (DEBUG)
+//                printlnd("Clearing lookup for " + c.toString());
+//            c.lookup1 = null;
+//            c.lookup2 = null;
+//        }
+//        if (null != clusterImpl) {
+//            clusterImpl.foreignLookup = null;
+//        }
+    }
+
+    private final boolean checkBufferSpace(Change c) {
+//        clusterStream.changes.checkFlush();
+        if(bytes == null) initBuffer();
+        if (MAX_FIXED_BYTES - byteIndex > MAX_FIXED_OPERATION_SIZE_AND_ROOM_FOR_ERROR) {
+            return false;
+        }
+        flush();
+//        initBuffer();
+        return true;
+    }
+
+    private final void checkBufferSpace(int size) {
+        if(bytes == null) initBuffer();
+        if (bytes.length - byteIndex >= size)
+            return;
+        nextSize = Math.max(MAX_FIXED_BYTES, size);
+        flush();
+        initBuffer();
+    }
+
+    public final void addChange(Change c) {
+        checkInitialization();
+        checkBufferSpace(c);
+        byte operation = c.op0;
+        if(operation == ADD_OPERATION)
+            addStm(c, StmEnum.Add);
+        else if (operation == REMOVE_OPERATION)
+            addStm(c, StmEnum.Remove);
+        else if (operation == DELETE_OPERATION) {
+            if (DEBUG)
+                printlnd("Delete value offset=" + byteIndex + " " + c);
+            addByte(OpEnum.Delete.getOrMask());
+            addShort(ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0));
+        }
+        c.lastArg = 0;
+    }
+
+    private final void addForeignLong(short index, ClusterUID clusterUID) {
+        byteIndex = clusterUID.toByte(bytes, byteIndex);
+        bytes[byteIndex++] = (byte)(index & 0xFF);
+        bytes[byteIndex++] = (byte)(index >>> 8);
+    }
+
+    private final ClusterEnum addIndexAndCluster(int key, ClusterUID clusterUID, byte lookIndex, byte[] lookup) {
+        assert(!clusterUID.equals(ClusterUID.Null));
+        short resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(key);
+        if (clusterUID.equals(this.clusterUID)) {
+            bytes[byteIndex++] = (byte)(resourceIndex & 0xFF);
+            bytes[byteIndex++] = (byte)(resourceIndex >>> 8);
+            return ClusterEnum.Local;
+        }
+
+        byte foreign = 0;
+        if(lookIndex > 0) {
+            if(lookup != null)
+                foreign = lookup[lookIndex];
+        } else {
+            foreign = foreignTable.get(key);
+        }
+        if (0 != foreign) {
+            if (foreign > 256)
+                throw new RuntimeDatabaseException("Internal error, contact application support. " +
+                "Too big foreign index=" + foreign + " max=256");
+            --foreign;
+            bytes[byteIndex++] = foreign;
+            return ClusterEnum.ForeignShort;
+        } else {
+            byte position = (byte) (foreignTable.size() + 1);
+            if(lookup != null)
+                lookup[lookIndex] = position;
+            foreignTable.put(key, position);
+            if (DEBUG_STAT)
+                info.sForeign = foreignTable.size();
+            if (clusterUID.equals(ClusterUID.Null))
+                throw new RuntimeDatabaseException("Internal error, contact application support. " +
+                "Cluster unique id not defined for foreign cluster.");
+            addForeignLong(resourceIndex, clusterUID);
+            return ClusterEnum.ForeignLong;
+        }
+    }
+
+    private final void addByte(byte b) {
+        bytes[byteIndex++] = b;
+    }
+
+    private final void addShort(short s) {
+        bytes[byteIndex++] = (byte)(s & 0xFF);
+        bytes[byteIndex++] = (byte)(s >>> 8);
+    }
+
+//    private final void addShort(int s) {
+//        bytes[byteIndex++] = (byte) (s & 0xFF);
+//        bytes[byteIndex++] = (byte) ((s >>> 8) & 0xFF);
+//    }
+
+    private final void addInt(int i) {
+//        System.err.println("addInt " + i + " " + i);
+        bytes[byteIndex++] = (byte) (i & 0xFF);
+        bytes[byteIndex++] = (byte) ((i >>> 8) & 0xFF);
+        bytes[byteIndex++] = (byte) ((i >>> 16) & 0xFF);
+        bytes[byteIndex++] = (byte) ((i >>> 24) & 0xFF);
+        // buffer.asIntBuffer().put(i);
+        // buffer.position(buffer.position()+4);
+    }
+
+//    private void addLong6(long l) {
+////        System.err.println("addLong " + l);
+//        bytes[byteIndex++] = (byte) (l & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 8) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 16) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 24) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 32) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 40) & 0xFF);
+//        // buffer.asLongBuffer().put(l);
+//        // buffer.position(buffer.position() + 6);
+//    }
+
+    private void addLong7(long l) {
+        bytes[byteIndex++] = (byte) (l & 0xFF);
+        bytes[byteIndex++] = (byte) ((l >>> 8) & 0xFF);
+        bytes[byteIndex++] = (byte) ((l >>> 16) & 0xFF);
+        bytes[byteIndex++] = (byte) ((l >>> 24) & 0xFF);
+        bytes[byteIndex++] = (byte) ((l >>> 32) & 0xFF);
+        bytes[byteIndex++] = (byte) ((l >>> 40) & 0xFF);
+        bytes[byteIndex++] = (byte) ((l >>> 48) & 0xFF);
+        // buffer.asLongBuffer().put(l);
+        // buffer.position(buffer.position() + 7);
+    }
+
+//    private void addLong(long l) {
+//        bytes[byteIndex++] = (byte) (l & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 8) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 16) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 24) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 32) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 40) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 48) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 56) & 0xFF);
+//    }
+    private final byte bufferPop() {
+        return bytes[--byteIndex];
+    }
+
+    final class DebugStm {
+        StmEnum e;
+        int r;
+        int p;
+        int o;
+        ClusterUID pc;
+        ClusterUID oc;
+
+        DebugStm(StmEnum e, int r, int p, ClusterUID pc, int o, ClusterUID oc) {
+            this.e = e;
+            this.r = r;
+            this.p = p;
+            this.o = o;
+            this.pc = pc;
+            this.oc = oc;
+        }
+
+        @Override
+        public String toString() {
+            short ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(r);
+            short pi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(p);
+            short oi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(o);
+            return "" + e + " rk=" + r + " ri=" + ri + " rc=" + clusterUID
+            + " pk=" + p + " pi=" + pi + " pc=" + pc
+            + " ok=" + o + " oi=" + oi + " oc=" + oc;
+        }
+
+        public String toString2() {
+            return "" + e + " r=" + r + " rc=" + clusterUID + " p=" + p
+                    + " pc=" + pc + " o=" + o + " oc=" + oc;
+        }
+
+        public String toString3() {
+            short ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(r);
+            short pi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(p);
+            short oi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(o);
+            return "" + e + " ri=" + ri
+            + " pi=" + pi + " pc=" + pc
+            + " oi=" + oi + " oc=" + oc;
+        }
+    }
+
+    private List<DebugStm> debugStms = new ArrayList<DebugStm>();
+
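+    /*
+     * Statement-layout sketch, following the bit-layout comments in
+     * ClusterStream.ClusterEnum: the stream carries
+     *   [op][predicate ref][object ref][subject low byte][0-2 packed bytes]
+     * where the op byte combines Data.mask with the subject's top index bits,
+     * and the packed tail holds the popped 6-bit high parts of any
+     * non-ForeignShort references. When both references are ForeignShort the
+     * fast path stores Add as (ri >>> 8) + 64 and Remove as (ri >>> 8) + 128
+     * directly in the op byte.
+     */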
+    @SuppressWarnings("unused")
+    private final void addStm(Change c, StmEnum stmEnum) {
+
+        if (DEBUG_STAT)
+            ++info.nStms;
+        if (DEBUG || DEBUG_CCS) {
+            DebugStm d = new DebugStm(stmEnum, c.key0, c.key1, c.clusterUID1, c.key2, c.clusterUID2);
+            if (DEBUG_CCS)
+                debugStms.add(d);
+            if (DEBUG) {
+                printlnd(d.toString3() + " offset=" + byteIndex);
+            }
+        }
+        // int opPos = buffer.position();
+        int opPos = byteIndex++;
+        // buffer.put((byte)0); // operation code
+        // addByte((byte)0);
+
+        boolean done = true;
+
+        ClusterEnum a = addIndexAndCluster(c.key1, c.clusterUID1, c.lookIndex1, c.lookup1);
+        byte ab = 0;
+
+        // ForeignShort = byte
+        // Local = short
+        // ForeignLong = 8 byte
+        if (a != ClusterEnum.ForeignShort) {
+            ab = bufferPop();
+            done = false;
+        }
+
+        ClusterEnum b = addIndexAndCluster(c.key2, c.clusterUID2, c.lookIndex2, c.lookup2);
+        byte bb = 0;
+        if (b != ClusterEnum.ForeignShort) {
+            bb = bufferPop();
+            done = false;
+        }
+
+        int ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0);
+        if (ClusterTraitsSmall.isIllegalResourceIndex(ri))
+            throw new RuntimeDatabaseException("Assertion error. Illegal resource index=" + ri);
+        bytes[byteIndex++] = (byte)ri; // index low byte
+        if(!done) {
+            Data data = ClusterEnum.getData(stmEnum, a, b);
+            int left = 6 - data.bits;
+            int op = ri >>> (8 + left);
+            ri >>>= 8;
+            ri &= (1 << left) - 1;
+            if (a != ClusterEnum.ForeignShort) {
+                ri |= ab << left;
+                left += 6;
+            }
+            if (b != ClusterEnum.ForeignShort) {
+                ri |= bb << left;
+                left += 6;
+            }
+            switch (data.bytes) {
+                default:
+                    throw new RuntimeDatabaseException("Assertion error. Illegal number of bytes=" + data.bytes);
+                case 2:
+                    bytes[byteIndex++] = (byte)(ri & 0xFF);
+                    bytes[byteIndex++] = (byte)((ri >>> 8) & 0xFF);
+                    break;
+                case 1:
+                    bytes[byteIndex++] = (byte)(ri & 0xFF);
+                    break;
+                case 0:
+                    break;
+            }
+            op |= data.mask;
+            this.bytes[opPos] = (byte)op;
+        } else {
+            if (stmEnum == StmEnum.Add)
+                bytes[opPos] = (byte)((ri >>> 8) + 64);
+            else
+                bytes[opPos] = (byte)((ri >>> 8) + 128);
+        }
+        if (DEBUG_STAT) {
+            if (a == ClusterEnum.Local && b == ClusterEnum.Local) {
+                ++info.nLocal;
+            } else if (a == ClusterEnum.Local || b == ClusterEnum.Local) {
+                ++info.nPartly;
+            } else {
+                ++info.nForeign;
+            }
+        }
+        if (foreignTable.size() > 252)
+            flush();
+//        throw new UnsupportedOperationException();
+//        flushInternal(graphSession, clusterUID);
+    }
+
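+    /*
+     * Modify-value record sketch: a 12-byte fixed header followed by the
+     * payload - op(1), resource index(2, its top two bits carry value-offset
+     * bits 56-57), offset(7, little-endian), size(2) - which is why callers
+     * reserve space via checkBufferSpace(12 + size).
+     */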
+    private final int modiValue(int ri, long value_offset, byte[] bytes, int offset, int size) {
+        if (DEBUG)
+            printlnd("Modify value ri=" + ri + " vo=" + value_offset + " size=" + size + " total=" + bytes.length);
+        if (ClusterTraitsBase.isIllegalResourceIndex(ri))
+            throw new RuntimeDatabaseException("Assertion error. Illegal resource index=" + ri);
+        if (value_offset > ((1L << 58) - 1))
+            throw new RuntimeDatabaseException("Illegal value offset="
+                    + value_offset);
+        if (size < 0 || size > MAX_FIXED_BYTES - 1)
+            throw new RuntimeDatabaseException("Illegal value size=" + size);
+        if (offset + size > bytes.length)
+            throw new RuntimeDatabaseException("Illegal value size=" + size);
+        checkBufferSpace(12 + size);
+        addByte(OpEnum.Modify.getOrMask());
+        ri |= (value_offset >>> 56) << 14; // add top two bits
+        addShort((short) ri);
+        value_offset &= (1L << 56) - 1;
+        addLong7(value_offset);
+        addShort((short) size);
+        if (DEBUG)
+            System.out.println("Modify value fixed part end offset=" + byteIndex);
+        int copied = Math.min(size, this.bytes.length - byteIndex);
+        System.arraycopy(bytes, offset, this.bytes, byteIndex, copied);
+        byteIndex += size;
+        return copied;
+    }
+
+//    private final void modiValueBig(int ri, long voffset, int left, byte[] bytes, int offset) {
+//        checkBufferSpace(0);
+//        int current = Math.min(this.bytes.length - byteIndex - 12, left);
+//        if(current >= 0) {
+//            int written = modiValue(ri, voffset, bytes, offset, current);
+//            voffset += written;
+//            offset += written;
+//            left -= written;
+//        }
+////        flushInternal(graphSession, clusterUID);
+//        while (left > 0) {
+//            int length = Math.min(left, (1 << 16) - 1);
+//            if (DEBUG)
+//                printlnd("Modify big value ri=" + ri + " vo=" + voffset + " len=" + length);
+//            int psize = length + 12;
+////            setHeaderVectorSize(psize);
+//            byte[] message = new byte[psize/*+HEADER_SIZE*/];
+////            System.arraycopy(header, 0, message, 0, HEADER_SIZE);
+//            int to = 0;
+//            Bytes.write(message, to++, OpEnum.Modify.getOrMask());
+//            short index = (short)(ri | (voffset >>> 56)<<14); // add top two bits
+//            Bytes.writeLE(message, to, index); to += 2;
+//            Bytes.writeLE7(message, to, voffset & ((1L << 56) - 1)); to += 7;
+//            Bytes.writeLE(message, to, (short)length); to += 2;
+//            System.arraycopy(bytes, offset, message, to, length);
+////            graphSession.updateCluster(new UpdateClusterFunction(message));
+//            voffset += length;
+//            offset += length;
+//            left -= length;
+//        }
+//    }
+
+    private final int setValueBig(int ri, byte[] bytes, int length_) {
+        checkBufferSpace(12);
+        int sum = 0;
+        int voffset = 0;
+        int offset = 0;
+        int left = length_;
+        while (left > 0) {
+            int length = Math.min(left, MAX_FIXED_BYTES - 12 - byteIndex);
+            if (DEBUG)
+                printlnd("Set big value ri=" + ri + " vo=" + voffset + " len=" + length);
+            int written = modiValue(ri, voffset, bytes, offset, length);
+            sum += written;
+            voffset += written;
+            offset += written;
+            left -= written;
+            checkBufferSpace(12);
+        }
+        return sum;
+    }
+
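+    /*
+     * Small-value encoding sketch: i packs length << 14 | ri. Lengths under
+     * 32 use SetShort with a 3-byte header (the two low length bits ride in
+     * the 2-byte short, the remaining three bits are OR'd into the op byte);
+     * longer values use Set with a full 4-byte int for a 5-byte header.
+     */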
+    private final int setValueSmall(int ri, byte[] bytes, int length) {
+        checkBufferSpace(5 + length);
+        int pos = byteIndex;
+        int i = length << 14 | ri;
+        if (length < 32) {
+            byte op = (byte) (OpEnum.SetShort.getOrMask() | length >>> 2);
+            addByte(op);
+            short s = (short) i;
+            addShort(s);
+        } else {
+            addByte(OpEnum.Set.getOrMask());
+            addInt(i);
+        }
+        System.arraycopy(bytes, 0, this.bytes, byteIndex, length);
+        byteIndex += length;
+        int len = byteIndex - pos;
+        return len;
+    }
+
+    final void setValue(short index, byte[] bytes) {
+        setValue(index, bytes, bytes.length);
+    }
+
+    final public void setValue(short index, byte[] bytes, int length) {
+        checkInitialization();
+        if (ClusterTraitsBase.isIllegalResourceIndex(index))
+            throw new RuntimeDatabaseException("Assertion error. Illegal resource index=" + index);
+        if (DEBUG)
+            printlnd("Set value ri=" + index
+                    + " len=" + length
+                    + " bytes=" + Arrays.toString(Arrays.copyOfRange(bytes, 0, Math.min(10, length))));
+        int len;
+        /*
+         * The limit for the cluster stream is (1<<18)-1 but this avoids the
+         * conversion to a big cluster.
+         */
+        if (length > ClusterTraitsSmall.VALUE_SIZE_MAX)
+            len = setValueBig(index, bytes, length);
+        else
+            len = setValueSmall(index, bytes, length);
+        if (DEBUG_STAT) {
+            ++info.nValues;
+            info.sValues += len + length;
+        }
+    }
+
+//    final void setValue(Change c, byte[] bytes, int length) {
+//        short ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0);
+//        setValue(ri, bytes, length);
+//        c.initValue();
+//    }
+
+//    final void modiValue(Change c, long voffset, int length, byte[] bytes, int offset) {
+//        checkInitialization();
+//        int ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0);
+//        if (DEBUG)
+//            printlnd("Modify value ri=" + ri
+//                    + " voff=" + voffset
+//                    + " vlen=" + length
+//                    + " blen=" + bytes.length
+//                    + " boff=" + offset
+//                    + " bytes=" + Arrays.toString(Arrays.copyOfRange(bytes, 0, Math.min(10, bytes.length))));
+//        modiValueBig(ri, voffset, length, bytes, offset);
+//        c.init();
+//        if (DEBUG_STAT) {
+//            ++info.nValues;
+//            info.sValues += length;
+//        }
+//    }
+    final void setImmutable(boolean immutable) {
+        checkInitialization();
+//        clusterChange2.setImmutable(immutable);
+    }
+    final void undoValueEx(int resourceIndex) {
+        checkInitialization();
+//        clusterChange2.undoValueEx(resourceIndex);
+    }
+    final void setDeleted(boolean deleted) {
+        checkInitialization();
+//        clusterChange2.setDeleted(deleted);
+    }
+    final void corrupt() {
+        checkInitialization();
+        addByte((byte)0);
+    }
+    
+    public byte[] getBytes() {
+        byte[] copy = new byte[byteIndex];
+        System.arraycopy(bytes, 0, copy, 0, byteIndex);
+        return copy;
+    }
+    
+    /**
+     * @param clusterUID cluster whose changes should be flushed
+     * @return true if something was actually flushed
+     */
+    final boolean flush(/*GraphSession graphSession,*/ ClusterUID clusterUID) {
+       throw new UnsupportedOperationException();
+//        if (byteIndex > 0) {
+//            if(DebugPolicy.REPORT_CLUSTER_STREAM)
+//                System.err.println("Flush cluster change set stream " + this);
+//            setHeaderVectorSize(byteIndex);
+//            byte[] copy = new byte[byteIndex + HEADER_SIZE];
+//            System.arraycopy(header, 0, copy, 0, HEADER_SIZE);
+//            System.arraycopy(bytes, 0, copy, HEADER_SIZE, byteIndex);
+//            UpdateClusterFunction updateClusterFunction = new UpdateClusterFunction(copy);
+//            if (DEBUG_CCS) {
+//                for (DebugStm stm : debugStms)
+//                    printlnd(stm.toString2());
+//                debugStms.clear();
+//            }
+//            if (DEBUG_STAT) {
+//                info.tot = updateClusterFunction.operation.length;
+//                printlnd("ReallyFlush: " + info.toString());
+//                sum.add(info);
+//                printlnd("ReallyFlush sum: " + sum.toString());
+//            }
+//            // long start = System.nanoTime();
+//            graphSession.updateCluster(updateClusterFunction);
+//            // long duration = System.nanoTime() - start;
+//            // duration2 += duration;
+//            // System.err.println("updateCluster " + 1e-9*duration);
+//            // System.err.println("updateCluster total " + 1e-9*duration2);
+//            clear();
+//            clusterChange2.flush(graphSession);
+//            return true;
+//        } else if (clusterChange2.isDirty()) {
+//            clusterChange2.flush(graphSession);
+//            clear();
+//            return true;
+//        } else if (flushed) {
+//            flushed = false;
+//            return true;
+//        } else {
+//            return true;
+//        }
+    }
+
+    final void flushInternal(ClusterUID clusterUID) {
+       throw new UnsupportedOperationException();
+//        flush(graphSession, clusterUID);
+//        flushed = true;
+    }
+
+    final class ForeignTable {
+        private final TLongIntHashMap table = new TLongIntHashMap();
+
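+        // Packs the (<= 48-bit) cluster id above the 14-bit resource index
+        // so a single long can key the (cluster, index) pair.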
+        private long createKey(short index, long cluster) {
+            assert (cluster <= (1L << 48) - 1);
+            return (cluster << 14) | index;
+        }
+
+        public int get(short index, long cluster) {
+            int value = table.get(createKey(index, cluster));
+            if (DEBUG)
+                printlnd("ForeignTable get c=" + clusterUID + " i="
+                        + (value - 1) + " r=" + index + " rc=" + cluster);
+            return value;
+        }
+
+        public int put(short index, long cluster, int value) {
+            if (DEBUG)
+                printlnd("ForeignTable put c=" + clusterUID + " i="
+                        + (value - 1) + " r=" + index + " rc=" + cluster);
+            return table.put(createKey(index, cluster), value);
+        }
+
+        public int size() {
+            return table.size();
+        }
+
+        public void clear() {
+            table.clear();
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return 31*clusterUID.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object object) {
+        if (this == object)
+            return true;
+        else if (object == null)
+            return false;
+        else if (!(object instanceof ClusterChange))
+            return false;
+        ClusterChange r = (ClusterChange)object;
+        return r.clusterUID.equals(clusterUID);
+    }
+    
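+    /*
+     * Framing sketch for the queued block, as written below: the raw record
+     * is [1][ClusterUID 16B][block length][block][0]; it is LZ4-compressed
+     * only when that actually saves space, then wrapped in a
+     * [0][0][rawLen][rawLen][payloadLen][payload][0] envelope before being
+     * added to the stream.
+     */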
+    public void flush() {
+
+        if (byteIndex > 0) {
+
+            final ClusterUID cuid = clusterUID;
+
+            byte[] block = getBytes();
+            byte[] raw = new byte[block.length + 28];
+            Bytes.writeLE(raw, 0, 1);
+            System.arraycopy(cuid.asBytes(), 0, raw, 4, 16);
+            Bytes.writeLE(raw, 20, block.length);
+            System.arraycopy(block, 0, raw, 24, block.length);
+            Bytes.writeLE(raw, 24 + block.length, 0);
+
+            ByteBuffer rawBB = ByteBuffer.wrap(raw);
+            ByteBuffer outputBB = ByteBuffer.allocate(raw.length + raw.length / 8);
+            //outputBB.order(ByteOrder.LITTLE_ENDIAN);
+            int compressedSize = Compressions.get(Compressions.LZ4).compressBuffer(rawBB, 0, raw.length, outputBB, 0);
+
+            byte[] data_ = null;
+            if (compressedSize < raw.length) {
+                data_ = new byte[compressedSize];
+                outputBB.get(data_, 0, compressedSize);
+            } else {
+                data_ = raw;
+            }
+
+            byte[] data = new byte[data_.length + 24];
+            Bytes.writeLE(data, 0, 0);
+            Bytes.writeLE(data, 4, 0);
+            Bytes.writeLE(data, 8, raw.length);
+            Bytes.writeLE(data, 12, raw.length);
+            Bytes.writeLE(data, 16, data_.length);
+            System.arraycopy(data_, 0, data, 20, data_.length);
+            Bytes.writeLE(data, 20 + data_.length, 0);
+
+            stream.add(Pair.make(clusterUID, data));
+            clear();
+            initBuffer();
+        }
+    }
+    
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange2.java
new file mode 100644 (file)
index 0000000..472b4d7
--- /dev/null
@@ -0,0 +1,70 @@
+package org.simantics.acorn.internal;
+
+
+public class ClusterChange2 {
+    public static final int VERSION = 2;
+    public static final byte SET_IMMUTABLE_OPERATION = 1; // <byte : 0 = false>
+    public static final byte UNDO_VALUE_OPERATION = 2; // <int : resource index>
+    private static final int INCREMENT = 1<<10;
+//    private boolean dirty = false;
+//    private byte[] bytes;
+//    private int byteIndex;
+//    private ClusterUID clusterUID;
+//    private ClusterImpl cluster;
+//    ClusterChange2(ClusterUID clusterUID, ClusterImpl cluster) {
+//        this.clusterUID = clusterUID;
+//        this.cluster = cluster;
+//        init();
+//    }
+//    void init() {
+////        System.err.println("clusterChange2 dirty " + cluster.clusterId);
+//        dirty = false;
+//        bytes = new byte[INCREMENT];
+//        byteIndex = 0;
+//        addInt(0); // Size of byte vector. Set by flush.
+//        addInt(VERSION);
+//        byteIndex = clusterUID.toByte(bytes, 8);
+//    }
+//    boolean isDirty() {
+//        return dirty;
+//    }
+//    void flush(GraphSession graphSession) {
+////        System.err.println("flush2 clusterChange2 " + dirty + this);
+//        if (!dirty)
+//            return;
+//        Bytes.writeLE(bytes, 0, byteIndex - 4);
+//        byte[] ops = Arrays.copyOf(bytes, byteIndex);
+////        System.err.println("flush2 clusterChange2 " + cluster.clusterId + " " + ops.length + " bytes.");
+//        graphSession.updateCluster(new UpdateClusterFunction(ops));
+//        init();
+//    }
+//    void setImmutable(boolean immutable) {
+//        dirty = true;
+//        addByte(SET_IMMUTABLE_OPERATION);
+//        addByte((byte)(immutable ? -1 : 0));
+//    }
+//    void undoValueEx(int resourceIndex) {
+//        dirty = true;
+//        addByte(UNDO_VALUE_OPERATION);
+//        addInt(resourceIndex);
+//    }
+//    private final void checkSpace(int len) {
+//        if (bytes.length - byteIndex > len)
+//            return;
+//       bytes = Arrays.copyOf(bytes, bytes.length + len + INCREMENT);
+//    }
+//    private final void addByte(byte value) {
+//        checkSpace(1);
+//        bytes[byteIndex++] = value;
+//    }
+//    private final void addInt(int value) {
+//        checkSpace(4);
+//        Bytes.writeLE(bytes, byteIndex, value);
+//        byteIndex += 4;
+//    }
+////    private void addLong(long value) {
+////        checkSpace(8);
+////        Bytes.writeLE(bytes, byteIndex, value);
+////        byteIndex += 8;
+////    }
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterStream.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterStream.java
new file mode 100644 (file)
index 0000000..2b1ae19
--- /dev/null
@@ -0,0 +1,437 @@
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn.internal;
+
+import java.util.ArrayList;
+
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.service.ClusterUID;
+
+final public class ClusterStream {
+
+//    // public static long duration2 = 0;
+//
+    public static final boolean DEBUG = false;
+    public static final byte NULL_OPERATION = 0;
+    public static final byte CREATE_OPERATION = 1;
+    public static final byte SET_OPERATION = 4;
+    public static final byte MODI_OPERATION = 6;
+    public static final byte KILL_OPERATION = 7;
+//    boolean off = false;
+//    public GraphSession graphSession;
+//    final SessionImplSocket session;
+////    private int flushCount = 0;
+//    final private boolean alwaysOff;
+//    private int stamp;
+//    private int acceptedStamp;
+//    private boolean dirty = false;
+////    final private ArrayList<ClusterChange> clusterChanges = new ArrayList<ClusterChange>();
+//    
+//    final ClusterChangeManager changes = new ClusterChangeManager();
+//    
+////    final TLongObjectHashMap<ClusterChange> clusterChanges = new TLongObjectHashMap<ClusterChange>();
+//
+//    // private final Change lastChange = new Change();
+//    ClusterStream(SessionImplSocket session, GraphSession graphSession,
+//            boolean alwaysOff) {
+//        this.session = session;
+//        this.graphSession = graphSession;
+//        this.alwaysOff = alwaysOff;
+//    }
+//
+//    
+//    boolean isDirty() {
+//     return dirty;
+//    }
+//    
+//    void markDirty() {
+//     dirty = true;
+//    }
+//    
+//    void setOff(boolean value) {
+//        if (alwaysOff) {
+//            off = true;
+//        } else {
+//            off = value;
+//        }
+//    }
+//
+//    boolean getOff() {
+//        return off;
+//    }
+//
+//    void createResource(ClusterChange cc, short operationIndex, ClusterUID clusterUID) {
+//        if (off)
+//            return;
+//        assert (null != cc);
+//        assert (0 != operationIndex);
+//        assert (!ClusterUID.Null.equals(clusterUID));
+//        if (DEBUG)
+//            System.out.println("DEBUG: Created resource index=" + operationIndex + " cluster=" + clusterUID);
+//        cc.createResource(operationIndex);
+//    }
+//
+//    final void addStatementIndex(Change change, int key, ClusterUID clusterUID, byte op) {
+//        if (off)
+//            return;
+//        assert (key > 0);
+//        assert (null != change);
+//        assert (!ClusterUID.Null.equals(clusterUID));
+//        change.addStatementIndex(key, clusterUID, op);
+//    }
+//
+//    void addStatement(ClusterChange cc, Change change) {
+//        if (off)
+//            return;
+//        assert (null != cc);
+//        assert (null != change);
+//        cc.addChange(change);
+//    }
+//
+//    void cancelStatement(Change change) {
+//        if (off)
+//            return;
+//        assert (null != change);
+//        change.init();
+//    }
+//
+//    void removeStatement(ClusterChange cc, Change change, long clusterId) {
+//        if (off)
+//            return;
+//        assert (null != cc);
+//        assert (null != change);
+//        cc.addChange(change);
+//    }
+//
+//    void cancelValue(Change change) {
+//        if (off)
+//            return;
+//        assert (null != change);
+//        change.init();
+//    }
+//
+//    void removeValue(ClusterChange cc, Change change, long clusterId) {
+//        if (off)
+//            return;
+//        // ClusterChange cc = getClusterChange(clusterId);
+//        assert (null != cc);
+//        assert (null != change);
+//        cc.addChange(change);
+//    }
+//
+//    void setValue(ClusterChange cc, Change change, long clusterId, byte[] bytes, int length) {
+//        if (off)
+//            return;
+//        assert (null != cc);
+//        assert (null != change);
+//        // ClusterChange cc = getClusterChange(clusterId);
+//        cc.setValue(change, bytes, length);
+//    }
+//
+//    void modiValue(ClusterChange cc, Change change, long clusterId,
+//            long voffset, int length, byte[] bytes, int offset) {
+//        assert (null != cc);
+//        assert (null != change);
+//        cc.modiValue(change, voffset, length, bytes, offset);
+//    }
+//
+//    void undoValueEx(ClusterChange cc, Change change, int resourceIndex) {
+//        cc.undoValueEx(resourceIndex);
+//    }
+//    void setImmutable(ClusterChange cc, Change change, long clusterId, boolean immutable) {
+//        if (off)
+//            return;
+//        cc.setImmutable(immutable);
+//    }
+//    public void corruptCluster(ClusterChange cc, long clusterId)
+//            throws DatabaseException {
+//        if (off)
+//            return;
+//        if (DEBUG)
+//            System.out.println("ClusterStream.corrupt cid=" + clusterId + ".");
+//        assert (null != cc);
+//        cc.corrupt();
+//    }
+//
+//    int getStamp() {
+//        return stamp;
+//    }
+//
+//    void flush() {
+//        if (off)
+//            return;
+////        flushCount++;
+//        return;
+//    }
+//
+//    void flush(long clusterId) {
+//        if (off)
+//            return;
+//        ClusterUID clusterUID = session.clusterTable.clusterIds.getClusterUID(clusterId);
+//        ArrayList<ClusterChange> ccs = new ArrayList<ClusterChange>();
+//        for(ClusterChange cc : changes.get()) {
+//            if(cc.clusterUID.equals(clusterUID)) {
+//                if (cc.flush(graphSession, cc.clusterUID)) {
+//                    ccs.add(cc);
+//                    if (stamp == acceptedStamp)
+//                        ++stamp;
+//                } else {
+////                    System.err.println("kasdi");
+//                }
+//            }
+//        }
+//        changes.remove(ccs);
+//    }
+//
+//    /**
+//     * @return true if the stream has accepted all changes
+//     */
+//    public boolean reallyFlush() {
+//        // Last possibility to mark clusters immutable before write only clusters are gone 
+//        session.handleCreatedClusters();
+//        // These shall be requested from server
+//        session.clusterTable.removeWriteOnlyClusters();
+//        if (!off && changes.size() > 0) {
+//            for(ClusterChange cc : changes.get()) {
+//                if (cc.flush(graphSession, cc.clusterUID))
+//                    if (stamp == acceptedStamp)
+//                        ++stamp;
+//            }
+//            changes.clear();
+//        }
+//        dirty = false;
+//        return hasAcceptedAllChanges();
+//    }
+//
+//    /**
+//     * Clear all changes and set stream status to empty.
+//     */
+//    public void clear() {
+//        changes.clear();
+//        acceptedStamp = stamp;
+//        dirty = false;
+//    }
+//
+//    private boolean hasAcceptedAllChanges() {
+//        return stamp == acceptedStamp;
+//    }
+//
+//    void accept() {
+//        acceptedStamp = stamp;
+//    }
+//
+//
+
+    static class DebugInfo {
+        long nStms;
+        long nLocal;
+        long nPartly;
+        long nForeign;
+        long nValues;
+        long sValues;
+        long sForeign;
+        long tot;
+
+        void clear() {
+            nStms = 0;
+            nLocal = 0;
+            nPartly = 0;
+            nForeign = 0;
+            sForeign = 0;
+            nValues = 0;
+            sValues = 0;
+            tot = 0;
+        }
+
+        void add(DebugInfo di) {
+            nStms += di.nStms;
+            nLocal += di.nLocal;
+            nPartly += di.nPartly;
+            nForeign += di.nForeign;
+            sForeign += di.sForeign;
+            nValues += di.nValues;
+            sValues += di.sValues;
+            tot += di.tot;
+        }
+
+        @Override
+        public String toString() {
+            return "val=" + nValues + " stm=" + nStms + " loc=" + nLocal
+                    + " par=" + nPartly + " ful=" + nForeign + " for="
+                    + sForeign + " vat=" + sValues + " tot=" + tot;
+        }
+    }
+
+    enum StmEnum {
+        Add(0, (byte) 0), Remove(1, (byte) 0x20);
+        StmEnum(int ordinal, byte mask) {
+            this.ordinal = ordinal;
+            this.mask = mask;
+        }
+
+        public int ordinal;
+        private byte mask;
+
+        byte getOrMask() {
+            return mask;
+        }
+    }
+
+    final static class Data {
+        
+        final byte mask; // or mask for operation code (don't care bits are zero)
+        final short bits; // how many bits are reserved for resource index (0,2,4,6)
+        final int bytes;
+
+        Data(int mask, int bits, ClusterEnum a, ClusterEnum b) {
+            this.mask = (byte) (mask << bits);
+            this.bits = (short) bits;
+            this.bytes = bytes(bits, a, b);
+        }
+        
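+        // Counts the statement bits that do not fit in the op byte: the
+        // subject contributes 6 - bits, and each non-ForeignShort reference
+        // adds its 6 popped high bits; the total is rounded up to whole bytes.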
+        private static int bytes(int bits, ClusterEnum a, ClusterEnum b) {
+            int left = 6 - bits;
+            if (a != ClusterEnum.ForeignShort) {
+                left += 6;
+            }
+            if (b != ClusterEnum.ForeignShort) {
+                left += 6;
+            }
+            int bytes = left >>> 3;
+            if ((left & 7) != 0)
+                bytes++;
+            return bytes;
+        }
+        
+    }
+
+    enum ClusterEnum {
+        Local(0), ForeignShort(1), ForeignLong(2);
+        public int ordinal;
+
+        ClusterEnum(int ordinal) {
+            this.ordinal = ordinal;
+        }
+
+        static Data[][][] maps = new Data[2][3][3];
+        static {
+               // mask: 00000000
+               // op: 000000|r12-13
+               // p1
+               // o1
+               // r0-7
+               // o2 | p2 | r8-11
+            maps[StmEnum.Add.ordinal][Local.ordinal][Local.ordinal] = new Data(
+                    0, 2, Local, Local);
+               // mask: 11000000
+               // op: 1100 | r10-13
+               // p1
+               // o for index
+               // r0-7
+               // p2 | ri 8-9
+            maps[StmEnum.Add.ordinal][Local.ordinal][ForeignShort.ordinal] = new Data(
+                    12, 4, Local, ForeignShort);
+               // mask: 00001000
+            // op: 000010 | r12-13 
+            maps[StmEnum.Add.ordinal][Local.ordinal][ForeignLong.ordinal] = new Data(
+                    2, 2, Local, ForeignLong);
+               // mask: 11010000
+            // op: 1101 | r10-13 
+            maps[StmEnum.Add.ordinal][ForeignShort.ordinal][Local.ordinal] = new Data(
+                    13, 4, ForeignShort, Local);
+
+            // mask: 01000000
+            // op: 01 | r8-13
+            // p for index
+            // o for index
+               // r0-7
+            maps[StmEnum.Add.ordinal][ForeignShort.ordinal][ForeignShort.ordinal] = new Data(
+                    1, 6, ForeignShort, ForeignShort);
+            // mask: 11100000
+            // op: 1110 | r10-13 
+            maps[StmEnum.Add.ordinal][ForeignShort.ordinal][ForeignLong.ordinal] = new Data(
+                    14, 4, ForeignShort, ForeignLong);
+            // mask: 00010000
+            // op: 000100 | r12-13 
+            maps[StmEnum.Add.ordinal][ForeignLong.ordinal][Local.ordinal] = new Data(
+                    4, 2, ForeignLong, Local);
+            // mask: 11110000
+            // op: 1111 | r10-13 
+            maps[StmEnum.Add.ordinal][ForeignLong.ordinal][ForeignShort.ordinal] = new Data(
+                    15, 4, ForeignLong, ForeignShort);
+            // mask: 00011000
+            // op: 000110 | r12-13 
+            maps[StmEnum.Add.ordinal][ForeignLong.ordinal][ForeignLong.ordinal] = new Data(
+                    6, 2, ForeignLong, ForeignLong);
+
+            // mask: 00000100
+            // op: 000001 | r12-13 
+            maps[StmEnum.Remove.ordinal][Local.ordinal][Local.ordinal] = new Data(
+                    1, 2, Local, Local);
+            // mask: 01100001
+               // op: 01100001
+               // p1
+               // o for index
+               // r0-7
+               // p2 | ri 8-13
+            maps[StmEnum.Remove.ordinal][Local.ordinal][ForeignShort.ordinal] = new Data(
+                    49, 0, Local, ForeignShort);
+            // mask: 00001100
+            // op: 000011 | r12-13 
+            maps[StmEnum.Remove.ordinal][Local.ordinal][ForeignLong.ordinal] = new Data(
+                    3, 2, Local, ForeignLong);
+            // mask: 00100000
+            // op: 0010 | r10-13 
+            maps[StmEnum.Remove.ordinal][ForeignShort.ordinal][Local.ordinal] = new Data(
+                    2, 4, ForeignShort, Local);
+            // mask: 10000000
+            // op: 10 | r8-13
+            maps[StmEnum.Remove.ordinal][ForeignShort.ordinal][ForeignShort.ordinal] = new Data(
+                    2, 6, ForeignShort, ForeignShort);
+            // mask: 00110010
+            // op: 00110010
+            maps[StmEnum.Remove.ordinal][ForeignShort.ordinal][ForeignLong.ordinal] = new Data(
+                    50, 0, ForeignShort, ForeignLong);
+            // mask: 00010100
+            // op: 000101 | r12-13 
+            maps[StmEnum.Remove.ordinal][ForeignLong.ordinal][Local.ordinal] = new Data(
+                    5, 2, ForeignLong, Local);
+            // mask: 00110011
+            // op: 00110011
+            maps[StmEnum.Remove.ordinal][ForeignLong.ordinal][ForeignShort.ordinal] = new Data(
+                    51, 0, ForeignLong, ForeignShort);
+            // mask: 00011100
+            // op: 000111 | r12-13 
+            maps[StmEnum.Remove.ordinal][ForeignLong.ordinal][ForeignLong.ordinal] = new Data(
+                    7, 2, ForeignLong, ForeignLong);
+        }
+
+        static Data getData(StmEnum s, ClusterEnum a, ClusterEnum b) {
+            return maps[s.ordinal][a.ordinal][b.ordinal];
+            // return maps.get(s).get(a).get(b);
+        }
+    }
+
+    enum OpEnum {
+        Create((byte) 52), Set((byte) 53), SetShort((byte) 56), Delete(
+                (byte) 54), Modify((byte) 55);
+        OpEnum(byte mask) {
+            this.mask = mask;
+        }
+
+        public byte getOrMask() {
+            return mask;
+        }
+
+        private byte mask;
+    }
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterSupport2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterSupport2.java
new file mode 100644 (file)
index 0000000..7cd007a
--- /dev/null
@@ -0,0 +1,340 @@
+package org.simantics.acorn.internal;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.db.Session;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.impl.IClusterTable;
+import org.simantics.db.service.ClusterUID;
+
+import gnu.trove.map.hash.TIntObjectHashMap;
+
+public class ClusterSupport2 implements ClusterSupport, IClusterTable {
+       
+       final private static boolean DEBUG = false;
+
+       public ClusterManager impl;
+       
+       public TIntObjectHashMap<ClusterUID> uidCache = new TIntObjectHashMap<ClusterUID>(); 
+       
+       public ClusterSupport2(ClusterManager impl) {
+               this.impl = impl;
+       }
+       
+       @Override
+       public int createClusterKeyByClusterUID(ClusterUID clusterUID, long clusterId) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) {
+               try {
+            return impl.getClusterByClusterUIDOrMake(clusterUID);
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            return null;
+        }
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterId(long clusterId) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterKey(int clusterKey) {
+               throw new UnsupportedOperationException();
+       }
+
+       ReentrantReadWriteLock uidLock = new ReentrantReadWriteLock();
+       ReadLock uidRead = uidLock.readLock();
+       WriteLock uidWrite = uidLock.writeLock();
+       
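+       /*
+        * Caches resourceKey >> 12 (the cluster-key part) to ClusterUID with a
+        * double-checked read/write-lock pattern: an optimistic lookup under
+        * the read lock, then a re-check and populate under the write lock so
+        * each UID is resolved at most once.
+        */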
+       @Override
+       public ClusterUID getClusterUIDByResourceKey(int resourceKey) throws DatabaseException {
+
+               ClusterUID cuid;
+               
+               uidRead.lock();
+               cuid = uidCache.get(resourceKey >> 12);
+               uidRead.unlock();
+               if(cuid != null) return cuid;
+               uidWrite.lock();
+               cuid = uidCache.get(resourceKey >> 12);
+               if(cuid == null) {
+                       cuid = impl.getClusterUIDByResourceKeyWithoutMutex(resourceKey); 
+                       uidCache.put(resourceKey >> 12, cuid);
+               }
+               uidWrite.unlock();
+               
+               return cuid;
+               
+       }
+       
+       @Override
+       public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) {
+               return impl.getClusterKeyByClusterUIDOrMakeWithoutMutex(clusterUID);
+       }
+       
+    @Override
+    public int getClusterKeyByClusterUIDOrMake(long id1, long id2) {
+               throw new UnsupportedOperationException();
+    }
+
+       @Override
+       public ClusterBase getClusterByResourceKey(int resourceKey) {
+               throw new UnsupportedOperationException();
+//             return impl.getClusterByResourceKey(resourceKey);
+       }
+
+       @Override
+       public long getClusterIdOrCreate(ClusterUID clusterUID) {
+               return impl.getClusterIdOrCreate(clusterUID);
+       }
+
+       @Override
+       public void addStatement(Object cluster) {
+               // nop
+       }
+
+       @Override
+       public void cancelStatement(Object cluster) {
+               // nop
+       }
+
+       @Override
+       public void removeStatement(Object cluster) {
+               // nop
+       }
+
+       @Override
+       public void removeValue(Object cluster) {
+               // nop
+       }
+
+       @Override
+       public void setImmutable(Object cluster, boolean immutable) {
+               // nop
+       }
+
+       @Override
+       public void setDeleted(Object cluster, boolean deleted) {
+               // TODO Auto-generated method stub
+               
+       }
+
+       
+       
+       @Override
+       public void cancelValue(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void setValue(Object cluster, long clusterId, byte[] bytes,
+                       int length) {
+               // nop
+       }
+
+       @Override
+       public void modiValue(Object _cluster, long clusterId, long voffset,
+                       int length, byte[] bytes, int offset) {
+               // nop
+       }
+
+       @Override
+       public void createResource(Object cluster, short resourceIndex,
+                       long clusterId) {
+               // No op
+       }
+
+       @Override
+       public void addStatementIndex(Object cluster, int resourceKey,
+                       ClusterUID clusterUID, byte op) {
+               // No op
+       }
+
+       @Override
+       public void setStreamOff(boolean setOff) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public boolean getStreamOff() {
+               return true;
+       }
+
+       
+    private static class ResourceSegment {
+        public long   valueSize;
+
+        public byte[] bytes;
+
+        ResourceSegment(long valueSize, byte[] bytes) {
+            this.valueSize = valueSize;
+            this.bytes = bytes;
+        }
+    }
+
+    public ResourceSegment getResourceSegment(int resourceIndex, ClusterUID clusterUID, long offset, short size)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("DEBUG: getResourceSegment ri=" + resourceIndex + " cid=" + clusterUID + " offset=" + offset + " size=" + size);
+        
+        org.simantics.db.Database.Session.ResourceSegment t = impl.getResourceSegment(clusterUID.asBytes(), resourceIndex, offset, size);
+        return new ResourceSegment(t.getValueSize(), t.getSegment());
+        
+    }
+    
+    protected byte[] getValueBig(ClusterBase cluster, int resourceIndex, int offset, int length) throws DatabaseException {
+    
+       assert(offset == 0);
+       assert(length == 0);
+       
+       ClusterUID clusterUID = cluster.clusterUID;
+       
+       return impl.getResourceFile(clusterUID.asBytes(), resourceIndex);
+       
+    }
+
+    protected InputStream getValueStreamBig(ClusterBase cluster, final int resourceIndex, int offset, int length) throws DatabaseException {
+
+       final ClusterUID clusterUID = cluster.clusterUID;
+       
+       if (DEBUG)
+               System.out.println("DEBUG: getResourceFile ri=" + resourceIndex + " cid=" + clusterUID + " off=" + offset + " len=" + length);
+       final int IMAX = 0xFFFF;
+       short slen = (short)Math.min(length != 0 ? length : IMAX, IMAX);
+       final ResourceSegment s = getResourceSegment(resourceIndex, clusterUID, offset, slen);
+       if (s.valueSize < 0)
+               throw new DatabaseException("Failed to get value for resource index=" + resourceIndex +
+                               " cluster=" + clusterUID + " off=" + offset + " len=" + length + " (1).");
+       int ilen = (int)slen & 0xFFFF;
+       assert(s.bytes.length <= ilen);
+       if (0 == length) {
+               if (s.valueSize > Integer.MAX_VALUE)
+                       throw new DatabaseException("Failed to get value for resource index=" + resourceIndex +
+                                       " cluster=" + clusterUID + " off=" + offset + " len=" + length +
+                                       ". Value size=" + s.valueSize + " (2).");
+               length = (int)s.valueSize;
+       }
+       long rSize = s.valueSize - offset;
+       if (rSize < length)
+               throw new DatabaseException("Failed to get value for resource index=" + resourceIndex +
+                               " cluster=" + clusterUID + " off=" + offset + " len=" + length +
+                               ". Value size=" + s.valueSize + " (3).");
+       else if (length <= IMAX)
+               return new ByteArrayInputStream(s.bytes);
+
+       final int finalLength = length;
+
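+       // Streams the value in segments of at most 0xFFFF bytes: when the
+       // current segment is exhausted, read() fetches the next one via
+       // getResourceSegment and keeps returning unsigned byte values.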
+       return new InputStream() {
+
+               int left = finalLength;
+               long valueOffset = 0;
+               int offset = 0;
+               ResourceSegment _s = s;
+
+               @Override
+               public int read() throws IOException {
+
+                       if(left <= 0) return -1; // end of stream: the InputStream contract expects -1, not an exception
+
+                       if(offset == _s.bytes.length) {
+                               short slen = (short)Math.min(left, IMAX);
+                               valueOffset += _s.bytes.length;
+                               try {
+                                       _s = getResourceSegment(resourceIndex, clusterUID, valueOffset, slen);
+                               } catch (DatabaseException e) {
+                                       throw new IOException(e);
+                               }
+                               offset = 0;
+                       }
+
+                       left--;
+                       int result = _s.bytes[offset++];
+                       if(result < 0) result += 256;
+                       return result;
+
+               }
+
+       };
+
+    }
+       
+       @Override
+       public InputStream getValueStreamEx(int resourceIndex, long clusterId)
+                       throws DatabaseException {
+               ClusterBase cluster = impl.getClusterByClusterUIDOrMakeProxy(ClusterUID.make(0, clusterId));
+               return getValueStreamBig(cluster, resourceIndex, 0, 0);
+       }
+
+       @Override
+       public byte[] getValueEx(int resourceIndex, long clusterId)
+                       throws DatabaseException {
+               ClusterBase cluster = impl.getClusterByClusterUIDOrMakeProxy(ClusterUID.make(0, clusterId));
+               return getValueBig(cluster, resourceIndex, 0, 0);
+       }
+
+       @Override
+       public byte[] getValueEx(int resourceIndex, long clusterId, long voffset,
+                       int length) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public long getValueSizeEx(int resourceIndex, long clusterId)
+                       throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public int wait4RequestsLess(int limit) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public Session getSession() {
+               return null;
+       }
+
+       @Override
+       public IClusterTable getClusterTable() {
+               return this;
+       }
+
+       @Override
+       public <T extends ClusterI> T getClusterByClusterUIDOrMakeProxy(ClusterUID clusterUID) {
+               try {
+            return (T)impl.getClusterByClusterUIDOrMakeProxy(clusterUID);
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            return null;
+        }
+       }
+
+       @Override
+       public <T extends ClusterI> T getClusterProxyByResourceKey(int resourceKey) {
+               try {
+            return impl.getClusterProxyByResourceKey(resourceKey);
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            return null;
+        }
+       }
+
+       @Override
+       public int getClusterKeyByUID(long id1, long id2) throws DatabaseException {
+               return impl.getClusterKeyByUID(id1, id2);
+       }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor.java
new file mode 100644 (file)
index 0000000..0044d72
--- /dev/null
@@ -0,0 +1,86 @@
+package org.simantics.acorn.internal;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.cluster.ClusterImpl;
+import org.simantics.acorn.lru.CachingClusterSupport;
+import org.simantics.acorn.lru.ClusterUpdateOperation;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.service.ClusterUID;
+
+public class ClusterUpdateProcessor extends ClusterUpdateProcessorBase {
+       
+       final ClusterSupport support;
+       final ClusterUpdateOperation info;
+       private ClusterImpl cluster;
+       
+       public ClusterUpdateProcessor(ClusterManager client, ClusterSupport support, byte[] operations, ClusterUpdateOperation info) throws DatabaseException {
+               super(client, operations);
+               this.support = support;
+               this.info = info;
+       }
+       
+       @Override
+       void create() throws DatabaseException {
+               cluster.createResource(support);
+       }
+
+       @Override
+       void delete(int ri) throws DatabaseException {
+               
+               boolean oldValueEx = cluster.isValueEx(ri);
+               byte[] old = cluster.getValue(ri, support);
+               if(old != null) cluster.removeValue(ri, support);
+               info.ccs.oldValueEx.add(oldValueEx ? (byte)1 : 0);
+               info.ccs.oldValues.add(old);
+               
+       }
+
+       @Override
+       void modify(int resourceKey, long offset, int size, byte[] bytes, int pos)
+                       throws DatabaseException {
+               
+               cluster = (ClusterImpl)cluster.modiValueEx(resourceKey, offset, size, bytes, pos, support);
+               manager.modiFileEx(cluster.getClusterUID(), resourceKey, offset, size, bytes, pos, support);
+               
+       }
+
+       @Override
+       void set(int resourceKey, byte[] bytes, int length)
+                       throws DatabaseException {
+               
+               byte[] old = cluster.getValue(resourceKey, support);
+               boolean oldValueEx = cluster.isValueEx(resourceKey);
+               cluster = (ClusterImpl)cluster.setValue(resourceKey, bytes, length, support);
+               info.ccs.oldValueEx.add(oldValueEx ? (byte)1 : 0);
+               info.ccs.oldValues.add(old);
+               
+       }
+
+       @Override
+       void claim(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid)
+                       throws DatabaseException {
+               
+               ClusterImpl c = (ClusterImpl)cluster.addRelation(resourceKey, puid, predicateKey, ouid, objectKey, support);
+               if(c != null) cluster = c;
+               info.ccs.statementMask.add(c != null ? (byte)1 : 0);
+               
+       }
+
+       @Override
+       void deny(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid)
+                       throws DatabaseException {
+               
+               boolean modified = cluster.removeRelation(resourceKey, predicateKey, objectKey, support);
+               info.ccs.statementMask.add(modified ? (byte)1 : 0);
+
+       }
+
+       public ClusterImpl process(ClusterImpl cluster) {
+               this.cluster = cluster;
+               process();
+               info.finish();
+               return this.cluster;
+       }
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor2.java
new file mode 100644 (file)
index 0000000..7ce8673
--- /dev/null
@@ -0,0 +1,30 @@
+package org.simantics.acorn.internal;
+
+import org.simantics.acorn.cluster.ClusterImpl;
+import org.simantics.acorn.lru.ClusterUpdateOperation;
+import org.simantics.db.impl.ClusterSupport;
+
+public class ClusterUpdateProcessor2 extends ClusterUpdateProcessorBase2 {
+
+       final ClusterSupport support;
+       final ClusterUpdateOperation info;
+       private ClusterImpl cluster;
+
+       public ClusterUpdateProcessor2(ClusterSupport support, byte[] operations, ClusterUpdateOperation info) {
+               super(operations);
+               this.support = support;
+               this.info = info;
+       }
+
+       public void process(ClusterImpl cluster) {
+               this.cluster = cluster;
+               process();
+               info.finish();
+       }
+
+       @Override
+       void setImmutable(boolean value) {
+               cluster.setImmutable(value, support);
+       }
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase.java
new file mode 100644 (file)
index 0000000..e0e733c
--- /dev/null
@@ -0,0 +1,475 @@
+package org.simantics.acorn.internal;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.internal.ClusterStream.ClusterEnum;
+import org.simantics.acorn.internal.ClusterStream.Data;
+import org.simantics.acorn.internal.ClusterStream.StmEnum;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.service.Bytes;
+import org.simantics.db.service.ClusterUID;
+
+abstract public class ClusterUpdateProcessorBase {
+       
+       public final static boolean DEBUG = false;
+
+       final protected ClusterManager manager;
+       final public byte[] bytes;
+       private int pos = 0;
+       final private int len;
+       final private ClusterUID uid;
+       final private int clusterKey;
+       final public int version;
+       
+       final Map<ClusterUID, Integer> clusterKeyCache = new HashMap<ClusterUID, Integer>();
+       
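+       // Resolves the base resource key (index 0) once per ClusterUID and
+       // caches it; subsequent lookups just add the index to the cached key.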
+       public int getResourceKey(ClusterUID uid, int index) {
+               Integer match = clusterKeyCache.get(uid);
+               if(match != null) return match+index;
+               int key = manager.getResourceKeyWitoutMutex(uid, 0);
+               clusterKeyCache.put(uid, key);
+               return key+index;
+       }
+       
+       
+       public ClusterUpdateProcessorBase(ClusterManager client, byte[] operations) throws DatabaseException {
+               this.manager = client;
+               this.bytes = operations;
+               this.len = Bytes.readLE4(bytes, 0)+4; // the stored size excludes this 4-byte length field
+               version = Bytes.readLE4(bytes, 4);
+               long cuid1 = Bytes.readLE8(bytes, 8);
+               long cuid2 = Bytes.readLE8(bytes, 16);
+               uid = ClusterUID.make(cuid1, cuid2);
+               pos = 24;
+               client.clusterLRU.acquireMutex();
+               try {
+                       clusterKey = client.clusterLRU.getClusterKeyByUID(cuid1, cuid2) << 12;
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       client.clusterLRU.releaseMutex();
+               }
+       }
+       
+       public ClusterUID getClusterUID() {
+               return uid;
+       }
+       
+       private void processCreate() {
+               int r = Bytes.readLE2(bytes, pos);
+               pos+=2;
+               if(DEBUG) System.err.println("DEBUG: New ri=" + r + " offset=" + (pos-3-24));
+               try {
+                       create();
+               } catch (DatabaseException e) {
+                       e.printStackTrace();
+               }
+               
+       }
+       
+       private void processDelete() {
+               
+               int ri = Bytes.readLE2(bytes, pos);
+               pos += 2;
+               
+               if(DEBUG) System.err.println("DEBUG: Delete " + ri);
+               
+               try {
+                       delete(ri);
+               } catch (DatabaseException e) {
+                       e.printStackTrace();
+               }
+               
+       }
+
+       private void processModify(int op) {
+               
+               int ri = Bytes.readLE2(bytes, pos);
+               pos+=2;
+               long offset = Bytes.readLE7(bytes, pos);
+               pos+=7;
+               int size = Bytes.readLE2(bytes, pos);
+               pos+=2;
+               
+               offset += (ri>>14) << 56;
+               ri = ri & 0x3FFF;
+               
+               if(size < 0)
+                       throw new IllegalStateException();
+               if(ri < 1)
+                       throw new IllegalStateException();
+               if(ri > 4095)
+                       throw new IllegalStateException();
+               
+               if(DEBUG) System.err.println("DEBUG: Modify " + ri + " " + offset + " " + size + " offset=" + (pos-1-24) + " " + Arrays.toString(Arrays.copyOfRange(bytes, pos, pos+size)));
+
+               try {
+                       modify(clusterKey + ri, offset, size, bytes, pos);
+               } catch (DatabaseException e) {
+                       e.printStackTrace();
+               }
+
+               pos += size;
+               
+       }
+
+       private void processSet(int op) {
+               
+               int s = Bytes.readLE4(bytes, pos);
+               int length = (s >> 14);
+               if(length < 1)
+                       throw new IllegalStateException();
+               int r = s & 0x3FFF;
+               
+               pos += 4;
+               System.arraycopy(bytes, pos, valueBuffer, 0, length);
+               pos += length;
+
+               if(DEBUG) System.err.println("DEBUG: Set " + r + " " + length + " offset=" + (pos-1-24) + " " + Arrays.toString(Arrays.copyOf(valueBuffer,length)));
+
+               try {
+                       set(clusterKey+r, valueBuffer, length);
+               } catch (DatabaseException e) {
+                       e.printStackTrace();
+               }
+               
+       }
+
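+       // Scratch buffer shared by processSet/processSetShort; value payloads are
+       // copied here before being handed to set().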
+       byte[] valueBuffer = new byte[65536];
+       
+       private void processSetShort(int op) {
+               
+               int s = Bytes.readLE2(bytes, pos);
+               int length = ((op&7)<<2) + (s >> 14);
+               if(length < 1)
+                       throw new IllegalStateException();
+               if(length > 31)
+                       throw new IllegalStateException();
+               int r = s & 0x3FFF;
+               
+               if(DEBUG) System.err.println("DEBUG: SetShort " + r + " " + length + " offset=" + (pos-1-24) + " " + Arrays.toString(Arrays.copyOfRange(bytes, pos+2, pos+2+length)));
+               pos += 2;
+
+               System.arraycopy(bytes, pos, valueBuffer, 0, length);
+               pos += length;
+               
+               try {
+                       set(clusterKey+r, valueBuffer, length);
+               } catch (DatabaseException e) {
+                       e.printStackTrace();
+               }
+               
+       }
+
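+       // A statement resource is encoded in one of three ways:
+       //   ForeignShort - one byte indexing a foreign cluster already seen in this stream
+       //   Local        - one byte holding the low 8 bits of an index in this cluster
+       //   ForeignLong  - a full 16-byte ClusterUID followed by a one-byte index,
+       //                  which is also registered in the foreign tables below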
+       private void processStatementResource(ClusterEnum enu, int pOrO) {
+               if(ClusterEnum.ForeignShort == enu) {
+                       int fs = bytes[pos++]&0xff;
+                       foreignRefs[pOrO] = fs;
+               } else if(ClusterEnum.Local == enu) {
+                       int lo = bytes[pos++]&0xff;
+                       lows[pOrO] = lo;
+               } else {
+                       long l1 = Bytes.readLE8(bytes, pos);
+                       pos += 8;
+                       long l2 = Bytes.readLE8(bytes, pos);
+                       pos += 8;
+                       ClusterUID cuid = ClusterUID.make(l1, l2);
+                       foreignClusters[foreignPos] = cuid;
+                       int lo = bytes[pos++]&0xff;
+                       foreignIndices[foreignPos] = lo;
+                       foreignRefs[pOrO] = foreignPos;
+                       foreignPos++;
+                       lows[pOrO] = lo;
+               }
+       }
+       
+       ClusterUID[] foreignClusters = new ClusterUID[256];
+       int[] foreignIndices = new int[256];
+       int foreignPos = 0;
+       int lows[] = new int[2];
+       int foreignRefs[] = new int[2];
+       
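+       // Decodes one statement operation: first the predicate and object resources,
+       // then a resource-index byte whose high bits are spread over the op byte and
+       // optional extra bytes, as described by ClusterEnum.getData().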
+       private void processStatement(int op, StmEnum stmEnum, ClusterEnum p, ClusterEnum o) {
+
+               int curPos = pos-1-24;
+               
+               processStatementResource(p, 0);
+               processStatementResource(o, 1);
+
+               int ri = bytes[pos++]&0xff;
+               int pi = 0;
+               int oi = 0;
+               
+               ClusterUID puid = uid;
+               ClusterUID ouid = puid;
+               
+               if(ClusterEnum.ForeignShort == p && ClusterEnum.ForeignShort == o) {
+                       ri |= (op&0x3F) << 8;
+               } else {
+                       Data data = ClusterEnum.getData(stmEnum, p, o);
+                       // data.bytes tells how many extra bytes carry high index bits; data.bits is the number of op bits used
+                       if(data.bytes == 0) {
+                               ri = ri | ((op&0x3F)<<8); 
+                       } else {
+                               int extra = 0;
+                               int opBits = data.bits;
+                               int extraBits = 6-opBits;
+                               if(data.bytes == 1) {
+                                       extra = bytes[pos++]&0xff;
+                                       int high = extra >> extraBits;
+                                       if(ClusterEnum.ForeignShort == p) {
+                                               oi = lows[1] + (high<<8);
+                                       } else {
+                                               pi = lows[0] + (high<<8);
+                                       }
+                               } else {
+                                       extra = Bytes.readLE2(bytes, pos);
+                                       pos += 2;
+                                       int high1 = (extra >> extraBits)&((1<<6)-1);
+                                       int high2 = (extra >> (extraBits+6))&((1<<6)-1);
+                                       if(ClusterEnum.ForeignShort == p) {
+                                               oi = lows[1] + (high1<<8);
+                                       } else {
+                                               pi = lows[0] + (high1<<8);
+                                               oi = lows[1] + (high2<<8);
+                                       }
+                               }
+                               ri = ri | ((extra&((1<<extraBits)-1))<<8) | ((op&((1<<opBits)-1))<<(8+extraBits)); 
+                       }
+               }
+
+               // Set foreigns
+               if(ClusterEnum.ForeignLong == p) {
+                       int ref = foreignRefs[0];
+                       foreignIndices[ref] = pi;
+                       puid = foreignClusters[ref];
+               }
+               if(ClusterEnum.ForeignLong == o) {
+                       int ref = foreignRefs[1];
+                       foreignIndices[ref] = oi;
+                       ouid = foreignClusters[ref]; 
+               }
+               // Get foreigns
+               if(ClusterEnum.ForeignShort == p) {
+                       int ref = foreignRefs[0];
+                       pi = foreignIndices[ref];
+                       puid = foreignClusters[ref]; 
+               }
+               if(ClusterEnum.ForeignShort == o) {
+                       int ref = foreignRefs[1];
+                       oi = foreignIndices[ref];
+                       ouid = foreignClusters[ref]; 
+               }
+
+               if(ri < 1)
+                       throw new IllegalStateException();
+               if(pi < 1)
+                       throw new IllegalStateException();
+               if(oi < 1)
+                       throw new IllegalStateException();
+               if(ri > 4095)
+                       throw new IllegalStateException();
+               if(pi > 4095)
+                       throw new IllegalStateException();
+               if(oi > 4095)
+                       throw new IllegalStateException();
+
+               if(StmEnum.Add == stmEnum) {
+                       
+                       if(DEBUG)
+                               System.err.println("DEBUG: ClusterChange " + uid + ": Add ri=" + ri + " pi=" + pi + " oi=" + oi + " pc=" + puid + " oc=" + ouid + " offset=" + curPos + " " + p.ordinal + " " + o.ordinal);
+
+                       int predicateKey = getResourceKey(puid, pi);
+                       int objectKey = getResourceKey(ouid, oi);       
+                       try {
+                               claim(clusterKey+ri, predicateKey, objectKey, puid, ouid);
+                       } catch (DatabaseException e) {
+                               e.printStackTrace();
+                       }
+                       
+               } else {
+                       
+                       if(DEBUG)
+                               System.err.println("DEBUG: ClusterChange " + uid + ": Rem ri=" + ri + " pi=" + pi + " oi=" + oi + " pc=" + puid + " oc=" + ouid + " offset=" + curPos + " " + p.ordinal + " " + o.ordinal);
+
+                       int predicateKey = getResourceKey(puid, pi);
+                       int objectKey = getResourceKey(ouid, oi);
+                       try {
+                               deny(clusterKey+ri, predicateKey, objectKey, puid, ouid);
+                       } catch (DatabaseException e) {
+                               e.printStackTrace();
+                       }
+                       
+               }
+               
+       }
+
+       public void process() {
+               
+               foreignPos = 0;
+
+               if(DEBUG) System.err.println("DEBUG: process " + uid + " " + len);
+               
+               // op resolution for statement operation:
+               
+               // 2 first bits
+               // op: 01 | r8-13
+               // op: 10 | r8-13
+
+               // 3 first bits (000)
+               // op: 000000 | r12-13
+               // op: 000001 | r12-13
+               // op: 000010 | r12-13 
+               // op: 000011 | r12-13
+               // op: 000100 | r12-13 
+               // op: 000101 | r12-13 
+               // op: 000110 | r12-13 
+               // op: 000111 | r12-13 
+
+               // 4 first bits
+               // op: 1100 | r10-13
+               // op: 1101 | r10-13 
+               // op: 1110 | r10-13 
+               // op: 1111 | r10-13
+               // op: 0010 | r10-13
+               
+               // 6 bits
+               // op: 00110001 = 49
+               // op: 00110010 = 50
+               // op: 00110011 = 51
+               // other: 0011xxxx
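+               
+               // Worked example (following the dispatch below): an op byte of 0x43
+               // has top bits 01, so it encodes an Add statement between two
+               // ForeignShort resources, and its low six bits (0x03) become
+               // bits 8-13 of ri in processStatement().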
+               
+               while(pos < len) {
+               
+                       int op = bytes[pos++]&0xff;
+                       
+                       // common prefix: 0011
+                       switch(op) {
+                       
+                       case 49:
+                               processStatement(op, StmEnum.Remove, ClusterEnum.Local, ClusterEnum.ForeignShort);
+                               break;
+                       case 50:
+                               processStatement(op, StmEnum.Remove, ClusterEnum.ForeignShort, ClusterEnum.ForeignLong);
+                               break;
+                       case 51:
+                               processStatement(op, StmEnum.Remove, ClusterEnum.ForeignLong, ClusterEnum.ForeignShort);
+                               break;
+                       // 52 = 32+16+4 = 00110100
+                       case 52:
+                               processCreate();
+                               break;
+                       // 53 = 32+16+4+1 = 00110101
+                       case 53:
+                               processSet(op);
+                               break;
+                       // 54 = 32+16+4+2 = 00110110
+                       case 54:
+                               processDelete();
+                               break;
+                       // 55 = 32+16+4+2+1 = 00110111
+                       case 55:
+                               processModify(op);
+                               break;
+                       default:
+                               
+                               int bits6 = ((int)op)&0xC0;
+                               switch(bits6) {
+                               
+                               case 0x40:
+                                       processStatement(op, StmEnum.Add, ClusterEnum.ForeignShort, ClusterEnum.ForeignShort);
+                                       break;
+                               case 0x80:
+                                       processStatement(op, StmEnum.Remove, ClusterEnum.ForeignShort, ClusterEnum.ForeignShort);
+                                       break;
+                               default:
+
+                                       int bits5 = ((int)op)&0xE0;
+                                       if(bits5 == 0) {
+
+                                               int bits2 = (((int)op)&0xFC) >> 2;      
+                                               
+                                               // 3 top bits are 0
+                                               // 6 bits of op
+                                               
+                                               switch(bits2) {
+
+                                               case 0:
+                                                       processStatement(op, StmEnum.Add, ClusterEnum.Local, ClusterEnum.Local);
+                                                       break;
+                                               case 1:
+                                                       processStatement(op, StmEnum.Remove, ClusterEnum.Local, ClusterEnum.Local);
+                                                       break;
+                                               case 2:
+                                                       processStatement(op, StmEnum.Add, ClusterEnum.Local, ClusterEnum.ForeignLong);
+                                                       break;
+                                               case 3:
+                                                       processStatement(op, StmEnum.Remove, ClusterEnum.Local, ClusterEnum.ForeignLong);
+                                                       break;
+                                               case 4:
+                                                       processStatement(op, StmEnum.Add, ClusterEnum.ForeignLong, ClusterEnum.Local);
+                                                       break;
+                                               case 5:
+                                                       processStatement(op, StmEnum.Remove, ClusterEnum.ForeignLong, ClusterEnum.Local);
+                                                       break;
+                                               case 6:
+                                                       processStatement(op, StmEnum.Add, ClusterEnum.ForeignLong, ClusterEnum.ForeignLong);
+                                                       break;
+                                               case 7:
+                                                       processStatement(op, StmEnum.Remove, ClusterEnum.ForeignLong, ClusterEnum.ForeignLong);
+                                                       break;
+                                               
+                                               }
+
+                                       } else {
+
+                                               // 4 top bits of op
+                                               // 4 low bits of payload
+
+                                               int bits4 = (((int)op)&0xF0)>>4;
+                                               switch(bits4) {
+                                               case 0b1100:
+                                                       processStatement(op, StmEnum.Add, ClusterEnum.Local, ClusterEnum.ForeignShort);
+                                                       break;
+                                               case 0b1101:
+                                                       processStatement(op, StmEnum.Add, ClusterEnum.ForeignShort, ClusterEnum.Local);
+                                                       break;
+                                               case 0b1110:
+                                                       processStatement(op, StmEnum.Add, ClusterEnum.ForeignShort, ClusterEnum.ForeignLong);
+                                                       break;
+                                               case 0b1111:
+                                                       processStatement(op, StmEnum.Add, ClusterEnum.ForeignLong, ClusterEnum.ForeignShort);
+                                                       break;
+                                               case 0b0010:
+                                                       processStatement(op, StmEnum.Remove, ClusterEnum.ForeignShort, ClusterEnum.Local);
+                                                       break;
+                                               case 0b0011:
+                                                       int bits3 = (((int)op)&0xF8)>>3;
+                                                       if(bits3 == 7)
+                                                               processSetShort(op);
+                                                       break;
+                                               }
+
+                                       }
+                               
+                               }
+                               
+                       }
+                       
+               }
+               
+       }
+       
+       
+       abstract void create() throws DatabaseException;
+       abstract void delete(int resourceIndex) throws DatabaseException;
+       abstract void modify(int resourceKey, long offset, int size, byte[] bytes, int pos) throws DatabaseException;
+       abstract void set(int resourceKey, byte[] bytes, int length) throws DatabaseException;
+       
+       abstract void claim(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid) throws DatabaseException;
+       abstract void deny(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid) throws DatabaseException;
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase2.java
new file mode 100644 (file)
index 0000000..e821b46
--- /dev/null
@@ -0,0 +1,63 @@
+package org.simantics.acorn.internal;
+
+import org.simantics.db.service.Bytes;
+import org.simantics.db.service.ClusterUID;
+
+public abstract class ClusterUpdateProcessorBase2 {
+
+       final private byte[] bytes;
+       private int pos = 0;
+       final private int len;
+       final private ClusterUID uid;
+       
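+       // The operation blob starts with the same 24-byte header as in
+       // ClusterUpdateProcessorBase: length, version and target ClusterUID.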
+       public ClusterUpdateProcessorBase2(byte[] operations) {
+               this.bytes = operations;
+               this.len = Bytes.readLE4(bytes, 0) + 4; // stored length excludes this 4-byte length field
+               int version = Bytes.readLE4(bytes, 4);
+               assert(version == ClusterChange2.VERSION);
+               long cuid1 = Bytes.readLE8(bytes, 8);
+               long cuid2 = Bytes.readLE8(bytes, 16);
+               pos = 24;
+               uid = ClusterUID.make(cuid1, cuid2);
+       }
+       
+       public ClusterUID getClusterUID() {
+               return uid;
+       }
+
+       private void processSetImmutable(int op) {
+               int value = bytes[pos++]&0xff;
+               setImmutable(value > 0);
+       }
+
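+       // The undo-value operation carries a 4-byte little-endian operand; this
+       // base processor only skips over it.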
+       private void processUndoValue(int op) {
+               Bytes.readLE4(bytes, pos);
+               pos+=4;
+       }
+
+       public void process() {
+               
+               while(pos < len) {
+               
+                       int op = bytes[pos++]&0xff;
+                       
+                       switch(op) {
+                       
+                       case ClusterChange2.SET_IMMUTABLE_OPERATION:
+                               processSetImmutable(op);
+                               break;
+                       case ClusterChange2.UNDO_VALUE_OPERATION:
+                               processUndoValue(op);
+                               break;
+                       default:
+                               throw new IllegalStateException();
+                               
+                       }
+                       
+               }
+               
+       }
+       
+       abstract void setImmutable(boolean value);
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/DebugPolicy.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/DebugPolicy.java
new file mode 100644 (file)
index 0000000..d694abe
--- /dev/null
@@ -0,0 +1,19 @@
+package org.simantics.acorn.internal;
+
+
+/**
+ * @author Antti Villberg
+ */
+public final class DebugPolicy {
+
+    public static final boolean  REPORT_RESOURCE_ID_ALLOCATION = false;
+    public static final boolean  REPORT_CLUSTER_ID_ALLOCATION = false;
+    public static final boolean  REPORT_CLUSTER_EVENTS = false;
+    public static final boolean  REPORT_CLUSTER_LOADING = false;
+    public static final boolean  REPORT_CLUSTER_LOADING_STACKS = false;
+    public static final boolean  REPORT_CLUSTER_STREAM = false;
+    public static final boolean  CLUSTER_COLLECTION = false;
+    public static final boolean  LOG_SERVER_EVENTS = false;
+    public static final boolean  SHOW_SERVER_EVENTS = false; // Requires LOG_SERVER_EVENTS to be true.
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/UndoClusterUpdateProcessor.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/UndoClusterUpdateProcessor.java
new file mode 100644 (file)
index 0000000..d545e51
--- /dev/null
@@ -0,0 +1,112 @@
+package org.simantics.acorn.internal;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.lru.ClusterChangeSet;
+import org.simantics.acorn.lru.ClusterStreamChunk;
+import org.simantics.acorn.lru.ClusterChangeSet.Entry;
+import org.simantics.acorn.lru.ClusterChangeSet.Type;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.service.ClusterUID;
+
+public class UndoClusterUpdateProcessor extends ClusterUpdateProcessorBase {
+       
+       public final static boolean DEBUG = false;
+
+       final private ClusterChangeSet ccs;
+       
+       private int oldValuesIndex = 0;
+       private int statementMaskIndex = 0;
+       
+       final public List<Entry> entries = new ArrayList<Entry>();
+       
+       public UndoClusterUpdateProcessor(ClusterManager client, ClusterStreamChunk chunk, ClusterChangeSet ccs) throws DatabaseException {
+               super(client, readOperation(client, chunk, ccs));
+               this.ccs = ccs;
+       }
+       
+       private static byte[] readOperation(ClusterManager manager, ClusterStreamChunk chunk, ClusterChangeSet ccs) {
+               chunk.makeResident();
+               return chunk.getOperation(ccs.chunkOffset);
+       }
+       
+       @Override
+       void create() throws DatabaseException {
+       }
+
+       @Override
+       void delete(int ri) throws DatabaseException {
+               
+               byte[] old = ccs.oldValues.get(oldValuesIndex);
+               boolean oldValueEx = ccs.oldValueEx.get(oldValuesIndex) > 0;
+               oldValuesIndex++;
+               
+               if(old != null) {
+                       entries.add(new Entry(ri, oldValueEx, old, null));
+               }
+               
+       }
+
+       @Override
+       void modify(int resourceKey, long offset, int size, byte[] bytes, int pos)
+                       throws DatabaseException {
+               
+       }
+
+       @Override
+       void set(int resourceKey, byte[] bytes, int length)
+                       throws DatabaseException {
+
+               byte[] old = ccs.oldValues.get(oldValuesIndex);
+               boolean oldValueEx = ccs.oldValueEx.get(oldValuesIndex) > 0;
+               oldValuesIndex++;
+
+               entries.add(new Entry(resourceKey, oldValueEx, old, Arrays.copyOf(valueBuffer, length)));
+               
+       }
+
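+       // Statement operations are filtered through ccs.statementMask: only entries
+       // whose mask bit is set are recorded for the undo, mirroring claim/deny.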
+       @Override
+       void claim(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid)
+                       throws DatabaseException {
+               
+               boolean add = ccs.statementMask.get(statementMaskIndex++) > 0;
+               if(add) {
+                       entries.add(new Entry(Type.ADD, resourceKey, puid, predicateKey & 0xFFF, ouid, objectKey & 0xFFF));
+               }
+               
+       }
+
+       @Override
+       void deny(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid)
+                       throws DatabaseException {
+               
+               boolean remove = ccs.statementMask.get(statementMaskIndex++) > 0;
+               if(remove) {
+                       entries.add(new Entry(Type.REMOVE, resourceKey, puid, predicateKey & 0xFFF, ouid, objectKey & 0xFFF));
+               }
+
+       }
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/AccessTime.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/AccessTime.java
new file mode 100644 (file)
index 0000000..8a32ef2
--- /dev/null
@@ -0,0 +1,23 @@
+package org.simantics.acorn.lru;
+
+public class AccessTime {
+
+       private long last = 0;
+       
+       private static final AccessTime INSTANCE = new AccessTime();
+       
+       private AccessTime() {
+               
+       }
+       
+       public static AccessTime getInstance() {
+               return INSTANCE;
+       }
+       
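+       // Returns a strictly increasing timestamp: if System.nanoTime() has not
+       // advanced since the previous call, the last value is bumped by one.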
+       public synchronized long getAccessTime() {
+               long result = System.nanoTime();
+               last = Math.max(result, last+1);
+               return last;
+       }
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/CachingClusterSupport.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/CachingClusterSupport.java
new file mode 100644 (file)
index 0000000..a2c4899
--- /dev/null
@@ -0,0 +1,160 @@
+package org.simantics.acorn.lru;
+
+import java.io.InputStream;
+
+import org.simantics.db.Session;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.impl.IClusterTable;
+import org.simantics.db.service.ClusterUID;
+
+public class CachingClusterSupport implements ClusterSupport {
+       
+       private ClusterSupport backend;
+       
+       public CachingClusterSupport(ClusterSupport backend) {
+               this.backend = backend;
+       }
+
+       @Override
+       public int createClusterKeyByClusterUID(ClusterUID clusterUID, long clusterId) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterId(long clusterId) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterKey(int clusterKey) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public int getClusterKeyByClusterUIDOrMake(long id1, long id2) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByResourceKey(int resourceKey) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public long getClusterIdOrCreate(ClusterUID clusterUID) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void addStatement(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void cancelStatement(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void removeStatement(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void cancelValue(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void removeValue(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void setValue(Object cluster, long clusterId, byte[] bytes, int length) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void modiValue(Object cluster, long clusterId, long voffset, int length, byte[] bytes, int offset) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void setImmutable(Object cluster, boolean immutable) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void setDeleted(Object cluster, boolean deleted) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void createResource(Object cluster, short resourceIndex, long clusterId) {
+               backend.createResource(cluster, resourceIndex, clusterId);
+       }
+
+       @Override
+       public void addStatementIndex(Object cluster, int resourceKey, ClusterUID clusterUID, byte op) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void setStreamOff(boolean setOff) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public boolean getStreamOff() {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public InputStream getValueStreamEx(int resourceIndex, long clusterId) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public byte[] getValueEx(int resourceIndex, long clusterId) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public byte[] getValueEx(int resourceIndex, long clusterId, long voffset, int length) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public long getValueSizeEx(int resourceIndex, long clusterId) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public int wait4RequestsLess(int limit) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public Session getSession() {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public IClusterTable getClusterTable() {
+               throw new UnsupportedOperationException();
+       }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ChangeSetInfo.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ChangeSetInfo.java
new file mode 100644 (file)
index 0000000..12351a5
--- /dev/null
@@ -0,0 +1,113 @@
+package org.simantics.acorn.lru;
+
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import org.simantics.db.service.Bytes;
+import org.simantics.utils.datastructures.Pair;
+
+import gnu.trove.list.array.TByteArrayList;
+
+public class ChangeSetInfo extends LRUObject<Long, ChangeSetInfo> {
+
+       private byte[] metadataBytes;
+       private ArrayList<String> clusterChangeSetIds;
+       
+       // Stub
+       public ChangeSetInfo(LRU<Long, ChangeSetInfo> LRU, Path readDir, Long revision, int offset, int length) {
+               super(LRU, revision, readDir, "clusterStream", offset, length, false, false);
+               LRU.map(this);
+       }
+       
+       // New
+       public ChangeSetInfo(LRU<Long, ChangeSetInfo> LRU, Long revision, byte[] bytes, ArrayList<String> clusterChangeSetIds) {
+               super(LRU, revision, LRU.getDirectory(), "clusterStream", true, true);
+               this.metadataBytes = bytes;
+               this.clusterChangeSetIds = clusterChangeSetIds;
+               LRU.insert(this, accessTime);
+       }
+       
+       public ArrayList<String> getCSSIds() {
+               if(VERIFY) verifyAccess();
+               return clusterChangeSetIds;
+       }
+       
+       public byte[] getMetadataBytes() {
+               
+               if(VERIFY) verifyAccess();
+               
+               makeResident();
+               
+               return metadataBytes;
+               
+       }
+       
+       private static void writeLE(TByteArrayList bytes, int value) {
+               
+               bytes.add( (byte) (value & 0xFF));
+               bytes.add((byte) ((value >>> 8) & 0xFF));
+               bytes.add((byte) ((value >>> 16) & 0xFF));
+               bytes.add((byte) ((value >>> 24) & 0xFF));
+
+       }
+
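+       // Serialized layout (little-endian): [metadata length][metadata bytes]
+       // [changeset count] { [id length][id bytes] }*, mirrored by fromFile().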
+       @Override
+       protected Pair<byte[], Integer> toBytes() {
+               
+               TByteArrayList result = new TByteArrayList();
+               writeLE(result, metadataBytes.length);
+               result.add(metadataBytes);
+               writeLE(result, clusterChangeSetIds.size());
+               for(String id : clusterChangeSetIds) {
+                       byte[] bb = id.getBytes();
+                       writeLE(result, bb.length);
+                       result.add(bb);
+               }
+
+               release();
+               
+               byte[] ret = result.toArray();
+               
+               return Pair.make(ret, ret.length);
+               
+       }
+       
+       @Override
+       void release() {
+               clusterChangeSetIds = null;
+               metadataBytes = null;
+       }
+
+       @Override
+       public void fromFile(byte[] data) {
+               
+               clusterChangeSetIds = new ArrayList<String>();
+               
+               int metadataLength = Bytes.readLE4(data, 0);
+               metadataBytes = Arrays.copyOfRange(data, 4, 4+metadataLength);
+               int offset = 4+metadataLength;
+               int numberOfChangeSets = Bytes.readLE4(data, offset);
+               offset += 4;
+               for(int i=0;i<numberOfChangeSets;i++) {
+                       int length = Bytes.readLE4(data, offset);
+                       offset += 4;
+                       String id = new String(Arrays.copyOfRange(data, offset, offset+length));
+                       clusterChangeSetIds.add(id);
+                       offset += length;
+               }
+               
+       }
+
+       @Override
+       String getExtension() {
+               return "cs";
+       }
+       
+       @Override
+       protected boolean overwrite() {
+               return false;
+       }
+
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterChangeSet.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterChangeSet.java
new file mode 100644 (file)
index 0000000..d0f3013
--- /dev/null
@@ -0,0 +1,120 @@
+package org.simantics.acorn.lru;
+
+import java.util.ArrayList;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.internal.Change;
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.db.procore.cluster.ClusterTraits;
+import org.simantics.db.service.ClusterUID;
+
+import gnu.trove.list.array.TByteArrayList;
+
+public class ClusterChangeSet {
+
+       public enum Type {
+               ADD,REMOVE,VALUE
+       }
+       
+       public static class Entry {
+               
+               final Type type;
+               final short subject;
+               final short predicate;
+               final short object;
+               final ClusterUID predicateUID;
+               final ClusterUID objectUID;
+               final boolean oldValueEx;
+               final byte[] oldValue;
+               final byte[] newValue;
+               
+               public Entry(Type type, int subject, ClusterUID predicateUID, int predicate, ClusterUID objectUID, int object) {
+                       this.type = type;
+                       this.subject = (short)(subject & 0xFFF);
+                       this.predicate = (short)predicate;
+                       this.object = (short)object;
+                       this.predicateUID = predicateUID;
+                       this.objectUID = objectUID;
+                       this.oldValueEx = false;
+                       this.oldValue = null;
+                       this.newValue = null;
+               }
+
+               public Entry(int subject, boolean oldValueEx, byte[] oldValue, byte[] newValue) {
+                       if(oldValue == null && newValue == null)
+                               throw new IllegalStateException();
+                       this.type = Type.VALUE;
+                       this.subject = (short)(subject & 0xFFF);
+                       this.predicate = 0;
+                       this.object = 0;
+                       this.predicateUID = null;
+                       this.objectUID = null;
+                       this.oldValueEx = oldValueEx;
+                       this.oldValue = oldValue;
+                       this.newValue = newValue;
+               }
+               
+               public void process(ClusterManager clusters, ClusterChange cs, int clusterKey) {
+                       
+                       Entry e = this;
+                       
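+                       // Replaying an entry emits the inverse operation: VALUE restores the
+                       // old value (or deletes when there was none), ADD is compensated with
+                       // a REMOVE_OPERATION and REMOVE with an ADD_OPERATION.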
+                       if(e.type == Type.VALUE) {
+                               
+                               if(e.oldValue != null) {
+                                       cs.setValue(e.subject, e.oldValue, e.oldValue.length);
+                               } else {
+                                       Change change = new Change();
+                                       change.addStatementIndex(e.subject, null, ClusterChange.DELETE_OPERATION);
+                                       cs.addChange(change);
+                               }
+                               
+                       } else if(e.type == Type.ADD) {
+                               
+                               int s = ClusterTraits.createResourceKeyNoThrow(clusterKey, e.subject);
+                               int p = clusters.getResourceKey(e.predicateUID, e.predicate);
+                               int o = clusters.getResourceKey(e.objectUID, e.object);
+                               Change change = new Change();
+                               change.addStatementIndex(s, null, ClusterChange.REMOVE_OPERATION);
+                               change.addStatementIndex(p, e.predicateUID, (byte)0);
+                               change.addStatementIndex(o, e.objectUID, (byte)0);
+                               cs.addChange(change);
+                               
+                       } else if(e.type == Type.REMOVE) {
+                               
+                               int s = ClusterTraits.createResourceKeyNoThrow(clusterKey, e.subject);
+                               int p = clusters.getResourceKey(e.predicateUID, e.predicate);
+                               int o = clusters.getResourceKey(e.objectUID, e.object);
+                               Change change = new Change();
+                               change.addStatementIndex(s, null, ClusterChange.ADD_OPERATION);
+                               change.addStatementIndex(p, e.predicateUID, (byte)0);
+                               change.addStatementIndex(o, e.objectUID, (byte)0);
+                               cs.addChange(change);
+                               
+                       }
+                       
+               }
+               
+       }
+       
+       final public String id;
+       final public ClusterUID cuid;
+       public String chunkKey;
+       public int chunkOffset = -1;
+       
+       public TByteArrayList statementMask = new TByteArrayList();
+       public TByteArrayList oldValueEx = new TByteArrayList();
+       public ArrayList<byte[]> oldValues = new ArrayList<byte[]>();
+       
+       public ClusterChangeSet(String id ,ClusterUID cuid) {
+               this.id = id;
+               this.cuid = cuid;
+               String[] ss = id.split("\\.");
+               chunkKey = ss[0];
+               chunkOffset = Integer.parseInt(ss[1]);
+       }
+       
+       public ClusterStreamChunk getChunk(ClusterManager manager) {
+               return manager.streamLRU.get(chunkKey);
+       }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java
new file mode 100644 (file)
index 0000000..1cd5822
--- /dev/null
@@ -0,0 +1,346 @@
+package org.simantics.acorn.lru;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.Persistable;
+import org.simantics.acorn.cluster.ClusterImpl;
+import org.simantics.acorn.cluster.ClusterSmall;
+import org.simantics.acorn.cluster.ClusterImpl.ClusterTables;
+import org.simantics.acorn.internal.ClusterSupport2;
+import org.simantics.compressions.CompressionCodec;
+import org.simantics.compressions.Compressions;
+import org.simantics.db.ClusterCreator;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.service.Bytes;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.datastructures.Pair;
+
+public class ClusterInfo extends LRUObject<ClusterUID, ClusterInfo> implements Persistable {
+       
+       final private ClusterManager manager;
+       private ClusterImpl cluster;
+       public int changeSetId;
+       private ClusterUpdateState updateState;
+
+       public static final String COMPRESSION = "LZ4";
+       
+       // Stub
+       public ClusterInfo(ClusterManager manager, LRU<ClusterUID, ClusterInfo> LRU, Path readDirectory, ClusterUID uid, int offset, int length) {
+               super(LRU, uid, readDirectory, uid.toString() + ".cluster", offset, length, false, false);
+               this.manager = manager;
+               this.cluster = null;
+               LRU.map(this);
+       }
+       
+       // New
+       public ClusterInfo(ClusterManager manager, LRU<ClusterUID, ClusterInfo> LRU, ClusterImpl cluster) {
+               super(LRU, cluster.getClusterUID(), LRU.getDirectory(), cluster.getClusterUID().toString() + ".cluster", true, true);
+               this.manager = manager;
+               this.cluster = cluster;
+               LRU.insert(this, accessTime);
+               LRU.swap(getKey());
+       }
+       
+       public <T> T clone(ClusterUID uid, ClusterCreator creator) throws DatabaseException {
+               
+               // Updates have been ensured at this point
+                       
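+               // Fast path: if the cluster is resident its tables can be serialized
+               // right away; otherwise pending updates are waited out below and the
+               // cluster is reloaded from file if needed.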
+               acquireMutex();
+
+               try {
+                       if(isResident()) {
+                               ClusterTables tables = cluster.store();
+                               return creator.create(uid, tables.bytes, tables.ints, tables.longs); 
+                       }
+               } catch (IOException e) {
+                       throw new DatabaseException(e);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+
+               // Ensure pending updates here - this may take some time
+               LRU.waitPending(this, false);
+
+               acquireMutex();
+               try {
+
+                       if(isResident()) {
+                               ClusterTables tables = cluster.store();
+                               return creator.create(uid, tables.bytes, tables.ints, tables.longs); 
+                       } else {
+                               byte[] data = readFile();
+                               ClusterTables tables = new ClusterTables();
+                               loadCluster(getKey(), manager.support, data, tables);
+                               return creator.create(uid, tables.bytes, tables.ints, tables.longs);
+                       }
+
+               } catch (IOException e) {
+                       throw new DatabaseException(e);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+               
+       }
+       
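+       // Compressed cluster layout: an LZ4 block whose last four bytes hold the
+       // little-endian inflated size; the inflated data is [byte count][int count]
+       // [long count] followed by the byte, int and long tables.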
+       static class ClusterDecompressor {
+               
+               byte[] decompressBuffer = new byte[1024*1024];
+
+               public synchronized ClusterTables readCluster(ClusterUID uid, byte[] compressed) throws IOException {
+                       
+                       int deflatedSize = Bytes.readLE4(compressed, compressed.length-4);
+                       
+                       if(decompressBuffer.length < deflatedSize)
+                               decompressBuffer = new byte[Math.max(3*decompressBuffer.length / 2, deflatedSize)];
+                       
+                       CompressionCodec codec = Compressions.get(Compressions.LZ4);
+                       
+                       ByteBuffer input = ByteBuffer.wrap(compressed);
+                       ByteBuffer output = ByteBuffer.wrap(decompressBuffer);
+
+                       int decompressedSize = codec.decompressBuffer(input, 0, compressed.length-4, output, 0, decompressBuffer.length);
+                       assert(decompressedSize <= decompressBuffer.length);
+                       
+                       int byteLength = Bytes.readLE4(decompressBuffer, 0);
+                       int intLength = Bytes.readLE4(decompressBuffer, 4);
+                       int longLength = Bytes.readLE4(decompressBuffer, 8);
+                       
+                       byte[] bytes = new byte[byteLength];
+                       int[] ints = new int[intLength];
+                       long[] longs = new long[longLength];
+                       
+                       System.arraycopy(decompressBuffer, 12, bytes, 0, byteLength);
+                       
+                       int offset = 12+byteLength;
+                       for(int i=0;i<intLength;i++,offset+=4)
+                               ints[i] = Bytes.readLE4(decompressBuffer, offset);
+                       for(int i=0;i<longLength;i++,offset+=8)
+                               longs[i] = Bytes.readLE8(decompressBuffer, offset);
+                       
+                       ClusterTables result = new ClusterTables();
+                       result.bytes = bytes;
+                       result.ints = ints;
+                       result.longs = longs;
+                       return result;
+                       
+               }
+
+               
+       }
+       
+       private static ClusterDecompressor decompressor = new ClusterDecompressor();
+       
+       public void loadCluster(ClusterUID uid, ClusterSupport2 support, byte[] data, ClusterTables tables) throws IOException {
+
+               ClusterTables ts = decompressor.readCluster(uid, data);
+               tables.bytes = ts.bytes;
+               tables.ints = ts.ints;
+               tables.longs = ts.longs;
+
+       }
+       
+       public ClusterImpl loadCluster(ClusterUID uid, ClusterSupport2 support, byte[] data) throws IOException {
+               
+               ClusterTables tables = decompressor.readCluster(uid, data);
+               try {
+                       return ClusterImpl.make(support, tables.longs, tables.ints, tables.bytes, support, support.getClusterKeyByClusterUIDOrMake(uid));
+               } catch (DatabaseException e) {
+                       e.printStackTrace();
+                       return null;
+               }
+               
+       }
+       
+       @Override
+       public void fromFile(byte[] data) {
+               
+               try {
+                       cluster = loadCluster(getKey(), manager.support, data);
+               } catch (IOException e) {
+                       e.printStackTrace();
+               }
+               
+       }
+
+       @Override
+       protected Pair<byte[],Integer> toBytes() {
+               
+               try {
+
+                       byte[] raw = null;
+                       
+                       if(cluster instanceof ClusterSmall) {
+                               raw = cluster.storeBytes();
+                       } else {
+                       
+                               ClusterTables tables = cluster.store();
+                               
+                               raw = new byte[12 + tables.bytes.length + (tables.ints.length<<2) + (tables.longs.length<<3)]; 
+       
+                               Bytes.writeLE(raw, 0, tables.bytes.length);
+                               Bytes.writeLE(raw, 4, tables.ints.length);
+                               Bytes.writeLE(raw, 8, tables.longs.length);
+       
+                               System.arraycopy(tables.bytes, 0, raw, 12, tables.bytes.length);
+                               int offset = 12+tables.bytes.length;
+                               for(int i=0;i<tables.ints.length;i++,offset+=4)
+                                       Bytes.writeLE(raw, offset, tables.ints[i]);
+                               for(int i=0;i<tables.longs.length;i++,offset+=8)
+                                       Bytes.writeLE8(raw, offset, tables.longs[i]);
+                               
+                       }
+
+                       byte[] result = new byte[raw.length + raw.length/8];
+                       
+                       CompressionCodec codec = Compressions.get(Compressions.LZ4);
+                       ByteBuffer input = ByteBuffer.wrap(raw);
+                       ByteBuffer output = ByteBuffer.wrap(result);
+                       //ByteBuffer output = ByteBuffer.allocate(raw.length + raw.length/8);
+                       int compressedSize = codec.compressBuffer(input, 0, raw.length, output, 0);
+
+                       // We append inflated size - cannot prepend since decompression cannot handle offsets in input
+//                     final byte[] rawOutput = new byte[compressedSize+4];
+//                     output.get(rawOutput,0,compressedSize);
+//                     Bytes.writeLE(rawOutput, compressedSize, raw.length);
+//                     return Pair.make(rawOutput, rawOutput.length);
+
+                       Bytes.writeLE(result, compressedSize, raw.length);
+                       return Pair.make(result, compressedSize+4);
+
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       release();
+               }
+               
+       }
+       
+       @Override
+       void release() {
+               cluster = null;
+       }
+
+
+       @Override
+       String getExtension() {
+               return "cluster";
+       }
+       
+       public void scheduleUpdate() {
+
+               if(VERIFY) verifyAccess();
+
+               if(updateState == null)
+                       updateState = new ClusterUpdateState(getKey());
+               updateState.incRef();
+
+       }
+       
+       public ClusterImpl getForUpdate() {
+
+               try {
+                       
+                       acquireMutex();
+
+                       assert(updateState != null);
+                       
+                       makeResident();
+                       setDirty(true);
+                       updateState.beginUpdate();
+                       return cluster;
+                       
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       
+                       releaseMutex();
+                       
+               }
+
+       }
+       
+       public void update(ClusterImpl clu) {
+               
+               if(VERIFY) verifyAccess();
+               
+               makeResident();
+               
+               cluster = clu;
+               updateState.endUpdate();
+               updateState = updateState.decRef();
+
+       }
+       
+       public ClusterImpl getCluster() {
+
+               if(VERIFY) verifyAccess();
+
+               makeResident();
+               
+               return cluster;
+               
+       }
+       
+       @Override
+       public boolean canBePersisted() {
+               
+               if(VERIFY) verifyAccess();
+
+               // A pending or in-progress update prevents persisting this cluster.
+               ClusterUpdateState state = getUpdateState();
+               if(state != null) return false;
+               
+               // updateState is known to be null here, so only the superclass check remains.
+               return super.canBePersisted();
+               
+       }
+
+       private ClusterUpdateState getUpdateState() {
+               
+               if(VERIFY) verifyAccess();
+               
+               return updateState;
+               
+       }
+
+       private ClusterUpdateState getUpdateStateWithoutMutex() {
+               
+               try {
+                       acquireMutex();
+                       return getUpdateState();
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+               
+       }
+
+       /*
+        * This method blocks - no locks here
+        */
+       public void waitForUpdates() {
+               
+               ClusterUpdateState state = getUpdateStateWithoutMutex();
+               if(state != null) {
+                       long start = System.nanoTime();
+                       state.waitForUpdates();
+                       long duration = System.nanoTime() - start;
+                       System.err.println("Waited for updates to cluster " + getKey() + " for " + (1e-6 * duration) + " ms.");
+               }
+               
+       }
+       
+       @Override
+       protected boolean overwrite() {
+               return true;
+       }
+       
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterLRU.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterLRU.java
new file mode 100644 (file)
index 0000000..1f0db54
--- /dev/null
@@ -0,0 +1,315 @@
+package org.simantics.acorn.lru;
+
+import java.nio.file.Path;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.cluster.ClusterImpl;
+import org.simantics.acorn.internal.BijectionMap;
+import org.simantics.db.common.utils.Logger;
+import org.simantics.db.exception.ClusterDoesNotExistException;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.service.ClusterUID;
+
+import gnu.trove.TIntIntHashMap;
+
+public class ClusterLRU extends LRU<ClusterUID, ClusterInfo> {
+
+       final private BijectionMap<ClusterUID, Integer> clusterMapping = new BijectionMap<ClusterUID, Integer>();
+       final private ClusterManager manager;
+       
+       public ClusterLRU(ClusterManager manager, String identifier, Path writeDir) {
+               
+               super(identifier, writeDir);
+               this.manager = manager;
+               
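+               // Pre-map cluster UID (0,2) to cluster key 1 - presumably a built-in root cluster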
+               clusterMapping.map(ClusterUID.make(0,2), clusterMapping.size() + 1);
+
+       }
+
+       public ClusterInfo getOrCreate(ClusterUID uid, boolean makeIfNull) {
+               
+               try {
+                       
+                       acquireMutex();
+                       
+                       ClusterInfo info = get(uid);
+
+                       if (info == null) {
+                               
+                               if(!makeIfNull) throw new IllegalStateException("Asked for an existing cluster " + uid + " that was not found.");
+
+                               Integer clusterKey = clusterMapping.getRight(uid);
+                               if (clusterKey == null) {
+                                       clusterKey = clusterMapping.size() + 1;
+                                       clusterMapping.map(uid, clusterKey);
+                               }
+
+                               info = new ClusterInfo(manager, this, ClusterImpl.make(manager.support,
+                                               uid, clusterKey, manager.support));
+
+                       }
+
+                       return info;
+                       
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       
+                       releaseMutex();
+                       
+               }
+               
+       }
+
+       /*
+        * This method waits for pending updates to finish - no locks are held here
+        */
+       public void ensureUpdates(ClusterUID uid) throws DatabaseException {
+
+               ClusterInfo info = getWithoutMutex(uid);
+               if(info == null)
+                   throw new ClusterDoesNotExistException("Asked for a cluster which does not exist: " + uid);
+               info.waitForUpdates();
+               
+       }
+
+       public ClusterInfo get(ClusterUID uid, boolean makeIfNull, boolean ensureUpdates) throws DatabaseException {
+
+               if (ensureUpdates) {
+                   try {
+                       ensureUpdates(uid);
+                   } catch (ClusterDoesNotExistException e) {
+                       if (makeIfNull) {
+                           Logger.defaultLogError("For debug purposes, creating a cluster which does not exist", e);
+                       } else {
+                           throw e;
+                       }
+                   }
+               }
+               return getOrCreate(uid, makeIfNull);
+       }
+       
+       public ClusterInfo get(ClusterUID uid, boolean makeIfNull) throws DatabaseException {
+               return get(uid, makeIfNull, true);
+               
+       }
+
+       public int getResourceKey(ClusterUID uid, int index) {
+
+               if(VERIFY) verifyAccess();
+
+               Integer i = clusterMapping.getRight(uid);
+               if (i == null) {
+                       i = clusterMapping.size() + 1;
+                       clusterMapping.map(uid, i);
+               }
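+               // A resource key packs the cluster key into the high bits and the
+               // 12-bit resource index into the low bits (see getClusterUIDByResourceKey)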
+               return (i << 12) + index;
+
+       }
+
+       public int getResourceKeyWithoutMutex(ClusterUID uid, int index) {
+               
+               acquireMutex();
+               try {
+                       return getResourceKey(uid, index);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+               
+       }
+
+       public int createClusterKeyByClusterUID(ClusterUID uid) {
+               
+               if(VERIFY) verifyAccess();
+               
+               Integer i = clusterMapping.getRight(uid);
+               if (i == null) {
+                       i = clusterMapping.size() + 1;
+                       clusterMapping.map(uid, i);
+               }
+               return i;
+               
+       }
+
+       public ClusterBase getClusterByClusterUIDOrMake(ClusterUID uid) throws DatabaseException {
+               
+               if(VERIFY) verifyAccess();
+               
+               int key = createClusterKeyByClusterUID(uid);
+               return getClusterByClusterKey(key);
+               
+       }
+
+       public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) {
+
+               if(VERIFY) verifyAccess();
+
+               return createClusterKeyByClusterUID(clusterUID);
+                       
+       }
+       
+       public int getClusterKeyByClusterUIDOrMakeWithoutMutex(ClusterUID clusterUID) {
+               acquireMutex();
+               try {
+                       return getClusterKeyByClusterUIDOrMake(clusterUID);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+       }
+
+       public ClusterBase getClusterByClusterKey(int clusterKey) throws DatabaseException {
+               
+               if(VERIFY) verifyAccess();
+               
+               ClusterUID uid = clusterMapping.getLeft(clusterKey);
+               ClusterInfo info = get(uid, true);
+               info.acquireMutex();
+               try {
+                       return info.getCluster();
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       info.releaseMutex();
+               }
+               
+       }
+
+       public ClusterUID getClusterUIDByResourceKey(int resourceKey)
+                       throws DatabaseException {
+               
+               if(VERIFY) verifyAccess();
+               
+               int clusterKey = resourceKey >> 12;
+               return clusterMapping.getLeft(clusterKey);
+               
+       }
+       
+       public ClusterUID getClusterUIDByResourceKeyWithoutMutex(int resourceKey) throws DatabaseException {
+               acquireMutex();
+               try {
+                       return getClusterUIDByResourceKey(resourceKey);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+       }
+
+       @SuppressWarnings("unchecked")
+       public <T extends ClusterI> T getClusterByClusterUIDOrMakeProxy(ClusterUID uid) throws DatabaseException {
+               return (T) getClusterByClusterUIDOrMake(uid);
+       }
+
+       @SuppressWarnings("unchecked")
+       public <T extends ClusterI> T getClusterProxyByResourceKey(int resourceKey) throws DatabaseException {
+               
+               if(VERIFY) verifyAccess();
+
+               return (T) getClusterByClusterKey(resourceKey >> 12);
+               
+       }
+
+       public int getClusterKeyByUID(long id1, long id2) throws DatabaseException {
+               
+               if(VERIFY) verifyAccess();
+
+               return getClusterKeyByClusterUIDOrMake(ClusterUID.make(id1, id2));
+               
+       }
+       
+       public int getClusterKeyByUIDWithoutMutex(long id1, long id2) throws DatabaseException {
+
+               acquireMutex();
+               try {
+                       return getClusterKeyByClusterUIDOrMake(ClusterUID.make(id1, id2));
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+
+       }
+
+       
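+       /*
+        * Ad-hoc stress test (not referenced by the database code): one thread
+        * fills a TIntIntHashMap while another reads it mostly without
+        * synchronization, re-checking apparent misses under the map lock.
+        */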
+       public static void main(String[] args) throws Exception {
+               
+               long start = System.nanoTime();
+               
+               final TIntIntHashMap map = new TIntIntHashMap(0, 0.9f);
+
+               AtomicInteger counter = new AtomicInteger(0);
+               AtomicBoolean written = new AtomicBoolean(false);
+               
+               //final Semaphore ws = new Semaphore(1);
+               
+               Thread write = new Thread() {
+                       
+                       @Override
+                       public void run() {
+                               try {
+                                       for(int i=0;i<100000000;i++) {
+                                               synchronized(map) {
+//                                             ws.acquire();
+                                               map.put(i, i);
+//                                             ws.release();
+                                               }
+                                               //if((i & 0xfffff) == 0) System.err.println("Write " + i);
+                                               counter.incrementAndGet();
+                                       }
+                                       written.set(true);
+                               } catch (Throwable e) {
+                                       e.printStackTrace();
+                               }
+                       }
+                       
+               };
+               write.start();
+               
+               Thread read = new Thread() {
+                       
+                       @Override
+                       public void run() {
+                               try {
+                                       while(!written.get()) {
+                                               double r = Math.random();
+                                               double max = counter.get();
+                                               int key = (int)(max*r);
+                                               int value = map.get(key);
+                                               if(key != value) {
+                                                       //System.err.println("Read failed " + key + " vs. " + value);
+                                                       //ws.acquire();
+                                                       synchronized(map) {
+                                                               value = map.get(key);
+                                                               if(key != value) {
+                                                                       System.err.println("Read failed for real " + key + " vs. " + value);
+                                                               }
+                                                               //ws.release();
+                                                       }
+                                               }
+                                               //if((key & 0xfffff) == 0) System.err.println("Read " + key);
+                                       }
+                               } catch (Throwable e) {
+                                       e.printStackTrace();
+                               }
+                       }
+                       
+               };
+               read.start();
+               
+               write.join();
+               read.join();
+               
+               long duration = System.nanoTime() - start;
+               System.err.println("took " + 1e-9*duration + "s.");
+               
+       }
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterStreamChunk.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterStreamChunk.java
new file mode 100644 (file)
index 0000000..23cbfb1
--- /dev/null
@@ -0,0 +1,302 @@
+package org.simantics.acorn.lru;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
+import java.util.ArrayList;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.Persistable;
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.internal.UndoClusterUpdateProcessor;
+import org.simantics.compressions.CompressionCodec;
+import org.simantics.compressions.Compressions;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.service.Bytes;
+import org.simantics.utils.datastructures.Pair;
+
+import gnu.trove.list.array.TByteArrayList;
+
+public class ClusterStreamChunk extends LRUObject<String, ClusterStreamChunk> implements Persistable {
+
+       // 500KB is a fine chunk size
+       private static final int MAX_CHUNK_SIZE = 500*1024;
+
+       int size = 0;
+       final private ClusterManager manager;
+       private boolean committed = false;
+       
+       public int nextToProcess = 0;
+       
+       public ArrayList<ClusterUpdateOperation> operations = new ArrayList<ClusterUpdateOperation>();
+       
+       // Stub
+       public ClusterStreamChunk(ClusterManager manager, LRU<String, ClusterStreamChunk> LRU, Path readDir, String id, int offset, int length) {
+               super(LRU, id, readDir, "clusterStream", offset, length, false, false);
+               this.manager = manager;
+               LRU.map(this);
+       }
+       
+       // Creation
+       public ClusterStreamChunk(ClusterManager manager, LRU<String, ClusterStreamChunk> LRU, String id) {
+               super(LRU, id, LRU.getDirectory(), "clusterStream", true, true);
+               this.manager = manager;
+               LRU.insert(this, accessTime);
+       }
+       
+       public UndoClusterUpdateProcessor getUndoProcessor(ClusterManager clusters, int chunkOffset, String ccsId) throws DatabaseException {
+               
+               if(VERIFY) verifyAccess();
+               
+               makeResident(true);
+
+               ClusterUpdateOperation op = operations.get(chunkOffset);
+               if(op == null) throw new IllegalStateException("Cluster Update Operation " + ccsId + " was not found.");
+               if(op.ccs == null) throw new IllegalStateException("Cluster ChangeSet " + ccsId + " was not found.");
+
+               UndoClusterUpdateProcessor proc = new UndoClusterUpdateProcessor(clusters, this, op.ccs);
+               if(proc.version != ClusterChange.VERSION)
+                       return null;
+
+               // This cluster and CCS can still be under preparation => wait
+               clusters.clusterLRU.ensureUpdates(proc.getClusterUID());
+
+               proc.process();
+
+               cancelForceResident();
+               
+               return proc;
+               
+       }
+       
+       public void addOperation(ClusterUpdateOperation op) {
+               if(committed)
+                       throw new IllegalStateException();
+               operations.add(op);
+               size += op.data.length;
+//             if(isCommitted()) {
+//                     LRU.refresh(this);
+//             }
+       }
+       
+       public byte[] getOperation(int index) {
+               return operations.get(index).data;
+       }
+       
+       public void commit() {
+               committed = true;
+       }
+       
+       public boolean isCommitted() {
+               if(size > MAX_CHUNK_SIZE) committed = true;
+               return committed;
+       }
+       
+       @Override
+       public boolean canBePersisted() {
+               if(!super.canBePersisted()) return false;
+               if(!isCommitted()) return false;
+               for(ClusterUpdateOperation op : operations) {
+                       if(!op.finished) return false;
+               }
+               return true;
+       }
+
+       private static void writeLE(TByteArrayList bytes, int value) {
+               
+               bytes.add( (byte) (value & 0xFF));
+               bytes.add((byte) ((value >>> 8) & 0xFF));
+               bytes.add((byte) ((value >>> 16) & 0xFF));
+               bytes.add((byte) ((value >>> 24) & 0xFF));
+
+       }
+
+       final public static void writeLE8(TByteArrayList bytes, long value) {
+               
+               bytes.add( (byte) (value & 0xFF));
+               bytes.add((byte) ((value >>> 8) & 0xFF));
+               bytes.add((byte) ((value >>> 16) & 0xFF));
+               bytes.add((byte) ((value >>> 24) & 0xFF));
+               bytes.add((byte) ((value >>> 32) & 0xFF));
+               bytes.add((byte) ((value >>> 40) & 0xFF));
+               bytes.add((byte) ((value >>> 48) & 0xFF));
+               bytes.add((byte) ((value >>> 56) & 0xFF));
+               
+       }
+       
+       @Override
+       protected Pair<byte[], Integer> toBytes() {
+               
+               assert(isCommitted());
+               
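+               // Serialized layout (all integers LE32): operation count, then per
+               // operation: data length + data, statementMask length + bytes,
+               // oldValueEx length + bytes, oldValues count, and per old value a
+               // length (-1 for null) followed by the bytes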
+               TByteArrayList raw = new TByteArrayList();
+               
+               writeLE(raw, operations.size());
+               
+               for(ClusterUpdateOperation op : operations) {
+                       
+                       writeLE(raw, op.data.length);
+                       raw.add(op.data);
+                       op.data = null;
+                       
+                       writeLE(raw, op.ccs.statementMask.size());
+                       raw.add(op.ccs.statementMask.toArray());
+                       writeLE(raw, op.ccs.oldValueEx.size());
+                       raw.add(op.ccs.oldValueEx.toArray());
+                       writeLE(raw, op.ccs.oldValues.size());
+                       
+                       for(byte[] oldValue : op.ccs.oldValues) {
+                               int len = (oldValue != null ? oldValue.length : -1);
+                               writeLE(raw, len);
+                               if(oldValue != null) {
+                                       raw.add(oldValue);
+                               }
+                       }
+                       
+               }
+               
+               byte[] raw_ = raw.toArray();
+               CompressionCodec codec = Compressions.get(Compressions.LZ4);
+               ByteBuffer input = ByteBuffer.wrap(raw_);
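+               // Allow ~12.5% headroom for incompressible input; LZ4 can expand data slightly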
+               ByteBuffer output = ByteBuffer.allocate(raw_.length + raw_.length/8);
+               int compressedSize = codec.compressBuffer(input, 0, raw_.length, output, 0);
+
+               // We append the uncompressed size - it cannot be prepended, since decompression cannot handle offsets in the input
+               final byte[] rawOutput = new byte[compressedSize+4];
+               output.get(rawOutput,0,compressedSize);
+               Bytes.writeLE(rawOutput, compressedSize, raw_.length);
+
+               release();
+               
+               return Pair.make(rawOutput, rawOutput.length);
+               
+       }
+       
+       @Override
+       void release() {
+               
+               for(ClusterUpdateOperation op : operations) {
+                       op.data = null;
+                       op.ccs = null;
+               }
+
+       }
+
+       static class StreamDecompressor {
+               
+//             byte[] decompressBuffer = new byte[1024*1024];
+
+               public synchronized byte[] decompressBuffer(byte[] compressed) throws IOException {
+                       
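+                       // The trailing 4 bytes hold the uncompressed size appended by toBytes()
+                       // (the local name deflatedSize is misleading - it is the inflated size)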
+                       int deflatedSize = Bytes.readLE4(compressed, compressed.length-4);
+                       
+                       byte[] result = new byte[deflatedSize];
+                       
+//                     if(decompressBuffer.length < deflatedSize)
+//                             decompressBuffer = new byte[Math.max(3*decompressBuffer.length / 2, deflatedSize)];
+                       
+                       CompressionCodec codec = Compressions.get(Compressions.LZ4);
+                       
+                       ByteBuffer input = ByteBuffer.wrap(compressed);
+                       ByteBuffer output = ByteBuffer.wrap(result);
+
+                       int decompressedSize = codec.decompressBuffer(input, 0, compressed.length-4, output, 0, result.length);
+                       assert(decompressedSize  == deflatedSize);
+                       
+                       return result;
+                       
+               }
+
+               
+       }
+       
+       private static StreamDecompressor decompressor = new StreamDecompressor();
+       
+       @Override
+       public void fromFile(byte[] data_) {
+
+               try {
+                       
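+                       // Inverse of toBytes(): decompress, then rebuild each
+                       // ClusterUpdateOperation together with its ClusterChangeSet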
+                       byte[] data = decompressor.decompressBuffer(data_);
+                       
+                       operations = new ArrayList<ClusterUpdateOperation>();
+                       
+                       int offset = 0;
+                       int opLen = Bytes.readLE4(data, offset);
+                       offset += 4;
+                       
+                       for(int i=0;i<opLen;i++) {
+                               
+                               int len = Bytes.readLE4(data, offset);
+                               offset += 4;
+                               byte[] bytes  = new byte[len];
+                               System.arraycopy(data, offset, bytes, 0, len);
+                               offset += len;
+                               
+                               ClusterUpdateOperation op = new ClusterUpdateOperation(manager, bytes);
+                               
+                               String ccsKey = getKey() + "." + i;
+                               
+                               op.ccs = new ClusterChangeSet(ccsKey, op.uid);
+                               op.chunk = this;
+                               
+                               int statementMaskLen = Bytes.readLE4(data, offset);
+                               offset += 4;
+                               op.ccs.statementMask = new TByteArrayList(statementMaskLen);
+                               for(int j=0;j<statementMaskLen;j++)
+                                       op.ccs.statementMask.add(data[offset++]);
+                               
+                               int oldValueExLen = Bytes.readLE4(data, offset);
+                               offset += 4;
+                               op.ccs.oldValueEx = new TByteArrayList(oldValueExLen);
+                               for(int j=0;j<oldValueExLen;j++)
+                                       op.ccs.oldValueEx.add(data[offset++]);
+                               
+                               int oldValuesSize = Bytes.readLE4(data, offset);
+                               offset += 4;
+                               
+                               op.ccs.oldValues = new ArrayList<>(oldValuesSize);
+                               for(int j=0;j<oldValuesSize;j++) {
+
+                                       int oldValueSize = Bytes.readLE4(data, offset);
+                                       offset += 4;
+
+                                       if(oldValueSize == -1) {
+                                               op.ccs.oldValues.add(null);
+                                       } else {
+                                               byte[] oldValue = new byte[oldValueSize];
+                                               System.arraycopy(data, offset, oldValue, 0, oldValueSize);
+                                               offset += oldValueSize;
+                                               op.ccs.oldValues.add(oldValue);
+                                       }
+
+                               }
+                               
+                               operations.add(op);
+                               
+                       }
+                       
+               } catch (IOException e) {
+                       
+                       throw new IllegalStateException(e);
+                       
+               }
+               
+       }
+
+       @Override
+       String getExtension() {
+               return "stream";
+       }
+       
+       @Override
+       public String toString() {
+               return "ClusterStreamChunk " + getKey();
+       }
+       
+       @Override
+       protected boolean overwrite() {
+               return false;
+       }
+       
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterUpdateOperation.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterUpdateOperation.java
new file mode 100644 (file)
index 0000000..40a44bc
--- /dev/null
@@ -0,0 +1,91 @@
+package org.simantics.acorn.lru;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.cluster.ClusterImpl;
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.internal.ClusterChange2;
+import org.simantics.acorn.internal.ClusterUpdateProcessor;
+import org.simantics.acorn.internal.ClusterUpdateProcessor2;
+import org.simantics.db.service.Bytes;
+import org.simantics.db.service.ClusterUID;
+
+final public class ClusterUpdateOperation {
+       
+       final public ClusterUID uid;
+       final protected ClusterManager manager;
+       final protected ClusterInfo info;
+
+       public byte[] data;
+       
+       public ClusterStreamChunk chunk;
+       public ClusterChangeSet ccs;
+       boolean finished = false;
+
+       public ClusterUpdateOperation(ClusterManager manager, byte[] data) {
+               
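+               // Bytes 8..23 of the change data hold the ClusterUID; the change-set
+               // version lives at offset 4 and is read later in runWithData()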
+               long cuid1 = Bytes.readLE8(data, 8);
+               long cuid2 = Bytes.readLE8(data, 16);
+
+               this.manager = manager;
+               this.uid = ClusterUID.make(cuid1, cuid2);
+               this.data = data;
+               this.info = manager.clusterLRU.getOrCreate(uid, true);
+                               
+       }
+       
+       public void finish() {
+               finished = true;
+       }
+       
+       public void scheduled(String ccsInfoId) {
+               ccs = new ClusterChangeSet(ccsInfoId, uid);
+               chunk = ccs.getChunk(manager);
+               manager.addIntoCurrentChangeSet(ccsInfoId);
+       }
+       
+       public void run() {
+               ClusterUpdateOperation op = null;
+               byte[] data = null;
+               chunk.acquireMutex();
+               try {
+                       chunk.makeResident();
+                       op = chunk.operations.get(ccs.chunkOffset);
+                       data = op.data;
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       chunk.releaseMutex();
+               }
+               op.runWithData(data);
+       }
+       
+       public void runWithData(byte[] data) {
+               
+               try {
+                       int version = Bytes.readLE4(data, 4);
+                       if(version == ClusterChange.VERSION) {
+                               ClusterUpdateProcessor processor = new ClusterUpdateProcessor(manager, manager.support, data, this);
+                               ClusterImpl cluster = info.getForUpdate();
+                               cluster = processor.process(cluster);
+                               manager.update(uid, cluster);
+                       } else if (version == ClusterChange2.VERSION) {
+                               ClusterUpdateProcessor2 processor = new ClusterUpdateProcessor2(manager.support, data, this);
+                               ClusterImpl cluster = info.getForUpdate();
+                               processor.process(cluster);
+                               manager.update(uid, cluster);
+                       } else {
+                               throw new IllegalStateException();
+                       }
+               } catch (Throwable t) {
+                       t.printStackTrace();
+               }
+               
+       }
+       
+       @Override
+       public String toString() {
+           StringBuilder sb = new StringBuilder();
+           sb.append("ClusterUpdateOperation [uid=").append(uid).append("] [info=").append(info).append("] [ccs=").append(ccs).append("] [chunk=").append(chunk).append("]");
+           return sb.toString();
+       }
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterUpdateState.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterUpdateState.java
new file mode 100644 (file)
index 0000000..5ce4688
--- /dev/null
@@ -0,0 +1,51 @@
+package org.simantics.acorn.lru;
+
+import java.util.concurrent.Semaphore;
+
+import org.simantics.db.service.ClusterUID;
+
+public class ClusterUpdateState {
+       
+       final ClusterUID uid;
+       final Semaphore lock = new Semaphore(0);
+       int referenceCount = 0;
+       boolean inUpdate = false;
+       
+       ClusterUpdateState(ClusterUID uid) {
+               this.uid = uid;
+       }
+       
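+       // The semaphore starts with zero permits: waitForUpdates() blocks until
+       // decRef() drops the reference count to zero and floods the semaphore with permits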
+       public void waitForUpdates() {
+               try {
+                       lock.acquire();
+               } catch (InterruptedException e) {
+                       e.printStackTrace();
+               }
+       }
+       
+       public synchronized void beginUpdate() {
+//         System.err.println("ClusterUpdateState.beginUpdate() for " + uid + ", inUpdate=" + inUpdate);
+               assert(!inUpdate);
+               inUpdate = true;
+       }
+       
+       public synchronized void endUpdate() {
+//         System.err.println("ClusterUpdateState.endUpdate() for " + uid + ", inUpdate=" + inUpdate);
+               assert(inUpdate);
+               inUpdate = false;
+       }
+       
+       public synchronized void incRef() {
+               referenceCount++;
+       }
+       
+       public synchronized ClusterUpdateState decRef() {
+               referenceCount--;
+               if(referenceCount == 0) {
+                       lock.release(Integer.MAX_VALUE);
+                       return null;
+               }
+               return this;
+       }
+       
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/FileInfo.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/FileInfo.java
new file mode 100644 (file)
index 0000000..660f245
--- /dev/null
@@ -0,0 +1,139 @@
+package org.simantics.acorn.lru;
+
+import java.nio.file.Path;
+
+import org.simantics.db.Database.Session.ResourceSegment;
+import org.simantics.db.server.ProCoreException;
+import org.simantics.utils.datastructures.Pair;
+
+import gnu.trove.list.array.TByteArrayList;
+
+public class FileInfo extends LRUObject<String, FileInfo> {
+
+       private TByteArrayList bytes;
+       
+       // Stub
+       public FileInfo(LRU<String, FileInfo> LRU, Path readDir, String id, int offset, int length) {
+               super(LRU, id, readDir, id.toString() + ".extFile", offset, length, false, false);
+               LRU.map(this);
+       }
+       
+       // New
+       public FileInfo(LRU<String, FileInfo> LRU, String id, int size) {
+               super(LRU, id, LRU.getDirectory(), id.toString() + ".extFile", true, true);
+               this.bytes = new TByteArrayList(size);
+               LRU.insert(this, accessTime);
+       }
+
+       public byte[] getResourceFile() {
+               
+               if(VERIFY) verifyAccess();
+               
+               makeResident();
+               return bytes.toArray();
+
+       }
+       
+       
+       public ResourceSegment getResourceSegment(final byte[] clusterUID,
+                       final int resourceIndex, final long segmentOffset, short segmentSize)
+                       throws ProCoreException {
+
+               if(VERIFY) verifyAccess();
+
+               makeResident();
+
+               try {
+
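+                       // segmentSize carries an unsigned 16-bit length in a signed short;
+                       // the special value -1 requests the whole value (up to 65535 bytes)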
+                       int segSize = segmentSize;
+                       if (segSize < 0)
+                               segSize += 65536;
+                       if (segmentSize == -1)
+                               segSize = Math.min(65535, bytes.size());
+
+                       final long valueSize = bytes.size();
+
+                       final byte[] segment = bytes.toArray((int) segmentOffset, segSize);
+
+                       return new ResourceSegment() {
+
+                               @Override
+                               public long getValueSize() {
+                                       return valueSize;
+                               }
+
+                               @Override
+                               public byte[] getSegment() {
+                                       return segment;
+                               }
+
+                               @Override
+                               public int getResourceIndex() {
+                                       return resourceIndex;
+                               }
+
+                               @Override
+                               public long getOffset() {
+                                       return segmentOffset;
+                               }
+
+                               @Override
+                               public byte[] getClusterId() {
+                                       return clusterUID;
+                               }
+                       };
+
+               } catch (Throwable t) {
+
+                       t.printStackTrace();
+
+               }
+
+               throw new UnsupportedOperationException();
+
+       }
+       
+       public void updateData(byte[] newBytes, long offset, long pos, long size) {
+
+               if(VERIFY) verifyAccess();
+               makeResident();
+
+               if(size == 0) {
+                       bytes.remove((int)offset, (int)(bytes.size()-offset));
+               } else {
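+                       // fill() with equal from/to indices writes nothing but grows the
+                       // list to offset+size (Trove extends the list when toIndex is past
+                       // the end), so the set() below stays within bounds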
+                       bytes.fill((int) (offset + size), (int) (offset + size), (byte) 0);
+                       bytes.set((int) offset, newBytes, (int) pos, (int) size);
+               }
+               
+               setDirty();
+               
+       }
+       
+       @Override
+       public Pair<byte[], Integer> toBytes() {
+               byte[] result = bytes.toArray();
+               release();
+               return Pair.make(result, result.length);
+       }
+       
+       @Override
+       protected void release() {
+               bytes = null;
+       }
+
+       @Override
+       public void fromFile(byte[] data) {
+               bytes = new TByteArrayList(data);
+       }
+
+       @Override
+       protected String getExtension() {
+               return "extFile";
+       }
+       
+       @Override
+       protected boolean overwrite() {
+               return true;
+       }
+
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRU.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRU.java
new file mode 100644 (file)
index 0000000..508127d
--- /dev/null
@@ -0,0 +1,624 @@
+package org.simantics.acorn.lru;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+
+import org.simantics.acorn.GraphClientImpl2;
+import org.simantics.db.common.utils.Logger;
+
+/*
+ * The order rule of synchronization for LRU and LRUObject is:
+ *   § Always lock LRUObject first!
+ * 
+ */
+
+public class LRU<MapKey,MapValue extends LRUObject<MapKey, MapValue>> {
+       
+       public static boolean VERIFY = true;
+
+       final private long swapTime = 5L*1000000000L;
+       final private int swapSize = 200;
+
+       final private HashMap<MapKey, MapValue> map = new HashMap<MapKey, MapValue>();
+       final private TreeMap<Long, MapKey> priorityQueue = new TreeMap<Long, MapKey>();
+       
+       final private Semaphore mutex = new Semaphore(1);
+       final private String identifier;
+       
+       private Path writeDir;
+       
+       private Thread mutexOwner;
+       
+       public Map<String, WriteRunnable> pending = new HashMap<String, WriteRunnable>();
+       
+       public LRU(String identifier, Path writeDir) {
+               this.identifier = identifier;
+               this.writeDir = writeDir;
+               resume();
+       }
+       
+       /*
+        * Public interface
+        */
+       
+       public void acquireMutex() {
+               
+               try {
+                       
+                       while(!mutex.tryAcquire(3, TimeUnit.SECONDS)) {
+                               System.err.println("Mutex is taking a long time to acquire - owner is " + mutexOwner);
+                       }
+                               
+                       if(VERIFY)
+                               mutexOwner = Thread.currentThread();
+                               
+               } catch (InterruptedException e) {
+                       throw new IllegalStateException(e);
+               }
+       }
+       
+       public void releaseMutex() {
+               mutex.release();
+               mutexOwner = null;
+       }
+
+       public void shutdown() {
+               if (GraphClientImpl2.DEBUG)
+                       System.err.println("Shutting down LRU writers " + writers);
+               writers.shutdown();
+               try {
+                       writers.awaitTermination(60, TimeUnit.SECONDS);
+               } catch (InterruptedException e) {
+                       e.printStackTrace();
+               }
+
+       }
+       
+       public void resume() {
+               writers = new ScheduledThreadPoolExecutor(2, new ThreadFactory() {
+                       
+                       @Override
+                       public Thread newThread(Runnable r) {
+                               return new Thread(r, identifier + " File Writer");
+                       }
+                       
+               });
+               if (GraphClientImpl2.DEBUG)
+                   System.err.println("Resuming LRU writers " + writers);
+       }
+
+       /*
+        * This method violates the synchronization order rule between LRU and MapValue.
+        * External synchronization is used to ensure correct operation.
+        */
+       public void persist(ArrayList<String> state) {
+       
+               acquireMutex();
+               
+               try {
+               
+                       for (MapValue value : values()) {
+                               value.acquireMutex();
+                               // for debugging purposes
+                               boolean persisted = false;
+                               try {
+                                       // Persist the value if needed
+                                       persisted = value.persist();
+                               } finally {
+                                       // WriteRunnable may want to acquire the value's mutex, so release it before waiting
+                                       value.releaseMutex();
+                               }
+                               // Wait for the pending write if the value was actually persisted
+                               waitPending(value, false);
+                               // Take lock again
+                               value.acquireMutex();
+                               try {
+                                       // Record the value
+                                       state.add(value.getStateKey());
+                               } catch (Throwable t) {
+                                       throw new IllegalStateException(t);
+                               } finally {
+                                       value.releaseMutex();
+                               }
+                       }
+                       
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+
+       }
+
+       public MapValue getWithoutMutex(MapKey key) {
+               
+               acquireMutex();
+               try {
+                       return get(key);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+               
+       }
+
+       public MapValue get(MapKey key) {
+               
+               if(VERIFY) verifyAccess();
+               
+               return map.get(key);
+               
+       }
+
+       public void map(MapValue info) {
+               
+               if(VERIFY) verifyAccess();
+               
+               map.put(info.getKey(), info);
+               
+       }
+
+       public Collection<MapValue> values() {
+               
+               if(VERIFY) verifyAccess();
+               
+               return map.values();
+               
+       }
+
+       public boolean swapForced() {
+               
+               acquireMutex();
+               
+               try {
+                       return swap(0, 0, null);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+               
+       }
+
+       public boolean swap(long lifeTime, int targetSize) {
+               
+               if(VERIFY) verifyAccess();
+
+               return swap(lifeTime, targetSize, null);
+               
+       }
+
+       /*
+        * This is called under global lock
+        */
+       public void setWriteDir(Path dir) {
+               
+               this.writeDir = dir;
+               
+       }
+
+
+       /*
+        * Package access
+        */
+
+       void insert(MapValue info, long accessTime) {
+               
+               if(VERIFY) verifyAccess();
+
+               map.put(info.getKey(), info);
+               priorityQueue.put(accessTime, info.getKey());
+               
+       }
+
+       /*
+        * Caller holds the LRU mutex - try to refresh the value if its mutex
+        * can be acquired without blocking
+        */
+       boolean tryRefresh(MapValue info) {
+
+               if(VERIFY) verifyAccess();
+               
+               if(!info.tryAcquireMutex())
+                       return false;
+
+               try {
+
+                       priorityQueue.remove(info.getLastAccessTime());
+                       info.accessed();
+                       map.put(info.getKey(), info);
+                       priorityQueue.put(info.getLastAccessTime(), info.getKey());
+                       
+                       return true;
+
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+
+                       info.releaseMutex();
+                       
+               }
+               
+       }
+
+       /*
+        * Caller holds the MapValue mutex; the LRU mutex is acquired here
+        * when needMutex is true
+        */
+       void refresh(MapValue info, boolean needMutex) {
+               
+               if(VERIFY) {
+                       if(!needMutex) verifyAccess();
+                       info.verifyAccess();
+               }
+               
+               if(needMutex)
+                       acquireMutex();
+
+               try {
+
+                       priorityQueue.remove(info.getLastAccessTime());
+                       info.accessed();
+                       map.put(info.getKey(), info);
+                       priorityQueue.put(info.getLastAccessTime(), info.getKey());
+
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+
+                       if(needMutex)
+                               releaseMutex();
+                       
+               }
+               
+       }
+
+       /*
+        * Internal implementation
+        */
+
+       public int size() {
+               
+               if(VERIFY) verifyAccess();
+
+               return priorityQueue.size();
+               
+       }
+
+       boolean swap(MapKey excluded) {
+               
+               if(VERIFY) verifyAccess();
+
+               return swap(swapTime, swapSize, excluded);
+               
+       }
+
+       boolean swap(long lifeTime, int targetSize, MapKey excluded) {
+
+               if(VERIFY) verifyAccess();
+
+               MapValue valueToSwap = getValueToSwap(lifeTime, targetSize, excluded);
+               if(valueToSwap != null) {
+                       
+                       if(valueToSwap.tryAcquireMutex()) {
+                               
+                               try {
+                                       
+                                       if(valueToSwap.canBePersisted()) {
+                                               valueToSwap.persist();
+                                               return true;
+                                       }
+                                       
+                               } catch (Throwable t) {
+                                       throw new IllegalStateException(t);
+                               } finally {
+                                       valueToSwap.releaseMutex();
+                               }
+                       }
+                       
+               }
+               
+               return false;
+
+       }
+
+       
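+       /*
+        * Pops up to 10 candidates off the priority queue; the excluded key is
+        * refreshed back into the queue and skipped.
+        */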
+       private MapValue getValueToSwap1(long lifeTime, int targetSize, MapKey excluded) {
+
+               if(VERIFY) verifyAccess();
+
+               for(int i=0;i<10;i++) {
+
+                       long candidate = getSwapCandidate(lifeTime, targetSize);
+                       if(candidate == 0) return null;
+                       
+                       MapKey key = priorityQueue.remove(candidate);
+                       if(key.equals(excluded)) {
+                               tryRefresh(map.get(key));
+                               continue;
+                       }
+                       
+                       return map.get(key);
+                       
+               }
+               
+               return null;
+
+       }
+       
+       
+       private MapValue getValueToSwap(long lifeTime, int targetSize, MapKey excluded) {
+
+               if(VERIFY) verifyAccess();
+
+               for(int i=0;i<10;i++) {
+                       
+                       // Lock LRU and get a candidate
+                       MapValue value = getValueToSwap1(lifeTime, targetSize, excluded);
+                       if(value == null) return null;
+                       
+                       if(value.tryAcquireMutex()) {
+
+                               try {
+                                       
+                                       // This may lock the object
+                                       if(value.canBePersisted()) return value;
+                                       // Insert back the value
+                                       refresh(value, false);
+       
+                               } catch (Throwable t) {
+                                       throw new IllegalStateException(t);
+                               } finally {
+                                       
+                                       value.releaseMutex();
+                                       
+                               }
+                               
+                       }
+                       
+               }
+               
+               return null;
+               
+       }
+       
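+       /*
+        * Returns the access time of the eviction candidate, or 0 when nothing
+        * should be swapped (System.nanoTime() stamps are assumed to be nonzero).
+        */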
+       private long getSwapCandidate(long lifeTime, int targetSize) {
+               
+               if(VERIFY) verifyAccess();
+
+               if(priorityQueue.isEmpty()) return 0;
+
+               long currentTime = System.nanoTime();
+               Long lowest = priorityQueue.firstKey();
+
+               if(currentTime - lowest > lifeTime || priorityQueue.size() > targetSize) {
+                       return lowest;
+               }
+
+               return 0;
+
+       }
+       
+       /*
+        * Tries to persist this object. Can fail if the object cannot be persisted at this time.
+        * 
+        */
+       @SuppressWarnings("unchecked")
+       boolean persist(Object object_) {
+               
+               MapValue object = (MapValue)object_;
+               
+               if(VERIFY) object.verifyAccess();
+               
+               if(object.isDirty()) {
+
+                       // It is possible that this just became unpersistable. Fail here in this case.
+                       if(!object.canBePersisted()) {
+                               return false;
+                       }
+
+                       assert(object.isResident());
+
+                       Path f = writeDir.resolve(object.getFileName());
+
+                       WriteRunnable runnable = new WriteRunnable(f, object);
+                       
+                       synchronized(pending) {
+                               WriteRunnable existing = pending.put(object.getKey().toString(), runnable);
+                               assert(existing == null);
+                       }
+
+                       writers.execute(runnable);
+
+                       object.setResident(false);
+                       object.setDirty(false);
+
+                       return true;
+
+               } else if(object.isResident()) {
+
+                       object.release();
+                       object.setResident(false);
+                       return false;
+
+               }
+
+               return false;
+               
+       }
+       
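+       /*
+        * Reads the object back from disk unless it is already resident.
+        * Returns the number of bytes read, or 0 if nothing was loaded.
+        */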
+       @SuppressWarnings("unchecked")
+       int makeResident(Object object_, boolean keepResident) {
+
+               MapValue object = (MapValue)object_;
+
+               if(VERIFY) object.verifyAccess();
+
+               try {
+
+                       object.setForceResident(keepResident);
+
+                       if(object.isResident()) {
+                               refresh(object, true);
+                               return 0;
+                       }
+
+                       waitPending(object, true);
+
+                       byte[] data = object.readFile();
+
+                       object.fromFile(data);
+                       object.setResident(true);
+
+                       acquireMutex();
+                       try {
+                               refresh(object, false);
+                               swap(swapTime, swapSize, object.getKey());
+                       } catch (Throwable t) {
+                               throw new IllegalStateException(t);
+                       } finally {
+                               releaseMutex();
+                       }
+
+                       return data.length;
+
+               } catch (IOException e) {
+                       
+                       e.printStackTrace();
+                       
+               }
+               
+               return 0;
+               
+       }
+
+       static int readCounter = 0;
+       static int writeCounter = 0;
+       
+       ScheduledThreadPoolExecutor writers;
+       
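+       /*
+        * If a write for this value is still pending, either the writer thread has
+        * already claimed it (committed == true) and we wait on its semaphore, or
+        * we claim it here and perform the write on the calling thread.
+        */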
+       void waitPending(MapValue value, boolean hasMutex) {
+               
+               WriteRunnable r = null;
+               boolean inProgress = false;
+               synchronized(pending) {
+                       r = pending.get(value.getKey().toString());
+                       if(r != null) {
+                               synchronized(r) {
+                                       if(r.committed) {
+                                               // just being written - just need to wait
+                                               inProgress = true;
+                                       } else {
+                                               r.committed = true;
+                                               // we do the writing
+                                       }
+                               }
+                       }
+               }
+               if(r != null) {
+                       if(inProgress) {
+//                             System.err.println("reader waits for WriteRunnable to finish");
+                               try {
+                                       r.s.acquire();
+                               } catch (InterruptedException e) {
+                                       e.printStackTrace();
+                               }
+                       } else {
+//                             System.err.println("reader took WriteRunnable");
+                               try {
+                                       r.runReally(hasMutex);
+                               } catch (Throwable e) {
+                                       e.printStackTrace();
+                                       Logger.defaultLogError(e);
+                               }
+                       }
+               }
+               
+       }
+       
+       public class WriteRunnable implements Runnable {
+
+               Path bytes;
+               MapValue impl;
+               boolean committed = false;
+               private Semaphore s = new Semaphore(0);
+               
+               WriteRunnable(Path bytes, MapValue impl) {
+                       this.bytes = bytes;
+                       this.impl = impl;
+               }
+               
+               @Override
+               public void run() {
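+                       // The committed flag ensures the file is written exactly once -
+                       // either here or by a reader in waitPending()/runReally()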
+                       synchronized(impl) {
+
+                               synchronized(this) {
+                               
+                                       if(committed) return;
+                                       
+                                       committed = true;
+                               
+                               }
+                               try {
+                                       runReally(false);
+                               } catch (Throwable e) {
+                                       e.printStackTrace();
+                                       Logger.defaultLogError(e);
+                               }
+                       }
+               }
+
+               public void runReally(boolean hasMutex) throws IOException {
+               
+                       if(!hasMutex)
+                               impl.acquireMutex();
+                       
+                       try {
+                               
+                               // These have been set in method persist
+                               assert(!impl.isResident());
+                               assert(!impl.isDirty());
+                               
+                               impl.toFile(bytes);
+
+                               synchronized(pending) {
+                                       pending.remove(impl.getKey().toString());
+                                       s.release(Integer.MAX_VALUE);
+                               }
+                       } finally {
+                               if(!hasMutex)
+                                       impl.releaseMutex();
+                       }
+                       
+               }
+       
+       }
+       
+       public Path getDirectory() {
+               return writeDir;
+       }
+       
+       /*
+        * Protected implementation 
+        * 
+        */
+       
+       protected void verifyAccess() {
+//         assert (mutex.availablePermits() == 0);
+               if (mutex.availablePermits() != 0)
+                   throw new IllegalStateException("identifier=" + identifier + " mutex has " + mutex.availablePermits() + " available permits, should be 0! Current mutexOwner is " + mutexOwner);
+       }
+       
+       /*
+        * Private implementation 
+        * 
+        */
+       
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRUObject.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRUObject.java
new file mode 100644 (file)
index 0000000..1079bf5
--- /dev/null
@@ -0,0 +1,236 @@
+package org.simantics.acorn.lru;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+
+import org.simantics.acorn.FileIO;
+import org.simantics.acorn.Persistable;
+import org.simantics.utils.datastructures.Pair;
+
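+/**
+ * Base class for values managed by an LRU cache. Each object tracks whether
+ * it is resident (loaded in memory) and dirty (modified since the last
+ * write), and protects its state with a binary semaphore; when VERIFY is
+ * enabled, every accessor asserts that the caller holds the mutex.
+ *
+ * A minimal usage sketch (MyValue is a hypothetical subclass, not part of
+ * this change):
+ * <pre>
+ * MyValue v = ...;
+ * v.acquireMutex();
+ * try {
+ *     v.makeResident();
+ *     // work with the value here
+ * } finally {
+ *     v.releaseMutex();
+ * }
+ * </pre>
+ */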
+public abstract class LRUObject<MapKey, MapValue extends LRUObject<MapKey, MapValue>> implements Persistable {
+       
+       public static boolean VERIFY = true;
+       
+       // Final stuff
+       final protected LRU<MapKey, MapValue> LRU;
+       final private Semaphore mutex = new Semaphore(1);
+       final private MapKey key;
+       final private String fileName;
+       
+       // Mutable stuff
+       protected long accessTime = AccessTime.getInstance().getAccessTime();
+       private int offset;
+       private int length;
+       private boolean resident = true;
+       private boolean dirty = true;
+       private boolean forceResident = false;
+       
+       // DEBUG
+//     private boolean isForceResidentSetAfterLastGet = false;
+       
+       private Path readDirectory;
+
+       private Thread mutexOwner;
+
+       // for loading
+       public LRUObject(LRU<MapKey, MapValue> LRU, MapKey key, Path readDirectory, String fileName, int offset, int length, boolean dirty, boolean resident) {
+               this.LRU = LRU;
+               this.key = key;
+               this.fileName = fileName;
+               this.offset = offset;
+               this.length = length;
+               this.readDirectory = readDirectory;
+               this.dirty = dirty;
+               this.resident = resident;
+       }
+
+       // for creating
+       public LRUObject(LRU<MapKey, MapValue> LRU, MapKey key, Path readDirectory, String fileName, boolean dirty, boolean resident) {
+               this(LRU, key, readDirectory, fileName, -1, -1, dirty, resident);
+       }
+
+       /*
+        * Public interface
+        */
+       public MapKey getKey() {
+               // This can be called without mutex
+               return key;
+       }
+       
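+       /**
+        * Blocks until this object's mutex is available, printing a warning
+        * every three seconds naming the current owner (tracked in mutexOwner
+        * when VERIFY is enabled) so stuck acquisitions can be diagnosed.
+        */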
+       public void acquireMutex() {
+               
+               try {
+
+                       while(!mutex.tryAcquire(3, TimeUnit.SECONDS)) {
+                               System.err.println("Mutex is taking a long time to acquire - owner is " + mutexOwner);
+                       }
+                       
+                       if(VERIFY)
+                               mutexOwner = Thread.currentThread();
+
+               } catch (InterruptedException e) {
+                       throw new IllegalStateException(e);
+               }
+       }
+       
+       public boolean tryAcquireMutex() {
+               return mutex.tryAcquire();
+       }
+       
+       public void releaseMutex() {
+               mutex.release();
+       }
+
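+       /**
+        * Serializes this value with toBytes() and writes it into the given
+        * file through FileIO (at offset 0 when overwrite() is true, appended
+        * otherwise), recording the resulting offset and length for later reads.
+        */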
+       @Override
+       public void toFile(Path bytes) throws IOException {
+               if(VERIFY) verifyAccess();
+               Pair<byte[],Integer> pair = toBytes();
+               byte[] data = pair.first;
+               int length = pair.second;
+               FileIO fio = FileIO.get(bytes);
+               int offset = fio.saveBytes(data, length, overwrite());
+               setPosition(offset, length);
+       }
+       
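+       /**
+        * Loads this value into memory through the owning LRU if it is not
+        * already resident. The returned int is whatever LRU.makeResident
+        * reports (presumably the number of bytes loaded; the LRU class is
+        * defined elsewhere).
+        */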
+       public int makeResident() {
+               if(VERIFY) verifyAccess();
+               return LRU.makeResident(this, false);
+       }
+
+       public int makeResident(boolean keepResident) {
+               if(VERIFY) verifyAccess();
+               return LRU.makeResident(this, keepResident);
+       }
+
+       /*
+        * Package implementation details
+        */
+
+       abstract void release();
+       abstract String getExtension();
+       
+       String getStateKey() {
+               String result = getKey().toString() + "#" + getDirectory().getFileName() + "#" + getOffset() + "#" + getLength();
+               if(offset == -1)
+                       throw new IllegalStateException(result);
+               return result;
+       }
+
+       long getLastAccessTime() {
+               if(VERIFY) verifyAccess();
+               return accessTime;
+       }
+       
+       void accessed() {
+               if(VERIFY) verifyAccess();
+               accessTime = AccessTime.getInstance().getAccessTime();
+       }
+       
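+       /**
+        * Asks the owning LRU to persist this value; on success the read
+        * directory is switched to the LRU's current write directory.
+        */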
+       boolean persist() {
+               if(VERIFY) verifyAccess();
+               if(LRU.persist(this)) {
+                       readDirectory = LRU.getDirectory();
+                       return true;
+               } else {
+                       return false;
+               }
+       }
+       
+       void setForceResident(boolean value) {
+               if(VERIFY) verifyAccess();
+               forceResident = value;
+//             isForceResidentSetAfterLastGet = true;
+       }
+       
+       boolean canBePersisted() {
+               if(VERIFY) verifyAccess();
+//             isForceResidentSetAfterLastGet = false;
+               return !forceResident;
+       }
+       
+       boolean isDirty() {
+               if(VERIFY) verifyAccess();
+               return dirty;
+       }
+       
+       boolean isResident() {
+               if(VERIFY) verifyAccess();
+               return resident;
+       }
+       
+       String getFileName() {
+               if(VERIFY) verifyAccess();
+               return fileName;
+       }
+
+       void setResident(boolean value) {
+               if(VERIFY) verifyAccess();
+               resident = value;
+       }
+       
+       void setDirty(boolean value) {
+               if(VERIFY) verifyAccess();
+               dirty = value;
+       }
+       
+       byte[] readFile() throws IOException {
+               if(VERIFY) verifyAccess();
+               Path dir = getDirectory();
+               Path f = dir.resolve(getFileName());
+               FileIO fio = FileIO.get(f);
+               return fio.readBytes(getOffset(), getLength());
+       }
+       
+       /*
+        * Protected implementation details
+        */
+
+       abstract protected boolean overwrite();
+       
+       abstract protected Pair<byte[],Integer> toBytes();
+       
+       protected void setDirty() {
+               if(VERIFY) verifyAccess();
+               dirty = true;
+       }
+       
+       protected void verifyAccess() {
+               assert(mutex.availablePermits() == 0);
+       }
+
+       protected synchronized void cancelForceResident() {
+               setForceResident(false);
+       }
+       
+       /*
+        * Private implementation details
+        */
+       
+       private int getOffset() {
+               if(VERIFY) verifyAccess();
+               return offset;
+       }
+       
+       private int getLength() {
+               if(VERIFY) verifyAccess();
+               return length;
+       }
+       
+       private void setPosition(int offset, int length) {
+               if(VERIFY) verifyAccess();
+               if(offset == -1)
+                       throw new IllegalStateException();
+               this.offset = offset;
+               this.length = length;
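+               // Invariant: values written in overwrite mode always start at offset 0.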
+               if(overwrite() && offset > 0)
+                       throw new IllegalStateException();
+       }
+       
+       private Path getDirectory() {
+               if(VERIFY) verifyAccess();
+               return readDirectory;
+       }
+       
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/db/javacore/HeadState.java b/bundles/org.simantics.acorn/src/org/simantics/db/javacore/HeadState.java
new file mode 100644 (file)
index 0000000..5a96be2
--- /dev/null
@@ -0,0 +1,73 @@
+package org.simantics.db.javacore;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.Serializable;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import org.simantics.acorn.InvalidHeadStateException;
+
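+/**
+ * Serializable snapshot of the database head: the latest change set id,
+ * transaction id, reserved id count and the cluster/file/stream/cs listings.
+ * On disk, head.state is a 20-byte SHA-1 digest followed by the
+ * Java-serialized HeadState object; both load and validateHeadStateIntegrity
+ * recompute the digest over the payload and compare it to the stored prefix.
+ */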
+public class HeadState implements Serializable {
+
+    private static final long serialVersionUID = -4135031566499790077L;
+
+    public int headChangeSetId = 0;
+    public long transactionId = 1;
+    public long reservedIds = 3;
+
+    public ArrayList<String> clusters = new ArrayList<>();
+    public ArrayList<String> files = new ArrayList<>();
+    public ArrayList<String> stream = new ArrayList<>();
+    public ArrayList<String> cs = new ArrayList<>();
+//    public ArrayList<String> ccs = new ArrayList<String>();
+
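+    /**
+     * Reads and checksum-validates head.state under the given directory.
+     * If the file is missing, unreadable or cannot be deserialized, a fresh
+     * default HeadState is returned instead of failing.
+     */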
+    public static HeadState load(Path directory) throws InvalidHeadStateException {
+        Path f = directory.resolve("head.state");
+        try {
+            byte[] bytes = Files.readAllBytes(f);
+            MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
+            int digestLength = sha1.getDigestLength();
+            sha1.update(bytes, digestLength, bytes.length - digestLength);
+            byte[] newChecksum = sha1.digest();
+            if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) {
+                throw new InvalidHeadStateException(
+                        "Checksum " + Arrays.toString(newChecksum) + " does not match expected "
+                                + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + f.toAbsolutePath());
+            }
+            try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes, digestLength, bytes.length - digestLength))) {
+                HeadState state = (HeadState) ois.readObject();
+                return state;
+            }
+        } catch (IOException i) {
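+            // No readable head.state (e.g. a fresh database): fall back to defaults.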
+            return new HeadState();
+        } catch (ClassNotFoundException c) {
+//            throw new Error("HeadState class not found", c);
+            return new HeadState();
+        } catch (NoSuchAlgorithmException e) {
+            throw new Error("SHA-1 Algorithm not found", e);
+        }
+    }
+
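+    /**
+     * Verifies the checksum prefix of an existing head.state file without
+     * deserializing the payload; throws InvalidHeadStateException on mismatch.
+     */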
+    public static void validateHeadStateIntegrity(Path headState) throws InvalidHeadStateException, IOException {
+        try {
+            byte[] bytes = Files.readAllBytes(headState);
+            MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
+            int digestLength = sha1.getDigestLength();
+            sha1.update(bytes, digestLength, bytes.length - digestLength);
+            byte[] newChecksum = sha1.digest();
+            if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) {
+                throw new InvalidHeadStateException(
+                        "Checksum " + Arrays.toString(newChecksum) + " does not match expected "
+                                + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + headState.toAbsolutePath());
+            }
+        } catch (NoSuchAlgorithmException e) {
+            throw new Error("SHA-1 digest not found, should not happen", e);
+        }
+    }
+}