gerrit.simantics Code Review - simantics/platform.git/commitdiff
Merge remote-tracking branch 'origin/svn' (refs/changes/80/80/1)
author Hannu Niemistö <hannu.niemisto@semantum.fi>
Mon, 19 Sep 2016 09:15:51 +0000 (12:15 +0300)
committer Hannu Niemistö <hannu.niemisto@semantum.fi>
Mon, 19 Sep 2016 09:17:21 +0000 (12:17 +0300)
Change-Id: I6de2fe5c0e54e45498ef01e5ee398325dee1a2a7

183 files changed:
.gitignore
bundles/org.apache.batik/.classpath
bundles/org.apache.batik/META-INF/MANIFEST.MF
bundles/org.apache.batik/build.properties
bundles/org.apache.batik/src/.keep [new file with mode: 0644]
bundles/org.apache.batik/src/org/apache/batik/Activator.java [deleted file]
bundles/org.simantics.acorn/.classpath [new file with mode: 0644]
bundles/org.simantics.acorn/.project [new file with mode: 0644]
bundles/org.simantics.acorn/.settings/org.eclipse.jdt.core.prefs [new file with mode: 0644]
bundles/org.simantics.acorn/.svn/wc.db [new file with mode: 0644]
bundles/org.simantics.acorn/META-INF/MANIFEST.MF [new file with mode: 0644]
bundles/org.simantics.acorn/OSGI-INF/component.xml [new file with mode: 0644]
bundles/org.simantics.acorn/OSGI-INF/org.simantics.acorn.AcornDriver.xml [new file with mode: 0644]
bundles/org.simantics.acorn/build.properties [new file with mode: 0644]
bundles/org.simantics.acorn/log4j.properties [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDatabaseManager.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDriver.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/AcornManagement.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/AcornSessionManagerImpl.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/ExternalizableExample.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/FileIO.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/GraphClientImpl2.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/HeadState.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/InvalidHeadStateException.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/MainProgram.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/MainState.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/Persistable.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/UndoClusterSupport.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/backup/AcornBackupProvider.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterBig.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterImpl.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterSmall.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/AcornDatabase.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Activator.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/BijectionMap.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Change.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange2.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterStream.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterSupport2.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor2.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase2.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/DebugPolicy.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/internal/UndoClusterUpdateProcessor.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/AccessTime.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/CachingClusterSupport.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ChangeSetInfo.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterChangeSet.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterLRU.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterStreamChunk.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterUpdateOperation.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterUpdateState.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/FileInfo.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRU.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRUObject.java [new file with mode: 0644]
bundles/org.simantics.acorn/src/org/simantics/db/javacore/HeadState.java [new file with mode: 0644]
bundles/org.simantics.action.ontology/graph.tg
bundles/org.simantics.annotation.ontology/graph.tg
bundles/org.simantics.backup.ontology/build.properties
bundles/org.simantics.backup.ontology/graph.tg
bundles/org.simantics.browsing.ui.ontology/graph.tg
bundles/org.simantics.charts.ontology/graph.tg
bundles/org.simantics.compressions/src/org/simantics/compressions/impl/DecompressingInputStream.java
bundles/org.simantics.databoard/src/org/simantics/databoard/util/URIStringUtils.java
bundles/org.simantics.datatypes.ontology/graph.tg
bundles/org.simantics.db.indexing/META-INF/MANIFEST.MF
bundles/org.simantics.db.procore.ui/src/org/simantics/db/procore/ui/ProCoreUserAgent.java
bundles/org.simantics.desktop.ui.ontology/graph.tg
bundles/org.simantics.diagram.ontology/graph.tg
bundles/org.simantics.document.base.ontology/graph.tg
bundles/org.simantics.document.linking.ontology/graph.tg
bundles/org.simantics.document.ontology/graph.tg
bundles/org.simantics.document.swt.ontology/graph.tg
bundles/org.simantics.dublincore.ontology/graph.tg
bundles/org.simantics.fastlz/LICENSE
bundles/org.simantics.fastlz/README.txt
bundles/org.simantics.fastlz/native/Makefile [deleted file]
bundles/org.simantics.fastlz/native/compile-x64.bat
bundles/org.simantics.fastlz/native/compile-x86.bat
bundles/org.simantics.fastlz/native/compile.bat
bundles/org.simantics.fastlz/native/compile.sh
bundles/org.simantics.fastlz/native/jniWrapper.c
bundles/org.simantics.fastlz/native/lz4.c [deleted file]
bundles/org.simantics.fastlz/native/lz4.h [deleted file]
bundles/org.simantics.fastlz/native/lz4_format_description.txt [deleted file]
bundles/org.simantics.fastlz/native/lz4hc.c [deleted file]
bundles/org.simantics.fastlz/native/lz4hc.h [deleted file]
bundles/org.simantics.fastlz/native/vs2012/fastlz.sln [new file with mode: 0644]
bundles/org.simantics.fastlz/native/vs2012/fastlz.vcxproj [new file with mode: 0644]
bundles/org.simantics.fastlz/native/vs2012/fastlz.vcxproj.filters [new file with mode: 0644]
bundles/org.simantics.fastlz/src/libfastlz-darwin-x86_64.dylib [moved from bundles/org.simantics.fastlz/src/libfastlz-darwin-x86_64.jnilib with 100% similarity]
bundles/org.simantics.fastlz/src/org/simantics/fastlz/java/FastLZJavaInputStream.java
bundles/org.simantics.fastlz/testcases/org/simantics/fastlz/FastLZBasicTests.java
bundles/org.simantics.g2d.ontology/graph.tg
bundles/org.simantics.graph.compiler/META-INF/MANIFEST.MF
bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/GraphCompiler.java
bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/internal/parsing/Graph.g
bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/internal/parsing/GraphLexer.java
bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/internal/validation/ReportCollisions.java
bundles/org.simantics.graph/META-INF/MANIFEST.MF
bundles/org.simantics.graph/src/org/simantics/graph/store/StatementCollision.java [new file with mode: 0644]
bundles/org.simantics.graph/src/org/simantics/graph/store/StatementStore.java
bundles/org.simantics.graphfile.ontology/graph.tg
bundles/org.simantics.help.base/.classpath
bundles/org.simantics.help.base/META-INF/MANIFEST.MF
bundles/org.simantics.help.base/build.properties
bundles/org.simantics.help.base/fontbox-1.8.10.jar [deleted file]
bundles/org.simantics.help.base/jempbox-1.8.10.jar [deleted file]
bundles/org.simantics.help.base/pdfbox-1.8.10-src.zip [deleted file]
bundles/org.simantics.help.base/pdfbox-1.8.10.jar [deleted file]
bundles/org.simantics.help.base/src/org/simantics/help/base/internal/PDFUtil.java
bundles/org.simantics.help.base/xmpbox-1.8.10.jar [deleted file]
bundles/org.simantics.help.ontology/graph.tg
bundles/org.simantics.image.ontology/graph.tg
bundles/org.simantics.image2.ontology/graph.tg
bundles/org.simantics.issues.ontology/graph.tg
bundles/org.simantics.issues.ui.ontology/graph.tg
bundles/org.simantics.layer0/graph.tg
bundles/org.simantics.layer0x.ontology/graph.tg
bundles/org.simantics.ltk.antlr/META-INF/MANIFEST.MF
bundles/org.simantics.modeling.ontology/graph.tg
bundles/org.simantics.modeling.template2d.ontology/graph.tg
bundles/org.simantics.modeling.ui/META-INF/MANIFEST.MF
bundles/org.simantics.platform.ui.ontology/graph.tg
bundles/org.simantics.project.ontology/graph.tg
bundles/org.simantics.project/src/org/simantics/project/management/ServerManagerFactory.java
bundles/org.simantics.scenegraph.ontology/graph.tg
bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/internal/elaboration/subsumption/SubSolver.java
bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/top/ExpressionEvaluator.java
bundles/org.simantics.scl.data/META-INF/MANIFEST.MF
bundles/org.simantics.scl.data/scl/Data/Json.md [new file with mode: 0644]
bundles/org.simantics.scl.data/scl/Data/Json.scl [new file with mode: 0644]
bundles/org.simantics.scl.data/scl/Data/Writer.scl [new file with mode: 0644]
bundles/org.simantics.scl.runtime/scl/Expressions/Equations.scl [deleted file]
bundles/org.simantics.scl.runtime/scl/Prelude.scl
bundles/org.simantics.scl.runtime/scl/StandardLibrary.scl
bundles/org.simantics.scl.runtime/src/org/simantics/scl/runtime/equations/EquationContext.java [deleted file]
bundles/org.simantics.scl.runtime/src/org/simantics/scl/runtime/equations/TestEquationContext.java [deleted file]
bundles/org.simantics.selectionview.ui.ontology/graph.tg
bundles/org.simantics.silk.ontology/graph.tg
bundles/org.simantics.simulation.ontology/graph.tg
bundles/org.simantics.softwareconfiguration.ontology/graph.tg
bundles/org.simantics.spreadsheet.graph/META-INF/MANIFEST.MF
bundles/org.simantics.spreadsheet.graph/src/org/simantics/spreadsheet/graph/ExcelImport.java
bundles/org.simantics.spreadsheet.ontology/graph.tg
bundles/org.simantics.structural.ontology/graph.tg
bundles/org.simantics.user.ontology/graph.tg
bundles/org.simantics.utils.datastructures/build.properties
bundles/org.simantics.viewpoint.ontology/graph.tg
bundles/org.simantics.views.ontology/graph.tg
bundles/org.simantics.workbench.ontology/graph.tg
bundles/pom.xml
features/com.lowagie.text.feature/.gitignore
features/org.apache.lucene4.feature/.gitignore
features/org.simantics.charts.feature/feature.xml
features/org.simantics.db.client.feature/feature.xml
features/org.simantics.desktop.feature/feature.xml
features/org.simantics.eclipsec.launcher.feature/bin/win32/win32/x86/eclipsec.exe [new file with mode: 0644]
features/org.simantics.eclipsec.launcher.feature/bin/win32/win32/x86_64/eclipsec.exe [new file with mode: 0644]
features/org.simantics.eclipsec.launcher.feature/build.properties
features/org.simantics.event.feature/feature.xml
features/org.simantics.g2d.feature/feature.xml
features/org.simantics.issues.feature/feature.xml
features/org.simantics.issues.ui.feature/feature.xml
features/org.simantics.modeling.feature/feature.xml
features/org.simantics.platform.ui.feature/feature.xml
features/org.simantics.sdk.feature/feature.xml
features/org.simantics.spreadsheet.feature/feature.xml
features/org.simantics.utils.feature/feature.xml
features/org.simantics.utils.ui.feature/feature.xml
features/org.simantics.views.swt.client.feature/feature.xml
features/org.simantics.views.swt.feature/feature.xml
features/org.simantics.wiki.ui.feature/feature.xml
releng/org.simantics.sdk.build.p2.site/pom.xml
releng/org.simantics.sdk.build.targetdefinition/org.simantics.sdk.build.targetdefinition-semantum.target [new file with mode: 0644]
releng/org.simantics.sdk.build.targetdefinition/org.simantics.sdk.build.targetdefinition.target
releng/org.simantics.target/simantics-sdk.target [deleted file]
releng/org.simantics.target/simantics.target
sonar-simantics-platform-sdk.properties [new file with mode: 0644]

index 0d0631b566c3668b493965d2b8c1ef421f7c2a2f..7d388077123ad5154301acda4f9f48eaa2f2e41d 100644 (file)
@@ -1,2 +1,4 @@
 /**/bin/\r
+/features/*/target/\r
+/bundles/*/target/\r
 /**/.polyglot.build.properties
index 4a8d60f3c684177ce34c7b3ae8d827101080aa1a..0f23270eff9b84582a281b0c770a009827e1e581 100644 (file)
@@ -1,5 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>\r
 <classpath>\r
+       <classpathentry exported="true" kind="lib" path="lib/xml-apis-ext-1.3.04.jar"/>\r
        <classpathentry exported="true" kind="lib" path="lib/fop-transcoder-allinone-svn-trunk.jar"/>\r
        <classpathentry exported="true" kind="lib" path="lib/batik-codec-1.8.jar"/>\r
        <classpathentry exported="true" kind="lib" path="lib/batik-extension-1.8.jar"/>\r
@@ -11,8 +12,6 @@
        <classpathentry exported="true" kind="lib" path="lib/js.jar"/>\r
        <classpathentry exported="true" kind="lib" path="lib/xalan-2.7.0.jar"/>\r
        <classpathentry exported="true" kind="lib" path="lib/xerces_2_5_0.jar"/>\r
-       <classpathentry exported="true" kind="lib" path="lib/xml-apis-1.3.04.jar"/>\r
-       <classpathentry exported="true" kind="lib" path="lib/xml-apis-ext-1.3.04.jar"/>\r
        <classpathentry exported="true" kind="lib" path="lib/batik-anim-1.8.jar"/>\r
        <classpathentry exported="true" kind="lib" path="lib/xmlgraphics-commons-2.0.jar"/>\r
        <classpathentry exported="true" kind="lib" path="lib/batik-css-1.8.jar"/>\r
index aebf128018b5b0481ce13e4d43de3b7bdc9c27b8..22d86a06df08db8ae7248e550a6484b1171ea151 100644 (file)
@@ -3,22 +3,8 @@ Bundle-ManifestVersion: 2
 Bundle-Name: Batik
 Bundle-SymbolicName: org.apache.batik;singleton:=true
 Bundle-Version: 1.8.0.qualifier
-Bundle-Activator: org.apache.batik.Activator
-Require-Bundle: org.eclipse.ui,\r
- org.eclipse.core.runtime
 Bundle-RequiredExecutionEnvironment: JavaSE-1.8
-Bundle-ActivationPolicy: lazy
 Export-Package: java_cup.runtime,
- javax.xml,
- javax.xml.datatype,
- javax.xml.namespace,
- javax.xml.parsers,
- javax.xml.transform,
- javax.xml.transform.dom,
- javax.xml.transform.sax,
- javax.xml.transform.stream,
- javax.xml.validation,
- javax.xml.xpath,
  org.apache.avalon.framework,
  org.apache.avalon.framework.activity,
  org.apache.avalon.framework.configuration,
@@ -108,12 +94,17 @@ Export-Package: java_cup.runtime,
  org.apache.fop,
  org.apache.fop.accessibility,
  org.apache.fop.apps,
+ org.apache.fop.apps.io,
+ org.apache.fop.complexscripts.bidi,
  org.apache.fop.complexscripts.fonts,
+ org.apache.fop.complexscripts.util,
+ org.apache.fop.events,
  org.apache.fop.fo,
  org.apache.fop.fonts,
  org.apache.fop.fonts.apps,
  org.apache.fop.fonts.autodetect,
  org.apache.fop.fonts.base14,
+ org.apache.fop.fonts.cff,
  org.apache.fop.fonts.substitute,
  org.apache.fop.fonts.truetype,
  org.apache.fop.fonts.type1,
@@ -121,13 +112,18 @@ Export-Package: java_cup.runtime,
  org.apache.fop.pdf,
  org.apache.fop.pdf.xref,
  org.apache.fop.render,
+ org.apache.fop.render.gradient,
  org.apache.fop.render.intermediate,
  org.apache.fop.render.pdf,
  org.apache.fop.render.pdf.extensions,
  org.apache.fop.render.ps,
  org.apache.fop.render.ps.extensions,
  org.apache.fop.render.ps.fonts,
+ org.apache.fop.render.ps.svg,
  org.apache.fop.svg,
+ org.apache.fop.svg.font,
+ org.apache.fop.svg.text,
+ org.apache.fop.traits,
  org.apache.fop.util,
  org.apache.html.dom,
  org.apache.regexp,
@@ -195,7 +191,6 @@ Export-Package: java_cup.runtime,
  org.apache.xml.serialize,
  org.apache.xml.utils,
  org.apache.xml.utils.res,
- org.apache.xmlcommons,
  org.apache.xmlgraphics.fonts,
  org.apache.xmlgraphics.image,
  org.apache.xmlgraphics.image.codec.png,
@@ -260,21 +255,11 @@ Export-Package: java_cup.runtime,
  org.w3c.css.sac,
  org.w3c.css.sac.helpers,
  org.w3c.dom,
- org.w3c.dom.bootstrap,
- org.w3c.dom.css,
  org.w3c.dom.events,
  org.w3c.dom.html,
  org.w3c.dom.ls,
- org.w3c.dom.ranges,
  org.w3c.dom.smil,
- org.w3c.dom.stylesheets,
- org.w3c.dom.svg,
- org.w3c.dom.traversal,
- org.w3c.dom.views,
- org.w3c.dom.xpath,
- org.xml.sax,
- org.xml.sax.ext,
- org.xml.sax.helpers
+ org.w3c.dom.svg
 Bundle-ClassPath: lib/batik-awt-util-1.8.jar,
  lib/batik-dom-1.8.jar,
  lib/batik-ext-1.8.jar,
@@ -298,6 +283,6 @@ Bundle-ClassPath: lib/batik-awt-util-1.8.jar,
  lib/js.jar,
  lib/xalan-2.7.0.jar,
  lib/xerces_2_5_0.jar,
- lib/xml-apis-1.3.04.jar,
- lib/xml-apis-ext-1.3.04.jar,
- lib/fop-transcoder-allinone-svn-trunk.jar
+ lib/fop-transcoder-allinone-svn-trunk.jar,
+ lib/xml-apis-ext-1.3.04.jar
+Require-Bundle: javax.xml;bundle-version="1.3.4"
index 91aa3c77a208e751f3c660a0d2c90c51c72d9c24..d435895c2e79560cc7531b5deb83e061e09bb526 100644 (file)
@@ -25,6 +25,5 @@ bin.includes = META-INF/,\
                lib/js.jar,\\r
                lib/xalan-2.7.0.jar,\\r
                lib/xerces_2_5_0.jar,\\r
-               lib/xml-apis-1.3.04.jar,\\r
-               lib/xml-apis-ext-1.3.04.jar,\\r
-               lib/fop-transcoder-allinone-svn-trunk.jar\r
+               lib/fop-transcoder-allinone-svn-trunk.jar,\\r
+               lib/xml-apis-ext-1.3.04.jar\r
diff --git a/bundles/org.apache.batik/src/.keep b/bundles/org.apache.batik/src/.keep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/bundles/org.apache.batik/src/org/apache/batik/Activator.java b/bundles/org.apache.batik/src/org/apache/batik/Activator.java
deleted file mode 100644 (file)
index 0023b84..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-package org.apache.batik;\r
-\r
-import org.eclipse.ui.plugin.AbstractUIPlugin;\r
-import org.osgi.framework.BundleContext;\r
-\r
-/**\r
- * The activator class controls the plug-in life cycle\r
- */\r
-public class Activator extends AbstractUIPlugin {\r
-\r
-       // The plug-in ID\r
-       public static final String PLUGIN_ID = "org.apache.batik"; //$NON-NLS-1$\r
-\r
-       // The shared instance\r
-       private static Activator plugin;\r
-       \r
-       /**\r
-        * The constructor\r
-        */\r
-       public Activator() {\r
-       }\r
-\r
-       /*\r
-        * (non-Javadoc)\r
-        * @see org.eclipse.ui.plugin.AbstractUIPlugin#start(org.osgi.framework.BundleContext)\r
-        */\r
-       public void start(BundleContext context) throws Exception {\r
-               super.start(context);\r
-               plugin = this;\r
-       }\r
-\r
-       /*\r
-        * (non-Javadoc)\r
-        * @see org.eclipse.ui.plugin.AbstractUIPlugin#stop(org.osgi.framework.BundleContext)\r
-        */\r
-       public void stop(BundleContext context) throws Exception {\r
-               plugin = null;\r
-               super.stop(context);\r
-       }\r
-\r
-       /**\r
-        * Returns the shared instance\r
-        *\r
-        * @return the shared instance\r
-        */\r
-       public static Activator getDefault() {\r
-               return plugin;\r
-       }\r
-\r
-}\r
diff --git a/bundles/org.simantics.acorn/.classpath b/bundles/org.simantics.acorn/.classpath
new file mode 100644 (file)
index 0000000..22f3064
--- /dev/null
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+       <classpathentry kind="src" path="src"/>
+       <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8"/>
+       <classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
+       <classpathentry kind="output" path="bin"/>
+</classpath>
diff --git a/bundles/org.simantics.acorn/.project b/bundles/org.simantics.acorn/.project
new file mode 100644 (file)
index 0000000..9726c0b
--- /dev/null
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+       <name>org.simantics.acorn</name>
+       <comment></comment>
+       <projects>
+       </projects>
+       <buildSpec>
+               <buildCommand>
+                       <name>org.eclipse.jdt.core.javabuilder</name>
+                       <arguments>
+                       </arguments>
+               </buildCommand>
+               <buildCommand>
+                       <name>org.eclipse.pde.ManifestBuilder</name>
+                       <arguments>
+                       </arguments>
+               </buildCommand>
+               <buildCommand>
+                       <name>org.eclipse.pde.SchemaBuilder</name>
+                       <arguments>
+                       </arguments>
+               </buildCommand>
+               <buildCommand>
+                       <name>org.eclipse.pde.ds.core.builder</name>
+                       <arguments>
+                       </arguments>
+               </buildCommand>
+       </buildSpec>
+       <natures>
+               <nature>org.eclipse.pde.PluginNature</nature>
+               <nature>org.eclipse.jdt.core.javanature</nature>
+       </natures>
+</projectDescription>
diff --git a/bundles/org.simantics.acorn/.settings/org.eclipse.jdt.core.prefs b/bundles/org.simantics.acorn/.settings/org.eclipse.jdt.core.prefs
new file mode 100644 (file)
index 0000000..0c68a61
--- /dev/null
@@ -0,0 +1,7 @@
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8
+org.eclipse.jdt.core.compiler.compliance=1.8
+org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
+org.eclipse.jdt.core.compiler.source=1.8
diff --git a/bundles/org.simantics.acorn/.svn/wc.db b/bundles/org.simantics.acorn/.svn/wc.db
new file mode 100644 (file)
index 0000000..9defa90
Binary files /dev/null and b/bundles/org.simantics.acorn/.svn/wc.db differ
diff --git a/bundles/org.simantics.acorn/META-INF/MANIFEST.MF b/bundles/org.simantics.acorn/META-INF/MANIFEST.MF
new file mode 100644 (file)
index 0000000..9152aca
--- /dev/null
@@ -0,0 +1,18 @@
+Manifest-Version: 1.0
+Bundle-ManifestVersion: 2
+Bundle-Name: Acorn Database for Simantics
+Bundle-SymbolicName: org.simantics.acorn
+Bundle-Version: 1.1.2.qualifier
+Bundle-Vendor: Semantum Oy
+Require-Bundle: gnu.trove3;bundle-version="3.0.0",
+ gnu.trove2;bundle-version="2.0.4",
+ org.simantics.db.impl;bundle-version="0.8.0",
+ org.simantics.db.server;bundle-version="1.0.0",
+ org.simantics.compressions;bundle-version="1.0.0",
+ org.simantics.backup,
+ org.eclipse.core.runtime;bundle-version="3.11.1",
+ org.simantics.db.procore
+Bundle-RequiredExecutionEnvironment: JavaSE-1.8
+Bundle-ActivationPolicy: lazy
+Bundle-Activator: org.simantics.acorn.internal.Activator
+Service-Component: OSGI-INF/component.xml, OSGI-INF/org.simantics.acorn.AcornDriver.xml
diff --git a/bundles/org.simantics.acorn/OSGI-INF/component.xml b/bundles/org.simantics.acorn/OSGI-INF/component.xml
new file mode 100644 (file)
index 0000000..5b88ac3
--- /dev/null
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<scr:component xmlns:scr="http://www.osgi.org/xmlns/scr/v1.1.0" name="org.simantics.acorn.backupProvider">
+    <implementation class="org.simantics.acorn.backup.AcornBackupProvider"/>
+    <service>
+        <provide interface="org.simantics.backup.IBackupProvider"/>
+    </service>
+</scr:component>
diff --git a/bundles/org.simantics.acorn/OSGI-INF/org.simantics.acorn.AcornDriver.xml b/bundles/org.simantics.acorn/OSGI-INF/org.simantics.acorn.AcornDriver.xml
new file mode 100644 (file)
index 0000000..f1a97d1
--- /dev/null
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<scr:component xmlns:scr="http://www.osgi.org/xmlns/scr/v1.1.0" name="AcornDriver">
+   <implementation class="org.simantics.acorn.AcornDriver"/>
+   <service>
+      <provide interface="org.simantics.db.Driver"/>
+   </service>
+</scr:component>
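
The two descriptors above declare OSGi Declarative Services components: one publishes AcornBackupProvider as an org.simantics.backup.IBackupProvider, the other publishes AcornDriver as an org.simantics.db.Driver. A minimal consumption sketch (not part of this commit; assumes an active OSGi framework with the org.simantics.acorn bundle started, and that Acorn is the only registered Driver):

    import org.osgi.framework.BundleContext;
    import org.osgi.framework.ServiceReference;
    import org.simantics.db.Driver;

    public final class AcornDriverLookup {
        // Resolve the Driver service declared in
        // OSGI-INF/org.simantics.acorn.AcornDriver.xml; returns null if the
        // component has not been activated yet.
        public static Driver find(BundleContext context) {
            ServiceReference<Driver> ref = context.getServiceReference(Driver.class);
            return ref == null ? null : context.getService(ref);
        }
    }

If several Driver implementations are registered (e.g. the procore driver), a service filter on the driver name would be needed instead of the plain lookup above.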
diff --git a/bundles/org.simantics.acorn/build.properties b/bundles/org.simantics.acorn/build.properties
new file mode 100644 (file)
index 0000000..40374cc
--- /dev/null
@@ -0,0 +1,17 @@
+###############################################################################
+# Copyright (c) 2007, 2010 Association for Decentralized Information Management
+# in Industry THTH ry.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Eclipse Public License v1.0
+# which accompanies this distribution, and is available at
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Contributors:
+#     VTT Technical Research Centre of Finland - initial API and implementation
+###############################################################################
+output.. = bin/
+bin.includes = META-INF/,\
+               .,\
+               log4j.properties,\
+               OSGI-INF/
+source.. = src/
diff --git a/bundles/org.simantics.acorn/log4j.properties b/bundles/org.simantics.acorn/log4j.properties
new file mode 100644 (file)
index 0000000..6fecb6d
--- /dev/null
@@ -0,0 +1,63 @@
+###############################################################################
+# Copyright (c) 2007, 2010 Association for Decentralized Information Management
+# in Industry THTH ry.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Eclipse Public License v1.0
+# which accompanies this distribution, and is available at
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Contributors:
+#     VTT Technical Research Centre of Finland - initial API and implementation
+###############################################################################
+# For the general syntax of property based configuration files see the
+# documentation of org.apache.log4j.PropertyConfigurator.
+
+# The root category uses the appender called rolling. If no priority is
+# specified, the root category assumes the default priority for root
+# which is DEBUG in log4j. The root category is the only category that
+# has a default priority. All other categories need not be assigned a
+# priority in which case they inherit their priority from the
+# hierarchy.
+
+# This will provide console output on log4j configuration loading
+#log4j.debug=true
+
+log4j.rootCategory=warn, stdout
+#log4j.rootCategory=warn
+
+# BEGIN APPENDER: CONSOLE APPENDER (stdout)
+#  first:  type of appender (fully qualified class name)
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+
+#  second: Any configuration information needed for that appender.
+#    Many appenders require a layout.
+log4j.appender.stdout.layout=org.apache.log4j.TTCCLayout
+# log4j.appender.stdout.layout=org.apache.log4j.SimpleLayout
+
+# Possible information overload?
+# log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+#  additionally, some layouts can take additional information --
+#    like the ConversionPattern for the PatternLayout.
+# log4j.appender.stdout.layout.ConversionPattern=%d %-5p %-17c{2} (%30F:%L) %3x - %m%n
+# END APPENDER: CONSOLE APPENDER (stdout)
+
+# BEGIN APPENDER: ROLLING FILE APPENDER (rolling)
+#log4j.appender.rolling=com.tools.logging.PluginFileAppender
+#log4j.appender.rolling=org.apache.log4j.FileAppender
+log4j.appender.rolling=org.apache.log4j.RollingFileAppender
+log4j.appender.rolling.File=procore.log
+log4j.appender.rolling.append=true
+log4j.appender.rolling.MaxFileSize=8192KB
+# Keep one backup file
+log4j.appender.rolling.MaxBackupIndex=1
+log4j.appender.rolling.layout=org.apache.log4j.PatternLayout
+#log4j.appender.rolling.layout.ConversionPattern=%p %t %c - %m%n
+log4j.appender.rolling.layout.ConversionPattern=%-6r [%15.15t] %-5p %30.30c - %m%n
+# END APPENDER: ROLLING FILE APPENDER (rolling)
+
+# BEGIN APPENDER: PLUG-IN LOG APPENDER (plugin)
+log4j.appender.plugin=com.tools.logging.PluginLogAppender
+log4j.appender.plugin.layout=org.apache.log4j.PatternLayout
+#log4j.appender.plugin.layout.ConversionPattern=%p %t %c - %m%n
+log4j.appender.plugin.layout.ConversionPattern=%-6r [%15.15t] %-5p %30.30c - %m%n
+# END APPENDER: PLUG-IN LOG APPENDER (plugin)
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDatabaseManager.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDatabaseManager.java
new file mode 100644 (file)
index 0000000..db2c167
--- /dev/null
@@ -0,0 +1,40 @@
+package org.simantics.acorn;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.simantics.acorn.internal.AcornDatabase;
+import org.simantics.db.Database;
+import org.simantics.db.server.ProCoreException;
+
+/**
+ * @author Tuukka Lehtonen
+ */
+public class AcornDatabaseManager {
+
+    private static Map<String, Database> dbs = new HashMap<String, Database>();
+
+    public static synchronized Database getDatabase(Path folder) throws ProCoreException {
+        Path canonical;
+        try {
+            if (!Files.exists(folder))
+                Files.createDirectories(folder);
+            canonical = folder.toRealPath();
+        } catch (IOException e) {
+            throw new ProCoreException("Could not get canonical path.", e);
+        }
+
+        String canonicalPath = canonical.toString();
+        Database db = dbs.get(canonicalPath);
+        if (null != db)
+            return db;
+
+        db = new AcornDatabase(canonical);
+        dbs.put(canonicalPath, db);
+        return db;
+    }
+
+}
\ No newline at end of file
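
AcornDatabaseManager caches Database instances keyed by the canonical path returned by toRealPath(), creating the folder first if needed, so different spellings of the same directory share one instance. A hedged usage sketch (hypothetical example class, assuming a writable working directory):

    import java.nio.file.Paths;
    import org.simantics.db.Database;
    import org.simantics.db.server.ProCoreException;

    public final class AcornDatabaseManagerExample {
        public static void main(String[] args) throws ProCoreException {
            // Both spellings normalize to the same real path, so the manager
            // returns the same cached Database instance for each call.
            Database d1 = AcornDatabaseManager.getDatabase(Paths.get("data", "db"));
            Database d2 = AcornDatabaseManager.getDatabase(Paths.get("data", ".", "db"));
            System.out.println(d1 == d2); // prints: true
        }
    }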
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDriver.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDriver.java
new file mode 100644 (file)
index 0000000..99ec490
--- /dev/null
@@ -0,0 +1,116 @@
+package org.simantics.acorn;
+
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Properties;
+
+import org.simantics.db.DatabaseUserAgent;
+import org.simantics.db.Driver;
+import org.simantics.db.ServerI;
+import org.simantics.db.ServerReference;
+import org.simantics.db.Session;
+import org.simantics.db.SessionReference;
+import org.simantics.db.exception.DatabaseException;
+
+public class AcornDriver implements Driver {
+
+    public static final String AcornDriverName = "acorn";
+
+    @Override
+    public String getName() {
+        return AcornDriverName;
+    }
+
+    @Override
+    public DatabaseUserAgent getDatabaseUserAgent(String address) throws DatabaseException {
+        Path dbFolder = Paths.get(address);
+        return AcornDatabaseManager.getDatabase(dbFolder).getUserAgent();
+    }
+
+    @Override
+    public void setDatabaseUserAgent(String address, DatabaseUserAgent dbUserAgent) throws DatabaseException {
+        Path dbFolder = Paths.get(address);
+        AcornDatabaseManager.getDatabase(dbFolder).setUserAgent(dbUserAgent);
+    }
+
+    @Override
+    public Session getSession(String address, Properties properties) throws DatabaseException {
+        Path dbFolder = Paths.get(address);
+        Session session = AcornSessionManagerImpl.getInstance().createSession(new SessionReference() {
+            
+            @Override
+            public ServerReference getServerReference() {
+                return new ServerReference() {
+                    
+                    @Override
+                    public Path getDBFolder() {
+                        return dbFolder;
+                    }
+                };
+            }
+
+            @Override
+            public long getSessionId() {
+                return 0L;
+            }
+        }, null);
+        if (!properties.containsKey("clientId"))
+            properties.put("clientId", dbFolder.toFile().getAbsolutePath());
+        session.registerService(Properties.class, properties);
+        Session s = session.peekService(Session.class);
+        if (null == s)
+            session.registerService(Session.class, session);
+        return session;
+    }
+
+    @Override
+    public ServerI getServer(String address, Properties properties) throws DatabaseException {
+        return new AcornServerI(address);
+    }
+
+    @Override
+    public Management getManagement(String address, Properties properties) throws DatabaseException {
+        Path dbFolder = Paths.get(address);
+        return new AcornManagement(dbFolder, properties);
+    }
+    
+    private static class AcornServerI implements ServerI {
+        
+        private String address;
+
+        public AcornServerI(String address) {
+            this.address = address;
+        }
+        
+        @Override
+        public void stop() throws DatabaseException {
+            AcornDatabaseManager.getDatabase(Paths.get(address)).tryToStop();
+        }
+        
+        @Override
+        public void start() throws DatabaseException {
+            AcornDatabaseManager.getDatabase(Paths.get(address)).start();
+        }
+        
+        @Override
+        public boolean isActive() throws DatabaseException {
+            return AcornDatabaseManager.getDatabase(Paths.get(address)).isRunning();
+        }
+        
+        @Override
+        public String getAddress() throws DatabaseException {
+            return address;
+        }
+        
+        @Override
+        public String executeAndDisconnect(String command) throws DatabaseException {
+            return "";
+        }
+        
+        @Override
+        public String execute(String command) throws DatabaseException {
+            return "";
+        }
+    }
+
+}
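
getSession above wraps the database folder into an anonymous SessionReference, defaults the "clientId" property to the folder's absolute path, and registers the Properties (and, if absent, the Session) service on the created session. A hedged sketch of direct use (hypothetical example class; in practice the driver is normally obtained via the service registry as sketched earlier):

    import java.util.Properties;
    import org.simantics.db.Session;
    import org.simantics.db.exception.DatabaseException;

    public final class AcornDriverExample {
        public static void main(String[] args) throws DatabaseException {
            AcornDriver driver = new AcornDriver();
            Properties props = new Properties();
            // "clientId" is filled in by getSession when absent.
            Session session = driver.getSession("data/db", props);
            System.out.println(driver.getName() + " opened " + session);
        }
    }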
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornManagement.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornManagement.java
new file mode 100644 (file)
index 0000000..a7bccf0
--- /dev/null
@@ -0,0 +1,51 @@
+package org.simantics.acorn;
+
+import java.nio.file.Path;
+import java.util.Properties;
+
+import org.simantics.db.Database;
+import org.simantics.db.Driver.Management;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.server.ProCoreException;
+
+public class AcornManagement implements Management {
+
+    private final Database db;
+    private final Properties properties;
+
+    AcornManagement(Path dbFolder, Properties properties) throws ProCoreException {
+        db = AcornDatabaseManager.getDatabase(dbFolder);
+        this.properties = properties;
+    }
+
+    @Override
+    public boolean exist() throws DatabaseException {
+        return db.isFolderOk();
+    }
+
+    @Override
+    public void delete() throws DatabaseException {
+        db.deleteFiles();
+        if (exist())
+            throw new DatabaseException("Failed to delete database. folder=" + db.getFolder());
+    }
+
+    @Override
+    public void create() throws DatabaseException {
+        db.initFolder(properties);
+        if (!exist())
+            throw new DatabaseException("Failed to create Acorn database. folder=" + db.getFolder());
+    }
+
+    @Override
+    public void purge() throws DatabaseException {
+        db.purgeDatabase();
+    }
+
+    @Override
+    public void shutdown() throws DatabaseException {
+        db.tryToStop();
+        db.disconnect();
+    }
+
+}
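
AcornManagement maps the Management lifecycle onto the underlying Database: exist() checks the folder, create() initializes it from the given Properties, delete() removes the files, purge() delegates to purgeDatabase(), and shutdown() stops and disconnects. A create-if-missing sketch under the same assumptions as above (hypothetical helper, not part of this commit):

    import java.util.Properties;
    import org.simantics.db.Driver.Management;
    import org.simantics.db.exception.DatabaseException;

    public final class AcornManagementExample {
        static void ensureDatabase(AcornDriver driver, String address) throws DatabaseException {
            Management mgmt = driver.getManagement(address, new Properties());
            if (!mgmt.exist())
                mgmt.create(); // throws DatabaseException if initialization fails
        }
    }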
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornSessionManagerImpl.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornSessionManagerImpl.java
new file mode 100644 (file)
index 0000000..1a1e160
--- /dev/null
@@ -0,0 +1,125 @@
+package org.simantics.acorn;
+
+import java.nio.file.Path;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.simantics.db.Database;
+import org.simantics.db.Session;
+import org.simantics.db.SessionErrorHandler;
+import org.simantics.db.SessionManager;
+import org.simantics.db.SessionReference;
+import org.simantics.db.authentication.UserAuthenticationAgent;
+import org.simantics.db.common.utils.Logger;
+import org.simantics.db.event.SessionEvent;
+import org.simantics.db.event.SessionListener;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.service.LifecycleSupport;
+import org.simantics.utils.datastructures.ListenerList;
+
+import fi.vtt.simantics.procore.internal.SessionImplDb;
+import fi.vtt.simantics.procore.internal.SessionImplSocket;
+
+public class AcornSessionManagerImpl implements SessionManager {
+
+    private static AcornSessionManagerImpl INSTANCE;
+    
+    private ConcurrentHashMap<SessionImplSocket, SessionImplSocket> sessionMap = new ConcurrentHashMap<SessionImplSocket, SessionImplSocket>();
+    private ListenerList<SessionListener> sessionListeners = new ListenerList<SessionListener>(SessionListener.class);
+    private SessionErrorHandler errorHandler;
+
+       private Database database;
+
+    private AcornSessionManagerImpl() {}
+    
+    void finish() {
+        sessionMap = null;
+        sessionListeners = null;
+    }
+
+    @Override
+    public void addSessionListener(SessionListener listener) {
+        sessionListeners.add(listener);
+    }
+
+    @Override
+    public Session createSession(SessionReference sessionReference, UserAuthenticationAgent authAgent)
+    throws DatabaseException {
+        SessionImplDb sessionImpl = new SessionImplDb(this, authAgent);
+        boolean ok = false;
+        try {
+            Path dbFolder = sessionReference.getServerReference().getDBFolder();
+            database = AcornDatabaseManager.getDatabase(dbFolder);
+            Database.Session dbSession = database.newSession(sessionImpl);
+            sessionImpl.connect(sessionReference, dbSession);
+            sessionMap.put(sessionImpl, sessionImpl);
+            fireSessionOpened(sessionImpl);
+            ok = true;
+        } catch (Throwable e) {
+            Logger.defaultLogError("Connection failed. See exception for details.", e);
+            try {
+                fireSessionClosed(sessionImpl, e);
+                sessionMap.remove(sessionImpl);
+                sessionImpl = null;
+            } catch (Throwable t) {
+            }
+            throw new DatabaseException(e);
+        } finally {
+            if (!ok && null != sessionImpl)
+                sessionImpl.getService(LifecycleSupport.class).close();
+        }
+        return sessionImpl;
+    }
+
+    @Override
+    public void removeSessionListener(SessionListener listener) {
+        sessionListeners.remove(listener);
+    }
+
+    private void fireSessionOpened(SessionImplSocket session) {
+        SessionEvent se = new SessionEvent(session, null);
+        for (SessionListener listener : sessionListeners.getListeners()) {
+               listener.sessionOpened(se);
+        }
+    }
+
+    private void fireSessionClosed(SessionImplSocket session, Throwable cause) {
+        SessionEvent se = new SessionEvent(session, cause);
+        for (SessionListener listener : sessionListeners.getListeners()) {
+               listener.sessionClosed(se);
+        }
+    }
+
+    @Override
+    public void shutdown(Session s, Throwable cause) {
+        SessionImplSocket sis = sessionMap.get(s);
+        if (null == sis)
+            return;
+        try {
+            fireSessionClosed(sis, cause);
+        } finally {
+            sessionMap.remove(s);
+        }
+    }
+
+    @Override
+    public SessionErrorHandler getErrorHandler() {
+        return errorHandler;
+    }
+
+    @Override
+    public void setErrorHandler(SessionErrorHandler errorHandler) {
+        this.errorHandler = errorHandler;
+    }
+
+    public synchronized static AcornSessionManagerImpl getInstance() {
+        if (INSTANCE == null)
+            INSTANCE = new AcornSessionManagerImpl();
+        return INSTANCE;
+    }
+
+    @Override
+    public Database getDatabase() {
+        return database;
+    }
+
+}
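
AcornSessionManagerImpl is a lazily instantiated singleton that tracks sessions in a ConcurrentHashMap and notifies registered SessionListeners on open and close. A hedged sketch (hypothetical helper) of detaching a tracked session through the manager, which fires sessionClosed on all listeners before removing the mapping:

    import org.simantics.db.Session;

    public final class SessionShutdownExample {
        static void notifyClosed(Session session) {
            // shutdown(...) is a no-op if the session is not tracked.
            AcornSessionManagerImpl.getInstance().shutdown(session, null);
        }
    }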
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java
new file mode 100644 (file)
index 0000000..5b8e5ab
--- /dev/null
@@ -0,0 +1,584 @@
+package org.simantics.acorn;
+
+import java.io.IOException;
+import java.math.BigInteger;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.simantics.acorn.cluster.ClusterImpl;
+import org.simantics.acorn.internal.ClusterSupport2;
+import org.simantics.acorn.lru.ChangeSetInfo;
+import org.simantics.acorn.lru.ClusterInfo;
+import org.simantics.acorn.lru.ClusterLRU;
+import org.simantics.acorn.lru.ClusterStreamChunk;
+import org.simantics.acorn.lru.FileInfo;
+import org.simantics.acorn.lru.LRU;
+import org.simantics.db.ClusterCreator;
+import org.simantics.db.ServiceLocator;
+import org.simantics.db.Database.Session.ClusterIds;
+import org.simantics.db.Database.Session.ResourceSegment;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.procore.cluster.ClusterTraits;
+import org.simantics.db.server.ProCoreException;
+import org.simantics.db.service.ClusterSetsSupport;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.threads.logger.ITask;
+import org.simantics.utils.threads.logger.ThreadLogger;
+
+public class ClusterManager {
+
+       private ArrayList<String> currentChanges = new ArrayList<String>();
+
+       public final Path dbFolder;
+       public Path lastSessionDirectory;
+       public Path workingDirectory;
+
+       public LRU<String, ClusterStreamChunk> streamLRU;
+       public LRU<Long, ChangeSetInfo> csLRU;
+       public ClusterLRU clusterLRU;
+       public LRU<String, FileInfo> fileLRU;
+
+       public MainState mainState;
+       public HeadState state;
+
+       private long lastSnapshot = System.nanoTime();
+
+       final public ClusterSupport2 support = new ClusterSupport2(this);
+
+       /*
+        * Public interface
+        * 
+        */
+
+       public ClusterManager(Path dbFolder) {
+               this.dbFolder = dbFolder;
+       }
+
+       public ArrayList<String> getChanges(long changeSetId) {
+               ChangeSetInfo info = csLRU.getWithoutMutex(changeSetId);
+               info.acquireMutex();
+               try {
+                       info.makeResident();
+                       return info.getCSSIds();
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       info.releaseMutex();
+               }
+       }
+
+       public ClusterBase getClusterByClusterKey(int clusterKey) throws DatabaseException {
+               return clusterLRU.getClusterByClusterKey(clusterKey);
+       }
+       
+       public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) throws DatabaseException {
+               return clusterLRU.getClusterByClusterUIDOrMake(clusterUID);
+       }
+
+       public ClusterImpl getClusterByClusterUIDOrMakeProxy(ClusterUID clusterUID) throws DatabaseException {
+               return clusterLRU.getClusterByClusterUIDOrMakeProxy(clusterUID);
+       }
+
+       public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) {
+               return clusterLRU.getClusterKeyByClusterUIDOrMake(clusterUID);
+       }
+
+       public int getClusterKeyByClusterUIDOrMakeWithoutMutex(ClusterUID clusterUID) {
+               return clusterLRU.getClusterKeyByClusterUIDOrMakeWithoutMutex(clusterUID);
+       }
+
+       public int getClusterKeyByUID(long id1, long id2) throws DatabaseException {
+               return clusterLRU.getClusterKeyByUIDWithoutMutex(id1, id2);
+       }
+       
+       public <T extends ClusterI> T getClusterProxyByResourceKey(int resourceKey) throws DatabaseException {
+               return clusterLRU.getClusterProxyByResourceKey(resourceKey);
+       }
+
+       public ClusterUID getClusterUIDByResourceKey(int resourceKey) throws DatabaseException {
+               return clusterLRU.getClusterUIDByResourceKey(resourceKey);
+       }
+
+       public ClusterUID getClusterUIDByResourceKeyWithoutMutex(int resourceKey) throws DatabaseException {
+               return clusterLRU.getClusterUIDByResourceKeyWithoutMutex(resourceKey);
+       }
+
+       /*
+        * Private implementation
+        * 
+        */
+
+       private static long countFiles(Path directory) throws IOException {
+               try (DirectoryStream<Path> ds = Files.newDirectoryStream(directory)) {
+                       int count = 0;
+                       for (@SuppressWarnings("unused") Path p : ds)
+                               ++count;
+                       return count;
+               }
+       }
+
+       public synchronized boolean makeSnapshot(ServiceLocator locator, boolean force) throws IOException {
+
+               // Throttle autosaves: skip if the previous snapshot was taken less than 10 s ago (unless forced)
+               if(!force && System.nanoTime() - lastSnapshot < 10*1000000000L) {
+//                 System.err.println("lastSnapshot too early");
+                   return false;
+               }
+
+               // Cluster files are always there 
+               // Nothing has been written => no need to do anything
+               long amountOfFiles = countFiles(workingDirectory);
+               if(!force && amountOfFiles < 3) {
+//                 System.err.println("amountOfFiles < 3");
+                   return false;
+               }
+
+               System.err.println("makeSnapshot");
+
+               // Schedule writing of all data to disk
+               refreshHeadState();
+
+               // Wait for all files to be written
+               clusterLRU.shutdown();
+               fileLRU.shutdown();
+               streamLRU.shutdown();
+               csLRU.shutdown();
+               
+               persistHeadState();
+               
+               mainState.save(dbFolder);
+
+               ClusterSetsSupport cssi = locator.getService(ClusterSetsSupport.class); 
+               cssi.save();
+
+               amountOfFiles = countFiles(workingDirectory);
+               
+               System.err.println(" -finished: amount of files is " + amountOfFiles);
+
+               workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
+               if (!Files.exists(workingDirectory)) {
+                   Files.createDirectories(workingDirectory);
+               }
+
+               cssi.updateReadAndWriteDirectories(lastSessionDirectory, workingDirectory);
+
+               clusterLRU.setWriteDir(workingDirectory);
+               fileLRU.setWriteDir(workingDirectory);
+               streamLRU.setWriteDir(workingDirectory);
+               csLRU.setWriteDir(workingDirectory);
+
+               clusterLRU.resume();
+               fileLRU.resume();
+               streamLRU.resume();
+               csLRU.resume();
+
+               lastSnapshot = System.nanoTime();
+               
+               return true;
+               
+       }
+       
+       public void refreshHeadState() throws IOException {
+
+               state.clusters.clear();
+               state.files.clear();
+               state.stream.clear();
+               state.cs.clear();
+
+               clusterLRU.persist(state.clusters);
+               fileLRU.persist(state.files);
+               streamLRU.persist(state.stream);
+               csLRU.persist(state.cs);
+
+       }
+       
+       public void persistHeadState() throws IOException {
+
+               // Sync current working directory
+               Files.walk(workingDirectory, 1).filter(Files::isRegularFile).forEach(FileIO::uncheckedSyncPath);
+               state.save(workingDirectory);
+               mainState.headDir++;
+       }
+
+       
+//     public void save() throws IOException {
+//
+//             refreshHeadState();
+//             
+//             clusterLRU.shutdown();
+//             fileLRU.shutdown();
+//             streamLRU.shutdown();
+//             csLRU.shutdown();
+//
+//             persistHeadState();
+//
+//             mainState.save(getBaseDirectory());
+
+//             try {
+//                     ThreadLogVisualizer visualizer = new ThreadLogVisualizer();
+//                     visualizer.read(new DataInputStream(new FileInputStream(
+//                                     ThreadLogger.LOG_FILE)));
+//                     visualizer.visualize3(new PrintStream(ThreadLogger.LOG_FILE
+//                                     + ".svg"));
+//             } catch (FileNotFoundException e) {
+//                     // TODO Auto-generated catch block
+//                     e.printStackTrace();
+//             }
+
+               // System.err.println("-- load statistics --");
+               // for(Pair<ClusterUID, Integer> entry :
+               // CollectionUtils.valueSortedEntries(histogram)) {
+               // System.err.println(" " + entry.second + " " + entry.first);
+               // }
+
+//     }
+       
+       private void acquireAll() {
+               clusterLRU.acquireMutex();
+               fileLRU.acquireMutex();
+               streamLRU.acquireMutex();
+               csLRU.acquireMutex();
+       }
+       
+       private void releaseAll() {
+               csLRU.releaseMutex();
+               streamLRU.releaseMutex();
+               fileLRU.releaseMutex();
+               clusterLRU.releaseMutex();
+       }
+
+       public void load() throws IOException {
+
+               // Main state
+               mainState = MainState.load(dbFolder);
+
+               lastSessionDirectory = dbFolder.resolve(Integer.toString(mainState.headDir - 1));
+               
+               // Head State
+               try {
+            state = HeadState.load(lastSessionDirectory);
+        } catch (InvalidHeadStateException e) {
+            // For backwards compatibility only!
+            Throwable cause = e.getCause();
+            if (cause instanceof Throwable) {
+                try {
+                    org.simantics.db.javacore.HeadState oldState = org.simantics.db.javacore.HeadState.load(lastSessionDirectory);
+                    
+                    HeadState newState = new HeadState();
+                    newState.clusters = oldState.clusters;
+                    newState.cs = oldState.cs;
+                    newState.files = oldState.files;
+                    newState.stream = oldState.stream;
+                    newState.headChangeSetId = oldState.headChangeSetId;
+                    newState.reservedIds = oldState.reservedIds;
+                    newState.transactionId = oldState.transactionId;
+                    state = newState;
+                } catch (InvalidHeadStateException e1) {
+                    throw new IOException("Could not load HeadState due to corruption", e1);
+                }
+            } else {
+                // This should never happen as MainState.load() checks the integrity
+                // of head.state files and rolls back in cases of corruption until a
+                // consistent state is found (could be case 0 - initial db state)
+                // IF this does happen something is completely wrong
+                throw new IOException("Could not load HeadState due to corruption", e);
+            }
+        }
+
+               workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
+               Files.createDirectories(workingDirectory);
+
+               csLRU = new LRU<Long, ChangeSetInfo>("Change Set", workingDirectory);
+               streamLRU = new LRU<String, ClusterStreamChunk>("Cluster Stream", workingDirectory);
+               clusterLRU = new ClusterLRU(this, "Cluster", workingDirectory);
+               fileLRU = new LRU<String, FileInfo>("External Value", workingDirectory);
+
+               acquireAll();
+               
+               // Clusters
+               for (String clusterKey : state.clusters) {
+                       String[] parts1 = clusterKey.split("#");
+                       String[] parts = parts1[0].split("\\.");
+                       long first = new BigInteger(parts[0], 16).longValue();
+                       long second = new BigInteger(parts[1], 16).longValue();
+                       ClusterUID uuid = ClusterUID.make(first, second);
+                       Path readDir = dbFolder.resolve(parts1[1]);
+                       int offset = Integer.parseInt(parts1[2]);
+                       int length = Integer.parseInt(parts1[3]);
+                       clusterLRU.map(new ClusterInfo(this, clusterLRU, readDir, uuid, offset, length));
+               }
+               // Files
+               for (String fileKey : state.files) {
+//                     System.err.println("loadFile: " + fileKey);
+                       String[] parts = fileKey.split("#");
+                       Path readDir = dbFolder.resolve(parts[1]);
+                       int offset = Integer.parseInt(parts[2]);
+                       int length = Integer.parseInt(parts[3]);
+                       FileInfo info = new FileInfo(fileLRU, readDir, parts[0], offset, length);
+                       fileLRU.map(info);
+               }
+               // Update chunks
+               for (String fileKey : state.stream) {
+//                     System.err.println("loadStream: " + fileKey);
+                       String[] parts = fileKey.split("#");
+                       Path readDir = dbFolder.resolve(parts[1]);
+                       int offset = Integer.parseInt(parts[2]);
+                       int length = Integer.parseInt(parts[3]);
+                       ClusterStreamChunk info = new ClusterStreamChunk(this,
+                                       streamLRU, readDir, parts[0], offset, length);
+                       streamLRU.map(info);
+               }
+               // Change sets
+               for (String fileKey : state.cs) {
+                       String[] parts = fileKey.split("#");
+                       Path readDir = dbFolder.resolve(parts[1]);
+                       Long revisionId = Long.parseLong(parts[0]);
+                       int offset = Integer.parseInt(parts[2]);
+                       int length = Integer.parseInt(parts[3]);
+                       ChangeSetInfo info = new ChangeSetInfo(csLRU, readDir, revisionId, offset, length);
+                       csLRU.map(info);
+               }
+               
+               releaseAll();
+
+       }
+
+       public <T> T clone(ClusterUID uid, ClusterCreator creator)
+                       throws DatabaseException {
+               
+               clusterLRU.ensureUpdates(uid);
+               
+               ClusterInfo info = clusterLRU.getWithoutMutex(uid);
+               return info.clone(uid, creator);
+
+       }
+
+       //private int loadCounter = 0;
+
+       public static void startLog(String msg) {
+               tasks.put(msg, ThreadLogger.getInstance().begin(msg));
+       }
+
+       public static void endLog(String msg) {
+               ITask task = tasks.get(msg);
+               if (task != null)
+                       task.finish();
+       }
+
+       static Map<String, ITask> tasks = new HashMap<String, ITask>();
+
+       public void update(ClusterUID uid, ClusterImpl clu) {
+
+               ClusterInfo info = clusterLRU.getWithoutMutex(uid);
+               info.acquireMutex();
+               try {
+                       info.update(clu);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       info.releaseMutex();
+               }
+               
+       }
+
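+       // Acorn does not appear to use per-cluster numeric ids; a constant placeholder is returned.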
+       public long getClusterIdOrCreate(ClusterUID clusterUID) {
+               return 1;
+       }
+
+       public int getResourceKey(ClusterUID uid, int index) {
+               return clusterLRU.getResourceKey(uid, index);
+       }
+
+       public int getResourceKeyWitoutMutex(ClusterUID uid, int index) {
+               return clusterLRU.getResourceKeyWithoutMutex(uid, index);
+       }
+
+       public ClusterIds getClusterIds() throws ProCoreException {
+
+               clusterLRU.acquireMutex();
+
+               try {
+
+                       Collection<ClusterInfo> infos = clusterLRU.values();
+                       final int status = infos.size();
+                       final long[] firsts = new long[status];
+                       final long[] seconds = new long[status];
+
+                       int index = 0;
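+                       // Note: only the second component of each ClusterUID is reported; firsts stay 0.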
+                       for (ClusterInfo info : infos) {
+                               firsts[index] = 0;
+                               seconds[index] = info.getKey().second;
+                               index++;
+                       }
+
+                       return new ClusterIds() {
+
+                               @Override
+                               public int getStatus() {
+                                       return status;
+                               }
+
+                               @Override
+                               public long[] getFirst() {
+                                       return firsts;
+                               }
+
+                               @Override
+                               public long[] getSecond() {
+                                       return seconds;
+                               }
+
+                       };
+
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       clusterLRU.releaseMutex();
+               }
+
+       }
+
+       public void addIntoCurrentChangeSet(String ccs) {
+               
+               csLRU.acquireMutex();
+
+               try {
+                       
+                       currentChanges.add(ccs);
+                       
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       
+                       csLRU.releaseMutex();
+                       
+               }
+
+       }
+
+       public void commitChangeSet(long changeSetId, byte[] data) {
+               csLRU.acquireMutex();
+               try {
+                       ArrayList<String> csids = new ArrayList<String>(currentChanges);
+                       currentChanges = new ArrayList<String>();
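+                       // The ChangeSetInfo constructor presumably registers the new instance with csLRU.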
+                       new ChangeSetInfo(csLRU, changeSetId, data, csids);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       csLRU.releaseMutex();
+               }
+       }
+
+       public byte[] getMetadata(long changeSetId) {
+               
+               ChangeSetInfo info = csLRU.getWithoutMutex(changeSetId);
+               if (info == null) return null;
+               info.acquireMutex();
+               try {
+                       return info.getMetadataBytes();
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       info.releaseMutex();
+               }
+               
+       }
+
+       public byte[] getResourceFile(final byte[] clusterUID,
+                       final int resourceIndex) throws ProCoreException {
+
+               ClusterUID uid = ClusterUID.make(clusterUID, 0);
+               String key = uid.toString() + "_" + resourceIndex;
+               FileInfo info = fileLRU.getWithoutMutex(key);
+               if(info == null) return null;
+               info.acquireMutex();
+               try {
+                       return info.getResourceFile();
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       info.releaseMutex();
+               }
+
+       }
+
+       public ResourceSegment getResourceSegment(final byte[] clusterUID,
+                       final int resourceIndex, final long segmentOffset, short segmentSize)
+                       throws ProCoreException {
+
+               ClusterUID uid = ClusterUID.make(clusterUID, 0);
+
+               String key = uid.toString() + "_" + resourceIndex;
+               FileInfo info = fileLRU.getWithoutMutex(key);
+               if(info == null) return null;
+               info.acquireMutex();
+               try {
+                       return info.getResourceSegment(clusterUID, resourceIndex, segmentOffset, segmentSize);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       info.releaseMutex();
+               }
+
+       }
+
+       public void modiFileEx(ClusterUID uid, int resourceKey, long offset,
+                       long size, byte[] bytes, long pos, ClusterSupport support) {
+
+               try {
+
+                       String key = uid.toString()
+                                       + "_"
+                                       + ClusterTraits
+                                                       .getResourceIndexFromResourceKey(resourceKey);
+
+                       FileInfo info = null;
+
+                       fileLRU.acquireMutex();
+
+                       try {
+
+                               info = fileLRU.get(key);
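+                               // If absent, the new FileInfo presumably registers itself with fileLRU.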
+                               if (info == null)
+                                       info = new FileInfo(fileLRU, key, (int) (offset + size));
+                               
+                               
+                       } catch (Throwable t) {
+                               throw new IllegalStateException(t);
+                       } finally {
+                               
+                               fileLRU.releaseMutex();
+                               
+                       }
+                       
+                       info.acquireMutex();
+                       try {
+                               info.updateData(bytes, offset, pos, size);
+                       } catch (Throwable t) {
+                               throw new IllegalStateException(t);
+                       } finally {
+                               info.releaseMutex();
+                       }
+
+               } catch (DatabaseException e) {
+                       e.printStackTrace();
+               }
+
+       }
+
+    public void shutdown() {
+        clusterLRU.shutdown();
+        fileLRU.shutdown();
+        streamLRU.shutdown();
+        csLRU.shutdown();
+    }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/ExternalizableExample.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/ExternalizableExample.java
new file mode 100644 (file)
index 0000000..8d0bac2
--- /dev/null
@@ -0,0 +1,43 @@
+package org.simantics.acorn;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+
+public class ExternalizableExample implements Externalizable {
+
+    public int first;
+    private long second;
+    
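+    // Externalizable requires a public no-argument constructor for deserialization.
+    public ExternalizableExample() {
+    }
+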
+    public ExternalizableExample(int first, long second) {
+        this.first = first;
+        this.second = second;
+    }
+    
+    @Override
+    public void writeExternal(ObjectOutput out) throws IOException {
+        out.writeInt(first);
+        out.writeLong(second);
+    }
+
+    @Override
+    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        // Restore fields in the order they were written by writeExternal.
+        first = in.readInt();
+        second = in.readLong();
+    }
+
+    
+    public static void main(String[] args) {
+        Externalizable test = new ExternalizableExample(123, 3456);
+        
+        try (ObjectOutputStream stream = new ObjectOutputStream(Files.newOutputStream(Paths.get("C:/Users/Jani Simomaa/Desktop/test"), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING))) {
+            stream.writeObject(test);
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+    }
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/FileIO.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/FileIO.java
new file mode 100644 (file)
index 0000000..aa71732
--- /dev/null
@@ -0,0 +1,142 @@
+package org.simantics.acorn;
+
+import java.io.BufferedOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.channels.SeekableByteChannel;
+import java.nio.file.Files;
+import java.nio.file.OpenOption;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+import java.nio.file.attribute.FileAttribute;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.simantics.databoard.file.RuntimeIOException;
+
+public class FileIO {
+       
+    private static final FileAttribute<?>[] NO_ATTRIBUTES = new FileAttribute[0];
+    
+    private static final Set<OpenOption> CREATE_OPTIONS = new HashSet<>(2);
+    private static final Set<OpenOption> APPEND_OPTIONS = new HashSet<>(1);
+    
+    static {
+        CREATE_OPTIONS.add(StandardOpenOption.WRITE);
+        CREATE_OPTIONS.add(StandardOpenOption.CREATE);
+        
+        APPEND_OPTIONS.add(StandardOpenOption.APPEND);
+    }
+    
+       private Path path;
+       private int writePosition = 0;
+
+       private FileIO(Path path) {
+               this.path = path;
+       }
+       
+       private static Map<Path, FileIO> map = new HashMap<Path, FileIO>();
+       
+       public static FileIO get(Path path) {
+               synchronized(map) {
+                       FileIO existing = map.get(path);
+                       if(existing == null) {
+                               existing = new FileIO(path);
+                               map.put(path, existing);
+                       }
+                       return existing;
+               }
+       }
+       
+       //private static final boolean TRACE_SWAP = false;
+       private static final boolean TRACE_PERF = false;
+
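+       // The first write (writePosition == 0) opens the file for writing from the start;
+       // subsequent writes append. The returned value is the offset at which the bytes were written.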
+       public synchronized int saveBytes(byte[] bytes, int length, boolean overwrite) throws IOException {
+               if(overwrite) writePosition = 0;
+               int result = writePosition;
+               long start = System.nanoTime();
+               Set<OpenOption> options = writePosition == 0 ? CREATE_OPTIONS : APPEND_OPTIONS;
+               
+               ByteBuffer bb = ByteBuffer.wrap(bytes, 0, length);
+               try (FileChannel fc = FileChannel.open(path, options, NO_ATTRIBUTES)) {
+            fc.write(bb);
+               }
+               
+        writePosition += length;
+               if(TRACE_PERF) {
+                       long duration = System.nanoTime()-start;
+                       double ds = 1e-9*duration;
+                       System.err.println("Wrote " + bytes.length + " bytes @ " + 1e-6*bytes.length / ds + "MB/s");
+               }
+               return result;
+       }
+
+    public synchronized byte[] readBytes(int offset, int length) throws IOException {
+        long start = System.nanoTime();
+        try (SeekableByteChannel channel = Files.newByteChannel(path)) {
+            channel.position(offset);
+            ByteBuffer buf = ByteBuffer.allocate(length);
+            int read = 0;
+            while (read < length) {
+                read += channel.read(buf);
+            }
+            byte[] result = buf.array();
+            if (result.length != length)
+                System.err.println("Read length mismatch: expected " + length + ", got " + result.length);
+            if (TRACE_PERF) {
+                long duration = System.nanoTime() - start;
+                double ds = 1e-9 * duration;
+                System.err.println("Read " + result.length + " bytes @ " + 1e-6 * result.length / ds + "MB/s");
+            }
+            return result;
+        }
+    }
+
+       public static void syncPath(Path f) throws IOException {
+               // Based on a unit test on Windows, opening in plain "rw" mode (without the 's' flag) is sufficient for sync()
+               try (RandomAccessFile raf = new RandomAccessFile(f.toFile(), "rw")) {
+                       raf.getFD().sync();
+               }
+       }
+
+       static void uncheckedSyncPath(Path f) {
+               try {
+                       syncPath(f);
+               } catch (IOException e) {
+                       throw new RuntimeIOException(e);
+               }
+       }
+
+       public static void main(String[] args) throws Exception {
+
+               byte[] buf = new byte[1024*1024];
+               
+               long s = System.nanoTime();
+               
+               Path test = Paths.get("e:/work/test.dat");
+               OutputStream fs = Files.newOutputStream(test);
+               OutputStream os = new BufferedOutputStream(fs, 128*1024);
+               
+               for(int i=0;i<40;i++) {
+                       os.write(buf);
+               }
+               
+               os.flush();
+               //fs.getFD().sync();
+               os.close();
+               
+               syncPath(test);
+               
+               long duration = System.nanoTime()-s;
+               System.err.println("Took " + 1e-6*duration + "ms.");
+               
+               
+       }
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/GraphClientImpl2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/GraphClientImpl2.java
new file mode 100644 (file)
index 0000000..774b605
--- /dev/null
@@ -0,0 +1,713 @@
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.internal.ClusterUpdateProcessorBase;
+import org.simantics.acorn.internal.UndoClusterUpdateProcessor;
+import org.simantics.acorn.lru.ClusterInfo;
+import org.simantics.acorn.lru.ClusterStreamChunk;
+import org.simantics.acorn.lru.ClusterUpdateOperation;
+import org.simantics.acorn.lru.ClusterChangeSet.Entry;
+import org.simantics.db.ClusterCreator;
+import org.simantics.db.Database;
+import org.simantics.db.ServiceLocator;
+import org.simantics.db.common.utils.Logger;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.exception.SDBException;
+import org.simantics.db.server.ProCoreException;
+import org.simantics.db.service.ClusterSetsSupport;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.datastructures.Pair;
+import org.simantics.utils.logging.TimeLogger;
+
+import gnu.trove.map.hash.TLongObjectHashMap;
+
+public class GraphClientImpl2 implements Database.Session {
+       
+       public static final boolean DEBUG = false;
+
+       public final ClusterManager clusters;
+       
+       private TransactionManager transactionManager = new TransactionManager();
+       private ExecutorService executor = Executors.newSingleThreadExecutor(new ClientThreadFactory("Core Main Program", false));
+       private ExecutorService saver = Executors.newSingleThreadExecutor(new ClientThreadFactory("Core Snapshot Saver", true));
+
+       private static GraphClientImpl2 INSTANCE;
+       private Path dbFolder;
+       private final Database database;
+       private ServiceLocator locator;
+       private MainProgram mainProgram;
+
+       static class ClientThreadFactory implements ThreadFactory {
+               
+               final String name;
+               final boolean daemon;
+               
+               public ClientThreadFactory(String name, boolean daemon) {
+                       this.name = name;
+                       this.daemon = daemon;
+               }
+               
+               @Override
+               public Thread newThread(Runnable r) {
+                       Thread thread = new Thread(r, name);
+                       thread.setDaemon(daemon);
+                       return thread;
+               }
+       }
+
+       public GraphClientImpl2(Database database, Path dbFolder, ServiceLocator locator) throws IOException {
+           this.database = database;
+           this.dbFolder = dbFolder;
+           this.locator = locator;
+           this.clusters = new ClusterManager(dbFolder);
+           load();
+           ClusterSetsSupport cssi = locator.getService(ClusterSetsSupport.class); 
+           cssi.updateReadAndWriteDirectories(clusters.lastSessionDirectory, clusters.workingDirectory);
+           mainProgram = new MainProgram(this, clusters);
+           executor.execute(mainProgram);
+           INSTANCE = this;
+       }
+
+       public Path getDbFolder() {
+           return dbFolder;
+       }
+
+       public void tryMakeSnapshot() throws IOException {
+               
+           if (isClosing)
+               return;
+           
+               saver.execute(new Runnable() {
+
+                       @Override
+                       public void run() {
+                               Transaction tr = null;
+                               try {
+                                       // First take a write transaction
+                                       tr = askWriteTransaction(-1);
+                                       // Then make sure that MainProgram is idling
+                                       mainProgram.mutex.acquire();
+                                       try {
+                                               synchronized(mainProgram) {
+                                                       if(mainProgram.operations.isEmpty()) {
+                                                               makeSnapshot(false);
+                                                       } else {
+                                                               // MainProgram is becoming busy again - delay snapshotting
+                                                               return;
+                                                       }
+                                               }
+                                       } finally {
+                                               mainProgram.mutex.release();
+                                       }
+                               } catch (IOException e) {
+                                       Logger.defaultLogError(e);
+                               } catch (ProCoreException e) {
+                                       Logger.defaultLogError(e);
+                               } catch (InterruptedException e) {
+                                       Logger.defaultLogError(e);
+                               } finally {
+                                       try {
+                                               if(tr != null)
+                                                       endTransaction(tr.getTransactionId());
+                                       } catch (ProCoreException e) {
+                                               Logger.defaultLogError(e);
+                                       }
+                               }
+                       }
+                       
+               });
+       }
+       
+    public void makeSnapshot(boolean force) throws IOException {
+        if (safeToMakeSnapshot)
+            clusters.makeSnapshot(locator, force);
+    }
+       
+       public <T> T clone(ClusterUID uid, ClusterCreator creator) throws DatabaseException {
+               return clusters.clone(uid, creator);
+       }
+
+//     private void save() throws IOException {
+//             clusters.save();
+//     }
+       
+       public void load() throws IOException {
+               clusters.load();
+       }
+       
+//     public void modiFileEx(ClusterUID uid, int resourceKey, long offset, long size, byte[] bytes, long pos, ClusterSupport support) {
+//             clusters.modiFileEx(uid, resourceKey, offset, size, bytes, pos, support);
+//     }
+
+       @Override
+       public Database getDatabase() {
+               return database;
+       }
+
+       private boolean closed = false;
+       private boolean isClosing = false;
+       // Guards snapshot creation: cancelCommit() is not yet supported and could leave a corrupted head.state if a snapshot were taken afterwards
+    private boolean safeToMakeSnapshot = true;
+       
+       @Override
+       public void close() throws ProCoreException {
+           System.err.println("Closing " + this + " and mainProgram " + mainProgram);
+               if(!closed && !isClosing) {
+                   isClosing = true;
+                       try {
+                           makeSnapshot(true);
+                               
+                               mainProgram.close();
+                               clusters.shutdown();
+                               executor.shutdown();
+                               saver.shutdown();
+                               boolean executorTerminated = executor.awaitTermination(500, TimeUnit.MILLISECONDS);
+                               boolean saverTerminated = saver.awaitTermination(500, TimeUnit.MILLISECONDS);
+                               
+                               System.err.println("executorTerminated=" + executorTerminated + ", saverTerminated=" + saverTerminated);
+                               
+                               INSTANCE = null;
+                               mainProgram = null;
+                               executor = null;
+                               saver = null;
+                               
+                       } catch (IOException | InterruptedException e) {
+                               throw new ProCoreException(e);
+                       }
+               }
+               closed = true;
+               //impl.close();
+       }
+
+       @Override
+       public void open() throws ProCoreException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public boolean isClosed() throws ProCoreException {
+               return closed;
+       }
+       
+       @Override
+       public void acceptCommit(long transactionId, long changeSetId, byte[] metadata) throws ProCoreException {
+               
+               clusters.state.headChangeSetId++;
+
+               long committedChangeSetId = changeSetId + 1;
+               
+               clusters.commitChangeSet(committedChangeSetId, metadata);
+               
+               clusters.state.transactionId = transactionId;
+               
+               mainProgram.committed();
+               
+               TimeLogger.log("Accepted commit");
+               
+       }
+
+       @Override
+       public long cancelCommit(long transactionId, long changeSetId,
+                       byte[] metadata, OnChangeSetUpdate onChangeSetUpdate)
+                       throws ProCoreException {
+           safeToMakeSnapshot = false;
+           throw new UnsupportedOperationException("org.simantics.acorn.GraphClientImpl2.cancelCommit() is not a supported operation! Closing down to prevent further havoc");
+//         System.err.println("GraphClientImpl2.cancelCommit() called!! this is experimental and might cause havoc!");
+//         try {
+//            undo(new long[] {changeSetId}, onChangeSetUpdate);
+//        } catch (SDBException e) {
+//            e.printStackTrace();
+//            throw new ProCoreException(e);
+//        }
+//         clusters.state.headChangeSetId++;
+//         return clusters.state.headChangeSetId;
+       }
+
+       @Override
+       public Transaction askReadTransaction() throws ProCoreException {
+               return transactionManager.askReadTransaction();
+       }
+
+       enum TransactionState {
+               IDLE,WRITE,READ
+       }
+       
+       class TransactionRequest {
+               public TransactionState state;
+               public Semaphore semaphore;
+               public TransactionRequest(TransactionState state, Semaphore semaphore) {
+                       this.state = state;
+                       this.semaphore = semaphore;
+               }
+       }
+
+       class TransactionManager {
+
+               private TransactionState currentTransactionState = TransactionState.IDLE;
+               
+               private int reads = 0;
+               
+               LinkedList<TransactionRequest> requests = new LinkedList<TransactionRequest>();
+               
+               TLongObjectHashMap<TransactionRequest> requestMap = new TLongObjectHashMap<TransactionRequest>();
+               
+               private synchronized Transaction makeTransaction(TransactionRequest req) {
+                       
+                       final int csId = clusters.state.headChangeSetId;
+                       final long trId = clusters.state.transactionId+1;
+                       requestMap.put(trId, req);
+                       return new Transaction() {
+                               
+                               @Override
+                               public long getTransactionId() {
+                                       return trId;
+                               }
+                               
+                               @Override
+                               public long getHeadChangeSetId() {
+                                       return csId;
+                               }
+                       };
+               }
+               
+               /*
+                * This method cannot be synchronized since it waits and must support multiple entries
+                * by query thread(s) and internal transactions such as snapshot saver
+                */
+               public Transaction askReadTransaction() throws ProCoreException {
+               
+                       Semaphore semaphore = new Semaphore(0);
+                       
+                       TransactionRequest req = queue(TransactionState.READ, semaphore);
+                       
+                       try {
+                               semaphore.acquire();
+                       } catch (InterruptedException e) {
+                               throw new ProCoreException(e);
+                       }
+                       
+                       return makeTransaction(req);
+
+               }
+
+               private synchronized void dispatch() {
+                       TransactionRequest r = requests.removeFirst();
+                       if(r.state == TransactionState.READ) reads++;
+                       r.semaphore.release();
+               }
+               
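+               // Scheduling policy: requests are served in FIFO order. Concurrent reads may
+               // proceed together, while a write is exclusive and blocks the queue until
+               // endTransaction() returns the state to IDLE.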
+               private synchronized void processRequests() {
+                       
+                       while(true) {
+
+                               if(requests.isEmpty()) return;
+                               TransactionRequest req = requests.peek();
+
+                               if(currentTransactionState == TransactionState.IDLE) {
+                               
+                                       // Accept anything while IDLE
+                                       currentTransactionState = req.state;
+                                       dispatch();
+                                       
+                               } else if (currentTransactionState == TransactionState.READ) {
+                                       
+                                       if(req.state == currentTransactionState) {
+
+                                               // Allow other reads
+                                               dispatch();
+
+                                       } else {
+                                               
+                                               // Wait
+                                               return;
+                                               
+                                       }
+                                       
+                               }  else if (currentTransactionState == TransactionState.WRITE) {
+
+                                       // Wait
+                                       return;
+                                       
+                               }
+                               
+                       }
+                       
+               }
+               
+               private synchronized TransactionRequest queue(TransactionState state, Semaphore semaphore) {
+                       TransactionRequest req = new TransactionRequest(state, semaphore); 
+                       requests.addLast(req);
+                       processRequests();
+                       return req;
+               }
+               
+               /*
+                * This method cannot be synchronized since it waits and must support multiple entries
+                * by query thread(s) and internal transactions such as snapshot saver
+                */
+               public Transaction askWriteTransaction()
+                               throws ProCoreException {
+                       
+                       Semaphore semaphore = new Semaphore(0);
+                       
+                       TransactionRequest req = queue(TransactionState.WRITE, semaphore);
+                       
+                       try {
+                               semaphore.acquire();
+                       } catch (InterruptedException e) {
+                               throw new ProCoreException(e);
+                       }
+                       
+                       mainProgram.startTransaction(clusters.state.headChangeSetId+1);
+                       
+                       return makeTransaction(req);
+                       
+               }
+               
+               public synchronized long endTransaction(long transactionId) throws ProCoreException {
+                       
+                       TransactionRequest req = requestMap.remove(transactionId);
+                       if(req.state == TransactionState.WRITE) {
+                               currentTransactionState = TransactionState.IDLE;
+                               processRequests();
+                       } else {
+                               reads--;
+                               if(reads == 0) {
+                                       currentTransactionState = TransactionState.IDLE;
+                                       processRequests();
+                               }
+                       }
+                       return clusters.state.transactionId;
+               }
+
+       }
+       
+       @Override
+       public Transaction askWriteTransaction(final long transactionId)
+                       throws ProCoreException {
+               return transactionManager.askWriteTransaction();
+       }
+
+       @Override
+       public long endTransaction(long transactionId) throws ProCoreException {
+               return transactionManager.endTransaction(transactionId);
+       }
+
+       @Override
+       public String execute(String command) throws ProCoreException {
+               // This is called only by WriteGraphImpl.commitAccessorChanges
+               // We can ignore this in Acorn
+               return "";
+       }
+
+       @Override
+       public byte[] getChangeSetMetadata(long changeSetId)
+                       throws ProCoreException {
+               return clusters.getMetadata(changeSetId);
+       }
+
+       @Override
+       public ChangeSetData getChangeSetData(long minChangeSetId,
+                       long maxChangeSetId, OnChangeSetUpdate onChangeSetUpdate)
+                       throws ProCoreException {
+               
+               new Exception("GetChangeSetDataFunction " + minChangeSetId + " " + maxChangeSetId).printStackTrace();
+               return null;
+               
+       }
+
+       @Override
+       public ChangeSetIds getChangeSetIds() throws ProCoreException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public Cluster getCluster(byte[] clusterId) throws ProCoreException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterChanges getClusterChanges(long changeSetId, byte[] clusterId)
+                       throws ProCoreException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterIds getClusterIds() throws ProCoreException {
+               return clusters.getClusterIds();
+       }
+
+       @Override
+       public Information getInformation() throws ProCoreException {
+               return new Information() {
+
+                       @Override
+                       public String getServerId() {
+                               return "server";
+                       }
+
+                       @Override
+                       public String getProtocolId() {
+                               return "";
+                       }
+
+                       @Override
+                       public String getDatabaseId() {
+                               return "database";
+                       }
+
+                       @Override
+                       public long getFirstChangeSetId() {
+                               return 0;
+                       }
+                       
+               };
+       }
+
+       @Override
+       public Refresh getRefresh(long changeSetId) throws ProCoreException {
+               
+               final ClusterIds ids = getClusterIds();
+               
+               return new Refresh() {
+
+                       @Override
+                       public long getHeadChangeSetId() {
+                               return clusters.state.headChangeSetId;
+                       }
+
+                       @Override
+                       public long[] getFirst() {
+                               return ids.getFirst();
+                       }
+
+                       @Override
+                       public long[] getSecond() {
+                               return ids.getSecond();
+                       }
+                       
+               };
+               
+       }
+
+       public byte[] getResourceFile(final byte[] clusterUID, final int resourceIndex) throws ProCoreException {
+               return clusters.getResourceFile(clusterUID, resourceIndex);
+       }
+
+       @Override
+       public ResourceSegment getResourceSegment(final byte[] clusterUID,
+                       final int resourceIndex, final long segmentOffset, short segmentSize) throws ProCoreException {
+               
+               return clusters.getResourceSegment(clusterUID, resourceIndex, segmentOffset, segmentSize);
+
+       }
+
+       @Override
+       public long reserveIds(int count) throws ProCoreException {
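+               // Note: the count argument is ignored; ids are handed out one per call.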
+               return clusters.state.reservedIds++;
+       }
+
+       @Override
+       public void updateCluster(byte[] operations) throws ProCoreException {
+
+               ClusterUpdateOperation operation = new ClusterUpdateOperation(clusters, operations);
+               ClusterInfo info = clusters.clusterLRU.getOrCreate(operation.uid, true);
+               if(info == null) throw new IllegalStateException();
+               info.acquireMutex();
+               try {
+                       info.scheduleUpdate();
+                       mainProgram.schedule(operation);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       info.releaseMutex();
+               }
+
+       }
+
+       private UndoClusterUpdateProcessor getUndoCSS(String ccsId) throws DatabaseException {
+
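+               // ccsId has the form <chunk-key>.<offset-within-chunk>.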
+               String[] ss = ccsId.split("\\.");
+               String chunkKey = ss[0];
+               int chunkOffset = Integer.parseInt(ss[1]);
+               ClusterStreamChunk chunk = clusters.streamLRU.getWithoutMutex(chunkKey);
+               if(chunk == null) throw new IllegalStateException("Cluster Stream Chunk " + chunkKey + " was not found.");
+               chunk.acquireMutex();
+               try {
+                       return chunk.getUndoProcessor(clusters, chunkOffset, ccsId);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       chunk.releaseMutex();
+               }
+               
+       }
+       
+       private void performUndo(String ccsId, ArrayList<Pair<ClusterUID, byte[]>> clusterChanges, UndoClusterSupport support) throws ProCoreException, DatabaseException {
+
+               UndoClusterUpdateProcessor proc = getUndoCSS(ccsId);
+
+               int clusterKey = clusters.getClusterKeyByClusterUIDOrMakeWithoutMutex(proc.getClusterUID());
+
+               clusters.clusterLRU.acquireMutex();
+               try {
+
+                       ClusterChange cs = new ClusterChange(clusterChanges, proc.getClusterUID());
+                       for(int i=0;i<proc.entries.size();i++) {
+                               
+                               Entry e = proc.entries.get(proc.entries.size() - 1 - i);
+                               e.process(clusters, cs, clusterKey);
+                               
+                       }
+                       
+                       cs.flush();
+
+               } finally {
+                       clusters.clusterLRU.releaseMutex();
+               }
+               
+       }
+       
+       @Override
+       public boolean undo(long[] changeSetIds, OnChangeSetUpdate onChangeSetUpdate) throws SDBException {
+               
+               final ArrayList<Pair<ClusterUID, byte[]>> clusterChanges = new ArrayList<Pair<ClusterUID, byte[]>>();
+               
+               UndoClusterSupport support = new UndoClusterSupport(clusters);
+               
+               final int changeSetId = clusters.state.headChangeSetId;
+               
+               if(ClusterUpdateProcessorBase.DEBUG)
+                       System.err.println(" === BEGIN UNDO ===");
+               
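+               // Revert newest first: walk the change sets in reverse and, within each one,
+               // undo its cluster change sets in reverse order as well.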
+               for(int i=0;i<changeSetIds.length;i++) {
+                       final long id = changeSetIds[changeSetIds.length-1-i];
+                       ArrayList<String> ccss = clusters.getChanges(id);
+                       for(int j=0;j<ccss.size();j++) {
+                               try {
+                                       if(ClusterUpdateProcessorBase.DEBUG)
+                                               System.err.println("performUndo " + ccss.get(ccss.size()-j-1));
+                                       performUndo(ccss.get(ccss.size()-j-1), clusterChanges, support);
+                               } catch (DatabaseException e) {
+                                       e.printStackTrace();
+                               }
+                       }
+               }
+
+               if(ClusterUpdateProcessorBase.DEBUG)
+                       System.err.println(" === END UNDO ===");
+
+               for(int i=0;i<clusterChanges.size();i++) {
+                       
+                       final int changeSetIndex = i;
+                       
+                       final Pair<ClusterUID, byte[]> pair = clusterChanges.get(i);
+                       
+                       final ClusterUID cuid = pair.first;
+                       final byte[] data = pair.second;
+
+                       onChangeSetUpdate.onChangeSetUpdate(new ChangeSetUpdate() {
+
+                               @Override
+                               public long getChangeSetId() {
+                                       return changeSetId;
+                               }
+
+                               @Override
+                               public int getChangeSetIndex() {
+                                       return 0;
+                               }
+
+                               @Override
+                               public int getNumberOfClusterChangeSets() {
+                                       return clusterChanges.size();
+                               }
+
+                               @Override
+                               public int getIndexOfClusterChangeSet() {
+                                       return changeSetIndex;
+                               }
+
+                               @Override
+                               public byte[] getClusterId() {
+                                       return cuid.asBytes();
+                               }
+
+                               @Override
+                               public boolean getNewCluster() {
+                                       return false;
+                               }
+
+                               @Override
+                               public byte[] getData() {
+                                       return data;
+                               }
+
+                       });
+
+               }
+
+               
+               return false;
+               
+       }
+       
+       public static GraphClientImpl2 getInstance() {
+           return INSTANCE;
+       }
+       
+       public ServiceLocator getServiceLocator() {
+           return locator;
+       }
+
+    @Override
+    public boolean refreshEnabled() {
+        return false;
+    }
+
+}
+
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/HeadState.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/HeadState.java
new file mode 100644 (file)
index 0000000..c20d8e8
--- /dev/null
@@ -0,0 +1,104 @@
+package org.simantics.acorn;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import org.simantics.databoard.Bindings;
+import org.simantics.databoard.binding.mutable.MutableVariant;
+import org.simantics.databoard.serialization.Serializer;
+import org.simantics.databoard.util.binary.BinaryMemory;
+
+public class HeadState {
+
+    public int headChangeSetId = 0;
+    public long transactionId = 1;
+    public long reservedIds = 3;
+
+    public ArrayList<String> clusters = new ArrayList<>();
+    public ArrayList<String> files = new ArrayList<>();
+    public ArrayList<String> stream = new ArrayList<>();
+    public ArrayList<String> cs = new ArrayList<>();
+//    public ArrayList<String> ccs = new ArrayList<String>();
+
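+    // On disk, head.state is a 20-byte SHA-1 digest of the payload followed by the
+    // Databoard-serialized HeadState; load() verifies the digest before deserializing.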
+    public static HeadState load(Path directory) throws InvalidHeadStateException {
+        Path f = directory.resolve("head.state");
+        
+        try {
+            byte[] bytes = Files.readAllBytes(f);
+            MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
+            int digestLength = sha1.getDigestLength();
+            
+            sha1.update(bytes, digestLength, bytes.length - digestLength);
+            byte[] newChecksum = sha1.digest();
+            if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) {
+                throw new InvalidHeadStateException(
+                        "Checksum " + Arrays.toString(newChecksum) + " does not match expected "
+                                + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + f.toAbsolutePath());
+            }
+            
+            HeadState object = (HeadState) org.simantics.databoard.Files.readFile(new ByteArrayInputStream(bytes, digestLength, bytes.length - digestLength), Bindings.getBindingUnchecked(HeadState.class));
+            return object;
+            
+        } catch (IOException i) {
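+            // A missing or unreadable head.state presumably indicates a fresh database; start from defaults.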
+            return new HeadState();
+//            throw new InvalidHeadStateException(i);
+        } catch (NoSuchAlgorithmException e) {
+            throw new Error("SHA-1 Algorithm not found", e);
+        } catch (Throwable t) {
+            throw new InvalidHeadStateException(t);
+        }
+    }
+    
+    public void save(Path directory) throws IOException {
+        Path f = directory.resolve("head.state");
+        try {
+            BinaryMemory rf = new BinaryMemory(4096);
+            try {
+                MutableVariant v = new MutableVariant(Bindings.getBindingUnchecked(HeadState.class), this);
+                Serializer s = Bindings.getSerializerUnchecked( Bindings.VARIANT );
+                s.serialize(rf, v);
+            } finally {
+                rf.close();
+            }
+            
+            byte[] bytes = rf.toByteBuffer().array();
+            
+            MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
+            sha1.update(bytes);
+            byte[] checksum = sha1.digest();
+            
+            try (OutputStream out = Files.newOutputStream(f)) {
+                out.write(checksum);
+                out.write(bytes);
+            }
+            FileIO.syncPath(f);
+        } catch (NoSuchAlgorithmException e) {
+            throw new Error("SHA-1 digest not found, should not happen", e);
+        }
+    }
+
+    public static void validateHeadStateIntegrity(Path headState) throws InvalidHeadStateException, IOException {
+        try {
+            byte[] bytes = Files.readAllBytes(headState);
+            MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
+            int digestLength = sha1.getDigestLength();
+            sha1.update(bytes, digestLength, bytes.length - digestLength);
+            byte[] newChecksum = sha1.digest();
+            if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) {
+                throw new InvalidHeadStateException(
+                        "Checksum " + Arrays.toString(newChecksum) + " does not match expected "
+                                + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + headState.toAbsolutePath());
+            }
+        } catch (NoSuchAlgorithmException e) {
+            throw new Error("SHA-1 digest not found, should not happen", e);
+        }
+    }
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/InvalidHeadStateException.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/InvalidHeadStateException.java
new file mode 100644 (file)
index 0000000..2c342b7
--- /dev/null
@@ -0,0 +1,27 @@
+package org.simantics.acorn;
+
+public class InvalidHeadStateException extends Exception {
+
+    private static final long serialVersionUID = -7291859180968235955L;
+
+    public InvalidHeadStateException() {
+        super();
+    }
+
+    public InvalidHeadStateException(String message, Throwable cause, boolean enableSuppression,
+            boolean writableStackTrace) {
+        super(message, cause, enableSuppression, writableStackTrace);
+    }
+
+    public InvalidHeadStateException(String message, Throwable cause) {
+        super(message, cause);
+    }
+
+    public InvalidHeadStateException(String message) {
+        super(message);
+    }
+
+    public InvalidHeadStateException(Throwable cause) {
+        super(cause);
+    }
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/MainProgram.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/MainProgram.java
new file mode 100644 (file)
index 0000000..f39a498
--- /dev/null
@@ -0,0 +1,342 @@
+package org.simantics.acorn;
+
+import java.io.Closeable;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+
+import org.simantics.acorn.lru.ClusterStreamChunk;
+import org.simantics.acorn.lru.ClusterUpdateOperation;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.logging.TimeLogger;
+
+public class MainProgram implements Runnable, Closeable {
+
+       private static final int CLUSTER_THREADS = 4;
+       private static final int CHUNK_CACHE_SIZE = 100;
+
+       private final GraphClientImpl2 client;
+       private final ClusterManager clusters;
+       private final ExecutorService[] clusterUpdateThreads;
+    private final List<ClusterUpdateOperation>[] updateSchedules;
+       
+       private int residentOperationBytes = 0;
+       private long currentChangeSetId = -1;
+       private int nextChunkId = 0;
+       private boolean alive = true;
+       private Semaphore deathBarrier = new Semaphore(0);
+
+       final Semaphore mutex = new Semaphore(1);
+       final LinkedList<ClusterStreamChunk> operations = new LinkedList<>();
+
+       static class ClusterThreadFactory implements ThreadFactory {
+
+               final String name;
+               final boolean daemon;
+
+               public ClusterThreadFactory(String name, boolean daemon) {
+                       this.name = name;
+                       this.daemon = daemon;
+               }
+
+               @Override
+               public Thread newThread(Runnable r) {
+                       Thread thread = new Thread(r, name);
+                       thread.setDaemon(daemon);
+                       return thread;
+               }
+       }
+
+       public MainProgram(GraphClientImpl2 client, ClusterManager clusters) {
+
+               this.client = client;
+               this.clusters = clusters;
+               this.clusterUpdateThreads = new ExecutorService[CLUSTER_THREADS];
+               this.updateSchedules = new ArrayList[CLUSTER_THREADS];
+               for(int i=0;i<clusterUpdateThreads.length;i++) {
+                       clusterUpdateThreads[i] = Executors.newSingleThreadExecutor(new ClusterThreadFactory("Cluster Updater " + (i+1), false));
+                       updateSchedules[i] = new ArrayList<ClusterUpdateOperation>();
+               }
+       }
+
+       public void startTransaction(long id) {
+               currentChangeSetId = id;
+               nextChunkId = 0;
+       }
+
+       private static Comparator<ClusterUID> clusterComparator = new Comparator<ClusterUID>() {
+
+               @Override
+               public int compare(ClusterUID o1, ClusterUID o2) {
+                       return Long.compare(o1.second, o2.second);
+               }
+       };
+
+       @Override
+       public void run() {
+               try {
+
+                       mutex.acquire();
+                       main:
+                       while(alive) {
+
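+                               // Gather pending operations, grouped per cluster UID (up to 100 clusters per round).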
+                               TreeMap<ClusterUID, List<ClusterUpdateOperation>> updates = new TreeMap<ClusterUID, List<ClusterUpdateOperation>>(clusterComparator);
+
+                               synchronized(MainProgram.this) {
+
+                                       while(!operations.isEmpty() && updates.size() < 100) {
+
+                                               ClusterStreamChunk chunk = operations.pollFirst();
+
+                                               for(int i=chunk.nextToProcess;i<chunk.operations.size();i++) {
+                                                       ClusterUpdateOperation o = chunk.operations.get(i);
+                                                       ClusterUID uid = o.uid;
+                                                       List<ClusterUpdateOperation> ops = updates.get(uid);
+                                                       if(ops == null) {
+                                                               ops = new ArrayList<ClusterUpdateOperation>();
+                                                               updates.put(uid, ops);
+                                                       }
+                                                       ops.add(o);
+                                               }
+
+                                               chunk.nextToProcess = chunk.operations.size();
+
+                                               if(!chunk.isCommitted()) {
+                                                       assert(operations.isEmpty());
+                                                       operations.add(chunk);
+                                                       break;
+                                               }
+
+                                       }
+
+                                       if(updates.isEmpty()) {
+                                               try {
+                                                       long start = System.nanoTime();
+                                                       mutex.release();
+                                                       MainProgram.this.wait(5000);
+                                                       mutex.acquire();
+                                                       if (!alive)
+                                                               break main;
+                                                       long duration = System.nanoTime()-start;
+                                                       if(duration > 4000000000L) {
+
+                                                               // Was this a time-out or a new stream request?
+                                                               if(operations.isEmpty()) {
+
+                                                                       /*
+                                                                        * We are idling here.
+                                                                        * Flush all caches gradually
+                                                                        */
+
+                                                                       // Write pending cs to disk
+                                                                       boolean written = clusters.csLRU.swapForced();
+                                                                       while(written) {
+                                                                               if(!updates.isEmpty()) break;
+                                                                               written = clusters.csLRU.swapForced();
+                                                                       }
+                                                                       // Write pending chunks to disk
+                                                                       written = clusters.streamLRU.swapForced();
+                                                                       while(written) {
+                                                                               if(!updates.isEmpty()) break;
+                                                                               written = clusters.streamLRU.swapForced();
+                                                                       }
+                                                                       // Write pending files to disk
+                                                                       written = clusters.fileLRU.swapForced();
+                                                                       while(written) {
+                                                                               if(!updates.isEmpty()) break;
+                                                                               written = clusters.fileLRU.swapForced();
+                                                                       }
+                                                                       // Write pending clusters to disk
+                                                                       written = clusters.clusterLRU.swapForced();
+                                                                       while(written) {
+                                                                               if(!updates.isEmpty()) break;
+                                                                               written = clusters.clusterLRU.swapForced();
+                                                                       }
+
+                                                                       client.tryMakeSnapshot();
+                                                               }
+                                                       }
+                                               } catch (InterruptedException e) {
+                                                       e.printStackTrace();
+                                               }
+
+                                       }
+
+                               }
+
+                               for(int i=0;i<CLUSTER_THREADS;i++)
+                                       updateSchedules[i].clear();
+
+                               final Semaphore s = new Semaphore(0);
+
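+                               // Partition the collected operations by cluster so that all
+                               // updates to the same cluster execute on the same thread.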
+                               for(Map.Entry<ClusterUID, List<ClusterUpdateOperation>> entry : updates.entrySet()) {
+                                       ClusterUID key = entry.getKey();
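+                                       // Assumes clusterUpdateThreads.length is a power of two,
+                                       // so masking the hash yields a valid thread index.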
+                                       int hash = key.hashCode() & (clusterUpdateThreads.length-1);
+                                       updateSchedules[hash].addAll(entry.getValue());
+                               }
+
+                               int acquireAmount = 0;
+                               for(int i=0;i<CLUSTER_THREADS;i++) {
+                                       final List<ClusterUpdateOperation> ops = updateSchedules[i];
+                                       if (!ops.isEmpty()) {
+                                               acquireAmount++;
+                                               clusterUpdateThreads[i].execute(() -> {
+                                                       for(ClusterUpdateOperation op : ops) {
+                                                               op.run();
+                                                       }
+                                                       s.release();
+                                               });
+                                       }
+                               }
+
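+                               // Block until every scheduled batch has signalled completion.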
+                               s.acquire(acquireAmount);
+
+                               /*
+                                * Here we are actively processing updates from client.
+                                * Maintain necessary caching here.
+                                */
+
+                               clusters.streamLRU.acquireMutex();
+                               try {
+                                       swapChunks();
+                               } catch (Throwable t) {
+                                       throw new IllegalStateException(t);
+                               } finally {
+                                       clusters.streamLRU.releaseMutex();
+                               }
+                               clusters.csLRU.acquireMutex();
+                               try {
+                                       swapCS();
+                               } catch (Throwable t) {
+                                       throw new IllegalStateException(t);
+                               } finally {
+                                       clusters.csLRU.releaseMutex();
+                               }
+
+                               TimeLogger.log("Performed updates");
+
+                       }
+
+               } catch (Throwable t) {
+                       t.printStackTrace();
+               } finally {
+                       deathBarrier.release();
+               }
+
+       }
+
+       /*
+        * The caller must hold the streamLRU mutex.
+        */
+       private void swapChunks() {
+
+               // Cache chunks during update operations
+               boolean written = clusters.streamLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE);
+               while(written) {
+                       written = clusters.streamLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE);
+               }
+
+       }
+
+       private void swapCS() {
+
+               // Cache change sets during update operations
+               boolean written = clusters.csLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE);
+               while(written) {
+                       written = clusters.csLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE);
+               }
+
+       }
+
+       public synchronized void committed() {
+
+               ClusterStreamChunk last = operations.isEmpty() ? null : operations.getLast();
+        if (!alive) {
+            System.err.println("Trying to commit operation after MainProgram is closed! Operation is " + last);
+//          return;
+        }
+               if(last != null) last.commit();
+
+       }
+
+       public synchronized void schedule(ClusterUpdateOperation operation) {
+           if (!alive) {
+               System.err.println("Trying to schedule operation after MainProgram is closed! Operation is " + operation);
+//             return;
+           }
+               clusters.streamLRU.acquireMutex();
+
+               try {
+
+                       ClusterStreamChunk last = operations.isEmpty() ? null : operations.getLast();
+                       if(last == null || last.isCommitted()) {
+                               String id = currentChangeSetId + "-" + nextChunkId++;
+                               last = new ClusterStreamChunk(clusters, clusters.streamLRU, id);
+                               operations.add(last);
+                       }
+
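+                       // Record the operation's position within the chunk as "<chunk id>.<offset>".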
+                       String chunkId = last.getKey();
+                       int chunkOffset = last.operations.size();
+                       operation.scheduled(chunkId + "." + chunkOffset);
+
+                       last.addOperation(operation);
+
+                       swapChunks();
+
+                       notifyAll();
+
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+
+                       clusters.streamLRU.releaseMutex();
+
+               }
+
+       }
+
+    @Override
+    public void close() {
+        alive = false;
+        synchronized (this) {
+            notifyAll();
+        }
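+        // Wait for the main loop to exit; it releases deathBarrier on its way out.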
+        try {
+            deathBarrier.acquire();
+        } catch (InterruptedException e) {
+        }
+
+        for (ExecutorService executor : clusterUpdateThreads)
+            executor.shutdown();
+
+        for (int i = 0; i < clusterUpdateThreads.length; i++) {
+            try {
+                ExecutorService executor = clusterUpdateThreads[i];
+                executor.awaitTermination(500, TimeUnit.MILLISECONDS);
+                clusterUpdateThreads[i] = null;
+            } catch (InterruptedException e) {
+                e.printStackTrace();
+            }
+        }
+    }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/MainState.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/MainState.java
new file mode 100644 (file)
index 0000000..7733528
--- /dev/null
@@ -0,0 +1,135 @@
+package org.simantics.acorn;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.simantics.databoard.file.RuntimeIOException;
+import org.simantics.utils.FileUtils;
+
+public class MainState implements Serializable {
+
+    private static final long serialVersionUID = 6237383147637270225L;
+
+    public int headDir = 0;
+
+    public MainState() {
+    }
+
+    public MainState(int headDir) {
+        this.headDir = headDir;
+    }
+
+    public static MainState load(Path directory) throws IOException {
+        Files.createDirectories(directory);
+        Path f = directory.resolve("main.state");
+        try {
+            MainState state = null;
+            try (ObjectInputStream in = new ObjectInputStream(new BufferedInputStream(Files.newInputStream(f)))) {
+                state = (MainState) in.readObject();
+            }
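+            // Walk backwards through the numbered head directories until one
+            // passes integrity validation; corrupted ones are deleted.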
+            while (true) {
+                Path last = directory.resolve(Integer.toString(state.headDir - 1));
+                try {
+                    Path headState = last.resolve("head.state");
+                    HeadState.validateHeadStateIntegrity(headState);
+                    break;
+                } catch (InvalidHeadStateException e) {
+                    e.printStackTrace();
+                    state.headDir--;
+                    uncheckedDeleteAll(last);
+                }
+            }
+            return state;
+        } catch(IOException i) {
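+            // main.state was missing or unreadable: recover by scanning the
+            // directory for the newest valid head state.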
+            return new MainState(findNewHeadState(directory));
+        } catch(ClassNotFoundException c) {
+            throw new Error("MainState class not found", c);
+        } finally {
+            if (Files.exists(f)) {
+                Files.delete(f);
+            }
+        }
+    }
+
+    public void save(Path directory) throws IOException {
+        Path f = directory.resolve("main.state");
+        try (ObjectOutputStream out = new ObjectOutputStream(new BufferedOutputStream(Files.newOutputStream(f)))) {
+            out.writeObject(this);
+        }
+        FileIO.syncPath(f);
+    }
+
+    private static boolean isInteger(Path p) {
+        try {
+            Integer.parseInt(p.getFileName().toString());
+            return true;
+        } catch (NumberFormatException e) {
+            return false;
+        }
+    }
+
+    /**
+     * TODO: this method does two things at once: it searches for the newest
+     * valid head.state directory and cleans up invalid ones. These
+     * responsibilities should be split into separate methods.
+     *
+     * @param directory
+     * @return
+     * @throws IOException
+     */
+    private static int findNewHeadState(Path directory) throws IOException {
+        try (Stream<Path> s = Files.walk(directory, 1)) {
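+            // Collect the numbered directories newest-first so the most recent
+            // valid head state is found before older ones are considered.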
+            List<Path> reverseSortedPaths = s
+            .filter(p -> !p.equals(directory) && isInteger(p) && Files.isDirectory(p))
+            .sorted((p1, p2) -> {
+                int p1Name = Integer.parseInt(p1.getFileName().toString()); 
+                int p2Name = Integer.parseInt(p2.getFileName().toString());
+                return Integer.compare(p2Name, p1Name);
+            }).collect(Collectors.toList());
+
+            int largest = -1;
+            for (Path last : reverseSortedPaths) {
+                Path headState = last.resolve("head.state");
+                if (Files.exists(headState)) {
+                    try {
+                        HeadState.validateHeadStateIntegrity(headState);
+                        largest = safeParseInt(-1, last.getFileName().toString());
+                        break;
+                    } catch (IOException | InvalidHeadStateException e) {
+                        e.printStackTrace();
+                        uncheckedDeleteAll(last);
+                    }
+                } else {
+                    uncheckedDeleteAll(last);
+                }
+            }
+            // +1 because we want to return the next head version to use,
+            // not the latest existing version.
+            return largest + 1;
+        }
+    }
+
+    private static int safeParseInt(int defaultValue, String s) {
+        try {
+            return Integer.parseInt(s);
+        } catch (NumberFormatException e) {
+            return defaultValue;
+        }
+    }
+
+    private static void uncheckedDeleteAll(Path path) {
+        try {
+            FileUtils.deleteAll(path.toFile());
+        } catch (IOException e) {
+            throw new RuntimeIOException(e);
+        }
+    }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/Persistable.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/Persistable.java
new file mode 100644 (file)
index 0000000..0d209b2
--- /dev/null
@@ -0,0 +1,11 @@
+package org.simantics.acorn;
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+public interface Persistable {
+       
+       void toFile(Path path) throws IOException;
+       void fromFile(byte[] data);
+       
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/UndoClusterSupport.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/UndoClusterSupport.java
new file mode 100644 (file)
index 0000000..1e7352c
--- /dev/null
@@ -0,0 +1,170 @@
+package org.simantics.acorn;
+
+import java.io.InputStream;
+
+import org.simantics.db.Session;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.impl.IClusterTable;
+import org.simantics.db.service.ClusterUID;
+
+public class UndoClusterSupport implements ClusterSupport {
+
+       final ClusterManager impl;
+       
+       public UndoClusterSupport(ClusterManager impl) {
+               this.impl = impl;
+       }
+       
+       @Override
+       public int createClusterKeyByClusterUID(ClusterUID clusterUID,
+                       long clusterId) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterId(long clusterId) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterKey(int clusterKey) {
+               try {
+            return impl.getClusterByClusterKey(clusterKey);
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            return null;
+        }
+       }
+
+       @Override
+       public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByResourceKey(int resourceKey) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public long getClusterIdOrCreate(ClusterUID clusterUID) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void addStatement(Object cluster) {
+       }
+
+       @Override
+       public void cancelStatement(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void removeStatement(Object cluster) {
+       }
+
+       @Override
+       public void cancelValue(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void removeValue(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void setValue(Object cluster, long clusterId, byte[] bytes,
+                       int length) {
+       }
+
+       @Override
+       public void modiValue(Object cluster, long clusterId, long voffset,
+                       int length, byte[] bytes, int offset) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void setImmutable(Object cluster, boolean immutable) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void setDeleted(Object cluster, boolean deleted) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void createResource(Object cluster, short resourceIndex,
+                       long clusterId) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void addStatementIndex(Object cluster, int resourceKey,
+                       ClusterUID clusterUID, byte op) {
+       }
+
+       @Override
+       public void setStreamOff(boolean setOff) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public boolean getStreamOff() {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public InputStream getValueStreamEx(int resourceIndex, long clusterId)
+                       throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public byte[] getValueEx(int resourceIndex, long clusterId)
+                       throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public byte[] getValueEx(int resourceIndex, long clusterId, long voffset,
+                       int length) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public long getValueSizeEx(int resourceIndex, long clusterId)
+                       throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public int wait4RequestsLess(int limit) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public Session getSession() {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public IClusterTable getClusterTable() {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public int getClusterKeyByClusterUIDOrMake(long id1, long id2) {
+               throw new UnsupportedOperationException();
+       }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/backup/AcornBackupProvider.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/backup/AcornBackupProvider.java
new file mode 100644 (file)
index 0000000..5ea0799
--- /dev/null
@@ -0,0 +1,316 @@
+package org.simantics.acorn.backup;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.LinkOption;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.StandardCopyOption;
+import java.nio.file.StandardOpenOption;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.Arrays;
+import java.util.concurrent.Future;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.simantics.acorn.GraphClientImpl2;
+import org.simantics.backup.BackupException;
+import org.simantics.backup.IBackupProvider;
+import org.simantics.db.server.ProCoreException;
+import org.simantics.utils.FileUtils;
+
+/**
+ * @author Jani
+ *
+ * TODO: find a cleaner way to get rid of the {@link GraphClientImpl2#getInstance()} invocations
+ */
+public class AcornBackupProvider implements IBackupProvider {
+
+    private static final String IDENTIFIER = "AcornBackupProvider";
+    private long trId = -1;
+    private final Semaphore lock = new Semaphore(1);
+
+    private static Path getAcornMetadataFile(Path dbFolder) {
+        return dbFolder.getParent().resolve(IDENTIFIER);
+    }
+    
+    @Override
+    public void lock() throws BackupException {
+        try {
+            if (trId != -1)
+                throw new IllegalStateException(this + " backup provider is already locked");
+            trId = GraphClientImpl2.getInstance().askWriteTransaction(-1)
+                    .getTransactionId();
+        } catch (ProCoreException e) {
+            throw new BackupException(e);
+        }
+    }
+
+    @Override
+    public Future<BackupException> backup(Path targetPath, int revision) throws BackupException {
+        boolean releaseLock = true;
+        try {
+            lock.acquire();
+
+            GraphClientImpl2 client = GraphClientImpl2.getInstance();
+            client.makeSnapshot(true);
+
+            Path dbDir = client.getDbFolder();
+            int newestFolder = client.clusters.mainState.headDir - 1;
+            int latestFolder = -2;
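+            // latestFolder is the newest folder already covered by a previous
+            // backup; -2 (no metadata file) makes the copy include every folder.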
+            Path acornMetadataFile = getAcornMetadataFile(dbDir);
+            if (Files.exists(acornMetadataFile)) {
+                try (BufferedReader br = Files.newBufferedReader(acornMetadataFile)) {
+                    latestFolder = Integer.parseInt(br.readLine());
+                }
+            }
+
+            AcornBackupRunnable r = new AcornBackupRunnable(
+                    lock, targetPath, revision, dbDir, latestFolder, newestFolder);
+            new Thread(r, "Acorn backup thread").start();
+
+            releaseLock = false;
+            return r;
+        } catch (InterruptedException e) {
+            releaseLock = false;
+            throw new BackupException("Failed to lock Acorn for backup.", e);
+        } catch (NumberFormatException e) {
+            throw new BackupException("Failed to read Acorn head state file.", e);
+        } catch (IOException e) {
+            throw new BackupException("I/O problem during Acorn backup.", e);
+        } finally {
+            if (releaseLock)
+                lock.release();
+        }
+    }
+
+    @Override
+    public void unlock() throws BackupException {
+        try {
+            if (trId == -1)
+                throw new BackupException(this + " backup provider is not locked");
+            GraphClientImpl2.getInstance().endTransaction(trId);
+            trId = -1;
+        } catch (ProCoreException e) {
+            throw new BackupException(e);
+        }
+    }
+
+    @Override
+    public void restore(Path fromPath, int revision) {
+        try {
+            // 1. Resolve initial backup restore target.
+            // This can be DB directory directly or a temporary directory that
+            // will replace the DB directory.
+            Path dbRoot = GraphClientImpl2.getInstance().getDbFolder();
+            Path restorePath = dbRoot;
+            if (!Files.exists(dbRoot, LinkOption.NOFOLLOW_LINKS)) {
+                Files.createDirectories(dbRoot);
+            } else {
+                Path dbRootParent = dbRoot.getParent();
+                restorePath = dbRootParent == null ? Files.createTempDirectory("restore")
+                        : Files.createTempDirectory(dbRootParent, "restore");
+            }
+
+            // 2. Restore the backup.
+            Files.walkFileTree(fromPath, new RestoreCopyVisitor(restorePath, revision));
+
+            // 3. Override existing DB root with restored temporary copy if necessary.
+            if (dbRoot != restorePath) {
+                FileUtils.deleteAll(dbRoot.toFile());
+                Files.move(restorePath, dbRoot);
+            } 
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+    }
+
+    private class RestoreCopyVisitor extends SimpleFileVisitor<Path> {
+
+        private final Path toPath;
+        private final int revision;
+        private Path currentSubFolder;
+
+        public RestoreCopyVisitor(Path toPath, int revision) {
+            this.toPath = toPath;
+            this.revision = revision;
+        }
+
+        @Override
+        public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
+            Path dirName = dir.getFileName();
+            if (dirName.toString().equals(IDENTIFIER)) {
+                currentSubFolder = dir;
+                return FileVisitResult.CONTINUE;
+            } else if (dir.getParent().getFileName().toString().equals(IDENTIFIER)) {
+                Path targetPath = toPath.resolve(dirName);
+                if (!Files.exists(targetPath)) {
+                    Files.createDirectory(targetPath);
+                }
+                return FileVisitResult.CONTINUE;
+            } else if (dirName.toString().length() == 1 && Character.isDigit(dirName.toString().charAt(0))) {
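+                // Note: only single-digit revision directories (0-9) reach this branch.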
+                int dirNameInt = Integer.parseInt(dirName.toString());
+                if (dirNameInt <= revision) {
+                    return FileVisitResult.CONTINUE;
+                } else {
+                    return FileVisitResult.SKIP_SUBTREE;
+                }
+            } else {
+                return FileVisitResult.CONTINUE;
+            }
+        }
+
+        @Override
+        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+            if (file.getFileName().toString().endsWith(".tar.gz"))
+                return FileVisitResult.CONTINUE;
+            System.out.println("Restore " + file + " to " + toPath.resolve(currentSubFolder.relativize(file)));
+            Files.copy(file, toPath.resolve(currentSubFolder.relativize(file)), StandardCopyOption.REPLACE_EXISTING);
+            return FileVisitResult.CONTINUE;
+        }
+    }
+
+    private static class AcornBackupRunnable implements Runnable, Future<BackupException> {
+
+        private final Semaphore lock;
+        private final Path targetPath;
+        private final int revision;
+        private final Path baseDir;
+        private final int latestFolder;
+        private final int newestFolder;
+
+        private volatile boolean done = false;
+        private final Semaphore completion = new Semaphore(0);
+        private BackupException exception = null;
+
+        public AcornBackupRunnable(Semaphore lock, Path targetPath, int revision,
+                Path baseDir, int latestFolder, int newestFolder) {
+            this.lock = lock;
+            this.targetPath = targetPath;
+            this.revision = revision;
+            this.baseDir = baseDir;
+            this.latestFolder = latestFolder;
+            this.newestFolder = newestFolder;
+        }
+
+        @Override
+        public void run() {
+            try {
+                doBackup();
+                writeHeadStateFile();
+            } catch (IOException e) {
+                exception = new BackupException("Acorn backup failed", e);
+                rollback();
+            } finally {
+                done = true;
+                lock.release();
+                completion.release();
+            }
+        }
+
+        private void doBackup() throws IOException {
+            Path target = targetPath.resolve(String.valueOf(revision)).resolve(IDENTIFIER);
+            if (!Files.exists(target))
+                Files.createDirectories(target);
+            Files.walkFileTree(baseDir,
+                    new BackupCopyVisitor(baseDir, target));
+        }
+
+        private void writeHeadStateFile() throws IOException {
+            Path acornMetadataFile = getAcornMetadataFile(baseDir);
+            if (!Files.exists(acornMetadataFile)) {
+                Files.createFile(acornMetadataFile);
+            }
+            Files.write(acornMetadataFile,
+                    Arrays.asList(Integer.toString(newestFolder)),
+                    StandardOpenOption.WRITE,
+                    StandardOpenOption.TRUNCATE_EXISTING,
+                    StandardOpenOption.CREATE);
+        }
+
+        private void rollback() {
+            // TODO
+        }
+
+        private class BackupCopyVisitor extends SimpleFileVisitor<Path> {
+
+            private Path fromPath;
+            private Path toPath;
+
+            public BackupCopyVisitor(Path fromPath, Path toPath) {
+                this.fromPath = fromPath;
+                this.toPath = toPath;
+            }
+
+            @Override
+            public FileVisitResult preVisitDirectory(Path dir,
+                    BasicFileAttributes attrs) throws IOException {
+                Path dirName = dir.getFileName();
+                if (dirName.equals(fromPath)) {
+                    Path targetPath = toPath.resolve(fromPath.relativize(dir));
+                    if (!Files.exists(targetPath)) {
+                        Files.createDirectory(targetPath);
+                    }
+                    return FileVisitResult.CONTINUE;
+                } else {
+                    int dirNameInt = Integer.parseInt(dirName.toString());
+                    if (latestFolder < dirNameInt && dirNameInt <= newestFolder) {
+                        Path targetPath = toPath.resolve(fromPath
+                                .relativize(dir));
+                        if (!Files.exists(targetPath)) {
+                            Files.createDirectory(targetPath);
+                        }
+                        return FileVisitResult.CONTINUE;
+                    }
+                    return FileVisitResult.SKIP_SUBTREE;
+                }
+            }
+
+            @Override
+            public FileVisitResult visitFile(Path file,
+                    BasicFileAttributes attrs) throws IOException {
+                System.out.println("Backup " + file + " to "
+                        + toPath.resolve(fromPath.relativize(file)));
+                Files.copy(file, toPath.resolve(fromPath.relativize(file)),
+                        StandardCopyOption.REPLACE_EXISTING);
+                return FileVisitResult.CONTINUE;
+            }
+        }
+
+        @Override
+        public boolean cancel(boolean mayInterruptIfRunning) {
+            return false;
+        }
+
+        @Override
+        public boolean isCancelled() {
+            return false;
+        }
+
+        @Override
+        public boolean isDone() {
+            return done;
+        }
+
+        @Override
+        public BackupException get() throws InterruptedException {
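+            // Acquire and immediately re-release so any number of callers can
+            // block here until the backup finishes.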
+            completion.acquire();
+            completion.release();
+            return exception;
+        }
+
+        @Override
+        public BackupException get(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException {
+            if (completion.tryAcquire(timeout, unit))
+                completion.release();
+            else
+                throw new TimeoutException("Acorn backup completion waiting timed out.");
+            return exception;
+        }
+
+    }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterBig.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterBig.java
new file mode 100644 (file)
index 0000000..f623d58
--- /dev/null
@@ -0,0 +1,1104 @@
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn.cluster;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.internal.ClusterStream;
+import org.simantics.acorn.internal.ClusterSupport2;
+import org.simantics.acorn.internal.DebugPolicy;
+import org.simantics.db.Resource;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.exception.ExternalValueException;
+import org.simantics.db.exception.ValidationException;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.impl.ClusterI.PredicateProcedure;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.impl.ClusterTraitsBase;
+import org.simantics.db.impl.ForEachObjectContextProcedure;
+import org.simantics.db.impl.ForEachObjectProcedure;
+import org.simantics.db.impl.ForPossibleRelatedValueContextProcedure;
+import org.simantics.db.impl.ForPossibleRelatedValueProcedure;
+import org.simantics.db.impl.IClusterTable;
+import org.simantics.db.impl.Table;
+import org.simantics.db.impl.TableHeader;
+import org.simantics.db.impl.graph.ReadGraphImpl;
+import org.simantics.db.impl.query.QueryProcessor;
+import org.simantics.db.procedure.AsyncContextMultiProcedure;
+import org.simantics.db.procedure.AsyncMultiProcedure;
+import org.simantics.db.procore.cluster.ClusterMap;
+import org.simantics.db.procore.cluster.ClusterPrintDebugInfo;
+import org.simantics.db.procore.cluster.ClusterTraits;
+import org.simantics.db.procore.cluster.CompleteTable;
+import org.simantics.db.procore.cluster.FlatTable;
+import org.simantics.db.procore.cluster.ForeignTable;
+import org.simantics.db.procore.cluster.ObjectTable;
+import org.simantics.db.procore.cluster.PredicateTable;
+import org.simantics.db.procore.cluster.ResourceTable;
+import org.simantics.db.procore.cluster.ValueTable;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.datastructures.Callback;
+
+final public class ClusterBig extends ClusterImpl {
+    private static final int TABLE_HEADER_SIZE = TableHeader.HEADER_SIZE + TableHeader.EXTRA_SIZE;
+    private static final int RESOURCE_TABLE_OFFSET = 0;
+    private static final int PREDICATE_TABLE_OFFSET = RESOURCE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int OBJECT_TABLE_OFFSET = PREDICATE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int VALUE_TABLE_OFFSET = OBJECT_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int FLAT_TABLE_OFFSET = VALUE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int COMPLETE_TABLE_OFFSET = FLAT_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int FOREIGN_TABLE_OFFSET = COMPLETE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int INT_HEADER_SIZE = FOREIGN_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private final int clusterBits;
+    final private ResourceTable resourceTable;
+    //final private ResourceTable movedResourceTable;
+    final private PredicateTable predicateTable;
+    final private ObjectTable objectTable;
+    final private ValueTable valueTable;
+    final private FlatTable flatTable;
+    final private ForeignTable foreignTable;
+    final private CompleteTable completeTable;
+    final private ClusterMap clusterMap;
+    final private int[] headerTable;
+    final private ClusterSupport2 clusterSupport;
+    
+    public ClusterBig(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport2 support) {
+        super(clusterTable, clusterUID, clusterKey, support);
+        if(DebugPolicy.REPORT_CLUSTER_EVENTS)
+            new Exception(getClusterUID().toString()).printStackTrace();
+        this.headerTable = new int[INT_HEADER_SIZE];
+        this.resourceTable = new ResourceTable(this, headerTable, RESOURCE_TABLE_OFFSET);
+        this.foreignTable = new ForeignTable(this, headerTable, FOREIGN_TABLE_OFFSET);
+        this.predicateTable = new PredicateTable(this, headerTable, PREDICATE_TABLE_OFFSET);
+        this.objectTable = new ObjectTable(this, headerTable, OBJECT_TABLE_OFFSET);
+        this.valueTable = new ValueTable(this, headerTable, VALUE_TABLE_OFFSET);
+        this.completeTable = new CompleteTable(this, headerTable, COMPLETE_TABLE_OFFSET);
+        this.flatTable = null;
+        this.clusterMap = new ClusterMap(foreignTable, flatTable);
+        this.clusterSupport = support;
+        this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey);
+        this.importance = 0;
+//        clusterTable.setDirtySizeInBytes(true);
+    }
+    protected ClusterBig(IClusterTable clusterTable, long[] longs, int[] ints, byte[] bytes, ClusterSupport2 support, int clusterKey)
+    throws DatabaseException {
+        super(clusterTable, checkValidity(0, longs, ints, bytes), clusterKey, support);
+        if(DebugPolicy.REPORT_CLUSTER_EVENTS)
+            new Exception(getClusterUID().toString()).printStackTrace();
+        if (ints.length < INT_HEADER_SIZE)
+            throw new IllegalArgumentException("Too small integer table for cluster.");
+        this.headerTable = ints;
+        this.resourceTable = new ResourceTable(this, ints, RESOURCE_TABLE_OFFSET, longs);
+        this.foreignTable = new ForeignTable(this, headerTable, FOREIGN_TABLE_OFFSET, longs);
+        this.predicateTable = new PredicateTable(this, ints, PREDICATE_TABLE_OFFSET, ints);
+        this.objectTable = new ObjectTable(this, ints, OBJECT_TABLE_OFFSET, ints);
+        this.valueTable = new ValueTable(this, ints, VALUE_TABLE_OFFSET, bytes);
+        this.flatTable = null;
+        this.completeTable = new CompleteTable(this, headerTable, COMPLETE_TABLE_OFFSET, ints);
+        this.clusterMap = new ClusterMap(foreignTable, flatTable);
+        this.clusterSupport = support;
+        this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey);
+    }
+    void analyse() {
+        System.out.println("Cluster " + clusterId);
+        System.out.println("-size:" + getUsedSpace());
+        System.out.println(" -rt:" + (resourceTable.getTableCapacity() * 8 + 8));
+        System.out.println(" -ft:" + foreignTable.getTableCapacity() * 8);
+        System.out.println(" -pt:" + predicateTable.getTableCapacity() * 4);
+        System.out.println(" -ot:" + objectTable.getTableCapacity() * 4);
+        System.out.println(" -ct:" + completeTable.getTableCapacity() * 4);
+        System.out.println(" -vt:" + valueTable.getTableCapacity());
+
+        System.out.println("-resourceTable:");
+        System.out.println(" -resourceCount=" + resourceTable.getResourceCount());
+        System.out.println(" -size=" + resourceTable.getTableSize());
+        System.out.println(" -capacity=" + resourceTable.getTableCapacity());
+        System.out.println(" -count=" + resourceTable.getTableCount());
+        //resourceTable.analyse();
+    }
+    public void checkDirectReference(int dr)
+    throws DatabaseException {
+        if (!ClusterTraits.statementIndexIsDirect(dr))
+            throw new ValidationException("Reference is not direct. Reference=" + dr);
+        if (ClusterTraits.isFlat(dr))
+            throw new ValidationException("Reference is flat. Reference=" + dr);
+        if (ClusterTraits.isLocal(dr)) {
+            if (dr < 1 || dr > resourceTable.getUsedSize())
+                throw new ValidationException("Illegal local reference. Reference=" + dr);
+        } else {
+            int fi = ClusterTraits.getForeignIndexFromReference(dr);
+            int ri = ClusterTraits.getResourceIndexFromForeignReference(dr);
+            if (fi < 1 || fi > foreignTable.getUsedSize())
+                throw new ValidationException("Illegal foreign reference. Reference=" + dr + " foreign index=" + fi);
+            if (ri < 1 || ri > ClusterTraits.getMaxNumberOfResources())
+                throw new ValidationException("Illegal foreign reference. Reference=" + dr + " resource index=" + ri);
+        }
+    }
+    public void checkPredicateIndex(int pi)
+    throws DatabaseException {
+        predicateTable.checkPredicateSetIndex(this, pi);
+    }
+    public void checkObjectSetReference(int or)
+    throws DatabaseException {
+        if (ClusterTraits.statementIndexIsDirect(or))
+            throw new ValidationException("Illegal object set reference. Reference=" + or);
+        int oi = ClusterTraits.statementIndexGet(or);
+        this.objectTable.checkObjectSetIndex(this, oi);
+    }
+
+    public void checkValueInit()
+    throws DatabaseException {
+        valueTable.checkValueInit();
+    }
+    public void checkValue(int capacity, int index)
+    throws DatabaseException {
+        valueTable.checkValue(capacity, index);
+    }
+    public void checkValueFini()
+    throws DatabaseException {
+        valueTable.checkValueFini();
+    }
+    public void checkForeingIndex(int fi)
+    throws DatabaseException {
+        if (fi < 1 || fi > foreignTable.getUsedSize())
+            throw new ValidationException("Illegal foreign index=" + fi);
+    }
+    public void checkCompleteSetReference(int cr)
+    throws DatabaseException {
+        if (!ClusterTraits.completeReferenceIsMultiple(cr))
+            throw new ValidationException("Illegal complete set reference. Reference=" + cr);
+        int ci = cr;
+        this.completeTable.checkCompleteSetIndex(this, ci);
+    }
+    public void check()
+    throws DatabaseException {
+        this.completeTable.check(this);
+        this.objectTable.check(this);
+        // Must be after object table check.
+        this.predicateTable.check(this);
+        this.resourceTable.check(this);
+    }
+    @Override
+    public CompleteTypeEnum getCompleteType(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        final int resourceRef = getLocalReference(resourceKey);
+        int completeRef = resourceTable.getCompleteObjectRef(resourceRef);
+        CompleteTypeEnum ct = ClusterTraits.completeReferenceGetType(completeRef);
+        if (DEBUG)
+            System.out.println("Cluster.getCompleteType rk=" + resourceKey + " ct=" + ct);
+        int i = ct.getValue();
+        switch (i) {
+            case 0: return CompleteTypeEnum.NotComplete;
+            case 1: return CompleteTypeEnum.InstanceOf;
+            case 2: return CompleteTypeEnum.Inherits;
+            case 3: return CompleteTypeEnum.SubrelationOf;
+            default: throw new DatabaseException("Illegal complete type enumeration.");
+        }
+    }
+
+    @Override
+    public int getCompleteObjectKey(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        final int resourceRef = getLocalReference(resourceKey);
+        int completeRef = resourceTable.getCompleteObjectRef(resourceRef);
+        int clusterIndex;
+        int resourceIndex = ClusterTraits.completeReferenceGetResourceIndex(completeRef);
+        
+        ClusterI.CompleteTypeEnum completeType = ClusterTraits.completeReferenceGetType(completeRef);
+        if (completeType == ClusterI.CompleteTypeEnum.NotComplete)
+            throw new DatabaseException("Resource has multiple complete objects. Resource key=" + resourceKey + ".");
+        
+        if (ClusterTraits.completeReferenceIsLocal(completeRef)) {
+            clusterIndex = clusterKey;
+        } else {
+            int foreignIndex = ClusterTraits.completeReferenceGetForeignIndex(completeRef);
+//            System.err.println("completeRef=" + completeRef + " foreignIndex=" + foreignIndex );
+            ClusterUID clusterUID = foreignTable.getResourceUID(foreignIndex).asCID();
+            ClusterI c = support.getClusterByClusterUIDOrMake(clusterUID);
+            clusterIndex = c.getClusterKey();
+        }
+        int key = ClusterTraits.createResourceKey(clusterIndex, resourceIndex);
+        if (DEBUG)
+            System.out.println("Cluster.complete object rk=" + resourceKey + " ck=" + key);
+        return key;
+    }
+
+    @Override
+    public boolean isComplete(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        final int resourceRef = getLocalReference(resourceKey);
+        int completeRef = resourceTable.getCompleteObjectRef(resourceRef);
+        ClusterI.CompleteTypeEnum completeType = ClusterTraits.completeReferenceGetType(completeRef);
+        boolean complete = completeType != ClusterI.CompleteTypeEnum.NotComplete;
+        if (DEBUG)
+            System.out.println("Cluster.key=" + resourceKey + " isComplete=" + complete);
+        return complete;
+    }
+
+    public int getSingleObject(int resourceKey, int predicateKey, int objectIndex, ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("Cluster.getSingleObject: rk=" + resourceKey + " pk=" + predicateKey);
+        if (0 == objectIndex) {
+            final int resourceIndex = getLocalReference(resourceKey);
+            final int pRef = getInternalReferenceOrZero(predicateKey, support);
+            final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        }
+        return objectTable.getSingleObject(objectIndex, support, this);
+    }
+
+    public void forObjects(int resourceKey, int predicateKey, int objectIndex, QueryProcessor processor, ReadGraphImpl graph, AsyncMultiProcedure<Resource> procedure,
+            ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("Cluster.forObjects1: rk=" + resourceKey + " pk=" + predicateKey);
+        if (0 == objectIndex) {
+            final int resourceIndex = getLocalReference(resourceKey);
+            final int pRef = getInternalReferenceOrZero(predicateKey, support);
+            final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+            return;
+        }
+        objectTable.foreachObject(graph, objectIndex, procedure, this);
+    }
+    public <C> void forObjects(int resourceKey, int predicateKey, int objectIndex, QueryProcessor processor, ReadGraphImpl graph, C context, AsyncContextMultiProcedure<C, Resource> procedure,
+            ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("Cluster.forObjects1: rk=" + resourceKey + " pk=" + predicateKey);
+        if (0 == objectIndex) {
+            final int resourceIndex = getLocalReference(resourceKey);
+            final int pRef = getInternalReferenceOrZero(predicateKey, support);
+            final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+            resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+            return;
+        }
+        objectTable.foreachObject(graph, objectIndex, context, procedure, this);
+    }
+    @Override
+    public <Context> boolean forObjects(int resourceKey, int predicateKey, int objectIndex, ObjectProcedure<Context> procedure,
+            Context context, ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("Cluster.forObjects2: rk=" + resourceKey + " pk=" + predicateKey);
+        if (0 == objectIndex) {
+            final int resourceIndex = getLocalReference(resourceKey);
+            final int pRef = getInternalReferenceOrZero(predicateKey, support);
+            final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+            return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+        }
+        return objectTable.foreachObject(objectIndex, procedure, context, support, this);
+    }
+
+    @Override
+    public int getSingleObject(int resourceKey, int predicateKey, ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("Cluster.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey);
+        final int resourceIndex = getLocalReference(resourceKey);
+        final int pRef = getInternalReferenceOrZero(predicateKey, support);
+        final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+        if (0 == predicateIndex)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+        return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+    }
+    
+    @Override
+    public <T> int getSingleObject(int resourceKey, ForPossibleRelatedValueProcedure<T> procedure, ClusterSupport support) throws DatabaseException {
+        final int predicateKey = procedure.predicateKey;
+        if (DEBUG)
+            System.out.println("Cluster.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey);
+        final int resourceIndex = getLocalReference(resourceKey);
+        final int pRef = getInternalReferenceOrZero(predicateKey, support);
+        final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+        if (0 == predicateIndex)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+        return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+    }
+
+    @Override
+    public <C, T> int getSingleObject(int resourceKey, ForPossibleRelatedValueContextProcedure<C, T> procedure, ClusterSupport support) throws DatabaseException {
+        final int predicateKey = procedure.predicateKey;
+        if (DEBUG)
+            System.out.println("Cluster.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey);
+        final int resourceIndex = getLocalReference(resourceKey);
+        final int pRef = getInternalReferenceOrZero(predicateKey, support);
+        final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+        if (0 == predicateIndex)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+        return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+    }
+    
+    @Override
+    public void forObjects(ReadGraphImpl graph, int resourceKey,
+            int predicateKey, AsyncMultiProcedure<Resource> procedure)
+            throws DatabaseException {
+        
+       throw new UnsupportedOperationException();
+       
+//        SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//        ClusterSupport support = session.clusterTranslator;
+//        
+//        if (DEBUG)
+//            System.out.println("Cluster.forObjects3: rk=" + resourceKey + " pk=" + predicateKey);
+//        final int resourceIndex = getLocalReference(resourceKey);
+//        final int pRef = getInternalReferenceOrZero(predicateKey, support);
+//        final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+//        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) {
+//            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+//        if (0 == predicateIndex) {
+//            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+//        forObjects(resourceKey, predicateKey, objectIndex, graph.processor, graph, procedure, support);
+        
+    }
+    
+    @Override
+    public void forObjects(ReadGraphImpl graph, int resourceKey, ForEachObjectProcedure procedure) throws DatabaseException {
+       
+       throw new UnsupportedOperationException();
+       
+//        SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//        ClusterSupport support = session.clusterTranslator;
+//        final int predicateKey = procedure.predicateKey;
+//        if (DEBUG)
+//            System.out.println("Cluster.forObjects3: rk=" + resourceKey + " pk=" + predicateKey);
+//        final int resourceIndex = getLocalReference(resourceKey);
+//        final int pRef = getInternalReferenceOrZero(predicateKey, support);
+//        final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+//        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) {
+//            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+//        if (0 == predicateIndex) {
+//            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+//        forObjects(resourceKey, predicateKey, objectIndex, graph.processor, graph, procedure, support);
+        
+    }
+    @Override
+    public <C> void forObjects(ReadGraphImpl graph, int resourceKey, C context,
+            ForEachObjectContextProcedure<C> procedure) throws DatabaseException {
+        
+       throw new UnsupportedOperationException();
+
+//     SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//        ClusterSupport support = session.clusterTranslator;
+//        
+//        final int predicateKey = procedure.predicateKey;
+//        
+//        if (DEBUG)
+//            System.out.println("Cluster.forObjects3: rk=" + resourceKey + " pk=" + predicateKey);
+//        final int resourceIndex = getLocalReference(resourceKey);
+//        final int pRef = getInternalReferenceOrZero(predicateKey, support);
+//        final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+//        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) {
+//            resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+//        if (0 == predicateIndex) {
+//            resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+//        forObjects(resourceKey, predicateKey, objectIndex, graph.processor, graph, context, procedure, support);
+        
+    }
+
+    @Override
+    public <Context> boolean forObjects(int resourceKey, int predicateKey,
+            ObjectProcedure<Context> procedure, Context context, ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("Cluster.forObjects4: rk=" + resourceKey + " pk=" + predicateKey);
+        final int resourceIndex = getLocalReference(resourceKey);
+        final int pRef = getInternalReferenceOrZero(predicateKey, support);
+        final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType)
+            return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+        if (0 == predicateIndex)
+            return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+        return forObjects(resourceKey, predicateKey, objectIndex, procedure, context, support);
+    }
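+    // Dispatch summary (illustrative, inferred from the code above): complete
+    // predicates and resources with no predicate set are both served by
+    // resourceTable.foreachObject (backed by the complete table); everything else
+    // resolves an object set index through the predicate table and traverses it.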
+    @Override
+    public <Context> boolean forPredicates(int resourceKey,
+            PredicateProcedure<Context> procedure, Context context, ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("Cluster.forPredicates: rk=" + resourceKey);
+        final int resourceIndex = getLocalReference(resourceKey);
+        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+        if (0 == predicateIndex)
+            return resourceTable.foreachPredicate(resourceIndex,
+                    procedure, context, support, this, completeTable);
+        else {
+            boolean broken = resourceTable.foreachPredicate(resourceIndex,
+                    procedure, context, support, this, completeTable);
+            if (broken)
+                return true;
+        }
+        return predicateTable.foreachPredicate(predicateIndex, procedure, context, support, this);
+    }
+    @Override
+    public ClusterI addRelation(int sResourceKey, ClusterUID puid, int pResourceKey, ClusterUID ouid, int oResourceKey, ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("add rk=" + sResourceKey + " pk=" + pResourceKey + " ok=" + oResourceKey);
+        int sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION);
+        int pri = getReferenceOrCreateIfForeign(pResourceKey, puid, support, ClusterStream.NULL_OPERATION);
+        int ori = getReferenceOrCreateIfForeign(oResourceKey, ouid, support, ClusterStream.NULL_OPERATION);
+        ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+        boolean ret = addRelationInternal(sri, pri, ori, completeType);
+//      check();
+        if (ret) {
+            support.addStatement(this);
+            return this;
+        } else {
+            support.cancelStatement(this);
+            return null;
+        }
+    }
+    @Override
+    public ClusterI addRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("add rk=" + sResourceKey + " pk=" + pResourceKey + " ok=" + oResourceKey);
+        int sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION);
+        int pri = getReferenceOrCreateIfForeign(pResourceKey, support, ClusterStream.NULL_OPERATION);
+        int ori = getReferenceOrCreateIfForeign(oResourceKey, support, ClusterStream.NULL_OPERATION);
+        ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+        boolean ret = addRelationInternal(sri, pri, ori, completeType);
+//      check();
+        if (ret) {
+            support.addStatement(this);
+            return this;
+        } else {
+            support.cancelStatement(this);
+            return null;
+        }
+    }
+    @Override
+    public boolean removeRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support)
+    throws DatabaseException {
+//        check();
+        int sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.REMOVE_OPERATION);
+        int pri = getInternalReferenceOrZeroAnd(pResourceKey, support, ClusterStream.NULL_OPERATION);
+        int ori = getInternalReferenceOrZeroAnd(oResourceKey, support, ClusterStream.NULL_OPERATION);
+        boolean ret = false;
+        if (0 != pri && 0 != ori) {
+            ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+            ret = removeRelationInternal(sri, pri, ori, completeType, support);
+        }
+        if (ret)
+            support.removeStatement(this);
+        else
+            support.cancelStatement(this);
+//        check();
+        return ret;
+    }
+    @Override
+    public void denyRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support)
+    throws DatabaseException {
+        int sri = checkResourceKeyIsOursAndGetResourceIndexIf(sResourceKey, support);
+        ResourceIndexAndId p = checkResourceKeyAndGetResourceIndexIf(pResourceKey, support);
+        ResourceIndexAndId o = checkResourceKeyAndGetResourceIndexIf(oResourceKey, support);
+        if (0 == sri || 0 == p.index || 0 == o.index)
+            return;
+//        check();
+        ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+        boolean ret = removeRelationInternal(sri, p.reference, o.reference, completeType, support);
+        if (ret) {
+            support.addStatementIndex(this, sResourceKey, getClusterUID(), ClusterChange.REMOVE_OPERATION);
+            support.addStatementIndex(this, pResourceKey, p.clusterUID, ClusterStream.NULL_OPERATION);
+            support.addStatementIndex(this, oResourceKey, o.clusterUID, ClusterStream.NULL_OPERATION);
+            support.removeStatement(this);
+        }
+//        check();
+        return;
+    }
+    @Override
+    public InputStream getValueStream(int rResourceId, ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterBig.getValue " + rResourceId);
+        int resourceIndex = getLocalReference(rResourceId);
+        try {
+            byte[] buffer = resourceTable.getValue(valueTable, resourceIndex);
+            if(buffer == null) return null;
+            return new ByteArrayInputStream(buffer);
+        } catch (ExternalValueException e) {
+            return support.getValueStreamEx(resourceIndex, clusterUID.second);
+        }
+    }
+    @Override
+    public byte[] getValue(int rResourceId, ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterBig.getValue " + rResourceId);
+        int resourceIndex = getLocalReference(rResourceId);
+        try {
+            return resourceTable.getValue(valueTable, resourceIndex);
+        } catch (ExternalValueException e) {
+               return clusterSupport.impl.getResourceFile(clusterUID.asBytes(), resourceIndex);
+//            return support.getValueEx(resourceIndex, clusterUID.second);
+        }
+    }
+    @Override
+    public boolean hasValue(int rResourceId, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReference(rResourceId);
+        return resourceTable.hasValue(resourceIndex);
+    }
+    @Override
+    public boolean removeValue(int rResourceId, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterChange.DELETE_OPERATION);
+        support.removeValue(this);
+        return resourceTable.removeValue(valueTable, resourceIndex);
+    }
+    
+    @Override
+    public ClusterI setValue(int rResourceId, byte[] value, int length, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.SET_OPERATION);
+        support.setValue(this, getClusterId(), value, length);
+        resourceTable.setValue(valueTable, resourceIndex, value, length);
+        return this;
+    }
+    @Override
+    public ClusterI modiValueEx(int rResourceId, long voffset, int length, byte[] value, int offset, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.MODI_OPERATION);
+        support.modiValue(this, getClusterId(), voffset, length, value, offset);
+        resourceTable.setValueEx(valueTable, resourceIndex);
+        return this;
+    }
+    @Override
+    public byte[] readValueEx(int rResourceId, long voffset, int length, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReference(rResourceId);
+        boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex);
+        if (!isExternal)
+            throw new DatabaseException("ClusterI.readValue supported only for external value. Resource key=" + rResourceId);
+        return support.getValueEx(resourceIndex, getClusterId(), voffset, length);
+    }
+    @Override
+    public long getValueSizeEx(int resourceKey, ClusterSupport support)
+    throws DatabaseException, ExternalValueException {
+        int resourceIndex = getLocalReference(resourceKey);
+        boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex);
+        if (!isExternal)
+            throw new ExternalValueException("ClusterI.getSize is supported only for external values. Resource key=" + resourceKey);
+        return support.getValueSizeEx(resourceIndex, getClusterId());
+    }
+    public boolean isValueEx(int resourceKey)
+    throws DatabaseException {
+        int resourceIndex = getLocalReference(resourceKey);
+        return resourceTable.isValueEx(valueTable, resourceIndex);
+    }
+    @Override
+    public void setValueEx(int resourceKey)
+    throws DatabaseException {
+        int resourceIndex = getLocalReference(resourceKey);
+        resourceTable.setValueEx(valueTable, resourceIndex);
+    }
+    @Override
+    public int createResource(ClusterSupport support)
+    throws DatabaseException {
+        short resourceIndex = resourceTable.createResource();
+
+        if(DebugPolicy.REPORT_RESOURCE_ID_ALLOCATION)
+            System.out.println("[RID_ALLOCATION]: ClusterBig[" + clusterId + "] allocates " + resourceIndex);
+
+        support.createResource(this, resourceIndex, clusterId);
+        return ClusterTraits.createResourceKey(clusterKey, resourceIndex);
+    }
+    @Override
+    public boolean hasResource(int resourceKey, ClusterSupport support) {
+        int clusterKey = ClusterTraitsBase.getClusterKeyFromResourceKeyNoThrow(resourceKey);
+        if (this.clusterKey != clusterKey) // foreign resource
+            return false;
+        int resourceIndex;
+        try {
+            resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        } catch (DatabaseException e) {
+            return false;
+        }
+        return resourceIndex > 0 && resourceIndex <= resourceTable.getTableCount();
+    }
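+    // Illustrative sketch (not part of the original commit): a resource key packs a
+    // cluster key and a resource index into a single int, so a round trip such as
+    //   int key = ClusterTraits.createResourceKey(clusterKey, resourceIndex);
+    //   assert ClusterTraitsBase.getClusterKeyFromResourceKeyNoThrow(key) == clusterKey;
+    //   assert ClusterTraits.getResourceIndexFromResourceKey(key) == resourceIndex;
+    // should hold for any index accepted by hasResource above.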
+    @Override
+    public int getNumberOfResources(ClusterSupport support) {
+        return resourceTable.getUsedSize();
+    }
+    @Override
+    public long getUsedSpace() {
+        long rt = resourceTable.getTableCapacity() * 8 + 8; // 8 bytes per entry, +8 for the cluster id
+        long ft = foreignTable.getTableCapacity() * 8;
+        long pt = predicateTable.getTableCapacity() * 4;
+        long ot = objectTable.getTableCapacity() * 4;
+        long ct = completeTable.getTableCapacity() * 4;
+        long vt = valueTable.getTableCapacity() * 1;
+        long cm = clusterMap.getUsedSpace();
+        
+        return rt + ft + pt + ot + ct + vt + cm;
+//        System.out.println("resource table " + rt);
+//        System.out.println("foreign table (non flat cluster table) " + ft);
+//        System.out.println("predicate table " + pt);
+//        long pt2 = getRealSizeOfPredicateTable() * 4;
+//        System.out.println("predicate table real size " + pt2);
+//        System.out.println("object table " + ot);
+//        long ot2 = getRealSizeOfObjectTable() * 4;
+//        System.out.println("object table real size " + ot2);
+//        System.out.println("value table " + vt);
+    }
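+    // Worked example (illustrative): entries are weighted by their element width,
+    // i.e. 8 bytes per long slot, 4 per int slot and 1 per value byte. With
+    // capacities rt=1000, ft=100, pt=500, ot=2000, ct=300 and vt=4096 this gives
+    // 1000*8+8 + 100*8 + 500*4 + 2000*4 + 300*4 + 4096 = 24104 bytes,
+    // plus whatever the cluster map reports for itself.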
+    int getRealSizeOfPredicateTable() throws DatabaseException {
+        SizeOfPredicateTable proc = new SizeOfPredicateTable(resourceTable, predicateTable);
+        resourceTable.foreachResource(proc, 0, null, null);
+        return proc.getSize();
+    }
+    int getRealSizeOfObjectTable() throws DatabaseException {
+        SizeOfObjectTable proc = new SizeOfObjectTable(resourceTable, predicateTable, objectTable);
+        resourceTable.foreachResource(proc, 0, null, null);
+        return proc.getSize();
+    }
+    @Override
+    public boolean isEmpty() {
+        return resourceTable.getTableCount() == 0;
+    }
+    @Override
+    public void printDebugInfo(String message, ClusterSupport support)
+    throws DatabaseException {
+        predicateTable.printDebugInfo();
+        objectTable.printDebugInfo();
+        ClusterPrintDebugInfo proc = new ClusterPrintDebugInfo(this
+                , resourceTable, predicateTable, support, objectTable);
+        resourceTable.foreachResource(proc, 0, null, null);
+    }
+    private int getInternalReferenceOrZero(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) { // foreign resource
+            ClusterI foreignCluster = support.getClusterByClusterKey(clusterKey);
+            ClusterUID clusterUID = foreignCluster.getClusterUID();
+            int foreignResourceIndex = clusterMap.getForeignReferenceOrZero(resourceIndex, clusterUID);
+            return foreignResourceIndex;
+        }
+        return resourceIndex;
+    }
+    private int getInternalReferenceOrZeroAnd(int resourceKey, ClusterSupport support, byte op)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) { // foreign resource
+            ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey);
+            int foreignResourceIndex = clusterMap.getForeignReferenceOrZero(resourceIndex, clusterUID);
+            support.addStatementIndex(this, resourceKey, clusterUID, op);
+            return foreignResourceIndex;
+        }
+        support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+        return resourceIndex;
+    }
+    private short getLocalReference(int resourceKey) throws DatabaseException {
+        return ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+    }
+    private int getLocalReferenceAnd(int resourceKey, ClusterSupport support, byte op)
+    throws DatabaseException {
+        int resourceIndex = getLocalReference(resourceKey);
+        support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+        return resourceIndex;
+    }
+    private int checkResourceKeyIsOursAndGetResourceIndexIf(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        int clusterShortId = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterShortId)
+            return 0;
+        int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        return resourceIndex;
+    }
+    private int getReferenceOrCreateIfForeign(int resourceKey, ClusterUID clusterUID, ClusterSupport support, byte op)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) {
+            support.addStatementIndex(this, resourceKey, clusterUID, op);
+            return clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID);
+        }
+        support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+        return resourceIndex;
+    }
+    private int getReferenceOrCreateIfForeign(int resourceKey, ClusterSupport support, byte op)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) {
+            ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey);
+            support.addStatementIndex(this, resourceKey, clusterUID, op);
+            return clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID);
+        }
+        support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+        return resourceIndex;
+    }
+    private class ResourceIndexAndId {
+        ResourceIndexAndId(int reference, int index, ClusterUID clusterUID) {
+            this.reference = reference;
+            this.index = index;
+            this.clusterUID = clusterUID;
+        }
+        public final int reference;
+        public final int index;
+        public final ClusterUID clusterUID;
+    }
+    private ResourceIndexAndId checkResourceKeyAndGetResourceIndexIf(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) { // foreign resource
+            ClusterI foreignCluster = support.getClusterByClusterKey(clusterKey);
+            ClusterUID clusterUID = foreignCluster.getClusterUID();
+            int ref = clusterMap.getForeignReferenceOrCreateByResourceIndex(resourceIndex, clusterUID);
+            return new ResourceIndexAndId(ref, resourceIndex, clusterUID);
+        }
+        return new ResourceIndexAndId(resourceIndex, resourceIndex, getClusterUID());
+    }
+    
+    @Override
+    final public int execute(int resourceIndex) throws DatabaseException {
+        int key;
+        if(resourceIndex > 0) {
+            key = clusterBits | resourceIndex;
+        } else {
+            ClusterUID clusterUID = clusterMap.getResourceUID(resourceIndex).asCID();
+            ClusterI cluster = clusterSupport.getClusterByClusterUIDOrMake(clusterUID);
+            int foreignResourceIndex = clusterMap.getForeignResourceIndex(resourceIndex);
+            key = ClusterTraits.createResourceKey(cluster.getClusterKey(), foreignResourceIndex);
+        }
+        if (DEBUG)
+            System.out.println("Cluster.execute key=" + key);
+        return key;
+    }
+    
+    private boolean addRelationInternal(int sReference, int pReference, int oReference, ClusterI.CompleteTypeEnum completeType)
+    throws DatabaseException {
+        int predicateIndex = resourceTable.addStatement(sReference, pReference,
+                oReference, predicateTable, objectTable, completeType, completeTable);
+        if (0 == predicateIndex)
+            return true; // added to resourceTable
+        else if (0 > predicateIndex)
+            return false; // old complete statement
+        int newPredicateIndex = predicateTable.addPredicate(predicateIndex,
+                pReference, oReference, objectTable);
+        if (0 == newPredicateIndex)
+            return false;
+        if (predicateIndex != newPredicateIndex)
+            resourceTable.setPredicateIndex(sReference, newPredicateIndex);
+        return true;
+    }
+    private boolean removeRelationInternal(int sResourceIndex, int pResourceIndex,
+            int oResourceIndex, ClusterI.CompleteTypeEnum completeType, ClusterSupport support)
+    throws DatabaseException {
+        int predicateIndex = resourceTable.getPredicateIndex(sResourceIndex);
+        if (0 == predicateIndex || ClusterI.CompleteTypeEnum.NotComplete != completeType)
+            return resourceTable.removeStatementFromCache(sResourceIndex,
+                    pResourceIndex, oResourceIndex, completeType, completeTable);
+        PredicateTable.Status ret = predicateTable.removePredicate(predicateIndex, pResourceIndex, oResourceIndex, objectTable);
+        switch (ret) {
+            case NothingRemoved:
+                return false;
+            case PredicateRemoved: {
+                if (0 == predicateTable.getPredicateSetSize(predicateIndex))
+                    resourceTable.setPredicateIndex(sResourceIndex, 0);
+                // intentional fall-through to the default case
+            }
+            default:
+                break;
+        }
+        resourceTable.removeStatement(sResourceIndex,
+                pResourceIndex, oResourceIndex,
+                completeType, completeTable,
+                predicateTable, objectTable, this, support);
+        return true;
+    }
+    @Override
+    public void load() {
+        throw new Error("Not supported.");
+    }
+
+    @Override
+    public void load(Callback<DatabaseException> r) {
+        throw new Error("Not supported.");
+    }
+
+    public int makeResourceKey(int resourceIndex) throws DatabaseException {
+        int key = 0;
+        if (resourceIndex > 0) // local resource
+            key = ClusterTraits.createResourceKey(clusterKey, resourceIndex);
+        else {
+               ClusterUID clusterUID = clusterMap.getResourceUID(resourceIndex).asCID();
+               int clusterKey = clusterSupport.getClusterKeyByClusterUIDOrMake(clusterUID);
+               int foreignResourceIndex = clusterMap.getForeignResourceIndex(resourceIndex);
+               key = ClusterTraits.createResourceKey(clusterKey, foreignResourceIndex);
+        }
+        if (0 == key)
+            throw new DatabaseException("Failed to make resource key from " + resourceIndex);
+        return key;
+    }
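+    // Sign convention (illustrative note): a positive resourceIndex is local to
+    // this cluster, while a non-positive index denotes a foreign resource whose
+    // key is rebuilt from its owning cluster via clusterMap, as in execute(int).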
+    @Override
+    public ClusterBig toBig(ClusterSupport support) throws DatabaseException {
+        throw new Error("Not implemented");
+    }
+    @Override
+    public void load(ClusterSupport session, Runnable callback) {
+        throw new Error("Not implemented");
+    }
+    @Override
+    public ClusterI getClusterByResourceKey(int resourceKey,
+            ClusterSupport support) {
+        throw new Error("Not implemented");
+    }
+    @Override
+    public void increaseReferenceCount(int amount) {
+        throw new Error("Not implemented");
+    }
+    @Override
+    public void decreaseReferenceCount(int amount) {
+        throw new Error("Not implemented");
+    }
+    @Override
+    public int getReferenceCount() {
+        throw new Error("Not implemented");
+    }
+    @Override
+    public void releaseMemory() {
+    }
+    @Override
+    public void compact() {
+        clusterMap.compact();
+    }
+    public boolean contains(int resourceKey) {
+        return ClusterTraitsBase.isCluster(clusterBits, resourceKey);
+    }
+    @Override
+    public ClusterTypeEnum getType() {
+        return ClusterTypeEnum.BIG;
+    }
+    @Override
+    public boolean getImmutable() {
+        int status = resourceTable.getClusterStatus();
+        return (status & ClusterStatus.ImmutableMaskSet) == 1;
+    }
+    @Override
+    public void setImmutable(boolean immutable, ClusterSupport support) {
+        int status = resourceTable.getClusterStatus();
+        if (immutable)
+            status |= ClusterStatus.ImmutableMaskSet;
+        else
+            status &= ClusterStatus.ImmutableMaskClear;
+        resourceTable.setClusterStatus(status);
+        support.setImmutable(this, immutable);
+    }
+    
+    @Override
+    public ClusterTables store() throws IOException {
+
+       ClusterTables result = new ClusterTables();
+
+       int[] currentHeader = Arrays.copyOf(headerTable, INT_HEADER_SIZE);
+
+       int byteSize = valueTable.getTableSize();
+       byte[] byteBytes = new byte[byteSize];
+       valueTable.store(byteBytes, 0);
+               
+       //FileUtils.writeFile(bytes, valueTable.table);
+       
+       result.bytes = byteBytes;
+               
+       int longSize = LONG_HEADER_SIZE + resourceTable.getTableSize() + foreignTable.getTableSize(); 
+       long[] longBytes = new long[longSize];
+
+       longBytes[0] = 0;
+       longBytes[1] = LONG_HEADER_VERSION;
+       longBytes[2] = 0;
+       longBytes[3] = clusterUID.second;
+
+//        Bytes.writeLE8(longBytes, 0, 0);
+//        Bytes.writeLE8(longBytes, 8, LONG_HEADER_VERSION);
+//        Bytes.writeLE8(longBytes, 16, 0);
+//        Bytes.writeLE8(longBytes, 24, clusterUID.second);
+
+       int longPos = resourceTable.store(longBytes, LONG_HEADER_SIZE);
+       foreignTable.store(longBytes, longPos);
+       
+       result.longs = longBytes;
+       
+//     FileUtils.writeFile(longs, longBytes);
+
+       int intSize = INT_HEADER_SIZE + predicateTable.getTableSize() + objectTable.getTableSize() + completeTable.getTableSize();
+       int[] intBytes = new int[intSize];
+       int intPos = INT_HEADER_SIZE;
+       intPos = predicateTable.store(intBytes, intPos);
+       intPos = objectTable.store(intBytes, intPos);
+       intPos = completeTable.store(intBytes, intPos);
+       // write header
+               for(int i=0;i<INT_HEADER_SIZE;i++) {
+                       int v = headerTable[i];
+                       intBytes[i] = v;
+//                     Bytes.writeLE(intBytes, i<<2, v);
+               }
+       
+               result.ints = intBytes;
+
+//     FileUtils.writeFile(ints, intBytes);
+       
+       for(int i=0;i<INT_HEADER_SIZE;i++)
+               headerTable[i] = currentHeader[i];
+       
+       return result;
+       
+    }
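+    // Layout summary of the arrays produced by store() (inferred from the code above):
+    //   longs: [0]=0, [1]=LONG_HEADER_VERSION, [2]=0, [3]=clusterUID.second,
+    //          followed by the resource table and then the foreign table;
+    //   ints:  INT_HEADER_SIZE header words, then predicate, object and complete tables;
+    //   bytes: the raw value table.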
+    
+    @Override
+    protected int getResourceTableCount() {
+       return resourceTable.getTableCount();
+    }
+    @Override
+    public boolean getDeleted() {
+        int status = resourceTable.getClusterStatus();
+        return (status & ClusterStatus.DeletedMaskSet) == ClusterStatus.DeletedMaskSet;
+    }
+    @Override
+    public void setDeleted(boolean deleted, ClusterSupport support) {
+        int status = resourceTable.getClusterStatus();
+        if (deleted)
+            status |= ClusterStatus.DeletedMaskSet;
+        else
+            status &= ClusterStatus.DeletedMaskClear;
+        resourceTable.setClusterStatus(status);
+        support.setDeleted(this, deleted);
+    }
+    @Override
+    public Table<?> getPredicateTable() {
+        return predicateTable;
+    }
+    @Override
+    public Table<?> getForeignTable() {
+        return foreignTable;
+    }
+    @Override
+    public Table<?> getCompleteTable() {
+        return completeTable;
+    }
+    @Override
+    public Table<?> getValueTable() {
+        return valueTable;
+    }
+    @Override
+    public Table<?> getObjectTable() {
+        return objectTable;
+    }
+}
+
+class SizeOfPredicateTable implements ClusterI.ObjectProcedure<Integer> {
+    private final ResourceTable mrResourceTable;
+    private final PredicateTable mrPredicateTable;
+    private int size = 0;
+    SizeOfPredicateTable(ResourceTable resourceTable
+            , PredicateTable predicateTable) {
+        mrResourceTable = resourceTable;
+        mrPredicateTable = predicateTable;
+    }
+    @Override
+    public boolean execute(Integer i, int resourceRef) {
+        int predicateIndex = mrResourceTable.getPredicateIndex(resourceRef);
+        if (0 == predicateIndex)
+            return false; // continue loop
+        size += mrPredicateTable.getPredicateSetSize(predicateIndex);
+        return false; // continue loop
+    }
+    
+    public int getSize() {
+        return size;
+    }
+    
+}
+
+class SizeOfObjectTable implements ClusterI.ObjectProcedure<Integer> {
+    private final ResourceTable mrResourceTable;
+    private final PredicateTable mrPredicateTable;
+    private final ObjectTable mrObjectTable;
+    private int size = 0;
+    SizeOfObjectTable(ResourceTable resourceTable
+            , PredicateTable predicateTable, ObjectTable objectTable) {
+        mrResourceTable = resourceTable;
+        mrPredicateTable = predicateTable;
+        mrObjectTable = objectTable;
+    }
+
+    @Override
+    public boolean execute(Integer i, int resourceRef) {
+        int predicateIndex = mrResourceTable.getPredicateIndex(resourceRef);
+        if (0 == predicateIndex)
+            return false; // continue loop
+        ClusterI.PredicateProcedure<Object> procedure = new PredicateProcedure<Object>() {
+            @Override
+            public boolean execute(Object context, int pRef, int oIndex) {
+                if (ClusterTraits.statementIndexIsDirect(oIndex))
+                    return false; // no table space reserved, continue looping
+                int objectIndex;
+                try {
+                    objectIndex = ClusterTraits.statementIndexGet(oIndex);
+                    size += mrObjectTable.getObjectSetSize(objectIndex);
+                } catch (DatabaseException e) {
+                    e.printStackTrace();
+                }
+                return false; // continue looping
+            }
+        };
+        try {
+            mrPredicateTable.foreachPredicate(predicateIndex, procedure, null, null, null);
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+        }
+        return false; // continue loop
+    }
+    
+    public int getSize() {
+        return size;
+    }
+    
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterImpl.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterImpl.java
new file mode 100644 (file)
index 0000000..353d938
--- /dev/null
@@ -0,0 +1,226 @@
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn.cluster;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.UUID;
+
+import org.simantics.acorn.internal.Change;
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.internal.ClusterSupport2;
+import org.simantics.db.common.utils.Logger;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.exception.InvalidClusterException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.impl.IClusterTable;
+import org.simantics.db.impl.Modifier;
+import org.simantics.db.service.ClusterCollectorPolicy.CollectorCluster;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.db.service.ClusteringSupport.Id;
+import org.simantics.utils.strings.AlphanumComparator;
+
+public abstract class ClusterImpl extends ClusterBase implements Modifier, CollectorCluster {
+    protected static final int LONG_HEADER_SIZE = 7;
+    protected static final long LONG_HEADER_VERSION = 1;
+    protected static ClusterUID checkValidity(long type, long[] longs, int[] ints, byte[] bytes)
+    throws InvalidClusterException {
+        if (longs.length < LONG_HEADER_SIZE)
+            throw new InvalidClusterException("Header size mismatch. Expected=" + ClusterImpl.LONG_HEADER_SIZE + ", got=" + longs.length);
+        if (longs[0] != type)
+            throw new InvalidClusterException("Type mismatch. Expected=" + type + ", got=" + longs[0] + " " + ClusterUID.make(longs[2], longs[3]));
+        if (longs[1] != ClusterImpl.LONG_HEADER_VERSION)
+            throw new InvalidClusterException("Header size mismatch. Expected=" + ClusterImpl.LONG_HEADER_VERSION + ", got=" + longs[1]);
+        return ClusterUID.make(longs[2], longs[3]);
+    }
+    protected static Id getUniqueId(long[] longs) {
+        return new IdImpl(new UUID(longs[3], longs[4]));
+    }
+    static final boolean DEBUG = false;
+    final public IClusterTable clusterTable;
+    // This can be null iff the cluster has been converted to big
+    public Change change = new Change();
+    public ClusterChange cc;
+    public byte[] foreignLookup;
+    
+    private boolean dirtySizeInBytes = true;
+    private long sizeInBytes = 0;
+    
+    protected ClusterImpl() {
+        clusterTable = null;
+    }
+    
+    public ClusterImpl(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport support) {
+        super(support, clusterUID, clusterKey);
+//        SessionImplSocket session = (SessionImplSocket)support.getSession();
+//        if(session != null)
+               this.clusterTable = clusterTable;
+//        else
+    }
+    
+    public static ClusterImpl dummy() {
+       return new ClusterSmall();
+    }
+    
+    public static ClusterImpl make(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport2 support) {
+        return new ClusterSmall(clusterUID, clusterKey, support, clusterTable);
+    }
+    public static ClusterSmall proxy(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, long clusterId, ClusterSupport2 support) {
+        if (DEBUG)
+            new Exception("Cluster proxy for " + clusterUID).printStackTrace();
+        return new ClusterSmall(null, clusterUID, clusterKey, support);
+    }
+    public static ClusterImpl make(IClusterTable clusterTable, long[] longs, int[] ints, byte[] bytes, ClusterSupport2 support, int clusterKey)
+    throws DatabaseException {
+        if (longs[0] == 0)
+            return new ClusterBig(clusterTable, longs, ints, bytes, support, clusterKey);
+        else
+            return new ClusterSmall(clusterTable, longs, ints, bytes, support, clusterKey);
+    }
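+    // Usage sketch (illustrative, not from the original commit):
+    //   ClusterImpl fresh  = ClusterImpl.make(table, uid, key, support);                 // empty small cluster
+    //   ClusterImpl loaded = ClusterImpl.make(table, longs, ints, bytes, support, key);  // deserialize
+    // where longs[0] selects the concrete class: 0 loads a ClusterBig, anything
+    // else a ClusterSmall (see checkValidity for the rest of the long header).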
+
+//    public boolean virtual = false;
+    
+    @Override
+    public boolean hasVirtual() {
+       return false;
+//        return clusterTable.hasVirtual(clusterKey);
+    }
+
+    @Override
+    public void markVirtual() {
+//        clusterTable.markVirtual(clusterKey);
+//        virtual = true;
+    }
+    
+    @Override
+    public boolean isWriteOnly() {
+        return false;
+    }
+    @Override
+    public boolean isLoaded() {
+        return true;
+    }
+    
+    @Override
+    public void resized() {
+        dirtySizeInBytes = true;
+//        if(clusterTable != null)
+//             clusterTable.setDirtySizeInBytes(true);
+    }
+    
+    public long getCachedSize() {
+        if(dirtySizeInBytes) {
+            try {
+                sizeInBytes = getUsedSpace();
+                //System.err.println("recomputed size of cluster " + getClusterId() + " => " + sizeInBytes);
+            } catch (DatabaseException e) {
+                Logger.defaultLogError(e);
+            }
+            dirtySizeInBytes = false;
+        }
+        return sizeInBytes;
+    }
+
+    protected void calculateModifiedId() {
+//        setModifiedId(new IdImpl(UUID.randomUUID()));
+    }
+    
+    public static class ClusterTables {
+       public byte[] bytes;
+       public int[] ints;
+       public long[] longs;
+    }
+    
+    public byte[] storeBytes() throws IOException {
+       throw new UnsupportedOperationException();
+    }
+
+    public ClusterTables store() throws IOException {
+       throw new UnsupportedOperationException();
+    }
+    
+    abstract protected int getResourceTableCount();
+    
+    public String dump(final ClusterSupport support) {
+
+       StringBuilder sb = new StringBuilder();
+       for(int i=1;i<getResourceTableCount();i++) {
+               sb.append(""+i+"\n");
+               final int resourceKey = i;
+               final ArrayList<String> stms = new ArrayList<String>();
+               try {
+                       
+                       byte[] value = getValue(i, support);
+                       if(value != null)
+                               sb.append(" bytes: " + Arrays.toString(value) + "\n");
+                       
+                               forPredicates(i, new PredicateProcedure<Integer>() {
+
+                                       @Override
+                                       public boolean execute(Integer c, final int predicateKey, int objectIndex) {
+                                               
+                                               try {
+                                                       
+                                                       forObjects(resourceKey, predicateKey, objectIndex, new ObjectProcedure<Integer>() {
+
+                                                               @Override
+                                                               public boolean execute(Integer context, int objectKey) throws DatabaseException {
+                                                                       
+                                                                       ClusterUID puid = support.getClusterByResourceKey(predicateKey).getClusterUID();
+                                                                       ClusterUID ouid = support.getClusterByResourceKey(objectKey).getClusterUID();
+                                                                       
+                                                                       stms.add(" " + puid + " " + (predicateKey&0xFFF) + " " + ouid + " " + (objectKey&0xFFF)); 
+                                                                       
+                                                                       return false;
+                                                                       
+                                                               }
+                                                               
+                                                       }, 0, support);
+                                               } catch (DatabaseException e) {
+                                                       e.printStackTrace();
+                                               }
+                                               
+                                               return false;
+                                               
+                                       }
+
+                               },0,support);
+                               
+                               Collections.sort(stms, AlphanumComparator.COMPARATOR);
+                               
+                               for(String s : stms) {
+                                       sb.append(s);
+                                       sb.append("\n");
+                               }
+                               
+                       } catch (DatabaseException e) {
+                               e.printStackTrace();
+                       }
+       }
+       
+       return sb.toString();
+       
+    }
+    
+    abstract public boolean isValueEx(int resourceIndex) throws DatabaseException;
+
+    abstract public ClusterI addRelation(int resourceKey, ClusterUID puid, int predicateKey, ClusterUID ouid, int objectKey, ClusterSupport support) throws DatabaseException;
+    
+    @Override
+    public IClusterTable getClusterTable() {
+        return clusterTable;
+    }
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterSmall.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterSmall.java
new file mode 100644 (file)
index 0000000..726071d
--- /dev/null
@@ -0,0 +1,1304 @@
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn.cluster;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.internal.ClusterStream;
+import org.simantics.acorn.internal.ClusterSupport2;
+import org.simantics.acorn.internal.DebugPolicy;
+import org.simantics.db.Resource;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.exception.ExternalValueException;
+import org.simantics.db.exception.ValidationException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.impl.ClusterTraitsBase;
+import org.simantics.db.impl.ForEachObjectContextProcedure;
+import org.simantics.db.impl.ForEachObjectProcedure;
+import org.simantics.db.impl.ForPossibleRelatedValueContextProcedure;
+import org.simantics.db.impl.ForPossibleRelatedValueProcedure;
+import org.simantics.db.impl.IClusterTable;
+import org.simantics.db.impl.Table;
+import org.simantics.db.impl.TableHeader;
+import org.simantics.db.impl.graph.ReadGraphImpl;
+import org.simantics.db.procedure.AsyncContextMultiProcedure;
+import org.simantics.db.procedure.AsyncMultiProcedure;
+import org.simantics.db.procore.cluster.ClusterMapSmall;
+import org.simantics.db.procore.cluster.ClusterTraits;
+import org.simantics.db.procore.cluster.ClusterTraitsSmall;
+import org.simantics.db.procore.cluster.CompleteTableSmall;
+import org.simantics.db.procore.cluster.ForeignTableSmall;
+import org.simantics.db.procore.cluster.ObjectTable;
+import org.simantics.db.procore.cluster.OutOfSpaceException;
+import org.simantics.db.procore.cluster.PredicateTable;
+import org.simantics.db.procore.cluster.ResourceTableSmall;
+import org.simantics.db.procore.cluster.ValueTableSmall;
+import org.simantics.db.service.Bytes;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.db.service.ResourceUID;
+import org.simantics.utils.datastructures.Callback;
+
+import gnu.trove.map.hash.TIntShortHashMap;
+import gnu.trove.procedure.TIntProcedure;
+import gnu.trove.set.hash.TIntHashSet;
+
+final public class ClusterSmall extends ClusterImpl {
+    private static final int TABLE_HEADER_SIZE = TableHeader.HEADER_SIZE + TableHeader.EXTRA_SIZE;
+    private static final int RESOURCE_TABLE_OFFSET = 0;
+    private static final int PREDICATE_TABLE_OFFSET = RESOURCE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int OBJECT_TABLE_OFFSET = PREDICATE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int VALUE_TABLE_OFFSET = OBJECT_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int FLAT_TABLE_OFFSET = VALUE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int COMPLETE_TABLE_OFFSET = FLAT_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int FOREIGN_TABLE_OFFSET = COMPLETE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+    private static final int INT_HEADER_SIZE = FOREIGN_TABLE_OFFSET + TABLE_HEADER_SIZE;
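+    // Offset arithmetic (illustrative): each table reserves TABLE_HEADER_SIZE header
+    // ints, so with H = TABLE_HEADER_SIZE the offsets advance in fixed steps,
+    // 0, H, 2H, ..., 6H, and INT_HEADER_SIZE = 7H ints for the combined header block.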
+    private final int clusterBits;
+    private final ResourceTableSmall resourceTable;
+    private final PredicateTable predicateTable;
+    private final ObjectTable objectTable;
+    private final ValueTableSmall valueTable;
+    private final ForeignTableSmall foreignTable;
+    private final CompleteTableSmall completeTable;
+    private final ClusterMapSmall clusterMap;
+    private final int[] headerTable;
+    public final ClusterSupport2 clusterSupport;
+    private boolean proxy;
+    private boolean deleted = false;
+    
+    protected ClusterSmall() {
+        this.proxy = true;
+        this.headerTable = null;
+        this.resourceTable = null;
+        this.foreignTable = null;
+        this.predicateTable = null;
+        this.objectTable = null;
+        this.valueTable = null;
+        this.completeTable = null;
+        this.clusterMap = null;
+        this.clusterSupport = null;
+        this.clusterBits = 0;
+        this.importance = 0;
+    }
+    
+    public ClusterSmall(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport2 support) {
+        super(clusterTable, clusterUID, clusterKey, support);
+        if(DebugPolicy.REPORT_CLUSTER_EVENTS)
+            new Exception(clusterUID.toString()).printStackTrace();
+        this.proxy = true;
+        this.headerTable = null;
+        this.resourceTable = null;
+        this.foreignTable = null;
+        this.predicateTable = null;
+        this.objectTable = null;
+        this.valueTable = null;
+        this.completeTable = null;
+        this.clusterMap = null;
+        this.clusterSupport = support;
+        this.clusterBits = 0;
+        this.importance = 0;
+//        new Exception("ClusterSmall " + clusterKey).printStackTrace();
+    }
+    ClusterSmall(ClusterUID clusterUID, int clusterKey, ClusterSupport2 support, IClusterTable clusterTable) {
+        super(clusterTable, clusterUID, clusterKey, support);
+        if(DebugPolicy.REPORT_CLUSTER_EVENTS)
+            new Exception(clusterUID.toString()).printStackTrace();
+        this.proxy = false;
+        this.clusterSupport = support;
+        this.headerTable = new int[INT_HEADER_SIZE];
+        this.resourceTable = new ResourceTableSmall(this, headerTable, RESOURCE_TABLE_OFFSET);
+        this.foreignTable = new ForeignTableSmall(this, headerTable, FOREIGN_TABLE_OFFSET);
+        this.predicateTable = new PredicateTable(this, headerTable, PREDICATE_TABLE_OFFSET);
+        this.objectTable = new ObjectTable(this, headerTable, OBJECT_TABLE_OFFSET);
+        this.valueTable = new ValueTableSmall(this, headerTable, VALUE_TABLE_OFFSET);
+        this.completeTable = new CompleteTableSmall(this, headerTable, COMPLETE_TABLE_OFFSET);
+        this.clusterMap = new ClusterMapSmall(this, foreignTable);
+        this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey);
+//        if(clusterTable != null)
+//             this.importance = -clusterTable.timeCounter();
+//        else
+               this.importance = 0;
+//        new Exception("ClusterSmall " + clusterKey).printStackTrace();
+    }
+    protected ClusterSmall(IClusterTable clusterTable, long[] longs, int[] ints, byte[] bytes, ClusterSupport2 support, int clusterKey)
+    throws DatabaseException {
+        super(clusterTable, checkValidity(-1, longs, ints, bytes), clusterKey, support);
+        this.proxy = false;
+        this.clusterSupport = support;
+        if (ints.length < INT_HEADER_SIZE)
+            throw new IllegalArgumentException("Integer table too small for cluster.");
+        this.headerTable = ints;
+        if(DebugPolicy.REPORT_CLUSTER_EVENTS) new Exception(Long.toString(clusterId)).printStackTrace();
+        this.resourceTable = new ResourceTableSmall(this, ints, RESOURCE_TABLE_OFFSET, longs);
+        this.foreignTable = new ForeignTableSmall(this, headerTable, FOREIGN_TABLE_OFFSET, longs);
+        this.predicateTable = new PredicateTable(this, ints, PREDICATE_TABLE_OFFSET, ints);
+        this.objectTable = new ObjectTable(this, ints, OBJECT_TABLE_OFFSET, ints);
+        this.valueTable = new ValueTableSmall(this, ints, VALUE_TABLE_OFFSET, bytes);
+        this.completeTable = new CompleteTableSmall(this, headerTable, COMPLETE_TABLE_OFFSET, ints);
+        this.clusterMap = new ClusterMapSmall(this, foreignTable);
+        this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey);
+//        if(clusterTable != null) {
+//             this.importance = clusterTable.timeCounter();
+//             clusterTable.markImmutable(this, getImmutable());
+//        }
+//        new Exception("ClusterSmall " + clusterKey).printStackTrace();
+    }
+    void analyse() {
+        System.out.println("Cluster " + clusterId);
+        System.out.println("-size:" + getUsedSpace());
+        System.out.println(" -rt:" + (resourceTable.getTableCapacity() * 8 + 8));
+        System.out.println(" -ft:" + foreignTable.getTableCapacity() * 8);
+        System.out.println(" -pt:" + predicateTable.getTableCapacity() * 4);
+        System.out.println(" -ot:" + objectTable.getTableCapacity() * 4);
+        System.out.println(" -ct:" + completeTable.getTableCapacity() * 4);
+        System.out.println(" -vt:" + valueTable.getTableCapacity());
+
+        System.out.println("-resourceTable:");
+        System.out.println(" -resourceCount=" + resourceTable.getResourceCount());
+        System.out.println(" -size=" + resourceTable.getTableSize());
+        System.out.println(" -capacity=" + resourceTable.getTableCapacity());
+        System.out.println(" -count=" + resourceTable.getTableCount());
+        System.out.println(" -size=" + resourceTable.getTableSize());
+        //resourceTable.analyse();
+    }
+    public void checkDirectReference(int dr)
+    throws DatabaseException {
+        if (!ClusterTraits.statementIndexIsDirect(dr))
+            throw new ValidationException("Reference is not direct. Reference=" + dr);
+        if (ClusterTraits.isFlat(dr))
+            throw new ValidationException("Reference is flat. Reference=" + dr);
+        if (ClusterTraits.isLocal(dr)) {
+            if (dr < 1 || dr > resourceTable.getUsedSize())
+                throw new ValidationException("Illegal local reference. Reference=" + dr);
+        } else {
+            int fi = ClusterTraits.getForeignIndexFromReference(dr);
+            int ri = ClusterTraits.getResourceIndexFromForeignReference(dr);
+            if (fi < 1 || fi > foreignTable.getUsedSize())
+                throw new ValidationException("Illegal foreign reference. Reference=" + dr + " foreign index=" + fi);
+            if (ri < 1 || ri > ClusterTraits.getMaxNumberOfResources())
+                throw new ValidationException("Illegal foreign reference. Reference=" + dr + " resource index=" + ri);
+        }
+    }
+    public void checkPredicateIndex(int pi)
+    throws DatabaseException {
+        //        predicateTable.checkPredicateSetIndex(this, pi);
+    }
+    public void checkObjectSetReference(int or)
+    throws DatabaseException {
+        if (ClusterTraits.statementIndexIsDirect(or))
+            throw new ValidationException("Illegal object set reference. Reference=" + or);
+        int oi = ClusterTraits.statementIndexGet(or);
+        this.objectTable.checkObjectSetIndex(this, oi);
+    }
+
+    public void checkValueInit()
+    throws DatabaseException {
+        valueTable.checkValueInit();
+    }
+    public void checkValue(int capacity, int index)
+    throws DatabaseException {
+        valueTable.checkValue(capacity, index);
+    }
+    public void checkValueFini()
+    throws DatabaseException {
+        valueTable.checkValueFini();
+    }
+    public void checkForeingIndex(int fi)
+    throws DatabaseException {
+        if (fi < 1 || fi > foreignTable.getUsedSize())
+            throw new ValidationException("Illegal foreign index=" + fi);
+    }
+    public void checkCompleteSetReference(int cr)
+    throws DatabaseException {
+        if (!ClusterTraits.completeReferenceIsMultiple(cr))
+            throw new ValidationException("Illegal complete set reference. Reference=" + cr);
+        int ci = cr;
+        this.completeTable.checkCompleteSetIndex(this, ci);
+    }
+    public void check()
+    throws DatabaseException {
+//        this.completeTable.check(this);
+//        this.objectTable.check(this);
+//        // Must be after object table check.
+//        this.predicateTable.check(this);
+//        this.resourceTable.check(this);
+    }
+    @Override
+    public CompleteTypeEnum getCompleteType(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        final int resourceRef = getLocalReference(resourceKey);
+        CompleteTypeEnum ct = resourceTable.getCompleteType(resourceRef);
+        if (DEBUG)
+            System.out.println("ClusterSmall.getCompleteType rk=" + resourceKey + " ct=" + ct);
+        return ct;
+    }
+
+    @Override
+    public int getCompleteObjectKey(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        final int resourceIndexOld = getLocalReference(resourceKey);
+        short completeRef = resourceTable.getCompleteObjectRef(resourceIndexOld);
+        int clusterIndex;
+        int resourceIndex;
+        if (0 == completeRef)
+            throw new DatabaseException("Resource's complete object refernce is null. Resource key=" + resourceKey + ".");
+        ClusterI.CompleteTypeEnum completeType = resourceTable.getCompleteType(resourceIndexOld);
+        if (completeType == ClusterI.CompleteTypeEnum.NotComplete)
+            throw new DatabaseException("Resource has multiple complete objects. Resource key=" + resourceKey + ".");
+        if (ClusterTraitsSmall.resourceRefIsLocal(completeRef)) {
+            clusterIndex = clusterKey;
+            resourceIndex = completeRef;
+        } else { // Resource has one complete statement.
+            ResourceUID resourceUID = clusterMap.getForeignResourceUID(completeRef);
+            ClusterUID uid = resourceUID.asCID();
+            clusterIndex = clusterSupport.getClusterKeyByUID(0, uid.second);
+            //ClusterI c = clusterTable.getClusterByClusterUIDOrMakeProxy(uid);
+            //clusterIndex = c.getClusterKey();
+            //assert(clusterIndex == clusterTable.getClusterByClusterUIDOrMakeProxy(uid).getClusterKey());
+            resourceIndex = resourceUID.getIndex();
+        }
+        int key = ClusterTraits.createResourceKey(clusterIndex, resourceIndex);
+        if (DEBUG)
+            System.out.println("ClusterSmall.complete object rk=" + resourceKey + " ck=" + key);
+        return key;
+    }
+
+    @Override
+    public boolean isComplete(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        final int resourceRef = getLocalReference(resourceKey);
+        final ClusterI.CompleteTypeEnum completeType = resourceTable.getCompleteType(resourceRef);
+        boolean complete = completeType != ClusterI.CompleteTypeEnum.NotComplete;
+        if (DEBUG)
+            System.out.println("ClusterSmall.key=" + resourceKey + " isComplete=" + complete);
+        return complete;
+    }
+    public int getSingleObject(int resourceKey, int predicateKey, int objectIndex, ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.getSingleObject: rk=" + resourceKey + " pk=" + predicateKey);
+        if (0 == objectIndex) {
+            final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+            final short pRef = getInternalReferenceOrZero2(predicateKey, support);
+            final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        }
+        return objectTable.getSingleObject(objectIndex, support, this);
+    }
+
+    public void forObjects(ReadGraphImpl graph, int resourceKey, int predicateKey, int objectIndex, AsyncMultiProcedure<Resource> procedure,
+            ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.forObjects1: rk=" + resourceKey + " pk=" + predicateKey);
+        if (0 == objectIndex) {
+            final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+            final int pRef = getInternalReferenceOrZero2(predicateKey, support);
+            final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+            return;
+        }
+        objectTable.foreachObject(graph, objectIndex, procedure, this);
+    }
+
+    public <C> void forObjects(ReadGraphImpl graph, int resourceKey, int predicateKey, int objectIndex, C context, AsyncContextMultiProcedure<C, Resource> procedure,
+            ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.forObjects1: rk=" + resourceKey + " pk=" + predicateKey);
+        if (0 == objectIndex) {
+            final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+            final int pRef = getInternalReferenceOrZero2(predicateKey, support);
+            final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+            resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+            return;
+        }
+        objectTable.foreachObject(graph, objectIndex, context, procedure, this);
+    }
+    
+    @Override
+    public <Context> boolean forObjects(int resourceKey, int predicateKey, int objectIndex, ObjectProcedure<Context> procedure,
+            Context context, ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.forObjects2: rk=" + resourceKey + " pk=" + predicateKey);
+        if (0 == objectIndex) {
+            final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+            final short pRef = getInternalReferenceOrZero2(predicateKey, support);
+            final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+            return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+        }
+        return objectTable.foreachObject(objectIndex, procedure, context, support, this);
+    }
+
+    @Override
+    public int getSingleObject(int resourceKey, int predicateKey, ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey);
+        final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+        final short pRef = getInternalReferenceOrZero2(predicateKey, support);
+        final int completeType = ClusterTraitsBase.getCompleteTypeIntFromResourceKey(predicateKey);
+        final ClusterI.CompleteTypeEnum pCompleteType = CompleteTypeEnum.make(completeType);
+        if (completeType > 0)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
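+        // The low 24 bits of the resource table entry hold the index into the
+        // predicate table (hence the 0xFFFFFF mask below); zero means all of the
+        // resource's statements fit in the resource table itself.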
+        final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+        if (0 == predicateIndex) // All relevant data is in resource table.
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+        return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+    }
+
+    @Override
+    public <T> int getSingleObject(int resourceKey, ForPossibleRelatedValueProcedure<T> procedure, ClusterSupport support) throws DatabaseException {
+        final short resourceIndex = (short)ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+        final int predicateKey = procedure.predicateKey;
+        int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey);
+        short pRef = 0;
+        if(procedure.clusterKey[0] == clusterKey) {
+            pRef = (short)procedure.predicateReference[0];
+        } else {
+            pRef = getInternalReferenceOrZero2(predicateKey, support);
+            procedure.clusterKey[0] = clusterKey;
+            procedure.predicateReference[0] = pRef;
+        }
+        
+        final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType;
+        if (CompleteTypeEnum.NotComplete != pCompleteType)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+        if (0 == predicateIndex) // All relevant data is in resource table.
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+        return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+    }
+
+    @Override
+    public <C, T> int getSingleObject(int resourceKey, ForPossibleRelatedValueContextProcedure<C, T> procedure, ClusterSupport support) throws DatabaseException {
+        final short resourceIndex = (short)ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+        final int predicateKey = procedure.predicateKey;
+        int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey);
+        short pRef = 0;
+        if(procedure.clusterKey[0] == clusterKey) {
+            pRef = (short)procedure.predicateReference[0];
+        } else {
+            pRef = getInternalReferenceOrZero2(predicateKey, support);
+            procedure.clusterKey[0] = clusterKey;
+            procedure.predicateReference[0] = pRef;
+        }
+        final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType;
+        if (CompleteTypeEnum.NotComplete != pCompleteType)
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+        if (0 == predicateIndex) // All relevant data is in resource table.
+            return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+        return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+    }
+
+    @Override
+    public void forObjects(ReadGraphImpl graph, int resourceKey,
+            int predicateKey, AsyncMultiProcedure<Resource> procedure) throws DatabaseException {
+       
+       throw new UnsupportedOperationException();
+       
+//        SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//        ClusterSupport support = session.clusterTranslator;
+//        if (DEBUG)
+//            System.out.println("ClusterSmall.forObjects3: rk=" + resourceKey + " pk=" + predicateKey);
+//        final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+//        final int pRef = getInternalReferenceOrZero2(predicateKey, support);
+//        final int completeType = ClusterTraitsBase.getCompleteTypeIntFromResourceKey(predicateKey);
+//        final ClusterI.CompleteTypeEnum pCompleteType = CompleteTypeEnum.make(completeType);
+//        if (completeType > 0) {
+//            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+//        if (0 == predicateIndex) {
+//            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+//        forObjects(graph, resourceKey, predicateKey, objectIndex, procedure, support);
+    }
+
+    public void forObjects(ReadGraphImpl graph, int resourceKey, ForEachObjectProcedure procedure) throws DatabaseException {
+       
+       throw new UnsupportedOperationException();
+
+//        final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+//        final int predicateKey = procedure.predicateKey;
+//        int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey);
+//        int pRef = 0;
+//        if(procedure.clusterKey[0] == clusterKey) {
+//            pRef = procedure.predicateReference[0];
+//        } else {
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            pRef = getInternalReferenceOrZero2(predicateKey, support);
+//            procedure.clusterKey[0] = clusterKey;
+//            procedure.predicateReference[0] = pRef;
+//        }
+//        final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType;
+//        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) {
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+//        if (0 == predicateIndex) {
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        int hashBase = predicateIndex + predicateTable.offset;
+//        if (predicateTable.table[hashBase-1] < 0) {
+//            int objectIndex = TableIntArraySet2.get(predicateTable.table, hashBase, pRef & 0xFFFF);
+//            //int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            forObjects(graph, resourceKey, predicateKey, objectIndex, procedure, support);
+//        } else {
+//            procedure.finished(graph);
+////            graph.dec();
+//        }
+    }
+
+    public <C> void forObjects(ReadGraphImpl graph, int resourceKey, C context, ForEachObjectContextProcedure<C> procedure) throws DatabaseException {
+       
+       throw new UnsupportedOperationException();
+
+//        final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+//        final int predicateKey = procedure.predicateKey;
+//        int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey);
+//        int pRef = 0;
+//        if(procedure.clusterKey[0] == clusterKey) {
+//            pRef = procedure.predicateReference[0];
+//        } else {
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            pRef = getInternalReferenceOrZero2(predicateKey, support);
+//            procedure.clusterKey[0] = clusterKey;
+//            procedure.predicateReference[0] = pRef;
+//        }
+//        
+//        final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType;
+//        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) {
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+//        if (0 == predicateIndex) {
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+//            return;
+//        }
+//        int hashBase = predicateIndex + predicateTable.offset;
+//        if(predicateTable.table[hashBase-1] < 0) {
+//            int objectIndex = TableIntArraySet2.get(predicateTable.table, hashBase, pRef & 0xFFFF);
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            forObjects(graph, resourceKey, predicateKey, objectIndex, context, procedure, support);
+//        } else {
+//            int objectIndex = TableIntSet2.get(predicateTable.table, hashBase, pRef & 0xFFFF);
+//            SessionImplSocket session = (SessionImplSocket)graph.getSession();
+//            ClusterSupport support = session.clusterTranslator;
+//            forObjects(graph, resourceKey, predicateKey, objectIndex, context, procedure, support);
+//        }
+    }
+    @Override
+    public <Context> boolean forObjects(int resourceKey, int predicateKey,
+            ObjectProcedure<Context> procedure, Context context, ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.forObjects4: rk=" + resourceKey + " pk=" + predicateKey);
+        final short resourceIndex = (short)ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+        final short pRef = getInternalReferenceOrZero2(predicateKey, support);
+        final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+        // PredicateType is complete i.e. all relevant data is in resource table.
+        if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) { 
+            if (DEBUG)
+                System.out.println("ClusterSmall.forObjects: complete type was " + pCompleteType + " cluster=" + getClusterUID());
+            return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+        }
+        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+        if (0 == predicateIndex) { // All relevant data is in resource table.
+            if (DEBUG)
+                System.out.println("ClusterSmall.forObjects: no predicate table " + pCompleteType);
+            return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+        }
+        int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+        return forObjects(resourceKey, predicateKey, objectIndex, procedure, context, support);
+    }
+    @Override
+    public <Context> boolean forPredicates(int resourceKey,
+            PredicateProcedure<Context> procedure, Context context, ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.forPredicates: rk=" + resourceKey );
+        final int resourceIndex = getLocalReference(resourceKey);
+        final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+        if (0 == predicateIndex)
+            return resourceTable.foreachPredicate(resourceIndex,
+                    procedure, context, support, this, completeTable);
+        else {
+            boolean broken = resourceTable.foreachPredicate(resourceIndex,
+                    procedure, context, support, this, completeTable);
+            if (broken)
+                return true;
+        }
+        return predicateTable.foreachPredicate(predicateIndex,
+                procedure, context, support, this);
+    }
+    
+    @Override
+    public ClusterI addRelation(int sResourceKey, ClusterUID puid, int pResourceKey, ClusterUID ouid, int oResourceKey, ClusterSupport support) throws DatabaseException {
+
+        if(proxy) {
+               throw new UnsupportedOperationException();
+//            ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey);
+//            return cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support);
+        }
+
+        //        check();
+        boolean ret;
+        try {
+            short sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION);
+            short pri = getReferenceOrCreateIfForeign(pResourceKey, puid, support, ClusterStream.NULL_OPERATION);
+            short ori = getReferenceOrCreateIfForeign(oResourceKey, ouid, support, ClusterStream.NULL_OPERATION);
+            ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+            ret = addRelationInternal(sri, pri, ori, completeType);
+            calculateModifiedId();
+        } catch (OutOfSpaceException e) {
+            boolean streamOff = support.getStreamOff();
+            if (!streamOff) {
+                support.cancelStatement(this);
+                support.setStreamOff(true);
+            }
+            ClusterI cluster = toBig(clusterSupport);
+            if (!streamOff)
+                support.setStreamOff(false);
+            ClusterI cluster2 = cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support);
+            if (cluster != cluster2)
+                throw new DatabaseException("Internal error. Contact application support.");
+            return cluster;
+        }
+//        check();
+        if (ret) {
+            support.addStatement(this);
+            return this;
+        } else {
+            support.cancelStatement(this);
+            return null;
+        }
+       
+    }
+
+    @Override
+    public ClusterI addRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support) throws DatabaseException {
+       
+        if (DEBUG)
+            System.out.println("add rk=" + sResourceKey + " pk=" + pResourceKey + " ok=" + oResourceKey);
+        
+        if(proxy) {
+               throw new UnsupportedOperationException();
+//            ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey);
+//            return cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support);
+        }
+
+        //        check();
+        boolean ret;
+        try {
+            short sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION);
+            short pri = getReferenceOrCreateIfForeign(pResourceKey, support, ClusterStream.NULL_OPERATION);
+            short ori = getReferenceOrCreateIfForeign(oResourceKey, support, ClusterStream.NULL_OPERATION);
+            ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+            ret = addRelationInternal(sri, pri, ori, completeType);
+            calculateModifiedId();
+        } catch (OutOfSpaceException e) {
+            boolean streamOff = support.getStreamOff();
+            if (!streamOff) {
+                support.cancelStatement(this);
+                support.setStreamOff(true);
+            }
+            ClusterI cluster = toBig(clusterSupport);
+            if (!streamOff)
+                support.setStreamOff(false);
+            ClusterI cluster2 = cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support);
+            if (cluster != cluster2)
+                throw new DatabaseException("Internal error. Contact application support.");
+            return cluster;
+        }
+//        check();
+        if (ret) {
+            support.addStatement(this);
+            return this;
+        } else {
+            support.cancelStatement(this);
+            return null;
+        }
+    }
+    @Override
+    public boolean removeRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support)
+    throws DatabaseException {
+        //        check();
+        short sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.REMOVE_OPERATION);
+        short pri = getInternalReferenceOrZeroAnd(pResourceKey, support, ClusterStream.NULL_OPERATION);
+        short ori = getInternalReferenceOrZeroAnd(oResourceKey, support, ClusterStream.NULL_OPERATION);
+        boolean ret = false;
+        if (0 != pri && 0 != ori) {
+            ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+            ret = removeRelationInternal(sri, pri, ori, completeType, support);
+            calculateModifiedId();
+        }
+        if (ret)
+            support.removeStatement(this);
+        else
+            support.cancelStatement(this);
+        //        check();
+        return ret;
+    }
+    @Override
+    public void denyRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support)
+    throws DatabaseException {
+        short s = checkResourceKeyIsOursAndGetResourceIndexIf(sResourceKey, support);
+        ResourceReferenceAndCluster p = checkResourceKeyAndGetResourceIndexIf(pResourceKey, support);
+        ResourceReferenceAndCluster o = checkResourceKeyAndGetResourceIndexIf(oResourceKey, support);
+        if (0 == s || 0 == p.reference || 0 == o.reference)
+            return;
+        //        check();
+        ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+        boolean ret = removeRelationInternal(s, p.reference, o.reference, completeType, support);
+        if (ret) {
+            support.addStatementIndex(this, sResourceKey, getClusterUID(), ClusterChange.REMOVE_OPERATION);
+            support.addStatementIndex(this, pResourceKey, p.clusterUID, ClusterStream.NULL_OPERATION);
+            support.addStatementIndex(this, oResourceKey, o.clusterUID, ClusterStream.NULL_OPERATION);
+            support.removeStatement(this);
+        }
+        calculateModifiedId();
+        //        check();
+        return;
+    }
+    @Override
+    public InputStream getValueStream(int resourceKey, ClusterSupport support) throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.getValue " + resourceKey);
+        int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+        try {
+            byte[] buffer = resourceTable.getValue(valueTable, resourceIndex);
+            if(buffer == null) return null;
+            return new ByteArrayInputStream(buffer);
+        } catch (ExternalValueException e) {
+            return support.getValueStreamEx(resourceIndex, clusterUID.second);
+        }
+    }
+    @Override
+    public byte[] getValue(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("ClusterSmall.getValue " + resourceKey);
+        int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+        try {
+            return resourceTable.getValue(valueTable, resourceIndex);
+        } catch (ExternalValueException e) {
+               return clusterSupport.impl.getResourceFile(clusterUID.asBytes(), resourceIndex);
+            //return support.getValueEx(resourceIndex, clusterUID.second);
+        }
+    }
+    @Override
+    public boolean hasValue(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReference(resourceKey);
+        return resourceTable.hasValue(resourceIndex);
+    }
+    @Override
+    public boolean removeValue(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReferenceAnd(resourceKey, support, ClusterChange.DELETE_OPERATION);
+        support.removeValue(this);
+        calculateModifiedId();
+        return resourceTable.removeValue(valueTable, resourceIndex);
+    }
+    @Override
+    public ClusterI setValue(int rResourceId, byte[] value, int length, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.SET_OPERATION);
+        support.setValue(this, getClusterId(), value, length);
+        try {
+            resourceTable.setValue(valueTable, resourceIndex, value, length);
+            calculateModifiedId();
+            return this;
+        } catch (OutOfSpaceException e) {
+            boolean streamOff = support.getStreamOff();
+            if (!streamOff)
+                support.setStreamOff(true);
+            ClusterI cluster = toBig(support);
+            cluster.setValue(rResourceId, value, length, support);
+            if (!streamOff)
+                support.setStreamOff(false);
+            return cluster;
+        }
+    }
+    @Override
+    public ClusterI modiValueEx(int rResourceId, long voffset, int length, byte[] value, int offset, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.MODI_OPERATION);
+        support.modiValue(this, getClusterId(), voffset, length, value, offset);
+        resourceTable.setValueEx(valueTable, resourceIndex);
+        calculateModifiedId();
+        return this;
+    }
+    @Override
+    public byte[] readValueEx(int rResourceId, long voffset, int length, ClusterSupport support)
+    throws DatabaseException {
+        int resourceIndex = getLocalReference(rResourceId);
+        boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex);
+        if (!isExternal)
+            throw new DatabaseException("ClusterI.readValue supported only for external value. Resource key=" + rResourceId);
+        return support.getValueEx(resourceIndex, getClusterId(), voffset, length);
+    }
+    @Override
+    public boolean isValueEx(int resourceKey) throws DatabaseException {
+        int resourceIndex = getLocalReference(resourceKey);
+       return resourceTable.isValueEx(valueTable, resourceIndex);
+    }
+    @Override
+    public long getValueSizeEx(int rResourceId, ClusterSupport support)
+    throws DatabaseException, ExternalValueException {
+        int resourceIndex = getLocalReference(rResourceId);
+        boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex);
+        if (!isExternal)
+            throw new ExternalValueException("ClusterI.getValueSizeEx supported only for external value. Resource key=" + rResourceId);
+        return support.getValueSizeEx(resourceIndex, getClusterId());
+    }
+    @Override
+    public void setValueEx(int rResourceId)
+    throws DatabaseException {
+        int resourceIndex = getLocalReference(rResourceId);
+        resourceTable.setValueEx(valueTable, resourceIndex);
+    }
+    @Override
+    public int createResource(ClusterSupport support)
+    throws DatabaseException {
+
+        if(proxy) {
+               throw new UnsupportedOperationException();
+//             ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey);
+//            return cluster.createResource(support);
+        }
+        
+        short resourceIndex = resourceTable.createResource();
+        calculateModifiedId();
+        if(DebugPolicy.REPORT_RESOURCE_ID_ALLOCATION)
+            System.out.println("[RID_ALLOCATION]: ClusterSmall[" + clusterId + "] allocates " + resourceIndex);
+        support.createResource(this, resourceIndex, getClusterId());
+        return ClusterTraits.createResourceKey(clusterKey, resourceIndex);
+    }
+    @Override
+    public boolean hasResource(int resourceKey, ClusterSupport support) {
+        int clusterKey = ClusterTraitsBase.getClusterKeyFromResourceKeyNoThrow(resourceKey);
+        if (this.clusterKey != clusterKey) // foreign resource
+            return false;
+        int resourceIndex;
+        try {
+            resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        } catch (DatabaseException e) {
+            return false;
+        }
+        if (resourceIndex > 0 & resourceIndex <= resourceTable.getTableCount())
+            return true;
+        else
+            return false;
+    }
+    @Override
+    public int getNumberOfResources(ClusterSupport support)
+    throws DatabaseException  {
+        
+        if(proxy) {
+               throw new UnsupportedOperationException();
+//            ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey);
+//            return cluster.getNumberOfResources(support);
+        }
+        
+        return resourceTable.getUsedSize();
+    }
+
+    public int getNumberOfResources() {
+        
+        if(proxy) throw new IllegalStateException();
+        
+        return resourceTable.getUsedSize();
+        
+    }
+
+    @Override
+    public long getUsedSpace() {
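+        // Table capacities are entry counts; the multipliers below are the entry
+        // widths in bytes (long tables = 8, int tables = 4, value bytes = 1).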
+        if(isEmpty()) return 0;
+        long rt = resourceTable.getTableCapacity() * 8 + 8; // (8 = cluster id)
+        long ft = foreignTable.getTableCapacity() * 8;
+        long pt = predicateTable.getTableCapacity() * 4;
+        long ot = objectTable.getTableCapacity() * 4;
+        long ct = completeTable.getTableCapacity() * 4;
+        long vt = valueTable.getTableCapacity() * 1;
+        long cm = clusterMap.getUsedSpace();
+        return rt + ft + pt + ot + ct + vt + cm;
+    }
+    @Override
+    public boolean isEmpty() {
+        if(resourceTable == null) return true;
+        return resourceTable.getTableCount() == 0;
+    }
+    @Override
+    public void printDebugInfo(String message, ClusterSupport support)
+    throws DatabaseException {
+        throw new DatabaseException("Not implemented!");
+    }
+    private short getInternalReferenceOrZero2(int resourceKey, ClusterSupport support) throws DatabaseException {
+        int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+        if (!ClusterTraitsBase.isCluster(clusterBits, resourceKey)) {
+            return clusterMap.getForeignReferenceOrZero(resourceKey);
+        } else {
+            return (short)resourceIndex;
+        }
+    }
+    private short getInternalReferenceOrZeroAnd(int resourceKey, ClusterSupport support, byte op)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) { // foreign resource
+            ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey);
+            short foreignRef = clusterMap.getForeignReferenceOrZero(resourceKey);
+            support.addStatementIndex(this, resourceKey, clusterUID, op);
+            return foreignRef;
+        }
+        support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+        return (short)resourceIndex;
+    }
+    private final short getLocalReference(int resourceKey) throws DatabaseException {
+        return ClusterTraits.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+    }
+    private final short getLocalReferenceAnd(int resourceKey, ClusterSupport support, byte op)
+    throws DatabaseException {
+        short resourceIndex = getLocalReference(resourceKey);
+        support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+        return resourceIndex;
+    }
+    private short checkResourceKeyIsOursAndGetResourceIndexIf(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        int clusterShortId = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterShortId)
+            return 0;
+        int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        return (short)resourceIndex;
+    }
+    private short getReferenceOrCreateIfForeign(int resourceKey, ClusterUID clusterUID, ClusterSupport support, byte op)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        short resourceIndex = (short)ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) {
+            support.addStatementIndex(this, resourceKey, clusterUID, op);
+            short ref = clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID);
+            return ref;
+        }
+        support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+        return resourceIndex;
+    }
+    private short getReferenceOrCreateIfForeign(int resourceKey, ClusterSupport support, byte op)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        short resourceIndex = (short)ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) {
+            ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey);
+            support.addStatementIndex(this, resourceKey, clusterUID, op);
+            short ref = clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID);
+            return ref;
+        }
+        support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+        return resourceIndex;
+    }
+    private class ResourceReferenceAndCluster {
+        ResourceReferenceAndCluster(short reference, ClusterUID clusterUID) {
+            this.reference = reference;
+            this.clusterUID = clusterUID;
+        }
+        public final short reference;
+        public final ClusterUID clusterUID;
+    }
+    private ResourceReferenceAndCluster checkResourceKeyAndGetResourceIndexIf(int resourceKey, ClusterSupport support)
+    throws DatabaseException {
+        int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+        short resourceIndex = (short)ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+        if (this.clusterKey != clusterKey) { // foreign resource
+            ClusterI foreignCluster = support.getClusterByClusterKey(clusterKey);
+            ClusterUID clusterUID = foreignCluster.getClusterUID();
+            short ref = clusterMap.getForeignReferenceOrZero(resourceKey);
+            return new ResourceReferenceAndCluster(ref, clusterUID);
+        }
+        return new ResourceReferenceAndCluster(resourceIndex, getClusterUID());
+    }
+
+    static long fTime = 0;
+    
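+    // Resolves a short internal resource reference into a full resource key:
+    // local references are combined with this cluster's key bits, while
+    // foreign references are resolved through the foreign table to their
+    // owning cluster.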
+    @Override
+    final public int execute(int resourceReference) throws DatabaseException {
+        short resourceRef = (short)resourceReference;
+        int key;
+        if (ClusterTraitsSmall.resourceRefIsLocal(resourceRef)) {
+            key = clusterBits | resourceRef;
+        } else {
+            short foreignIndex = ClusterTraitsSmall.resourceRefGetForeignIndex((short)resourceRef);
+            //long start = System.nanoTime();
+            ResourceUID resourceUID = foreignTable.getResourceUID(foreignIndex);
+            int clusterKey = clusterSupport.getClusterKeyByClusterUIDOrMake(resourceUID.asCID());
+//            ClusterBase cluster = clusterSupport.getClusterByClusterUIDOrMake(resourceUID.asCID());
+            key = ClusterTraitsBase.createResourceKey(clusterKey, resourceUID.getIndex());
+            //fTime += System.nanoTime() - start;
+            //System.err.println("fTime: " + 1e-9*fTime);
+        }
+        if (DEBUG)
+            System.out.println("ClusterSmall.execute key=" + key);
+        return key;
+    }
+
+    private boolean addRelationInternal(short sReference, short pReference, short oReference, ClusterI.CompleteTypeEnum completeType)
+    throws DatabaseException {
+        int predicateIndex = resourceTable.addStatement(sReference, pReference, oReference, predicateTable, objectTable, completeType, completeTable);
+        if (0 == predicateIndex)
+            return true; // added to resourceTable
+        else if (0 > predicateIndex)
+            return false; // old complete statement
+        int newPredicateIndex = predicateTable.addPredicate(predicateIndex, 0xFFFF & pReference, 0xFFFF & oReference, objectTable);
+        if (0 == newPredicateIndex)
+            return false;
+        if (predicateIndex != newPredicateIndex)
+            resourceTable.setPredicateIndex(sReference, newPredicateIndex);
+        return true;
+    }
+    private boolean removeRelationInternal(int sResourceIndex, short pResourceIndex,
+            short oResourceIndex, ClusterI.CompleteTypeEnum completeType, ClusterSupport support)
+    throws DatabaseException {
+        int predicateIndex = resourceTable.getPredicateIndex(sResourceIndex);
+        if (0 == predicateIndex || ClusterI.CompleteTypeEnum.NotComplete != completeType)
+            return resourceTable.removeStatementFromCache(sResourceIndex,
+                    pResourceIndex, oResourceIndex, completeType, completeTable);
+        PredicateTable.Status ret = predicateTable.removePredicate(predicateIndex, 0xFFFF & pResourceIndex, 0xFFFF & oResourceIndex, objectTable);
+        switch (ret) {
+        case NothingRemoved:
+            return false;
+        case PredicateRemoved: {
+            if (0 == predicateTable.getPredicateSetSize(predicateIndex))
+                resourceTable.setPredicateIndex(sResourceIndex, 0);
+            // intentional fall-through to the next case
+        } default:
+            break;
+        }
+        resourceTable.removeStatement(sResourceIndex,
+                pResourceIndex, oResourceIndex,
+                completeType, completeTable,
+                predicateTable, objectTable, support);
+        return true;
+    }
+    @Override
+    public void load() {
+        throw new Error("Not supported.");
+    }
+
+    @Override
+    public void load(Callback<DatabaseException> r) {
+        throw new Error("Not supported.");
+    }
+
+    public boolean contains(int resourceKey) {
+        return ClusterTraitsBase.isCluster(clusterBits, resourceKey);
+    }
+    @Override
+    public void load(final ClusterSupport support, final Runnable callback) {
+
+       throw new UnsupportedOperationException();
+
+//     try {
+//            clusterTable.load2(clusterId, clusterKey);
+//            callback.run();
+//        } catch (DatabaseException e) {
+//            e.printStackTrace();
+//        }
+        
+    }
+    @Override
+    public ClusterI getClusterByResourceKey(int resourceKey,
+            ClusterSupport support) {
+        throw new Error();
+    }
+    @Override
+    public void increaseReferenceCount(int amount) {
+        throw new Error();
+    }
+    @Override
+    public void decreaseReferenceCount(int amount) {
+        throw new Error();
+    }
+    @Override
+    public int getReferenceCount() {
+        throw new Error();
+    }
+    @Override
+    public void releaseMemory() {
+    }
+    @Override
+    public void compact() {
+        clusterMap.compact();
+    }
+    @Override
+    public boolean isLoaded() {
+        return !proxy;
+    }
+
+//    public ClusterImpl tryLoad(SessionImplSocket sessionImpl) {
+//
+//     throw new UnsupportedOperationException();
+//        assert(Constants.ReservedClusterId != clusterId);
+//
+//        return clusterTable.tryLoad(clusterId, clusterKey);
+//        
+//    }
+    
+
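+    // Called when this small cluster can no longer fit new data (see the
+    // OutOfSpaceException handlers above): the contents are migrated into a
+    // ClusterBig and the pending change state (cc, foreignLookup, change) is
+    // handed over and cleared from this instance.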
+    @Override
+    public ClusterBig toBig(ClusterSupport support)
+    throws DatabaseException {
+        if (DEBUG) {
+            System.out.println("DEBUG: toBig cluster=" + clusterId);
+            new Exception().printStackTrace();
+        }
+        ClusterBig big = new ClusterBig(clusterSupport, getClusterUID(), clusterKey, (ClusterSupport2)support);
+        big.cc = this.cc;
+//        if(big.cc != null)
+//             big.cc.clusterImpl = this;
+        resourceTable.toBig(big, support, this);
+        big.foreignLookup = this.foreignLookup;
+        big.change = this.change;
+        this.cc = null;
+        this.foreignLookup = null;
+        this.change = null;
+        return big;
+    }
+    
+    @Override
+    public ClusterTypeEnum getType() {
+        return ClusterTypeEnum.SMALL;
+    }
+    @Override
+    public boolean getImmutable() {
+        int status = resourceTable.getClusterStatus();
+        return (status & ClusterStatus.ImmutableMaskSet) == 1;
+    }
+    @Override
+    public void setImmutable(boolean immutable, ClusterSupport support) {
+        if(resourceTable != null) {
+            int status = resourceTable.getClusterStatus();
+            if (immutable)
+                status |= ClusterStatus.ImmutableMaskSet;
+            else
+                status &= ClusterStatus.ImmutableMaskClear;
+            resourceTable.setClusterStatus(status);
+        }
+        support.setImmutable(this, immutable);
+    }
+    
+    @Override
+    public String toString() {
+        try {
+            final TIntHashSet set = new TIntHashSet();
+            TIntShortHashMap map = foreignTable.getResourceHashMap();
+            map.forEachKey(new TIntProcedure() {
+                @Override
+                public boolean execute(int value) {
+                    set.add(value & 0xfffff000);
+                    return true;
+                }
+            });
+            return "ClusterSmall[" + getClusterUID() + " - " + getClusterId() + " - " + getNumberOfResources() + " - " + foreignTable.getResourceHashMap().size() + " - " + set.size() + "]";
+        } catch (DatabaseException e) {
+            return "ClusterSmall[" + getNumberOfResources() + "]";
+        }
+    }
+    
+    // Memory map
+    // bytes (b) | headers(i) | predicateTable (i) | objectTable (i) | completeTable (i) | resourceTable (l) | foreignTable (l)
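+    // The serialized form is prefixed with three little-endian ints holding
+    // the section sizes (byteSize, intSize, longSize), as written at the top
+    // of storeBytes() below.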
+
+    @Override
+    public byte[] storeBytes() throws IOException {
+
+        int byteSize = valueTable.getTableSize();
+        int longSize = LONG_HEADER_SIZE + resourceTable.getTableSize() + foreignTable.getTableSize();
+        int intSize = INT_HEADER_SIZE + predicateTable.getTableSize() + objectTable.getTableSize() + completeTable.getTableSize();
+
+        byte[] raw = new byte[12 + byteSize + 8*longSize + 4*intSize];
+
+        int[] currentHeader = Arrays.copyOf(headerTable, INT_HEADER_SIZE);
+
+        Bytes.writeLE(raw, 0, byteSize);
+        Bytes.writeLE(raw, 4, intSize);
+        Bytes.writeLE(raw, 8, longSize);
+
+        int rawPos = valueTable.storeBytes(raw, 0, 12);
+
+        int intBase = rawPos;
+
+        rawPos += 4*INT_HEADER_SIZE;
+        rawPos = predicateTable.storeBytes(raw, (rawPos-intBase)>>2, rawPos);
+        rawPos = objectTable.storeBytes(raw, (rawPos-intBase)>>2, rawPos);
+        rawPos = completeTable.storeBytes(raw, (rawPos-intBase)>>2, rawPos);
+
+        int longBase = rawPos;
+
+        rawPos += 8*LONG_HEADER_SIZE;
+        rawPos = resourceTable.storeBytes(raw, (rawPos-longBase)>>3, rawPos);
+        rawPos = foreignTable.storeBytes(raw, (rawPos-longBase)>>3, rawPos);
+
+        Bytes.writeLE8(raw, longBase, -1);
+        Bytes.writeLE8(raw, longBase+8, LONG_HEADER_VERSION);
+        Bytes.writeLE8(raw, longBase+16, 0);
+        Bytes.writeLE8(raw, longBase+24, clusterUID.second);
+
+        // write header
+        for(int i=0;i<INT_HEADER_SIZE;i++) {
+            int v = headerTable[i];
+            Bytes.writeLE(raw, intBase, v);
+            intBase+=4;
+        }
+
+        for(int i=0;i<INT_HEADER_SIZE;i++)
+            headerTable[i] = currentHeader[i];
+
+        return raw;
+    }
+
+    @Override
+    public ClusterTables store() throws IOException {
+
+        ClusterTables result = new ClusterTables();
+
+        int[] currentHeader = Arrays.copyOf(headerTable, INT_HEADER_SIZE);
+
+        int byteSize = valueTable.getTableSize();
+        byte[] byteBytes = new byte[byteSize];
+        valueTable.store(byteBytes, 0);
+
+        result.bytes = byteBytes;
+
+        int longSize = LONG_HEADER_SIZE + resourceTable.getTableSize() + foreignTable.getTableSize();
+        long[] longBytes = new long[longSize];
+
+        longBytes[0] = -1;
+        longBytes[1] = LONG_HEADER_VERSION;
+        longBytes[2] = 0;
+        longBytes[3] = clusterUID.second;
+
+        int longPos = resourceTable.store(longBytes, LONG_HEADER_SIZE);
+        foreignTable.store(longBytes, longPos);
+
+        result.longs = longBytes;
+
+        int intSize = INT_HEADER_SIZE + predicateTable.getTableSize() + objectTable.getTableSize() + completeTable.getTableSize();
+        int[] intBytes = new int[intSize];
+        int intPos = INT_HEADER_SIZE;
+        intPos = predicateTable.store(intBytes, intPos);
+        intPos = objectTable.store(intBytes, intPos);
+        intPos = completeTable.store(intBytes, intPos);
+        // write header
+        for(int i=0;i<INT_HEADER_SIZE;i++) {
+            int v = headerTable[i];
+            intBytes[i] = v;
+            //Bytes.writeLE(intBytes, i<<2, v);
+        }
+
+        result.ints = intBytes;
+
+        for(int i=0;i<INT_HEADER_SIZE;i++)
+            headerTable[i] = currentHeader[i];
+
+        return result;
+    }
+    
+    @Override
+    protected int getResourceTableCount() {
+        return resourceTable.getTableCount();
+    }
+    
+    @Override
+    public boolean getDeleted() {
+        if (deleted) return true;
+        int status = resourceTable.getClusterStatus();
+        return (status & ClusterStatus.DeletedMaskSet) == ClusterStatus.DeletedMaskSet;
+    }
+    @Override
+    public void setDeleted(boolean set, ClusterSupport support) {
+        deleted = set;
+        if(resourceTable != null) {
+            int status = resourceTable.getClusterStatus();
+            if (set)
+                status |= ClusterStatus.DeletedMaskSet;
+            else
+                status &= ClusterStatus.DeletedMaskClear;
+            resourceTable.setClusterStatus(status);
+        }
+        if (null != support)
+            support.setDeleted(this, set);
+    }
+
+    @Override
+    public Table<?> getPredicateTable() {
+        return predicateTable;
+    }
+
+    @Override
+    public Table getForeignTable() {
+        return foreignTable;
+    }
+
+    @Override
+    public int makeResourceKey(int pRef) throws DatabaseException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Table<?> getCompleteTable() {
+        return completeTable;
+    }
+
+    @Override
+    public Table<?> getValueTable() {
+        return valueTable;
+    }
+
+    @Override
+    public Table<?> getObjectTable() {
+        return objectTable;
+    }
+    
+}
+
+class ClusterStatus {
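+    // The cluster status is a bit field: bit 0 marks the cluster immutable,
+    // bit 1 marks it deleted. The *MaskSet constants set a bit and the
+    // *MaskClear constants are their complements, used to clear it.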
+    public static final int ImmutableMaskClear = 0xFFFFFFFE;
+    public static final int ImmutableMaskSet = 0x00000001;
+    public static final int DeletedMaskClear = 0xFFFFFFFD;
+    public static final int DeletedMaskSet = 0x00000002;
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/AcornDatabase.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/AcornDatabase.java
new file mode 100644 (file)
index 0000000..be505c6
--- /dev/null
@@ -0,0 +1,263 @@
+package org.simantics.acorn.internal;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.channels.FileLock;
+import java.nio.file.DirectoryStream;
+import java.nio.file.FileVisitOption;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.EnumSet;
+import java.util.Properties;
+
+import org.simantics.acorn.GraphClientImpl2;
+import org.simantics.db.Database;
+import org.simantics.db.DatabaseUserAgent;
+import org.simantics.db.ServiceLocator;
+import org.simantics.db.common.utils.Logger;
+import org.simantics.db.server.ProCoreException;
+
+/**
+ * @author Tuukka Lehtonen
+ */
+public class AcornDatabase implements Database {
+
+    private final Path folder;
+
+    private DatabaseUserAgent userAgent;
+
+    private RandomAccessFile raLockFile;
+
+    private FileLock lock;
+
+    private boolean isRunning;
+
+    public AcornDatabase(Path folder) {
+        this.folder = folder;
+    }
+
+    @Override
+    public DatabaseUserAgent getUserAgent() {
+        return userAgent;
+    }
+
+    @Override
+    public void setUserAgent(DatabaseUserAgent dbUserAgent) {
+        userAgent = dbUserAgent;
+    }
+
+    @Override
+    public Status getStatus() {
+        return Status.Local;
+    }
+
+    @Override
+    public File getFolder() {
+        return folder.toFile();
+    }
+
+    @Override
+    public boolean isFolderOk() {
+        return isFolderOk(folder.toFile());
+    }
+
+    @Override
+    public boolean isFolderOk(File aFolder) {
+        return aFolder.isDirectory();
+    }
+
+    @Override
+    public boolean isFolderEmpty() {
+        return isFolderEmpty(folder.toFile());
+    }
+
+    @Override
+    public boolean isFolderEmpty(File aFolder) {
+        Path path = aFolder.toPath();
+        if (!Files.isDirectory(path))
+            return false;
+        try (DirectoryStream<Path> folderStream = Files.newDirectoryStream(path)) {
+            return !folderStream.iterator().hasNext();
+        } catch (IOException e) {
+            Logger.defaultLogError("Failed to open folder stream. folder=" + path, e);
+            return false;
+        }
+    }
+
+    @Override
+    public void initFolder(Properties properties) throws ProCoreException {
+        try {
+            Files.createDirectories(folder);
+        } catch (IOException e) {
+            throw new ProCoreException(e);
+        }
+    }
+
+    @Override
+    public void deleteFiles() throws ProCoreException {
+        deleteTree(folder);
+    }
+
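+    // Single-writer guard: a "lock" file inside the database folder is held
+    // with FileChannel.tryLock(); if another process already owns the lock,
+    // start() fails instead of opening the same folder twice.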
+    @Override
+    public void start() throws ProCoreException {
+        Path lockFile = folder.resolve("lock");
+        try {
+            if (!Files.exists(lockFile))
+                Files.createFile(lockFile);
+            
+            raLockFile = new RandomAccessFile(lockFile.toFile(), "rw");
+            lock = raLockFile.getChannel().tryLock();
+            if (lock == null) {
+                throw new ProCoreException("The database in folder " + folder.toAbsolutePath() + " is already in use!");
+            }
+            
+            isRunning = true;
+            
+        } catch (IOException e) {
+            throw new ProCoreException(e);
+        }
+    }
+
+    @Override
+    public boolean isRunning() throws ProCoreException {
+        return isRunning;
+    }
+
+    @Override
+    public boolean tryToStop() throws ProCoreException {
+        try {
+            lock.release();
+            raLockFile.close();
+            
+            Files.deleteIfExists(folder.resolve("lock"));
+            
+            isRunning = false;
+            
+        } catch (IOException e) {
+            Logger.defaultLogError("Failed to stop the database. folder=" + folder, e);
+            return false;
+        }
+        
+        return true;
+    }
+
+    @Override
+    public void connect() throws ProCoreException {
+    }
+
+    @Override
+    public boolean isConnected() throws ProCoreException {
+        return isRunning;
+    }
+
+    @Override
+    public String execute(String command) throws ProCoreException {
+        throw new UnsupportedOperationException("execute(" + command + ")");
+    }
+
+    @Override
+    public void disconnect() throws ProCoreException {
+    }
+
+    @Override
+    public void clone(File to, int revision, boolean saveHistory) throws ProCoreException {
+        // TODO: implement
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Path createFromChangeSets(int revision) throws ProCoreException {
+        // TODO: implement
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void deleteGuard() throws ProCoreException {
+        // TODO: implement
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Path dumpChangeSets() throws ProCoreException {
+        // TODO: implement
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void purgeDatabase() throws ProCoreException {
+        // TODO: implement
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public long serverGetTailChangeSetId() throws ProCoreException {
+        // "We have it all"
+        // But after purging we don't so beware.
+        // TODO: beware for purge
+        return 1;
+    }
+
+    @Override
+    public Session newSession(ServiceLocator locator) throws ProCoreException {
+        try {
+            return new GraphClientImpl2(this, folder, locator);
+        } catch (IOException e) {
+            throw new ProCoreException(e);
+        }
+    }
+
+    @Override
+    public Journal getJournal() throws ProCoreException {
+        // TODO: implement
+        throw new UnsupportedOperationException();
+    }
+
+    private static void deleteTree(Path path) throws ProCoreException {
+        if (!Files.exists(path))
+            return;
+
+        class Visitor extends SimpleFileVisitor<Path> {
+            @Override
+            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+                try {
+                    Files.delete(file);
+                } catch (IOException ioe) {
+                    ioe.printStackTrace();
+                    throw ioe;
+                }
+                return FileVisitResult.CONTINUE;
+            }
+            @Override
+            public FileVisitResult postVisitDirectory(Path dir, IOException e) throws IOException {
+                if (e == null) {
+                    try {
+                        Files.delete(dir);
+                    } catch (IOException ioe) {
+                        ioe.printStackTrace();
+                        throw ioe;
+                    }
+                    return FileVisitResult.CONTINUE;
+                }
+                throw e;
+            }
+        }
+        try {
+            Visitor v = new Visitor();
+            EnumSet<FileVisitOption> opts = EnumSet.noneOf(FileVisitOption.class);
+            Files.walkFileTree(path, opts, Integer.MAX_VALUE, v);
+        } catch (IOException e) {
+            throw new ProCoreException("Could not delete " + path, e);
+        }
+    }
+
+    @Override
+    public String getCompression() {
+        return "LZ4";
+    }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Activator.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Activator.java
new file mode 100644 (file)
index 0000000..b6cb59b
--- /dev/null
@@ -0,0 +1,62 @@
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn.internal;
+
+import org.eclipse.core.runtime.Plugin;
+import org.osgi.framework.BundleContext;
+
+/**
+ * @author Antti Villberg
+ */
+public class Activator extends Plugin {
+
+    // The plug-in ID
+    public static final String BUNDLE_ID = "org.simantics.acorn"; //$NON-NLS-1$
+    // The shared instance
+    private static Activator plugin;
+
+    /**
+     * The constructor
+     */
+    public Activator() {
+    }
+
+    /*
+     * (non-Javadoc)
+     * @see org.eclipse.ui.plugin.AbstractUIPlugin#start(org.osgi.framework.BundleContext)
+     */
+    @Override
+    public void start(BundleContext context) throws Exception {
+        super.start(context);
+        plugin = this;
+    }
+
+    /*
+     * (non-Javadoc)
+     * @see org.eclipse.ui.plugin.AbstractUIPlugin#stop(org.osgi.framework.BundleContext)
+     */
+    @Override
+    public void stop(BundleContext context) throws Exception {
+        plugin = null;
+        super.stop(context);
+    }
+
+    /**
+     * Returns the shared instance
+     *
+     * @return the shared instance
+     */
+    public static Activator getDefault() {
+        return plugin;
+    }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/BijectionMap.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/BijectionMap.java
new file mode 100644 (file)
index 0000000..3de77d2
--- /dev/null
@@ -0,0 +1,119 @@
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+/*
+ * Created on Jan 21, 2005
+ * 
+ * Copyright Toni Kalajainen
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.simantics.acorn.internal;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * A bijection map is a Map that maintains 1:1 mappings between two sets of
+ * values instead of plain key/value pairs. The two sides of a mapping are
+ * called the left and the right value.
+ * 
+ * Each value can occur at most once on its side; mapping a value again
+ * replaces its old pairing.
+ * 
+ * @author Toni Kalajainen
+ */
+public class BijectionMap<L, R> {
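+
+    // A minimal usage sketch (all methods shown are defined below; the
+    // values are illustrative only):
+    //
+    //   BijectionMap<String, Integer> map = new BijectionMap<>();
+    //   map.map("a", 1);              // "a" <-> 1
+    //   map.map("b", 1);              // remapping 1 drops the "a" pairing
+    //   map.getLeft(1);               // => "b"
+    //   map.getRight("b");            // => 1
+    //   map.removeWithLeft("b");      // => 1, clearing both sides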
+
+    /** The keys of tableLeft are left-side-values and
+     * values are right-side-values */
+    private final Map<L, R> tableLeft = new HashMap<L, R>();
+    /** The keys of tableRight are right-side-values and
+     * values on it are left-side-values */
+    private final Map<R, L> tableRight = new HashMap<R, L>();
+
+    public boolean containsLeft(L leftValue)
+    {
+        return tableLeft.containsKey(leftValue);
+    }
+
+    public boolean containsRight(R rightValue)
+    {
+        return tableRight.containsKey(rightValue);
+    }
+
+    public void map(L leftValue, R rightValue)
+    {
+        // Remove possible old mapping
+        R oldRight = tableLeft.remove(leftValue);
+        if (oldRight != null) {
+            tableRight.remove(oldRight);
+        } else {
+            L oldLeft = tableRight.remove(rightValue);
+            if (oldLeft != null) {
+                tableLeft.remove(oldLeft);
+            }
+        }
+
+        tableLeft.put(leftValue, rightValue);
+        tableRight.put(rightValue, leftValue);
+    }
+
+    public int size()
+    {
+        return tableLeft.size();
+    }
+
+    public L getLeft(R rightValue) {
+        return tableRight.get(rightValue);
+    }
+
+    public R getRight(L leftValue) {
+        return tableLeft.get(leftValue);
+    }
+
+    public R removeWithLeft(L leftValue) {
+        R rightValue = tableLeft.remove(leftValue);
+        if (rightValue!=null)
+            tableRight.remove(rightValue);
+        return rightValue;
+    }
+
+    public L removeWithRight(R rightValue) {
+        L leftValue = tableRight.remove(rightValue);
+        if (leftValue!=null)
+            tableLeft.remove(leftValue);
+        return leftValue;
+    }
+
+    public Set<L> getLeftSet() {
+        return tableLeft.keySet();
+    }
+
+    public Set<R> getRightSet() {
+        return tableRight.keySet();
+    }
+
+    public void clear() {
+        tableLeft.clear();
+        tableRight.clear();
+    }
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Change.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Change.java
new file mode 100644 (file)
index 0000000..305e315
--- /dev/null
@@ -0,0 +1,70 @@
+package org.simantics.acorn.internal;
+
+import org.simantics.db.service.ClusterUID;
+
+final public class Change {
+    
+    byte op0;
+    int key0;
+    int key1;
+    int key2;
+    ClusterUID clusterUID1;
+    ClusterUID clusterUID2;
+    byte[] lookup1;
+    byte[] lookup2;
+    byte lookIndex1;
+    byte lookIndex2;
+    int lastArg = 0;
+
+    @Override
+    public String toString() {
+        return "Change " + (key0&0xffff) + " " + (key1&0xffff) + " " + (key2&0xffff) + " " + clusterUID2 + " " + clusterUID2;
+    }
+    
+    public final void init() {
+        lastArg = 0;
+    }
+
+    public final void initValue() {
+        lastArg = 0;
+    }
+
+    final void addStatementIndex0(int key, byte op) {
+        assert (op != 0);
+        key0 = key;
+        op0 = op;
+    }
+
+    final void addStatementIndex1(int key, ClusterUID clusterUID, byte lookIndex, byte[] lookup) {
+        key1 = key;
+        clusterUID1 = clusterUID;
+        lookIndex1 = lookIndex;
+        lookup1 = lookup;
+//        if(lookIndex > 0)
+//            System.err.println("statementIndex1 " + pos + " " + lookIndex);
+    }
+
+    final void addStatementIndex2(int key, ClusterUID clusterUID, byte lookIndex, byte[] lookup) {
+        key2 = key;
+        clusterUID2 = clusterUID;
+        lookIndex2 = lookIndex;
+        lookup2 = lookup;
+    }
+
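+    /**
+     * A statement is accumulated with three consecutive calls to
+     * addStatementIndex: the first carries the statement's resource key and
+     * the operation code, the second and third the predicate and object keys
+     * (cf. DebugStm in ClusterChange, which prints key0/key1/key2 as r/p/o).
+     */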
+    final public void addStatementIndex(int key, ClusterUID clusterUID, byte op) {
+
+        // new Exception("lastArg=" + lastArg).printStackTrace();
+
+        assert (lastArg < 3);
+
+        if (0 == lastArg)
+            addStatementIndex0(key, op);
+        else if (1 == lastArg)
+            addStatementIndex1(key, clusterUID, (byte)0, null);
+        else if (2 == lastArg)
+            addStatementIndex2(key, clusterUID, (byte)0, null);
+
+        lastArg++;
+
+    }
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange.java
new file mode 100644 (file)
index 0000000..b1fbb5d
--- /dev/null
@@ -0,0 +1,735 @@
+package org.simantics.acorn.internal;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.simantics.acorn.internal.ClusterStream.ClusterEnum;
+import org.simantics.acorn.internal.ClusterStream.Data;
+import org.simantics.acorn.internal.ClusterStream.DebugInfo;
+import org.simantics.acorn.internal.ClusterStream.OpEnum;
+import org.simantics.acorn.internal.ClusterStream.StmEnum;
+import org.simantics.compressions.Compressions;
+import org.simantics.db.exception.RuntimeDatabaseException;
+import org.simantics.db.impl.ClusterTraitsBase;
+import org.simantics.db.procore.cluster.ClusterTraits;
+import org.simantics.db.procore.cluster.ClusterTraitsSmall;
+import org.simantics.db.service.Bytes;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.datastructures.Pair;
+
+import gnu.trove.map.hash.TIntByteHashMap;
+import gnu.trove.map.hash.TLongIntHashMap;
+
+
+public final class ClusterChange {
+       
+    public static final int VERSION = 1;
+    public static final byte ADD_OPERATION = 2;
+    public static final byte REMOVE_OPERATION = 3;
+    public static final byte DELETE_OPERATION = 5;
+
+    public static final boolean DEBUG = false;
+    public static final boolean DEBUG_STAT = false;
+    public static final boolean DEBUG_CCS = false;
+
+    private static DebugInfo sum = new DebugInfo();
+
+    public final TIntByteHashMap foreignTable = new TIntByteHashMap();
+    private final DebugInfo info;
+//    private final GraphSession graphSession;
+    public final ClusterUID clusterUID;
+    private final int SIZE_OFFSET;
+//    private final int HEADER_SIZE;
+    // How much of the buffer is filled before the stream is flushed to the server. The bigger the better.
+    public static final int MAX_FIXED_BYTES = (1<<15) + (1<<14);
+    private static final int MAX_FIXED_OPERATION_SIZE = 17 + 16;
+    private static final int MAX_FIXED_OPERATION_SIZE_AND_ROOM_FOR_ERROR = MAX_FIXED_OPERATION_SIZE + 36;
+    private int nextSize = MAX_FIXED_BYTES;
+    int byteIndex = 0;
+    private byte[] bytes = null; // Operation data.
+//    private final byte[] header;
+    private boolean flushed = false;
+    private ArrayList<Pair<ClusterUID, byte[]>> stream;
+
+//    public ClusterImpl clusterImpl;
+
+    public ClusterChange(ArrayList<Pair<ClusterUID, byte[]>> stream, ClusterUID clusterUID) {
+        this.clusterUID = clusterUID;
+        long[] longs = new long[ClusterUID.getLongLength()];
+        clusterUID.toLong(longs, 0);
+        this.stream = stream;
+//        this.graphSession = clusterStream.graphSession;
+        info = new DebugInfo();
+//        HEADER_SIZE = 8 + longs.length * 8;
+//        header = new byte[HEADER_SIZE];
+        SIZE_OFFSET = 0;
+//        Bytes.writeLE(header, SIZE_OFFSET + 0, 0); // Correct byte vector size is set with setHeaderVectorSize() later.
+//        Bytes.writeLE(header, SIZE_OFFSET + 4, VERSION);
+//        for (int i=0, offset=8; i<longs.length; ++i, offset+=8)
+//            Bytes.writeLE(header, offset, longs[i]);
+        //initBuffer();
+//        this.clusterStream = clusterStream;
+//        this.clusterChange2 = new ClusterChange2(clusterUID, clusterImpl);
+//        clusterStream.changes.add(this);
+    }
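+
+    // A usage sketch inferred from this file (not from any original
+    // documentation); resourceIndex, bytes and clusterUID stand for
+    // caller-supplied values:
+    //
+    //   ArrayList<Pair<ClusterUID, byte[]>> stream = new ArrayList<>();
+    //   ClusterChange cc = new ClusterChange(stream, clusterUID);
+    //   cc.createResource(resourceIndex);                // emit operations
+    //   cc.setValue(resourceIndex, bytes, bytes.length);
+    //   cc.flush();              // append compressed block to the stream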
+
+//    private void setHeaderVectorSize(int size) {
+//        if (size < 0)
+//            throw new RuntimeDatabaseException("Change set size can't be negative.");
+//        int len = size + HEADER_SIZE - SIZE_OFFSET - 4;
+//        Bytes.writeLE(header, SIZE_OFFSET, len);
+//    }
+    @Override
+    public String toString() {
+        return super.toString() + " cluster=" + clusterUID + " off=" + byteIndex;
+    }
+    final public void initBuffer() {
+        flushed = false;
+        if (null == bytes || bytes.length < nextSize) {
+            bytes = new byte[nextSize];
+            nextSize = MAX_FIXED_BYTES;
+        }
+        byteIndex = 0;
+    }
+    private final void clear() {
+//     if(clusterImpl != null && clusterImpl.change != null)
+//             clusterImpl.change.init();
+        foreignTable.clear();
+        //initBuffer();
+        bytes = null;
+        byteIndex = 0;
+        if (DEBUG_STAT)
+            info.clear();
+    }
+    private final void checkInitialization() {
+//        if (0 == byteIndex)
+//            clusterStream.changes.addChange(this);
+    }
+    private final void printlnd(String s) {
+        System.out.println("DEBUG: ClusterChange " + clusterUID + ": " + s);
+    }
+    public final void createResource(short index) {
+        checkInitialization();
+        if (DEBUG)
+            printlnd("New ri=" + index + " offset=" + byteIndex);
+        if (index > ClusterTraits.getMaxNumberOfResources())
+            throw new RuntimeDatabaseException("Illegal resource index=" + index + ".");
+        checkBufferSpace(null);
+        bytes[byteIndex++] = (byte)52;
+        bytes[byteIndex++] = (byte)index;
+        bytes[byteIndex++] = (byte)(index>>>8);
+    }
+    void flushCollect(Change c) {
+        throw new UnsupportedOperationException();
+//        flushInternal(graphSession, clusterUID);
+//        if (DEBUG)
+//            printlnd("Cluster change data was flushed.");
+//        if (null != c) {
+//            if (DEBUG)
+//                printlnd("Clearing lookup for " + c.toString());
+//            c.lookup1 = null;
+//            c.lookup2 = null;
+//        }
+//        if (null != clusterImpl) {
+//            clusterImpl.foreignLookup = null;
+//        }
+    }
+
+    private final boolean checkBufferSpace(Change c) {
+//        clusterStream.changes.checkFlush();
+        if(bytes == null) initBuffer();
+        if (MAX_FIXED_BYTES - byteIndex > MAX_FIXED_OPERATION_SIZE_AND_ROOM_FOR_ERROR) {
+            return false;
+        }
+        flush();
+//        initBuffer();
+        return true;
+    }
+
+    private final void checkBufferSpace(int size) {
+        if(bytes == null) initBuffer();
+        if (bytes.length - byteIndex >= size)
+            return;
+        nextSize = Math.max(MAX_FIXED_BYTES, size);
+        flush();
+        initBuffer();
+    }
+
+    public final void addChange(Change c) {
+        checkInitialization();
+        checkBufferSpace(c);
+        byte operation = c.op0;
+        if(operation == ADD_OPERATION)
+            addStm(c, StmEnum.Add);
+        else if (operation == REMOVE_OPERATION)
+            addStm(c, StmEnum.Remove);
+        else if (operation == DELETE_OPERATION) {
+            if (DEBUG)
+                printlnd("Delete value offset=" + byteIndex + " " + c);
+            addByte(OpEnum.Delete.getOrMask());
+            addShort(ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0));
+        }
+        c.lastArg = 0;
+    }
+
+    private final void addForeignLong(short index, ClusterUID clusterUID) {
+        byteIndex = clusterUID.toByte(bytes, byteIndex);
+        bytes[byteIndex++] = (byte)(index & 0xFF);
+        bytes[byteIndex++] = (byte)(index >>> 8);
+    }
+
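+    // A reference to a resource in this cluster is written as a plain 16-bit
+    // index. The first reference to a foreign resource writes the full
+    // ClusterUID plus index and records a 1-based slot in foreignTable;
+    // later references to the same resource write only that slot byte.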
+    private final ClusterEnum addIndexAndCluster(int key, ClusterUID clusterUID, byte lookIndex, byte[] lookup) {
+        assert(!clusterUID.equals(ClusterUID.Null));
+        short resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(key);
+        if (clusterUID.equals(this.clusterUID)) {
+            bytes[byteIndex++] = (byte)(resourceIndex & 0xFF);
+            bytes[byteIndex++] = (byte)(resourceIndex >>> 8);
+            return ClusterEnum.Local;
+        }
+
+        byte foreign = 0;
+        if(lookIndex > 0) {
+            if(lookup != null)
+                foreign = lookup[lookIndex];
+        } else {
+            foreign = foreignTable.get(key);
+        }
+        if (0 != foreign) {
+            if (foreign > 256)
+                throw new RuntimeDatabaseException("Internal error, contact application support. " +
+                "Too big foreign index=" + foreign + " max=256");
+            --foreign;
+            bytes[byteIndex++] = foreign;
+            return ClusterEnum.ForeignShort;
+        } else {
+            byte position = (byte) (foreignTable.size() + 1);
+            if(lookup != null)
+                lookup[lookIndex] = position;
+            foreignTable.put(key, position);
+            if (DEBUG_STAT)
+                info.sForeign = foreignTable.size();
+            if (clusterUID.equals(ClusterUID.Null))
+                throw new RuntimeDatabaseException("Internal error, contact application support. " +
+                "Cluster unique id not defined for foreign cluster.");
+            addForeignLong(resourceIndex, clusterUID);
+            return ClusterEnum.ForeignLong;
+        }
+    }
+
+    private final void addByte(byte b) {
+        bytes[byteIndex++] = b;
+    }
+
+    private final void addShort(short s) {
+        bytes[byteIndex++] = (byte)(s & 0xFF);
+        bytes[byteIndex++] = (byte)(s >>> 8);
+    }
+
+//    private final void addShort(int s) {
+//        bytes[byteIndex++] = (byte) (s & 0xFF);
+//        bytes[byteIndex++] = (byte) ((s >>> 8) & 0xFF);
+//    }
+
+    private final void addInt(int i) {
+//        System.err.println("addInt " + i + " " + i);
+        bytes[byteIndex++] = (byte) (i & 0xFF);
+        bytes[byteIndex++] = (byte) ((i >>> 8) & 0xFF);
+        bytes[byteIndex++] = (byte) ((i >>> 16) & 0xFF);
+        bytes[byteIndex++] = (byte) ((i >>> 24) & 0xFF);
+        // buffer.asIntBuffer().put(i);
+        // buffer.position(buffer.position()+4);
+    }
+
+//    private void addLong6(long l) {
+////        System.err.println("addLong " + l);
+//        bytes[byteIndex++] = (byte) (l & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 8) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 16) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 24) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 32) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 40) & 0xFF);
+//        // buffer.asLongBuffer().put(l);
+//        // buffer.position(buffer.position() + 6);
+//    }
+
+    private void addLong7(long l) {
+        bytes[byteIndex++] = (byte) (l & 0xFF);
+        bytes[byteIndex++] = (byte) ((l >>> 8) & 0xFF);
+        bytes[byteIndex++] = (byte) ((l >>> 16) & 0xFF);
+        bytes[byteIndex++] = (byte) ((l >>> 24) & 0xFF);
+        bytes[byteIndex++] = (byte) ((l >>> 32) & 0xFF);
+        bytes[byteIndex++] = (byte) ((l >>> 40) & 0xFF);
+        bytes[byteIndex++] = (byte) ((l >>> 48) & 0xFF);
+        // buffer.asLongBuffer().put(l);
+        // buffer.position(buffer.position() + 7);
+    }
+
+//    private void addLong(long l) {
+//        bytes[byteIndex++] = (byte) (l & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 8) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 16) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 24) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 32) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 40) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 48) & 0xFF);
+//        bytes[byteIndex++] = (byte) ((l >>> 56) & 0xFF);
+//    }
+    private final byte bufferPop() {
+        return bytes[--byteIndex];
+    }
+
+    final class DebugStm {
+        StmEnum e;
+        int r;
+        int p;
+        int o;
+        ClusterUID pc;
+        ClusterUID oc;
+
+        DebugStm(StmEnum e, int r, int p, ClusterUID pc, int o, ClusterUID oc) {
+            this.e = e;
+            this.r = r;
+            this.p = p;
+            this.o = o;
+            this.pc = pc;
+            this.oc = oc;
+        }
+
+        @Override
+        public String toString() {
+            short ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(r);
+            short pi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(p);
+            short oi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(o);
+            return "" + e + " rk=" + r + " ri=" + ri + " rc=" + clusterUID
+            + " pk=" + p + " pi=" + pi + " pc=" + pc
+            + " ok=" + o + " oi=" + oi + " oc=" + oc;
+        }
+
+        public String toString2() {
+            return "" + e + " r=" + r + " rc=" + clusterUID + " p=" + p
+                    + " pc=" + pc + " o=" + o + " oc=" + oc;
+        }
+
+        public String toString3() {
+            short ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(r);
+            short pi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(p);
+            short oi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(o);
+            return "" + e + " ri=" + ri
+            + " pi=" + pi + " pc=" + pc
+            + " oi=" + oi + " oc=" + oc;
+        }
+    }
+
+    private List<DebugStm> debugStms = new ArrayList<DebugStm>();
+
+    @SuppressWarnings("unused")
+    private final void addStm(Change c, StmEnum stmEnum) {
+
+        if (DEBUG_STAT)
+            ++info.nStms;
+        if (DEBUG || DEBUG_CCS) {
+            DebugStm d = new DebugStm(stmEnum, c.key0, c.key1, c.clusterUID1, c.key2, c.clusterUID2);
+            if (DEBUG_CCS)
+                debugStms.add(d);
+            if (DEBUG) {
+                printlnd(d.toString3() + " offset=" + byteIndex);
+            }
+        }
+        // int opPos = buffer.position();
+        int opPos = byteIndex++;
+        // buffer.put((byte)0); // operation code
+        // addByte((byte)0);
+
+        boolean done = true;
+
+        ClusterEnum a = addIndexAndCluster(c.key1, c.clusterUID1, c.lookIndex1, c.lookup1);
+        byte ab = 0;
+
+        // ForeignShort = 1 byte (slot in foreignTable)
+        // Local = 2 bytes (16-bit resource index)
+        // ForeignLong = full ClusterUID followed by a 16-bit resource index
+        if (a != ClusterEnum.ForeignShort) {
+            ab = bufferPop();
+            done = false;
+        }
+
+        ClusterEnum b = addIndexAndCluster(c.key2, c.clusterUID2, c.lookIndex2, c.lookup2);
+        byte bb = 0;
+        if (b != ClusterEnum.ForeignShort) {
+            bb = bufferPop();
+            done = false;
+        }
+
+        int ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0);
+        if (ClusterTraitsSmall.isIllegalResourceIndex(ri))
+            throw new RuntimeDatabaseException("Assertion error. Illegal resource index=" + ri);
+        bytes[byteIndex++] = (byte)ri; // index low byte
+        if(!done) {
+            Data data = ClusterEnum.getData(stmEnum, a, b);
+            int left = 6 - data.bits;
+            int op = ri >>> (8 + left);
+            ri >>>= 8;
+            ri &= (1 << left) - 1;
+            if (a != ClusterEnum.ForeignShort) {
+                ri |= ab << left;
+                left += 6;
+            }
+            if (b != ClusterEnum.ForeignShort) {
+                ri |= bb << left;
+                left += 6;
+            }
+            switch (data.bytes) {
+                default:
+                    throw new RuntimeDatabaseException("Assertion error. Illegal number of bytes=" + data.bytes);
+                case 2:
+                    bytes[byteIndex++] = (byte)(ri & 0xFF);
+                    bytes[byteIndex++] = (byte)((ri >>> 8) & 0xFF);
+                    break;
+                case 1:
+                    bytes[byteIndex++] = (byte)(ri & 0xFF);
+                    break;
+                case 0:
+                    break;
+            }
+            op |= data.mask;
+            this.bytes[opPos] = (byte)op;
+        } else {
+            if (stmEnum == StmEnum.Add)
+                bytes[opPos] = (byte)((ri >>> 8) + 64);
+            else
+                bytes[opPos] = (byte)((ri >>> 8) + 128);
+        }
+        if (DEBUG_STAT) {
+            if (a == ClusterEnum.Local && b == ClusterEnum.Local) {
+                ++info.nLocal;
+            } else if (a == ClusterEnum.Local || b == ClusterEnum.Local) {
+                ++info.nPartly;
+            } else {
+                ++info.nForeign;
+            }
+        }
+        if (foreignTable.size() > 252)
+            flush();
+//             throw new UnsupportedOperationException();
+            //flushInternal(graphSession, clusterUID);
+    }
+
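+    // Value offsets are 58-bit quantities: the top two bits travel in the
+    // resource index short (ri |= (value_offset >>> 56) << 14) and the low
+    // 56 bits in the seven bytes written by addLong7.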
+    private final int modiValue(int ri, long value_offset, byte[] bytes, int offset, int size) {
+        if (DEBUG)
+            printlnd("Modify value ri=" + ri + " vo=" + value_offset + " size=" + size + " total=" + bytes.length);
+        if (ClusterTraitsBase.isIllegalResourceIndex(ri))
+            throw new RuntimeDatabaseException("Assertion error. Illegal resource index=" + ri);
+        if (value_offset > (1L << 58) - 1)
+            throw new RuntimeDatabaseException("Illegal value offset="
+                    + value_offset);
+        if (size < 0 || size > MAX_FIXED_BYTES - 1)
+            throw new RuntimeDatabaseException("Illegal value size=" + size);
+        if (offset + size > bytes.length)
+            throw new RuntimeDatabaseException("Illegal value range: offset=" + offset + " size=" + size);
+        checkBufferSpace(12 + size);
+        addByte(OpEnum.Modify.getOrMask());
+        ri |= (value_offset >>> 56) << 14; // add top two bits
+        addShort((short) ri);
+        value_offset &= (1L << 56) - 1;
+        addLong7(value_offset);
+        addShort((short) size);
+        if (DEBUG)
+            System.out.println("Modify value fixed part end offset=" + byteIndex);
+        int copied = Math.min(size, this.bytes.length - byteIndex);
+        System.arraycopy(bytes, offset, this.bytes, byteIndex, copied);
+        byteIndex += size;
+        return copied;
+    }
+
+//    private final void modiValueBig(int ri, long voffset, int left, byte[] bytes, int offset) {
+//        checkBufferSpace(0);
+//        int current = Math.min(this.bytes.length - byteIndex - 12, left);
+//        if(current >= 0) {
+//            int written = modiValue(ri, voffset, bytes, offset, current);
+//            voffset += written;
+//            offset += written;
+//            left -= written;
+//        }
+////        flushInternal(graphSession, clusterUID);
+//        while (left > 0) {
+//            int length = Math.min(left, (1 << 16) - 1);
+//            if (DEBUG)
+//                printlnd("Modify big value ri=" + ri + " vo=" + voffset + " len=" + length);
+//            int psize = length + 12;
+////            setHeaderVectorSize(psize);
+//            byte[] message = new byte[psize/*+HEADER_SIZE*/];
+////            System.arraycopy(header, 0, message, 0, HEADER_SIZE);
+//            int to = 0;
+//            Bytes.write(message, to++, OpEnum.Modify.getOrMask());
+//            short index = (short)(ri | (voffset >>> 56)<<14); // add top two bits
+//            Bytes.writeLE(message, to, index); to += 2;
+//            Bytes.writeLE7(message, to, voffset & ((1L << 56) - 1)); to += 7;
+//            Bytes.writeLE(message, to, (short)length); to += 2;
+//            System.arraycopy(bytes, offset, message, to, length);
+////            graphSession.updateCluster(new UpdateClusterFunction(message));
+//            voffset += length;
+//            offset += length;
+//            left -= length;
+//        }
+//    }
+
+    private final int setValueBig(int ri, byte[] bytes, int length_) {
+        checkBufferSpace(12);
+        int sum = 0;
+        int voffset = 0;
+        int offset = 0;
+        int left = length_;
+        while (left > 0) {
+            int length = Math.min(left, MAX_FIXED_BYTES - 12 - byteIndex);
+            if (DEBUG)
+                printlnd("Set big value ri=" + ri + " vo=" + voffset + " len=" + length);
+            int written = modiValue(ri, voffset, bytes, offset, length);
+            sum += written;
+            voffset += written;
+            offset += written;
+            left -= written;
+            checkBufferSpace(12);
+        }
+        return sum;
+    }
+
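+    // Small values use one of two encodings: SetShort for length < 32 (the
+    // high three bits of the length ride in the op byte, the low two bits
+    // plus the 14-bit resource index in the following short) or Set (op byte
+    // followed by a 32-bit "length << 14 | resourceIndex" header), in both
+    // cases followed by the raw value bytes.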
+    private final int setValueSmall(int ri, byte[] bytes, int length) {
+        checkBufferSpace(5 + length);
+        int pos = byteIndex;
+        int i = length << 14 | ri;
+        if (length < 32) {
+            byte op = (byte) (OpEnum.SetShort.getOrMask() | length >>> 2);
+            addByte(op);
+            short s = (short) i;
+            addShort(s);
+        } else {
+            addByte(OpEnum.Set.getOrMask());
+            addInt(i);
+        }
+        System.arraycopy(bytes, 0, this.bytes, byteIndex, length);
+        byteIndex += length;
+        int len = byteIndex - pos;
+        return len;
+    }
+
+    final void setValue(short index, byte[] bytes) {
+        setValue(index, bytes, bytes.length);
+    }
+
+    final public void setValue(short index, byte[] bytes, int length) {
+        checkInitialization();
+        if (ClusterTraitsBase.isIllegalResourceIndex(index))
+            throw new RuntimeDatabaseException("Assertion error. Illegal resource index=" + index);
+        if (DEBUG)
+            printlnd("Set value ri=" + index
+                    + " len=" + length
+                    + " bytes=" + Arrays.toString(Arrays.copyOfRange(bytes, 0, Math.min(10, length))));
+        int len;
+        /*
+         * The limit for the cluster stream is (1<<18)-1, but this avoids the
+         * conversion to a big cluster.
+         */
+        if (length > ClusterTraitsSmall.VALUE_SIZE_MAX)
+            len = setValueBig(index, bytes, length);
+        else
+            len = setValueSmall(index, bytes, length);
+        if (DEBUG_STAT) {
+            ++info.nValues;
+            info.sValues += len + length;
+        }
+    }
+
+//    final void setValue(Change c, byte[] bytes, int length) {
+//        short ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0);
+//        setValue(ri, bytes, length);
+//        c.initValue();
+//    }
+
+//    final void modiValue(Change c, long voffset, int length, byte[] bytes, int offset) {
+//        checkInitialization();
+//        int ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0);
+//        if (DEBUG)
+//            printlnd("Modify value ri=" + ri
+//                    + " voff=" + voffset
+//                    + " vlen=" + length
+//                    + " blen=" + bytes.length
+//                    + " boff=" + offset
+//                    + " bytes=" + Arrays.toString(Arrays.copyOfRange(bytes, 0, Math.min(10, bytes.length))));
+//        modiValueBig(ri, voffset, length, bytes, offset);
+//        c.init();
+//        if (DEBUG_STAT) {
+//            ++info.nValues;
+//            info.sValues += length;
+//        }
+//    }
+    final void setImmutable(boolean immutable) {
+        checkInitialization();
+//        clusterChange2.setImmutable(immutable);
+    }
+    final void undoValueEx(int resourceIndex) {
+        checkInitialization();
+//        clusterChange2.undoValueEx(resourceIndex);
+    }
+    final void setDeleted(boolean deleted) {
+        checkInitialization();
+//        clusterChange2.setDeleted(deleted);
+    }
+    final void corrupt() {
+        checkInitialization();
+        addByte((byte)0);
+    }
+    
+    public byte[] getBytes() {
+        byte[] copy = new byte[byteIndex];
+        System.arraycopy(bytes, 0, copy, 0, byteIndex);
+        return copy;
+    }
+    
+    /**
+     * @param clusterUID cluster whose pending changes are flushed
+     * @return true if something was actually flushed
+     */
+    final boolean flush(/*GraphSession graphSession,*/ ClusterUID clusterUID) {
+        throw new UnsupportedOperationException();
+//        if (byteIndex > 0) {
+//            if(DebugPolicy.REPORT_CLUSTER_STREAM)
+//                System.err.println("Flush cluster change set stream " + this);
+//            setHeaderVectorSize(byteIndex);
+//            byte[] copy = new byte[byteIndex + HEADER_SIZE];
+//            System.arraycopy(header, 0, copy, 0, HEADER_SIZE);
+//            System.arraycopy(bytes, 0, copy, HEADER_SIZE, byteIndex);
+//            UpdateClusterFunction updateClusterFunction = new UpdateClusterFunction(copy);
+//            if (DEBUG_CCS) {
+//                for (DebugStm stm : debugStms)
+//                    printlnd(stm.toString2());
+//                debugStms.clear();
+//            }
+//            if (DEBUG_STAT) {
+//                info.tot = updateClusterFunction.operation.length;
+//                printlnd("ReallyFlush: " + info.toString());
+//                sum.add(info);
+//                printlnd("ReallyFlush sum: " + sum.toString());
+//            }
+//            // long start = System.nanoTime();
+//            graphSession.updateCluster(updateClusterFunction);
+//            // long duration = System.nanoTime() - start;
+//            // duration2 += duration;
+//            // System.err.println("updateCluster " + 1e-9*duration);
+//            // System.err.println("updateCluster total " + 1e-9*duration2);
+//            clear();
+//            clusterChange2.flush(graphSession);
+//            return true;
+//        } else if (clusterChange2.isDirty()) {
+//            clusterChange2.flush(graphSession);
+//            clear();
+//            return true;
+//        } else if (flushed) {
+//            flushed = false;
+//            return true;
+//        } else {
+//            return true;
+//        }
+    }
+
+    final void flushInternal(ClusterUID clusterUID) {
+        throw new UnsupportedOperationException();
+//        flush(graphSession, clusterUID);
+//        flushed = true;
+    }
+
+    final class ForeignTable {
+        private final TLongIntHashMap table = new TLongIntHashMap();
+
+        private long createKey(short index, long cluster) {
+            assert (cluster <= (1L << 48) - 1);
+            return (cluster << 14) | index;
+        }
+
+        public int get(short index, long cluster) {
+            int value = table.get(createKey(index, cluster));
+            if (DEBUG)
+                printlnd("ForeignTable get c=" + clusterUID + " i="
+                        + (value - 1) + " r=" + index + " rc=" + cluster);
+            return value;
+        }
+
+        public int put(short index, long cluster, int value) {
+            if (DEBUG)
+                printlnd("ForeignTable put c=" + clusterUID + " i="
+                        + (value - 1) + " r=" + index + " rc=" + cluster);
+            return table.put(createKey(index, cluster), value);
+        }
+
+        public int size() {
+            return table.size();
+        }
+
+        public void clear() {
+            table.clear();
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return 31*clusterUID.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object object) {
+        if (this == object)
+            return true;
+        else if (object == null)
+            return false;
+        else if (!(object instanceof ClusterChange))
+            return false;
+        ClusterChange r = (ClusterChange)object;
+        return r.clusterUID.equals(clusterUID);
+    }
+    
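+    /**
+     * Packs the buffered operations into one stream block. The uncompressed
+     * payload is laid out as [version:int][ClusterUID:16][length:int]
+     * [operations][0:int]; it is LZ4-compressed (or kept raw when compression
+     * does not help) and framed as [0:int][0:int][rawLength:int]
+     * [rawLength:int][dataLength:int][data][0:int] before being appended to
+     * the stream, after which the buffer is reset.
+     */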
+    public void flush() {
+
+        if(byteIndex > 0) {
+
+            final ClusterUID cuid = clusterUID;
+
+            byte[] block = getBytes();
+            byte[] raw = new byte[block.length + 28];
+            Bytes.writeLE(raw, 0, 1);
+            System.arraycopy(cuid.asBytes(), 0, raw, 4, 16);
+            Bytes.writeLE(raw, 20, block.length);
+            System.arraycopy(block, 0, raw, 24, block.length);
+            Bytes.writeLE(raw, 24 + block.length, 0);
+
+            ByteBuffer rawBB = ByteBuffer.wrap(raw);
+            ByteBuffer outputBB = ByteBuffer.allocate(raw.length + raw.length/8);
+            //outputBB.order(ByteOrder.LITTLE_ENDIAN);
+            int compressedSize = Compressions.get(Compressions.LZ4).compressBuffer(rawBB, 0, raw.length, outputBB, 0);
+
+            byte[] data_ = null;
+            if(compressedSize < raw.length) {
+                data_ = new byte[compressedSize];
+                outputBB.get(data_, 0, compressedSize);
+            } else {
+                data_ = raw;
+            }
+
+            byte[] data = new byte[data_.length + 24];
+            Bytes.writeLE(data, 0, 0);
+            Bytes.writeLE(data, 4, 0);
+            Bytes.writeLE(data, 8, raw.length);
+            Bytes.writeLE(data, 12, raw.length);
+            Bytes.writeLE(data, 16, data_.length);
+            System.arraycopy(data_, 0, data, 20, data_.length);
+            Bytes.writeLE(data, 20 + data_.length, 0);
+
+            stream.add(Pair.make(clusterUID, data));
+            clear();
+            initBuffer();
+
+        }
+    }
+    
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange2.java
new file mode 100644 (file)
index 0000000..472b4d7
--- /dev/null
@@ -0,0 +1,70 @@
+package org.simantics.acorn.internal;
+
+
+public class ClusterChange2 {
+    public static final int VERSION = 2;
+    public static final byte SET_IMMUTABLE_OPERATION = 1; // <byte : 0 = false>
+    public static final byte UNDO_VALUE_OPERATION = 2; // <int : resource index>
+    private static final int INCREMENT = 1<<10;
+//    private boolean dirty = false;
+//    private byte[] bytes;
+//    private int byteIndex;
+//    private ClusterUID clusterUID;
+//    private ClusterImpl cluster;
+//    ClusterChange2(ClusterUID clusterUID, ClusterImpl cluster) {
+//        this.clusterUID = clusterUID;
+//        this.cluster = cluster;
+//        init();
+//    }
+//    void init() {
+////        System.err.println("clusterChange2 dirty " + cluster.clusterId);
+//        dirty = false;
+//        bytes = new byte[INCREMENT];
+//        byteIndex = 0;
+//        addInt(0); // Size of byte vector. Set by flush.
+//        addInt(VERSION);
+//        byteIndex = clusterUID.toByte(bytes, 8);
+//    }
+//    boolean isDirty() {
+//        return dirty;
+//    }
+//    void flush(GraphSession graphSession) {
+////        System.err.println("flush2 clusterChange2 " + dirty + this);
+//        if (!dirty)
+//            return;
+//        Bytes.writeLE(bytes, 0, byteIndex - 4);
+//        byte[] ops = Arrays.copyOf(bytes, byteIndex);
+////        System.err.println("flush2 clusterChange2 " + cluster.clusterId + " " + ops.length + " bytes.");
+//        graphSession.updateCluster(new UpdateClusterFunction(ops));
+//        init();
+//    }
+//    void setImmutable(boolean immutable) {
+//        dirty = true;
+//        addByte(SET_IMMUTABLE_OPERATION);
+//        addByte((byte)(immutable ? -1 : 0));
+//    }
+//    void undoValueEx(int resourceIndex) {
+//        dirty = true;
+//        addByte(UNDO_VALUE_OPERATION);
+//        addInt(resourceIndex);
+//    }
+//    private final void checkSpace(int len) {
+//        if (bytes.length - byteIndex > len)
+//            return;
+//       bytes = Arrays.copyOf(bytes, bytes.length + len + INCREMENT);
+//    }
+//    private final void addByte(byte value) {
+//        checkSpace(1);
+//        bytes[byteIndex++] = value;
+//    }
+//    private final void addInt(int value) {
+//        checkSpace(4);
+//        Bytes.writeLE(bytes, byteIndex, value);
+//        byteIndex += 4;
+//    }
+////    private void addLong(long value) {
+////        checkSpace(8);
+////        Bytes.writeLE(bytes, byteIndex, value);
+////        byteIndex += 8;
+////    }
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterStream.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterStream.java
new file mode 100644 (file)
index 0000000..2b1ae19
--- /dev/null
@@ -0,0 +1,437 @@
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn.internal;
+
+import java.util.ArrayList;
+
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.service.ClusterUID;
+
+final public class ClusterStream {
+
+//    // public static long duration2 = 0;
+//
+    public static final boolean DEBUG = false;
+    public static final byte NULL_OPERATION = 0;
+    public static final byte CREATE_OPERATION = 1;
+    public static final byte SET_OPERATION = 4;
+    public static final byte MODI_OPERATION = 6;
+    public static final byte KILL_OPERATION = 7;
+//    boolean off = false;
+//    public GraphSession graphSession;
+//    final SessionImplSocket session;
+////    private int flushCount = 0;
+//    final private boolean alwaysOff;
+//    private int stamp;
+//    private int acceptedStamp;
+//    private boolean dirty = false;
+////    final private ArrayList<ClusterChange> clusterChanges = new ArrayList<ClusterChange>();
+//    
+//    final ClusterChangeManager changes = new ClusterChangeManager();
+//    
+////    final TLongObjectHashMap<ClusterChange> clusterChanges = new TLongObjectHashMap<ClusterChange>();
+//
+//    // private final Change lastChange = new Change();
+//    ClusterStream(SessionImplSocket session, GraphSession graphSession,
+//            boolean alwaysOff) {
+//        this.session = session;
+//        this.graphSession = graphSession;
+//        this.alwaysOff = alwaysOff;
+//    }
+//
+//    
+//    boolean isDirty() {
+//     return dirty;
+//    }
+//    
+//    void markDirty() {
+//     dirty = true;
+//    }
+//    
+//    void setOff(boolean value) {
+//        if (alwaysOff) {
+//            off = true;
+//        } else {
+//            off = value;
+//        }
+//    }
+//
+//    boolean getOff() {
+//        return off;
+//    }
+//
+//    void createResource(ClusterChange cc, short operationIndex, ClusterUID clusterUID) {
+//        if (off)
+//            return;
+//        assert (null != cc);
+//        assert (0 != operationIndex);
+//        assert (!ClusterUID.Null.equals(clusterUID));
+//        if (DEBUG)
+//            System.out.println("DEBUG: Created resource index=" + operationIndex + " cluster=" + clusterUID);
+//        cc.createResource(operationIndex);
+//    }
+//
+//    final void addStatementIndex(Change change, int key, ClusterUID clusterUID, byte op) {
+//        if (off)
+//            return;
+//        assert (key > 0);
+//        assert (null != change);
+//        assert (!ClusterUID.Null.equals(clusterUID));
+//        change.addStatementIndex(key, clusterUID, op);
+//    }
+//
+//    void addStatement(ClusterChange cc, Change change) {
+//        if (off)
+//            return;
+//        assert (null != cc);
+//        assert (null != change);
+//        cc.addChange(change);
+//    }
+//
+//    void cancelStatement(Change change) {
+//        if (off)
+//            return;
+//        assert (null != change);
+//        change.init();
+//    }
+//
+//    void removeStatement(ClusterChange cc, Change change, long clusterId) {
+//        if (off)
+//            return;
+//        assert (null != cc);
+//        assert (null != change);
+//        cc.addChange(change);
+//    }
+//
+//    void cancelValue(Change change) {
+//        if (off)
+//            return;
+//        assert (null != change);
+//        change.init();
+//    }
+//
+//    void removeValue(ClusterChange cc, Change change, long clusterId) {
+//        if (off)
+//            return;
+//        // ClusterChange cc = getClusterChange(clusterId);
+//        assert (null != cc);
+//        assert (null != change);
+//        cc.addChange(change);
+//    }
+//
+//    void setValue(ClusterChange cc, Change change, long clusterId, byte[] bytes, int length) {
+//        if (off)
+//            return;
+//        assert (null != cc);
+//        assert (null != change);
+//        // ClusterChange cc = getClusterChange(clusterId);
+//        cc.setValue(change, bytes, length);
+//    }
+//
+//    void modiValue(ClusterChange cc, Change change, long clusterId,
+//            long voffset, int length, byte[] bytes, int offset) {
+//        assert (null != cc);
+//        assert (null != change);
+//        cc.modiValue(change, voffset, length, bytes, offset);
+//    }
+//
+//    void undoValueEx(ClusterChange cc, Change change, int resourceIndex) {
+//        cc.undoValueEx(resourceIndex);
+//    }
+//    void setImmutable(ClusterChange cc, Change change, long clusterId, boolean immutable) {
+//        if (off)
+//            return;
+//        cc.setImmutable(immutable);
+//    }
+//    public void corruptCluster(ClusterChange cc, long clusterId)
+//            throws DatabaseException {
+//        if (off)
+//            return;
+//        if (DEBUG)
+//            System.out.println("ClusterStream.corrupt cid=" + clusterId + ".");
+//        assert (null != cc);
+//        cc.corrupt();
+//    }
+//
+//    int getStamp() {
+//        return stamp;
+//    }
+//
+//    void flush() {
+//        if (off)
+//            return;
+////        flushCount++;
+//        return;
+//    }
+//
+//    void flush(long clusterId) {
+//        if (off)
+//            return;
+//        ClusterUID clusterUID = session.clusterTable.clusterIds.getClusterUID(clusterId);
+//        ArrayList<ClusterChange> ccs = new ArrayList<ClusterChange>();
+//        for(ClusterChange cc : changes.get()) {
+//            if(cc.clusterUID.equals(clusterUID)) {
+//                if (cc.flush(graphSession, cc.clusterUID)) {
+//                    ccs.add(cc);
+//                    if (stamp == acceptedStamp)
+//                        ++stamp;
+//                } else {
+////                    System.err.println("kasdi");
+//                }
+//            }
+//        }
+//        changes.remove(ccs);
+//    }
+//
+//    /**
+//     * @return true if the stream has accepted all changes
+//     */
+//    public boolean reallyFlush() {
+//        // Last possibility to mark clusters immutable before write only clusters are gone 
+//        session.handleCreatedClusters();
+//        // These shall be requested from server
+//        session.clusterTable.removeWriteOnlyClusters();
+//        if (!off && changes.size() > 0) {
+//            for(ClusterChange cc : changes.get()) {
+//                if (cc.flush(graphSession, cc.clusterUID))
+//                    if (stamp == acceptedStamp)
+//                        ++stamp;
+//            }
+//            changes.clear();
+//        }
+//        dirty = false;
+//        return hasAcceptedAllChanges();
+//    }
+//
+//    /**
+//     * Clear all changes and set stream status to empty.
+//     */
+//    public void clear() {
+//        changes.clear();
+//        acceptedStamp = stamp;
+//        dirty = false;
+//    }
+//
+//    private boolean hasAcceptedAllChanges() {
+//        return stamp == acceptedStamp;
+//    }
+//
+//    void accept() {
+//        acceptedStamp = stamp;
+//    }
+//
+//
+
+    static class DebugInfo {
+        long nStms;
+        long nLocal;
+        long nPartly;
+        long nForeign;
+        long nValues;
+        long sValues;
+        long sForeign;
+        long tot;
+
+        void clear() {
+            nStms = 0;
+            nLocal = 0;
+            nPartly = 0;
+            nForeign = 0;
+            sForeign = 0;
+            nValues = 0;
+            sValues = 0;
+            tot = 0;
+        }
+
+        void add(DebugInfo di) {
+            nStms += di.nStms;
+            nLocal += di.nLocal;
+            nPartly += di.nPartly;
+            nForeign += di.nForeign;
+            sForeign += di.sForeign;
+            nValues += di.nValues;
+            sValues += di.sValues;
+            tot += di.tot;
+        }
+
+        @Override
+        public String toString() {
+            return "val=" + nValues + " stm=" + nStms + " loc=" + nLocal
+                    + " par=" + nPartly + " ful=" + nForeign + " for="
+                    + sForeign + " vat=" + sValues + " tot=" + tot;
+        }
+    }
+
+    enum StmEnum {
+        Add(0, (byte) 0), Remove(1, (byte) 0x20);
+        StmEnum(int ordinal, byte mask) {
+            this.ordinal = ordinal;
+            this.mask = mask;
+        }
+
+        public int ordinal;
+        private byte mask;
+
+        byte getOrMask() {
+            return mask;
+        }
+    }
+
+    final static class Data {
+        
+        final byte mask; // or mask for operation code (don't care bits are zero)
+        final short bits; // how many bits are reserved for resource index (0,2,4,6)
+        final int bytes;
+
+        Data(int mask, int bits, ClusterEnum a, ClusterEnum b) {
+            this.mask = (byte) (mask << bits);
+            this.bits = (short) bits;
+            this.bytes = bytes(bits, a, b);
+        }
+        
+        private static int bytes(int bits, ClusterEnum a, ClusterEnum b) {
+            int left = 6 - bits;
+            if (a != ClusterEnum.ForeignShort) {
+                left += 6;
+            }
+            if (b != ClusterEnum.ForeignShort) {
+                left += 6;
+            }
+            int bytes = left >>> 3;
+            if ((left & 7) != 0)
+                bytes++;
+            return bytes;
+        }
+        
+    }
+
+    enum ClusterEnum {
+        Local(0), ForeignShort(1), ForeignLong(2);
+        public int ordinal;
+
+        ClusterEnum(int ordinal) {
+            this.ordinal = ordinal;
+        }
+
+        static Data[][][] maps = new Data[2][3][3];
+        static {
+               // mask: 00000000
+               // op: 000000|r12-13
+               // p1
+               // o1
+               // r0-7
+               // o2 | p2 | r8-11
+            maps[StmEnum.Add.ordinal][Local.ordinal][Local.ordinal] = new Data(
+                    0, 2, Local, Local);
+               // mask: 11000000
+               // op: 1100 | r10-13
+               // p1
+               // o for index
+               // r0-7
+               // p2 | ri 8-9
+            maps[StmEnum.Add.ordinal][Local.ordinal][ForeignShort.ordinal] = new Data(
+                    12, 4, Local, ForeignShort);
+               // mask: 00001000
+            // op: 000010 | r12-13 
+            maps[StmEnum.Add.ordinal][Local.ordinal][ForeignLong.ordinal] = new Data(
+                    2, 2, Local, ForeignLong);
+               // mask: 11010000
+            // op: 1101 | r10-13 
+            maps[StmEnum.Add.ordinal][ForeignShort.ordinal][Local.ordinal] = new Data(
+                    13, 4, ForeignShort, Local);
+
+            // mask: 01000000
+            // op: 01 | r8-13
+            // p for index
+            // o for index
+               // r0-7
+            maps[StmEnum.Add.ordinal][ForeignShort.ordinal][ForeignShort.ordinal] = new Data(
+                    1, 6, ForeignShort, ForeignShort);
+            // mask: 11100000
+            // op: 1110 | r10-13 
+            maps[StmEnum.Add.ordinal][ForeignShort.ordinal][ForeignLong.ordinal] = new Data(
+                    14, 4, ForeignShort, ForeignLong);
+            // mask: 00010000
+            // op: 000100 | r12-13 
+            maps[StmEnum.Add.ordinal][ForeignLong.ordinal][Local.ordinal] = new Data(
+                    4, 2, ForeignLong, Local);
+            // mask: 11110000
+            // op: 1111 | r10-13 
+            maps[StmEnum.Add.ordinal][ForeignLong.ordinal][ForeignShort.ordinal] = new Data(
+                    15, 4, ForeignLong, ForeignShort);
+            // mask: 00011000
+            // op: 000110 | r12-13 
+            maps[StmEnum.Add.ordinal][ForeignLong.ordinal][ForeignLong.ordinal] = new Data(
+                    6, 2, ForeignLong, ForeignLong);
+
+            // mask: 00000100
+            // op: 000001 | r12-13 
+            maps[StmEnum.Remove.ordinal][Local.ordinal][Local.ordinal] = new Data(
+                    1, 2, Local, Local);
+            // mask: 01100001
+               // op: 01100001
+               // p1
+               // o for index
+               // r0-7
+               // p2 | ri 8-13
+            maps[StmEnum.Remove.ordinal][Local.ordinal][ForeignShort.ordinal] = new Data(
+                    49, 0, Local, ForeignShort);
+            // mask: 00001100
+            // op: 000011 | r12-13 
+            maps[StmEnum.Remove.ordinal][Local.ordinal][ForeignLong.ordinal] = new Data(
+                    3, 2, Local, ForeignLong);
+            // mask: 00100000
+            // op: 0010 | r10-13 
+            maps[StmEnum.Remove.ordinal][ForeignShort.ordinal][Local.ordinal] = new Data(
+                    2, 4, ForeignShort, Local);
+            // mask: 10000000
+            // op: 10 | r8-13
+            maps[StmEnum.Remove.ordinal][ForeignShort.ordinal][ForeignShort.ordinal] = new Data(
+                    2, 6, ForeignShort, ForeignShort);
+            // mask: 00110010
+            // op: 00110010
+            maps[StmEnum.Remove.ordinal][ForeignShort.ordinal][ForeignLong.ordinal] = new Data(
+                    50, 0, ForeignShort, ForeignLong);
+            // mask: 00010100
+            // op: 000101 | r12-13 
+            maps[StmEnum.Remove.ordinal][ForeignLong.ordinal][Local.ordinal] = new Data(
+                    5, 2, ForeignLong, Local);
+            // mask: 00110011
+            // op: 00110011
+            maps[StmEnum.Remove.ordinal][ForeignLong.ordinal][ForeignShort.ordinal] = new Data(
+                    51, 0, ForeignLong, ForeignShort);
+            // mask: 00011100
+            // op: 000111 | r12-13 
+            maps[StmEnum.Remove.ordinal][ForeignLong.ordinal][ForeignLong.ordinal] = new Data(
+                    7, 2, ForeignLong, ForeignLong);
+        }
+
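+        // Worked example, per the table above: an Add with local predicate
+        // and object uses Data(0, 2): two resource-index bits (r12-13) ride
+        // in the op byte, the low bytes of p, o and r follow, and the popped
+        // high bits (p2, o2, r8-11 = 16 bits) are repacked by
+        // ClusterChange.addStm into data.bytes = 2 trailing bytes, for a
+        // total of 6 bytes per fully local statement.
+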
+        static Data getData(StmEnum s, ClusterEnum a, ClusterEnum b) {
+            return maps[s.ordinal][a.ordinal][b.ordinal];
+            // return maps.get(s).get(a).get(b);
+        }
+    }
+
+    enum OpEnum {
+        Create((byte) 52), Set((byte) 53), SetShort((byte) 56), Delete(
+                (byte) 54), Modify((byte) 55);
+        OpEnum(byte mask) {
+            this.mask = mask;
+        }
+
+        public byte getOrMask() {
+            return mask;
+        }
+
+        private byte mask;
+    }
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterSupport2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterSupport2.java
new file mode 100644 (file)
index 0000000..7cd007a
--- /dev/null
@@ -0,0 +1,340 @@
+package org.simantics.acorn.internal;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.db.Session;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.impl.IClusterTable;
+import org.simantics.db.service.ClusterUID;
+
+import gnu.trove.map.hash.TIntObjectHashMap;
+
+public class ClusterSupport2 implements ClusterSupport, IClusterTable {
+       
+       final private static boolean DEBUG = false;
+
+       public ClusterManager impl;
+       
+       public TIntObjectHashMap<ClusterUID> uidCache = new TIntObjectHashMap<ClusterUID>(); 
+       
+       public ClusterSupport2(ClusterManager impl) {
+               this.impl = impl;
+       }
+       
+       @Override
+       public int createClusterKeyByClusterUID(ClusterUID clusterUID, long clusterId) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) {
+               try {
+                       return impl.getClusterByClusterUIDOrMake(clusterUID);
+               } catch (DatabaseException e) {
+                       e.printStackTrace();
+                       return null;
+               }
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterId(long clusterId) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterKey(int clusterKey) {
+               throw new UnsupportedOperationException();
+       }
+
+       ReentrantReadWriteLock uidLock = new ReentrantReadWriteLock();
+       ReadLock uidRead = uidLock.readLock();
+       WriteLock uidWrite = uidLock.writeLock();
+       
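+       // Caches cluster key (resourceKey >> 12) -> ClusterUID with a
+       // read-lock fast path; misses are re-checked under the write lock
+       // before being resolved via the ClusterManager.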
+       @Override
+       public ClusterUID getClusterUIDByResourceKey(int resourceKey) throws DatabaseException {
+
+               ClusterUID cuid;
+               
+               uidRead.lock();
+               cuid = uidCache.get(resourceKey >> 12);
+               uidRead.unlock();
+               if(cuid != null) return cuid;
+               uidWrite.lock();
+               cuid = uidCache.get(resourceKey >> 12);
+               if(cuid == null) {
+                       cuid = impl.getClusterUIDByResourceKeyWithoutMutex(resourceKey); 
+                       uidCache.put(resourceKey >> 12, cuid);
+               }
+               uidWrite.unlock();
+               
+               return cuid;
+               
+       }
+       
+       @Override
+       public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) {
+               return impl.getClusterKeyByClusterUIDOrMakeWithoutMutex(clusterUID);
+       }
+       
+    @Override
+    public int getClusterKeyByClusterUIDOrMake(long id1, long id2) {
+               throw new UnsupportedOperationException();
+    }
+
+       @Override
+       public ClusterBase getClusterByResourceKey(int resourceKey) {
+               throw new UnsupportedOperationException();
+//             return impl.getClusterByResourceKey(resourceKey);
+       }
+
+       @Override
+       public long getClusterIdOrCreate(ClusterUID clusterUID) {
+               return impl.getClusterIdOrCreate(clusterUID);
+       }
+
+       @Override
+       public void addStatement(Object cluster) {
+               // nop
+       }
+
+       @Override
+       public void cancelStatement(Object cluster) {
+               // nop
+       }
+
+       @Override
+       public void removeStatement(Object cluster) {
+               // nop
+       }
+
+       @Override
+       public void removeValue(Object cluster) {
+               // nop
+       }
+
+       @Override
+       public void setImmutable(Object cluster, boolean immutable) {
+               // nop
+       }
+
+       @Override
+       public void setDeleted(Object cluster, boolean deleted) {
+		// nop
+       }
+
+       
+       
+       @Override
+       public void cancelValue(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void setValue(Object cluster, long clusterId, byte[] bytes,
+                       int length) {
+               // nop
+       }
+
+       @Override
+       public void modiValue(Object _cluster, long clusterId, long voffset,
+                       int length, byte[] bytes, int offset) {
+               // nop
+       }
+
+       @Override
+       public void createResource(Object cluster, short resourceIndex,
+                       long clusterId) {
+               // No op
+       }
+
+       @Override
+       public void addStatementIndex(Object cluster, int resourceKey,
+                       ClusterUID clusterUID, byte op) {
+               // No op
+       }
+
+       @Override
+       public void setStreamOff(boolean setOff) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public boolean getStreamOff() {
+               return true;
+       }
+
+       
+    private static class ResourceSegment {
+        public long   valueSize;
+
+        public byte[] bytes;
+
+        ResourceSegment(long valueSize, byte[] bytes) {
+            this.valueSize = valueSize;
+            this.bytes = bytes;
+        }
+    }
+
+    public ResourceSegment getResourceSegment(int resourceIndex, ClusterUID clusterUID, long offset, short size)
+    throws DatabaseException {
+        if (DEBUG)
+            System.out.println("DEBUG: getResourceSegment ri=" + resourceIndex + " cid=" + clusterUID + " offset=" + offset + " size=" + size);
+        
+        org.simantics.db.Database.Session.ResourceSegment t = impl.getResourceSegment(clusterUID.asBytes(), resourceIndex, offset, size);
+        return new ResourceSegment(t.getValueSize(), t.getSegment());
+        
+    }
+    
+    protected byte[] getValueBig(ClusterBase cluster, int resourceIndex, int offset, int length) throws DatabaseException {
+    
+       assert(offset == 0);
+       assert(length == 0);
+       
+       ClusterUID clusterUID = cluster.clusterUID;
+       
+       return impl.getResourceFile(clusterUID.asBytes(), resourceIndex);
+       
+    }
+
+    protected InputStream getValueStreamBig(ClusterBase cluster, final int resourceIndex, int offset, int length) throws DatabaseException {
+
+       final ClusterUID clusterUID = cluster.clusterUID;
+       
+       if (DEBUG)
+               System.out.println("DEBUG: getResourceFile ri=" + resourceIndex + " cid=" + clusterUID + " off=" + offset + " len=" + length);
+       final int IMAX = 0xFFFF;
+       short slen = (short)Math.min(length != 0 ? length : IMAX, IMAX);
+       final ResourceSegment s = getResourceSegment(resourceIndex, clusterUID, offset, slen);
+       if (s.valueSize < 0)
+               throw new DatabaseException("Failed to get value for resource index=" + resourceIndex +
+                               " cluster=" + clusterUID + " off=" + offset + " len=" + length + " (1).");
+       int ilen = (int)slen & 0xFFFF;
+       assert(s.bytes.length <= ilen);
+       if (0 == length) {
+               if (s.valueSize > Integer.MAX_VALUE)
+                       throw new DatabaseException("Failed to get value for resource index=" + resourceIndex +
+                                       " cluster=" + clusterUID + " off=" + offset + " len=" + length +
+                                       ". Value size=" + s.valueSize + " (2).");
+               length = (int)s.valueSize;
+       }
+       long rSize = s.valueSize - offset;
+       if (rSize < length)
+               throw new DatabaseException("Failed to get value for resource index=" + resourceIndex +
+                               " cluster=" + clusterUID + " off=" + offset + " len=" + length +
+                               ". Value size=" + s.valueSize + " (3).");
+       else if (length <= IMAX)
+               return new ByteArrayInputStream(s.bytes);
+
+       final int finalLength = length;
+
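+	// Values larger than one segment are streamed: each read() returns the
+	// next byte and fetches the following segment on demand once the current
+	// one is exhausted.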
+       return new InputStream() {
+
+               int left = finalLength;
+               long valueOffset = 0;
+               int offset = 0;
+               ResourceSegment _s = s;
+
+               @Override
+               public int read() throws IOException {
+
+			if(left <= 0) return -1; // end of stream, per the InputStream contract
+
+                       if(offset == _s.bytes.length) {
+                               short slen = (short)Math.min(left, IMAX);
+                               valueOffset += _s.bytes.length;
+                               try {
+                                       _s = getResourceSegment(resourceIndex, clusterUID, valueOffset, slen);
+                               } catch (DatabaseException e) {
+                                       throw new IOException(e);
+                               }
+                               offset = 0;
+                       }
+
+                       left--;
+                       int result = _s.bytes[offset++];
+                       if(result < 0) result += 256;
+                       return result;
+
+               }
+
+       };
+
+    }
+       
+       @Override
+       public InputStream getValueStreamEx(int resourceIndex, long clusterId)
+                       throws DatabaseException {
+               ClusterBase cluster = impl.getClusterByClusterUIDOrMakeProxy(ClusterUID.make(0, clusterId));
+               return getValueStreamBig(cluster, resourceIndex, 0, 0);
+       }
+
+       @Override
+       public byte[] getValueEx(int resourceIndex, long clusterId)
+                       throws DatabaseException {
+               ClusterBase cluster = impl.getClusterByClusterUIDOrMakeProxy(ClusterUID.make(0, clusterId));
+               return getValueBig(cluster, resourceIndex, 0, 0);
+       }
+
+       @Override
+       public byte[] getValueEx(int resourceIndex, long clusterId, long voffset,
+                       int length) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public long getValueSizeEx(int resourceIndex, long clusterId)
+                       throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public int wait4RequestsLess(int limit) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public Session getSession() {
+               return null;
+       }
+
+       @Override
+       public IClusterTable getClusterTable() {
+               return this;
+       }
+
+	@SuppressWarnings("unchecked")
+	@Override
+	public <T extends ClusterI> T getClusterByClusterUIDOrMakeProxy(ClusterUID clusterUID) {
+               try {
+            return (T)impl.getClusterByClusterUIDOrMakeProxy(clusterUID);
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            return null;
+        }
+       }
+
+       @Override
+       public <T extends ClusterI> T getClusterProxyByResourceKey(int resourceKey) {
+               try {
+            return impl.getClusterProxyByResourceKey(resourceKey);
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            return null;
+        }
+       }
+
+       @Override
+       public int getClusterKeyByUID(long id1, long id2) throws DatabaseException {
+               return impl.getClusterKeyByUID(id1, id2);
+       }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor.java
new file mode 100644 (file)
index 0000000..0044d72
--- /dev/null
@@ -0,0 +1,86 @@
+package org.simantics.acorn.internal;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.cluster.ClusterImpl;
+import org.simantics.acorn.lru.CachingClusterSupport;
+import org.simantics.acorn.lru.ClusterUpdateOperation;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.service.ClusterUID;
+
+public class ClusterUpdateProcessor extends ClusterUpdateProcessorBase {
+       
+       final ClusterSupport support;
+       final ClusterUpdateOperation info;
+       private ClusterImpl cluster;
+       
+       public ClusterUpdateProcessor(ClusterManager client, ClusterSupport support, byte[] operations, ClusterUpdateOperation info) throws DatabaseException {
+               super(client, operations);
+               this.support = support;
+               this.info = info;
+       }
+       
+       @Override
+       void create() throws DatabaseException {
+               cluster.createResource(support);
+       }
+
+       @Override
+       void delete(int ri) throws DatabaseException {
+               
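+		// Record the previous value so that the operation can be undone later.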
+               boolean oldValueEx = cluster.isValueEx(ri);
+               byte[] old = cluster.getValue(ri, support);
+               if(old != null) cluster.removeValue(ri, support);
+               info.ccs.oldValueEx.add(oldValueEx ? (byte)1 : 0);
+               info.ccs.oldValues.add(old);
+               
+       }
+
+       @Override
+       void modify(int resourceKey, long offset, int size, byte[] bytes, int pos)
+                       throws DatabaseException {
+               
+               cluster = (ClusterImpl)cluster.modiValueEx(resourceKey, offset, size, bytes, pos, support);
+               manager.modiFileEx(cluster.getClusterUID(), resourceKey, offset, size, bytes, pos, support);
+               
+       }
+
+       @Override
+       void set(int resourceKey, byte[] bytes, int length)
+                       throws DatabaseException {
+               
+               byte[] old = cluster.getValue(resourceKey, support);
+               boolean oldValueEx = cluster.isValueEx(resourceKey);
+		cluster = (ClusterImpl)cluster.setValue(resourceKey, bytes, length, support);
+               info.ccs.oldValueEx.add(oldValueEx ? (byte)1 : 0);
+               info.ccs.oldValues.add(old);
+               
+       }
+
+       @Override
+       void claim(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid)
+                       throws DatabaseException {
+               
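+		// Record in the statement mask whether the cluster actually changed,
+		// so that undo knows whether this statement needs to be reverted.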
+               ClusterImpl c = (ClusterImpl)cluster.addRelation(resourceKey, puid, predicateKey, ouid, objectKey, support);
+               if(c != null) cluster = c;
+               info.ccs.statementMask.add(c != null ? (byte)1 : 0);
+               
+       }
+
+       @Override
+       void deny(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid)
+                       throws DatabaseException {
+               
+               boolean modified = cluster.removeRelation(resourceKey, predicateKey, objectKey, support);
+               info.ccs.statementMask.add(modified ? (byte)1 : 0);
+
+       }
+
+       public ClusterImpl process(ClusterImpl cluster) {
+               this.cluster = cluster;
+               process();
+               info.finish();
+               return this.cluster;
+       }
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor2.java
new file mode 100644 (file)
index 0000000..7ce8673
--- /dev/null
@@ -0,0 +1,30 @@
+package org.simantics.acorn.internal;
+
+import org.simantics.acorn.cluster.ClusterImpl;
+import org.simantics.acorn.lru.ClusterUpdateOperation;
+import org.simantics.db.impl.ClusterSupport;
+
+public class ClusterUpdateProcessor2 extends ClusterUpdateProcessorBase2 {
+
+       final ClusterSupport support;
+       final ClusterUpdateOperation info;
+       private ClusterImpl cluster;
+
+       public ClusterUpdateProcessor2(ClusterSupport support, byte[] operations, ClusterUpdateOperation info) {
+               super(operations);
+               this.support = support;
+               this.info = info;
+       }
+
+       public void process(ClusterImpl cluster) {
+               this.cluster = cluster;
+               process();
+               info.finish();
+       }
+
+       @Override
+       void setImmutable(boolean value) {
+               cluster.setImmutable(value, support);
+       }
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase.java
new file mode 100644 (file)
index 0000000..e0e733c
--- /dev/null
@@ -0,0 +1,475 @@
+package org.simantics.acorn.internal;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.internal.ClusterStream.ClusterEnum;
+import org.simantics.acorn.internal.ClusterStream.Data;
+import org.simantics.acorn.internal.ClusterStream.StmEnum;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.service.Bytes;
+import org.simantics.db.service.ClusterUID;
+
+abstract public class ClusterUpdateProcessorBase {
+       
+       public final static boolean DEBUG = false;
+
+       final protected ClusterManager manager;
+       final public byte[] bytes;
+       private int pos = 0;
+       final private int len;
+       final private ClusterUID uid;
+       final private int clusterKey;
+       final public int version;
+       
+       final Map<ClusterUID, Integer> clusterKeyCache = new HashMap<ClusterUID, Integer>();
+       
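+	// Caches the base resource key of each cluster; a full resource key is
+	// the cluster's base key plus the 12-bit resource index.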
+       public int getResourceKey(ClusterUID uid, int index) {
+               Integer match = clusterKeyCache.get(uid);
+               if(match != null) return match+index;
+               int key = manager.getResourceKeyWitoutMutex(uid, 0);
+               clusterKeyCache.put(uid, key);
+               return key+index;
+       }
+       
+       
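+	// Update block layout: bytes 0-3 payload length (LE4), 4-7 format version
+	// (LE4), 8-23 target ClusterUID as two LE8 longs; the operation stream
+	// starts at offset 24.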
+       public ClusterUpdateProcessorBase(ClusterManager client, byte[] operations) throws DatabaseException {
+               this.manager = client;
+               this.bytes = operations;
+		this.len = Bytes.readLE4(bytes, 0)+4; // stored length apparently excludes this 4-byte length field
+               version = Bytes.readLE4(bytes, 4);
+               long cuid1 = Bytes.readLE8(bytes, 8);
+               long cuid2 = Bytes.readLE8(bytes, 16);
+               uid = ClusterUID.make(cuid1, cuid2);
+               pos = 24;
+               client.clusterLRU.acquireMutex();
+               try {
+                       clusterKey = client.clusterLRU.getClusterKeyByUID(cuid1, cuid2) << 12;
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       client.clusterLRU.releaseMutex();
+               }
+       }
+       
+       public ClusterUID getClusterUID() {
+               return uid;
+       }
+       
+       private void processCreate() {
+               int r = Bytes.readLE2(bytes, pos);
+               pos+=2;
+               if(DEBUG) System.err.println("DEBUG: New ri=" + r + " offset=" + (pos-3-24));
+               try {
+                       create();
+               } catch (DatabaseException e) {
+                       e.printStackTrace();
+               }
+               
+       }
+       
+       private void processDelete() {
+               
+               int ri = Bytes.readLE2(bytes, pos);
+               pos += 2;
+               
+               if(DEBUG) System.err.println("DEBUG: Delete " + ri);
+               
+               try {
+                       delete(ri);
+               } catch (DatabaseException e) {
+                       e.printStackTrace();
+               }
+               
+       }
+
+       private void processModify(int op) {
+               
+               int ri = Bytes.readLE2(bytes, pos);
+               pos+=2;
+               long offset = Bytes.readLE7(bytes, pos);
+               pos+=7;
+               int size = Bytes.readLE2(bytes, pos);
+               pos+=2;
+               
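+		// The top two bits of the 16-bit resource field extend the 7-byte
+		// value offset; the low 14 bits are the resource index.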
+               offset += (ri>>14) << 56;
+               ri = ri & 0x3FFF;
+               
+               if(size < 0)
+                       throw new IllegalStateException();
+               if(ri < 1)
+                       throw new IllegalStateException();
+               if(ri > 4095)
+                       throw new IllegalStateException();
+               
+		if(DEBUG) System.err.println("DEBUG: Modify " + ri + " " + offset + " " + size + " offset=" + (pos-1-24) + " " + Arrays.toString(Arrays.copyOfRange(bytes, pos, pos+size)));
+
+               try {
+                       modify(clusterKey + ri, offset, size, bytes, pos);
+               } catch (DatabaseException e) {
+                       e.printStackTrace();
+               }
+
+               pos += size;
+               
+       }
+
+       private void processSet(int op) {
+               
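+		// A single LE4 field packs the value length (high 18 bits) and the
+		// resource index (low 14 bits).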
+               int s = Bytes.readLE4(bytes, pos);
+               int length = (s >> 14);
+               if(length < 1)
+                       throw new IllegalStateException();
+               int r = s & 0x3FFF;
+               
+               pos += 4;
+               System.arraycopy(bytes, pos, valueBuffer, 0, length);
+               pos += length;
+
+               if(DEBUG) System.err.println("DEBUG: Set " + r + " " + length + " offset=" + (pos-1-24) + " " + Arrays.toString(Arrays.copyOf(valueBuffer,length)));
+
+               try {
+                       set(clusterKey+r, valueBuffer, length);
+               } catch (DatabaseException e) {
+                       e.printStackTrace();
+               }
+               
+       }
+
+       byte[] valueBuffer = new byte[65536];
+       
+       private void processSetShort(int op) {
+               
+               int s = Bytes.readLE2(bytes, pos);
+               int length = ((op&7)<<2) + (s >> 14);
+               if(length < 1)
+                       throw new IllegalStateException();
+               if(length > 31)
+                       throw new IllegalStateException();
+               int r = s & 0x3FFF;
+               
+		if(DEBUG) System.err.println("DEBUG: SetShort " + r + " " + length + " offset=" + (pos-1-24) + " " + Arrays.toString(Arrays.copyOfRange(bytes, pos+2, pos+2+length)));
+               pos += 2;
+
+               System.arraycopy(bytes, pos, valueBuffer, 0, length);
+               pos += length;
+               
+               try {
+                       set(clusterKey+r, valueBuffer, length);
+               } catch (DatabaseException e) {
+                       e.printStackTrace();
+               }
+               
+       }
+
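+	// A statement resource is encoded in one of three ways: ForeignShort is a
+	// one-byte reference to an already-seen foreign cluster, Local is a
+	// one-byte low index within this cluster, and ForeignLong carries a full
+	// 16-byte ClusterUID followed by a one-byte index.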
+       private void processStatementResource(ClusterEnum enu, int pOrO) {
+               if(ClusterEnum.ForeignShort == enu) {
+                       int fs = bytes[pos++]&0xff;
+                       foreignRefs[pOrO] = fs;
+               } else if(ClusterEnum.Local == enu) {
+                       int lo = bytes[pos++]&0xff;
+                       lows[pOrO] = lo;
+               } else {
+                       long l1 = Bytes.readLE8(bytes, pos);
+                       pos += 8;
+                       long l2 = Bytes.readLE8(bytes, pos);
+                       pos += 8;
+                       ClusterUID cuid = ClusterUID.make(l1, l2);
+                       foreignClusters[foreignPos] = cuid;
+                       int lo = bytes[pos++]&0xff;
+                       foreignIndices[foreignPos] = lo;
+                       foreignRefs[pOrO] = foreignPos;
+                       foreignPos++;
+                       lows[pOrO] = lo;
+               }
+       }
+       
+       ClusterUID[] foreignClusters = new ClusterUID[256];
+       int[] foreignIndices = new int[256];
+       int foreignPos = 0;
+       int lows[] = new int[2];
+       int foreignRefs[] = new int[2];
+       
+       private void processStatement(int op, StmEnum stmEnum, ClusterEnum p, ClusterEnum o) {
+
+               int curPos = pos-1-24;
+               
+               processStatementResource(p, 0);
+               processStatementResource(o, 1);
+
+               int ri = bytes[pos++]&0xff;
+               int pi = 0;
+               int oi = 0;
+               
+               ClusterUID puid = uid;
+               ClusterUID ouid = puid;
+               
+               if(ClusterEnum.ForeignShort == p && ClusterEnum.ForeignShort == o) {
+                       ri |= (op&0x3F) << 8;
+               } else {
+                       Data data = ClusterEnum.getData(stmEnum, p, o);
+			// data.bytes tells how many extra bytes carry the high index bits
+                       if(data.bytes == 0) {
+                               ri = ri | ((op&0x3F)<<8); 
+                       } else {
+                               int extra = 0;
+                               int opBits = data.bits;
+                               int extraBits = 6-opBits;
+                               if(data.bytes == 1) {
+                                       extra = bytes[pos++]&0xff;
+                                       int high = extra >> extraBits;
+                                       if(ClusterEnum.ForeignShort == p) {
+                                               oi = lows[1] + (high<<8);
+                                       } else {
+                                               pi = lows[0] + (high<<8);
+                                       }
+                               } else {
+                                       extra = Bytes.readLE2(bytes, pos);
+                                       pos += 2;
+                                       int high1 = (extra >> extraBits)&((1<<6)-1);
+                                       int high2 = (extra >> (extraBits+6))&((1<<6)-1);
+                                       if(ClusterEnum.ForeignShort == p) {
+                                               oi = lows[1] + (high1<<8);
+                                       } else {
+                                               pi = lows[0] + (high1<<8);
+                                               oi = lows[1] + (high2<<8);
+                                       }
+                               }
+                               ri = ri | ((extra&((1<<extraBits)-1))<<8) | ((op&((1<<opBits)-1))<<(8+extraBits)); 
+                       }
+               }
+
+               // Set foreigns
+               if(ClusterEnum.ForeignLong == p) {
+                       int ref = foreignRefs[0];
+                       foreignIndices[ref] = pi;
+                       puid = foreignClusters[ref];
+               }
+               if(ClusterEnum.ForeignLong == o) {
+                       int ref = foreignRefs[1];
+                       foreignIndices[ref] = oi;
+                       ouid = foreignClusters[ref]; 
+               }
+               // Get foreigns
+               if(ClusterEnum.ForeignShort == p) {
+                       int ref = foreignRefs[0];
+                       pi = foreignIndices[ref];
+                       puid = foreignClusters[ref]; 
+               }
+               if(ClusterEnum.ForeignShort == o) {
+                       int ref = foreignRefs[1];
+                       oi = foreignIndices[ref];
+                       ouid = foreignClusters[ref]; 
+               }
+
+               if(ri < 1)
+                       throw new IllegalStateException();
+               if(pi < 1)
+                       throw new IllegalStateException();
+               if(oi < 1)
+                       throw new IllegalStateException();
+               if(ri > 4095)
+                       throw new IllegalStateException();
+               if(pi > 4095)
+                       throw new IllegalStateException();
+               if(oi > 4095)
+                       throw new IllegalStateException();
+
+               if(StmEnum.Add == stmEnum) {
+                       
+                       if(DEBUG)
+               System.err.println("DEBUG: ClusterChange " + uid + ": Add ri=" + ri + " pi=" + pi + " oi=" + oi + " pc=" + puid + " oc=" + ouid + " offset=" + curPos + " " + p.ordinal + " " + o.ordinal);
+            
+                       int predicateKey = getResourceKey(puid, pi);
+                       int objectKey = getResourceKey(ouid, oi);       
+                       try {
+                               claim(clusterKey+ri, predicateKey, objectKey, puid, ouid);
+                       } catch (DatabaseException e) {
+                               e.printStackTrace();
+                       }
+                       
+               } else {
+                       
+                       if(DEBUG)
+               System.err.println("DEBUG: ClusterChange " + uid + ": Rem ri=" + ri + " pi=" + pi + " oi=" + oi + " pc=" + puid + " oc=" + ouid + " offset=" + curPos + " " + p.ordinal + " " + o.ordinal);
+            
+                       int predicateKey = getResourceKey(puid, pi);
+                       int objectKey = getResourceKey(ouid, oi);
+                       try {
+                               deny(clusterKey+ri, predicateKey, objectKey, puid, ouid);
+                       } catch (DatabaseException e) {
+                               e.printStackTrace();
+                       }
+                       
+               }
+               
+       }
+
+       public void process() {
+               
+               foreignPos = 0;
+
+               if(DEBUG) System.err.println("DEBUG: process " + uid + " " + len);
+               
+               // op resolution for statement operation:
+               
+               // 2 first bits
+               // op: 01 | r8-13
+               // op: 10 | r8-13
+
+               // 3 first bits (000)
+               // op: 000000 | r12-13
+               // op: 000001 | r12-13
+               // op: 000010 | r12-13 
+               // op: 000011 | r12-13
+               // op: 000100 | r12-13 
+               // op: 000101 | r12-13 
+               // op: 000110 | r12-13 
+               // op: 000111 | r12-13 
+
+               // 4 first bits
+               // op: 1100 | r10-13
+               // op: 1101 | r10-13 
+               // op: 1110 | r10-13 
+               // op: 1111 | r10-13
+               // op: 0010 | r10-13
+               
+               // 6 bits
+               // op: 00110001 = 49
+               // op: 00110010 = 50
+               // op: 00110011 = 51
+               // other: 0011xxxx
+               
+               while(pos < len) {
+               
+                       int op = bytes[pos++]&0xff;
+                       
+			// ops 49..55 share the prefix 0011; other ops are decoded by their leading bits below
+                       switch(op) {
+                       
+                       case 49:
+                               processStatement(op, StmEnum.Remove, ClusterEnum.Local, ClusterEnum.ForeignShort);
+                               break;
+                       case 50:
+                               processStatement(op, StmEnum.Remove, ClusterEnum.ForeignShort, ClusterEnum.ForeignLong);
+                               break;
+                       case 51:
+                               processStatement(op, StmEnum.Remove, ClusterEnum.ForeignLong, ClusterEnum.ForeignShort);
+                               break;
+                       // 52 = 32+16+4 = 00110100
+                       case 52:
+                               processCreate();
+                               break;
+                       // 53 = 32+16+4+1 = 00110101
+                       case 53:
+                               processSet(op);
+                               break;
+                       // 54 = 32+16+4+2 = 00110110
+                       case 54:
+                               processDelete();
+                               break;
+                       // 55 = 32+16+4+2+1 = 00110111
+                       case 55:
+                               processModify(op);
+                               break;
+                       default:
+                               
+                               int bits6 = ((int)op)&0xC0;
+                               switch(bits6) {
+                               
+                               case 0x40:
+                                       processStatement(op, StmEnum.Add, ClusterEnum.ForeignShort, ClusterEnum.ForeignShort);
+                                       break;
+                               case 0x80:
+                                       processStatement(op, StmEnum.Remove, ClusterEnum.ForeignShort, ClusterEnum.ForeignShort);
+                                       break;
+                               default:
+
+                                       int bits5 = ((int)op)&0xE0;
+                                       if(bits5 == 0) {
+
+                                               int bits2 = (((int)op)&0xFC) >> 2;      
+                                               
+                                               // 3 top bits are 0
+                                               // 6 bits of op
+                                               
+                                               switch(bits2) {
+
+                                               case 0:
+                                                       processStatement(op, StmEnum.Add, ClusterEnum.Local, ClusterEnum.Local);
+                                                       break;
+                                               case 1:
+                                                       processStatement(op, StmEnum.Remove, ClusterEnum.Local, ClusterEnum.Local);
+                                                       break;
+                                               case 2:
+                                                       processStatement(op, StmEnum.Add, ClusterEnum.Local, ClusterEnum.ForeignLong);
+                                                       break;
+                                               case 3:
+                                                       processStatement(op, StmEnum.Remove, ClusterEnum.Local, ClusterEnum.ForeignLong);
+                                                       break;
+                                               case 4:
+                                                       processStatement(op, StmEnum.Add, ClusterEnum.ForeignLong, ClusterEnum.Local);
+                                                       break;
+                                               case 5:
+                                                       processStatement(op, StmEnum.Remove, ClusterEnum.ForeignLong, ClusterEnum.Local);
+                                                       break;
+                                               case 6:
+                                                       processStatement(op, StmEnum.Add, ClusterEnum.ForeignLong, ClusterEnum.ForeignLong);
+                                                       break;
+                                               case 7:
+                                                       processStatement(op, StmEnum.Remove, ClusterEnum.ForeignLong, ClusterEnum.ForeignLong);
+                                                       break;
+                                               
+                                               }
+
+                                       } else {
+
+                                               // 4 top bits of op
+                                               // 4 low bits of payload
+
+                                               int bits4 = (((int)op)&0xF0)>>4;
+                                               switch(bits4) {
+                                               case 0b1100:
+                                                       processStatement(op, StmEnum.Add, ClusterEnum.Local, ClusterEnum.ForeignShort);
+                                                       break;
+                                               case 0b1101:
+                                                       processStatement(op, StmEnum.Add, ClusterEnum.ForeignShort, ClusterEnum.Local);
+                                                       break;
+                                               case 0b1110:
+                                                       processStatement(op, StmEnum.Add, ClusterEnum.ForeignShort, ClusterEnum.ForeignLong);
+                                                       break;
+                                               case 0b1111:
+                                                       processStatement(op, StmEnum.Add, ClusterEnum.ForeignLong, ClusterEnum.ForeignShort);
+                                                       break;
+                                               case 0b0010:
+                                                       processStatement(op, StmEnum.Remove, ClusterEnum.ForeignShort, ClusterEnum.Local);
+                                                       break;
+                                               case 0b0011:
+                                                       int bits3 = (((int)op)&0xF8)>>3;
+                                                       if(bits3 == 7)
+                                                               processSetShort(op);
+                                                       break;
+                                               }
+
+                                       }
+                               
+                               }
+                               
+                       }
+                       
+               }
+               
+       }
+       
+       
+       abstract void create() throws DatabaseException;
+       abstract void delete(int resourceIndex) throws DatabaseException;
+       abstract void modify(int resourceKey, long offset, int size, byte[] bytes, int pos) throws DatabaseException;
+       abstract void set(int resourceKey, byte[] bytes, int length) throws DatabaseException;
+       
+       abstract void claim(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid) throws DatabaseException;
+       abstract void deny(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid) throws DatabaseException;
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase2.java
new file mode 100644 (file)
index 0000000..e821b46
--- /dev/null
@@ -0,0 +1,63 @@
+package org.simantics.acorn.internal;
+
+import org.simantics.db.service.Bytes;
+import org.simantics.db.service.ClusterUID;
+
+public abstract class ClusterUpdateProcessorBase2 {
+
+       final private byte[] bytes;
+       private int pos = 0;
+       final private int len;
+       final private ClusterUID uid;
+       
+       public ClusterUpdateProcessorBase2(byte[] operations) {
+               this.bytes = operations;
+		this.len = Bytes.readLE4(bytes, 0) + 4; // stored length apparently excludes this 4-byte length field
+               int version = Bytes.readLE4(bytes, 4);
+               assert(version == ClusterChange2.VERSION);
+               long cuid1 = Bytes.readLE8(bytes, 8);
+               long cuid2 = Bytes.readLE8(bytes, 16);
+               pos = 24;
+               uid = ClusterUID.make(cuid1, cuid2);
+       }
+       
+       public ClusterUID getClusterUID() {
+               return uid;
+       }
+
+       private void processSetImmutable(int op) {
+               int value = bytes[pos++]&0xff;
+               setImmutable(value > 0);
+       }
+
+	private void processUndoValue(int op) {
+		Bytes.readLE4(bytes, pos); // the value is read and discarded; only the stream position advances
+		pos+=4;
+	}
+
+       public void process() {
+               
+               while(pos < len) {
+               
+                       int op = bytes[pos++]&0xff;
+                       
+                       switch(op) {
+                       
+                       case ClusterChange2.SET_IMMUTABLE_OPERATION:
+                               processSetImmutable(op);
+                               break;
+                       case ClusterChange2.UNDO_VALUE_OPERATION:
+                               processUndoValue(op);
+                               break;
+                       default:
+                               throw new IllegalStateException();
+                               
+                       }
+                       
+               }
+               
+       }
+       
+       abstract void setImmutable(boolean value);
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/DebugPolicy.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/DebugPolicy.java
new file mode 100644 (file)
index 0000000..d694abe
--- /dev/null
@@ -0,0 +1,19 @@
+package org.simantics.acorn.internal;
+
+
+/**
+ * @author Antti Villberg
+ */
+public final class DebugPolicy {
+
+    public static final boolean  REPORT_RESOURCE_ID_ALLOCATION = false;
+    public static final boolean  REPORT_CLUSTER_ID_ALLOCATION = false;
+    public static final boolean  REPORT_CLUSTER_EVENTS = false;
+    public static final boolean  REPORT_CLUSTER_LOADING = false;
+    public static final boolean  REPORT_CLUSTER_LOADING_STACKS = false;
+    public static final boolean  REPORT_CLUSTER_STREAM = false;
+    public static final boolean  CLUSTER_COLLECTION = false;
+    public static final boolean  LOG_SERVER_EVENTS = false;
+    public static final boolean  SHOW_SERVER_EVENTS = false; // Requires LOG_SERVER_EVENTS to be true.
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/UndoClusterUpdateProcessor.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/UndoClusterUpdateProcessor.java
new file mode 100644 (file)
index 0000000..d545e51
--- /dev/null
@@ -0,0 +1,112 @@
+package org.simantics.acorn.internal;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.lru.ClusterChangeSet;
+import org.simantics.acorn.lru.ClusterStreamChunk;
+import org.simantics.acorn.lru.ClusterChangeSet.Entry;
+import org.simantics.acorn.lru.ClusterChangeSet.Type;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.service.ClusterUID;
+
+public class UndoClusterUpdateProcessor extends ClusterUpdateProcessorBase {
+       
+       public final static boolean DEBUG = false;
+
+       final private ClusterChangeSet ccs;
+       
+       private int oldValuesIndex = 0;
+       private int statementMaskIndex = 0;
+       
+       final public List<Entry> entries = new ArrayList<Entry>();
+       
+       public UndoClusterUpdateProcessor(ClusterManager client, ClusterStreamChunk chunk, ClusterChangeSet ccs) throws DatabaseException {
+               super(client, readOperation(client, chunk, ccs));
+               this.ccs = ccs;
+       }
+       
+	private static byte[] readOperation(ClusterManager manager, ClusterStreamChunk chunk, ClusterChangeSet ccs) {
+		chunk.makeResident();
+		return chunk.getOperation(ccs.chunkOffset);
+	}
+       
+       @Override
+       void create() throws DatabaseException {
+       }
+
+       @Override
+       void delete(int ri) throws DatabaseException {
+               
+               byte[] old = ccs.oldValues.get(oldValuesIndex);
+               boolean oldValueEx = ccs.oldValueEx.get(oldValuesIndex) > 0;
+               oldValuesIndex++;
+               
+               if(old != null) {
+                       entries.add(new Entry(ri, oldValueEx, old, null));
+               }
+               
+       }
+
+       @Override
+       void modify(int resourceKey, long offset, int size, byte[] bytes, int pos)
+                       throws DatabaseException {
+               
+       }
+
+       @Override
+       void set(int resourceKey, byte[] bytes, int length)
+                       throws DatabaseException {
+
+               byte[] old = ccs.oldValues.get(oldValuesIndex);
+               boolean oldValueEx = ccs.oldValueEx.get(oldValuesIndex) > 0;
+               oldValuesIndex++;
+
+		entries.add(new Entry(resourceKey, oldValueEx, old, Arrays.copyOf(bytes, length)));
+               
+       }
+
+       @Override
+       void claim(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid)
+                       throws DatabaseException {
+               
+               boolean add = ccs.statementMask.get(statementMaskIndex++) > 0;
+               if(add) {
+                       entries.add(new Entry(Type.ADD, resourceKey, puid, predicateKey & 0xFFF, ouid, objectKey & 0xFFF));
+               }
+               
+       }
+
+       @Override
+       void deny(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid)
+                       throws DatabaseException {
+               
+               boolean remove = ccs.statementMask.get(statementMaskIndex++) > 0;
+               if(remove) {
+                       entries.add(new Entry(Type.REMOVE, resourceKey, puid, predicateKey & 0xFFF, ouid, objectKey & 0xFFF));
+               }
+
+       }
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/AccessTime.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/AccessTime.java
new file mode 100644 (file)
index 0000000..8a32ef2
--- /dev/null
@@ -0,0 +1,23 @@
+package org.simantics.acorn.lru;
+
+public class AccessTime {
+
+       private long last = 0;
+       
+       private static AccessTime INSTANCE = new AccessTime();
+       
+       private AccessTime() {
+               
+       }
+       
+       public static AccessTime getInstance() {
+               return INSTANCE;
+       }
+       
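+	// Returns a strictly increasing timestamp: if System.nanoTime() does not
+	// advance between calls, the previous value is bumped by one.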
+       public synchronized long getAccessTime() {
+               long result = System.nanoTime();
+               last = Math.max(result, last+1);
+               return last;
+       }
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/CachingClusterSupport.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/CachingClusterSupport.java
new file mode 100644 (file)
index 0000000..a2c4899
--- /dev/null
@@ -0,0 +1,160 @@
+package org.simantics.acorn.lru;
+
+import java.io.InputStream;
+
+import org.simantics.db.Session;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.impl.IClusterTable;
+import org.simantics.db.service.ClusterUID;
+
+public class CachingClusterSupport implements ClusterSupport {
+       
+       private ClusterSupport backend;
+       
+       public CachingClusterSupport(ClusterSupport backend) {
+               this.backend = backend;
+       }
+
+       @Override
+       public int createClusterKeyByClusterUID(ClusterUID clusterUID, long clusterId) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterId(long clusterId) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByClusterKey(int clusterKey) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public int getClusterKeyByClusterUIDOrMake(long id1, long id2) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public ClusterBase getClusterByResourceKey(int resourceKey) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public long getClusterIdOrCreate(ClusterUID clusterUID) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void addStatement(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void cancelStatement(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void removeStatement(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void cancelValue(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void removeValue(Object cluster) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void setValue(Object cluster, long clusterId, byte[] bytes, int length) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void modiValue(Object cluster, long clusterId, long voffset, int length, byte[] bytes, int offset) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void setImmutable(Object cluster, boolean immutable) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void setDeleted(Object cluster, boolean deleted) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void createResource(Object cluster, short resourceIndex, long clusterId) {
+               backend.createResource(cluster, resourceIndex, clusterId);
+       }
+
+       @Override
+       public void addStatementIndex(Object cluster, int resourceKey, ClusterUID clusterUID, byte op) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public void setStreamOff(boolean setOff) {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public boolean getStreamOff() {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public InputStream getValueStreamEx(int resourceIndex, long clusterId) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public byte[] getValueEx(int resourceIndex, long clusterId) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public byte[] getValueEx(int resourceIndex, long clusterId, long voffset, int length) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public long getValueSizeEx(int resourceIndex, long clusterId) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public int wait4RequestsLess(int limit) throws DatabaseException {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public Session getSession() {
+               throw new UnsupportedOperationException();
+       }
+
+       @Override
+       public IClusterTable getClusterTable() {
+               throw new UnsupportedOperationException();
+       }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ChangeSetInfo.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ChangeSetInfo.java
new file mode 100644 (file)
index 0000000..12351a5
--- /dev/null
@@ -0,0 +1,113 @@
+package org.simantics.acorn.lru;
+
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import org.simantics.db.service.Bytes;
+import org.simantics.utils.datastructures.Pair;
+
+import gnu.trove.list.array.TByteArrayList;
+
+public class ChangeSetInfo extends LRUObject<Long, ChangeSetInfo> {
+
+       private byte[] metadataBytes;
+       private ArrayList<String> clusterChangeSetIds;
+       
+       // Stub
+       public ChangeSetInfo(LRU<Long, ChangeSetInfo> LRU, Path readDir, Long revision, int offset, int length) {
+               super(LRU, revision, readDir, "clusterStream", offset, length, false, false);
+               LRU.map(this);
+       }
+       
+       // New
+       public ChangeSetInfo(LRU<Long, ChangeSetInfo> LRU, Long revision, byte[] bytes, ArrayList<String> clusterChangeSetIds) {
+               super(LRU, revision, LRU.getDirectory(), "clusterStream", true, true);
+		this.metadataBytes = bytes;
+               this.clusterChangeSetIds = clusterChangeSetIds;
+               LRU.insert(this, accessTime);
+       }
+       
+       public ArrayList<String> getCSSIds() {
+               if(VERIFY) verifyAccess();
+               return clusterChangeSetIds;
+       }
+       
+       public byte[] getMetadataBytes() {
+               
+               if(VERIFY) verifyAccess();
+               
+               makeResident();
+               
+               return metadataBytes;
+               
+       }
+       
+       private static void writeLE(TByteArrayList bytes, int value) {
+               
+               bytes.add( (byte) (value & 0xFF));
+               bytes.add((byte) ((value >>> 8) & 0xFF));
+               bytes.add((byte) ((value >>> 16) & 0xFF));
+               bytes.add((byte) ((value >>> 24) & 0xFF));
+
+       }
+
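+	// Serialization layout: metadata length (LE4), metadata bytes, change set
+	// count (LE4), then for each change set id its length (LE4) and bytes.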
+       @Override
+       protected Pair<byte[], Integer> toBytes() {
+               
+               TByteArrayList result = new TByteArrayList();
+               writeLE(result, metadataBytes.length);
+               result.add(metadataBytes);
+               writeLE(result, clusterChangeSetIds.size());
+               for(String id : clusterChangeSetIds) {
+                       byte[] bb = id.getBytes();
+                       writeLE(result, bb.length);
+                       result.add(bb);
+               }
+
+               release();
+               
+               byte[] ret = result.toArray();
+               
+               return Pair.make(ret, ret.length);
+               
+       }
+       
+       @Override
+       void release() {
+               clusterChangeSetIds = null;
+               metadataBytes = null;
+       }
+
+       @Override
+       public void fromFile(byte[] data) {
+               
+               clusterChangeSetIds = new ArrayList<String>();
+               
+               int metadataLength = Bytes.readLE4(data, 0);
+               metadataBytes = Arrays.copyOfRange(data, 4, 4+metadataLength);
+               int offset = 4+metadataLength;
+               int numberOfChangeSets = Bytes.readLE4(data, offset);
+               offset += 4;
+               for(int i=0;i<numberOfChangeSets;i++) {
+                       int length = Bytes.readLE4(data, offset);
+                       offset += 4;
+                       String id = new String(Arrays.copyOfRange(data, offset, offset+length));
+                       clusterChangeSetIds.add(id);
+                       offset += length;
+               }
+               
+       }
+
+       @Override
+       String getExtension() {
+               return "cs";
+       }
+       
+       @Override
+       protected boolean overwrite() {
+               return false;
+       }
+
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterChangeSet.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterChangeSet.java
new file mode 100644 (file)
index 0000000..d0f3013
--- /dev/null
@@ -0,0 +1,120 @@
+package org.simantics.acorn.lru;
+
+import java.util.ArrayList;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.internal.Change;
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.db.procore.cluster.ClusterTraits;
+import org.simantics.db.service.ClusterUID;
+
+import gnu.trove.list.array.TByteArrayList;
+
+public class ClusterChangeSet {
+
+       public enum Type {
+               ADD,REMOVE,VALUE
+       }
+       
+       public static class Entry {
+               
+               final Type type;
+               final short subject;
+               final short predicate;
+               final short object;
+               final ClusterUID predicateUID;
+               final ClusterUID objectUID;
+               final boolean oldValueEx;
+               final byte[] oldValue;
+               final byte[] newValue;
+               
+               public Entry(Type type, int subject, ClusterUID predicateUID, int predicate, ClusterUID objectUID, int object) {
+                       this.type = type;
+                       this.subject = (short)(subject & 0xFFF);
+                       this.predicate = (short)predicate;
+                       this.object = (short)object;
+                       this.predicateUID = predicateUID;
+                       this.objectUID = objectUID;
+                       this.oldValueEx = false;
+                       this.oldValue = null;
+                       this.newValue = null;
+               }
+
+               public Entry(int subject, boolean oldValueEx, byte[] oldValue, byte[] newValue) {
+                       if(oldValue == null && newValue == null)
+                               throw new IllegalStateException();
+                       this.type = Type.VALUE;
+                       this.subject = (short)(subject & 0xFFF);
+                       this.predicate = 0;
+                       this.object = 0;
+                       this.predicateUID = null;
+                       this.objectUID = null;
+                       this.oldValueEx = oldValueEx;
+                       this.oldValue = oldValue;
+                       this.newValue = newValue;
+               }
+               
+               public void process(ClusterManager clusters, ClusterChange cs, int clusterKey) {
+                       
+                       Entry e = this;
+                       
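+			// Note the inversion: this entry describes how to undo a change,
+			// so an ADD entry emits a REMOVE operation and vice versa.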
+                       if(e.type == Type.VALUE) {
+                               
+                               if(e.oldValue != null) {
+                                       cs.setValue(e.subject, e.oldValue, e.oldValue.length);
+                               } else {
+                                       Change change = new Change();
+                                       change.addStatementIndex(e.subject, null, ClusterChange.DELETE_OPERATION);
+                                       cs.addChange(change);
+                               }
+                               
+                       } else if(e.type == Type.ADD) {
+                               
+                               int s = ClusterTraits.createResourceKeyNoThrow(clusterKey, e.subject);
+                               int p = clusters.getResourceKey(e.predicateUID, e.predicate);
+                               int o = clusters.getResourceKey(e.objectUID, e.object);
+                               Change change = new Change();
+                               change.addStatementIndex(s, null, ClusterChange.REMOVE_OPERATION);
+                               change.addStatementIndex(p, e.predicateUID, (byte)0);
+                               change.addStatementIndex(o, e.objectUID, (byte)0);
+                               cs.addChange(change);
+                               
+                       } else if(e.type == Type.REMOVE) {
+                               
+                               int s = ClusterTraits.createResourceKeyNoThrow(clusterKey, e.subject);
+                               int p = clusters.getResourceKey(e.predicateUID, e.predicate);
+                               int o = clusters.getResourceKey(e.objectUID, e.object);
+                               Change change = new Change();
+                               change.addStatementIndex(s, null, ClusterChange.ADD_OPERATION);
+                               change.addStatementIndex(p, e.predicateUID, (byte)0);
+                               change.addStatementIndex(o, e.objectUID, (byte)0);
+                               cs.addChange(change);
+                               
+                       }
+                       
+               }
+               
+       }
+       
+       final public String id;
+       final public ClusterUID cuid;
+       public String chunkKey;
+       public int chunkOffset = -1;
+       
+       public TByteArrayList statementMask = new TByteArrayList();
+       public TByteArrayList oldValueEx = new TByteArrayList();
+       public ArrayList<byte[]> oldValues = new ArrayList<byte[]>();
+       
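+       /*
+        * The id is of the form "<chunkKey>.<chunkOffset>"; cf. the ccsKey
+        * construction in ClusterStreamChunk.fromFile.
+        */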
+       public ClusterChangeSet(String id, ClusterUID cuid) {
+               this.id = id;
+               this.cuid = cuid;
+               String[] ss = id.split("\\.");
+               chunkKey = ss[0];
+               chunkOffset = Integer.parseInt(ss[1]);
+       }
+       
+       public ClusterStreamChunk getChunk(ClusterManager manager) {
+               return manager.streamLRU.get(chunkKey);
+       }
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java
new file mode 100644 (file)
index 0000000..1cd5822
--- /dev/null
@@ -0,0 +1,346 @@
+package org.simantics.acorn.lru;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.Persistable;
+import org.simantics.acorn.cluster.ClusterImpl;
+import org.simantics.acorn.cluster.ClusterSmall;
+import org.simantics.acorn.cluster.ClusterImpl.ClusterTables;
+import org.simantics.acorn.internal.ClusterSupport2;
+import org.simantics.compressions.CompressionCodec;
+import org.simantics.compressions.Compressions;
+import org.simantics.db.ClusterCreator;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.service.Bytes;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.datastructures.Pair;
+
+public class ClusterInfo extends LRUObject<ClusterUID, ClusterInfo> implements Persistable {
+       
+       final private ClusterManager manager;
+       private ClusterImpl cluster;
+       public int changeSetId;
+       private ClusterUpdateState updateState;
+
+       public static final String COMPRESSION = "LZ4";
+       
+       // Stub
+       public ClusterInfo(ClusterManager manager, LRU<ClusterUID, ClusterInfo> LRU, Path readDirectory, ClusterUID uid, int offset, int length) {
+               super(LRU, uid, readDirectory, uid.toString() + ".cluster", offset, length, false, false);
+               this.manager = manager;
+               this.cluster = null;
+               LRU.map(this);
+       }
+       
+       // New
+       public ClusterInfo(ClusterManager manager, LRU<ClusterUID, ClusterInfo> LRU, ClusterImpl cluster) {
+               super(LRU, cluster.getClusterUID(), LRU.getDirectory(), cluster.getClusterUID().toString() + ".cluster", true, true);
+               this.manager = manager;
+               this.cluster = cluster;
+               LRU.insert(this, accessTime);
+               LRU.swap(getKey());
+       }
+       
+       public <T> T clone(ClusterUID uid, ClusterCreator creator) throws DatabaseException {
+               
+               // Updates have been ensured at this point
+                       
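+               // First try to copy the tables while the cluster is resident. If it
+               // is not, wait for pending writes outside the mutex and retry, now
+               // falling back to loading the cluster from file.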
+               acquireMutex();
+
+               try {
+                       if(isResident()) {
+                               ClusterTables tables = cluster.store();
+                               return creator.create(uid, tables.bytes, tables.ints, tables.longs); 
+                       }
+               } catch (IOException e) {
+                       throw new DatabaseException(e);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+
+               // Ensure pending updates here - this may take some time
+               LRU.waitPending(this, false);
+
+               acquireMutex();
+               try {
+
+                       if(isResident()) {
+                               ClusterTables tables = cluster.store();
+                               return creator.create(uid, tables.bytes, tables.ints, tables.longs); 
+                       } else {
+                               byte[] data = readFile();
+                               ClusterTables tables = new ClusterTables();
+                               loadCluster(getKey(), manager.support, data, tables);
+                               return creator.create(uid, tables.bytes, tables.ints, tables.longs);
+                       }
+
+               } catch (IOException e) {
+                       throw new DatabaseException(e);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+               
+       }
+       
+       static class ClusterDecompressor {
+               
+               byte[] decompressBuffer = new byte[1024*1024];
+
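+               /*
+                * Compressed cluster layout (cf. toBytes below):
+                *   [LZ4 block][inflated size: LE4]
+                * where the inflated payload is
+                *   [byteLen: LE4][intLen: LE4][longLen: LE4][bytes][ints: LE4 each][longs: LE8 each]
+                */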
+               public synchronized ClusterTables readCluster(ClusterUID uid, byte[] compressed) throws IOException {
+                       
+                       int deflatedSize = Bytes.readLE4(compressed, compressed.length-4);
+                       
+                       if(decompressBuffer.length < deflatedSize)
+                               decompressBuffer = new byte[Math.max(3*decompressBuffer.length / 2, deflatedSize)];
+                       
+                       CompressionCodec codec = Compressions.get(Compressions.LZ4);
+                       
+                       ByteBuffer input = ByteBuffer.wrap(compressed);
+                       ByteBuffer output = ByteBuffer.wrap(decompressBuffer);
+
+                       int decompressedSize = codec.decompressBuffer(input, 0, compressed.length-4, output, 0, decompressBuffer.length);
+                       assert(decompressedSize <= decompressBuffer.length);
+                       
+                       int byteLength = Bytes.readLE4(decompressBuffer, 0);
+                       int intLength = Bytes.readLE4(decompressBuffer, 4);
+                       int longLength = Bytes.readLE4(decompressBuffer, 8);
+                       
+                       byte[] bytes = new byte[byteLength];
+                       int[] ints = new int[intLength];
+                       long[] longs = new long[longLength];
+                       
+                       System.arraycopy(decompressBuffer, 12, bytes, 0, byteLength);
+                       
+                       int offset = 12+byteLength;
+                       for(int i=0;i<intLength;i++,offset+=4)
+                               ints[i] = Bytes.readLE4(decompressBuffer, offset);
+                       for(int i=0;i<longLength;i++,offset+=8)
+                               longs[i] = Bytes.readLE8(decompressBuffer, offset);
+                       
+                       ClusterTables result = new ClusterTables();
+                       result.bytes = bytes;
+                       result.ints = ints;
+                       result.longs = longs;
+                       return result;
+                       
+               }
+
+               
+       }
+       
+       private static ClusterDecompressor decompressor = new ClusterDecompressor();
+       
+       public void loadCluster(ClusterUID uid, ClusterSupport2 support, byte[] data, ClusterTables tables) throws IOException {
+
+               ClusterTables ts = decompressor.readCluster(uid, data);
+               tables.bytes = ts.bytes;
+               tables.ints = ts.ints;
+               tables.longs = ts.longs;
+
+       }
+       
+       public ClusterImpl loadCluster(ClusterUID uid, ClusterSupport2 support, byte[] data) throws IOException {
+               
+               ClusterTables tables = decompressor.readCluster(uid, data);
+               try {
+                       return ClusterImpl.make(support, tables.longs, tables.ints, tables.bytes, support, support.getClusterKeyByClusterUIDOrMake(uid));
+               } catch (DatabaseException e) {
+                       e.printStackTrace();
+                       return null;
+               }
+               
+       }
+       
+       @Override
+       public void fromFile(byte[] data) {
+               
+               try {
+                       cluster = loadCluster(getKey(), manager.support, data);
+               } catch (IOException e) {
+                       e.printStackTrace();
+               }
+               
+       }
+
+       @Override
+       protected Pair<byte[],Integer> toBytes() {
+               
+               try {
+
+                       byte[] raw = null;
+                       
+                       if(cluster instanceof ClusterSmall) {
+                               raw = cluster.storeBytes();
+                       } else {
+                       
+                               ClusterTables tables = cluster.store();
+                               
+                               raw = new byte[12 + tables.bytes.length + (tables.ints.length<<2) + (tables.longs.length<<3)]; 
+       
+                               Bytes.writeLE(raw, 0, tables.bytes.length);
+                               Bytes.writeLE(raw, 4, tables.ints.length);
+                               Bytes.writeLE(raw, 8, tables.longs.length);
+       
+                               System.arraycopy(tables.bytes, 0, raw, 12, tables.bytes.length);
+                               int offset = 12+tables.bytes.length;
+                               for(int i=0;i<tables.ints.length;i++,offset+=4)
+                                       Bytes.writeLE(raw, offset, tables.ints[i]);
+                               for(int i=0;i<tables.longs.length;i++,offset+=8)
+                                       Bytes.writeLE8(raw, offset, tables.longs[i]);
+                               
+                       }
+
+                       byte[] result = new byte[raw.length + raw.length/8];
+                       
+                       CompressionCodec codec = Compressions.get(Compressions.LZ4);
+                       ByteBuffer input = ByteBuffer.wrap(raw);
+                       ByteBuffer output = ByteBuffer.wrap(result);
+                       //ByteBuffer output = ByteBuffer.allocate(raw.length + raw.length/8);
+                       int compressedSize = codec.compressBuffer(input, 0, raw.length, output, 0);
+
+                       // We append inflated size - cannot prepend since decompression cannot handle offsets in input
+//                     final byte[] rawOutput = new byte[compressedSize+4];
+//                     output.get(rawOutput,0,compressedSize);
+//                     Bytes.writeLE(rawOutput, compressedSize, raw.length);
+//                     return Pair.make(rawOutput, rawOutput.length);
+
+                       Bytes.writeLE(result, compressedSize, raw.length);
+                       return Pair.make(result, compressedSize+4);
+
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       release();
+               }
+               
+       }
+       
+       @Override
+       void release() {
+               cluster = null;
+       }
+
+
+       @Override
+       String getExtension() {
+               return "cluster";
+       }
+       
+       public void scheduleUpdate() {
+
+               if(VERIFY) verifyAccess();
+
+               if(updateState == null)
+                       updateState = new ClusterUpdateState(getKey());
+               updateState.incRef();
+
+       }
+       
+       public ClusterImpl getForUpdate() {
+
+               try {
+                       
+                       acquireMutex();
+
+                       assert(updateState != null);
+                       
+                       makeResident();
+                       setDirty(true);
+                       updateState.beginUpdate();
+                       return cluster;
+                       
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       
+                       releaseMutex();
+                       
+               }
+
+       }
+       
+       public void update(ClusterImpl clu) {
+               
+               if(VERIFY) verifyAccess();
+               
+               makeResident();
+               
+               cluster = clu;
+               updateState.endUpdate();
+               updateState = updateState.decRef();
+
+       }
+       
+       public ClusterImpl getCluster() {
+
+               if(VERIFY) verifyAccess();
+
+               makeResident();
+               
+               return cluster;
+               
+       }
+       
+       @Override
+       public boolean canBePersisted() {
+               
+               if(VERIFY) verifyAccess();
+
+               // A pending update blocks persisting. Note that updateState is
+               // necessarily null once this check passes, so no separate inUpdate
+               // test is needed.
+               ClusterUpdateState state = getUpdateState();
+               if(state != null) return false;
+
+               return super.canBePersisted();
+               
+       }
+
+       private ClusterUpdateState getUpdateState() {
+               
+               if(VERIFY) verifyAccess();
+               
+               return updateState;
+               
+       }
+
+       private ClusterUpdateState getUpdateStateWithoutMutex() {
+               
+               try {
+                       acquireMutex();
+                       return getUpdateState();
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+               
+       }
+
+       /*
+        * This method blocks - no locks here
+        */
+       public void waitForUpdates() {
+               
+               ClusterUpdateState state = getUpdateStateWithoutMutex();
+               if(state != null) {
+                       long start = System.nanoTime();
+                       state.waitForUpdates();
+                       long duration = System.nanoTime() - start;
+                       System.err.println("Waited for updates to cluster " + getKey() + " for " + (1e-6 * duration) + " ms.");
+               }
+               
+       }
+       
+       @Override
+       protected boolean overwrite() {
+               return true;
+       }
+       
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterLRU.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterLRU.java
new file mode 100644 (file)
index 0000000..1f0db54
--- /dev/null
@@ -0,0 +1,315 @@
+package org.simantics.acorn.lru;
+
+import java.nio.file.Path;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.cluster.ClusterImpl;
+import org.simantics.acorn.internal.BijectionMap;
+import org.simantics.db.common.utils.Logger;
+import org.simantics.db.exception.ClusterDoesNotExistException;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.service.ClusterUID;
+
+import gnu.trove.TIntIntHashMap;
+
+public class ClusterLRU extends LRU<ClusterUID, ClusterInfo> {
+
+       final private BijectionMap<ClusterUID, Integer> clusterMapping = new BijectionMap<ClusterUID, Integer>();
+       final private ClusterManager manager;
+       
+       public ClusterLRU(ClusterManager manager, String identifier, Path writeDir) {
+               
+               super(identifier, writeDir);
+               this.manager = manager;
+               
+               clusterMapping.map(ClusterUID.make(0,2), clusterMapping.size() + 1);
+
+       }
+
+       public ClusterInfo getOrCreate(ClusterUID uid, boolean makeIfNull) {
+               
+               try {
+                       
+                       acquireMutex();
+                       
+                       ClusterInfo info = get(uid);
+
+                       if (info == null) {
+                               
+                               if(!makeIfNull) throw new IllegalStateException("Asked for an existing cluster " + uid + " that was not found.");
+
+                               Integer clusterKey = clusterMapping.getRight(uid);
+                               if (clusterKey == null) {
+                                       clusterKey = clusterMapping.size() + 1;
+                                       clusterMapping.map(uid, clusterKey);
+                               }
+
+                               info = new ClusterInfo(manager, this, ClusterImpl.make(manager.support,
+                                               uid, clusterKey, manager.support));
+
+                       }
+
+                       return info;
+                       
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       
+                       releaseMutex();
+                       
+               }
+               
+       }
+
+       /*
+        * This method waits - we have no locks here
+        */
+       public void ensureUpdates(ClusterUID uid) throws DatabaseException {
+
+               ClusterInfo info = getWithoutMutex(uid);
+               if(info == null)
+                   throw new ClusterDoesNotExistException("Asked for a cluster which does not exist: " + uid);
+               info.waitForUpdates();
+               
+       }
+
+       public ClusterInfo get(ClusterUID uid, boolean makeIfNull, boolean ensureUpdates) throws DatabaseException {
+
+               if (ensureUpdates) {
+                   try {
+                       ensureUpdates(uid);
+                   } catch (ClusterDoesNotExistException e) {
+                       if (makeIfNull) {
+                           Logger.defaultLogError("For debugging purposes, creating a cluster which does not exist", e);
+                       } else {
+                           throw e;
+                       }
+                   }
+               }
+               return getOrCreate(uid, makeIfNull);
+       }
+       
+       public ClusterInfo get(ClusterUID uid, boolean makeIfNull) throws DatabaseException {
+               return get(uid, makeIfNull, true);
+       }
+
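+       /*
+        * A resource key packs the cluster key into the high bits and the
+        * resource index into the low 12 bits: key = (clusterKey << 12) + index.
+        * getClusterUIDByResourceKey below inverts this with resourceKey >> 12.
+        */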
+       public int getResourceKey(ClusterUID uid, int index) {
+
+               if(VERIFY) verifyAccess();
+
+               Integer i = clusterMapping.getRight(uid);
+               if (i == null) {
+                       i = clusterMapping.size() + 1;
+                       clusterMapping.map(uid, i);
+               }
+               return (i << 12) + index;
+
+       }
+
+       public int getResourceKeyWithoutMutex(ClusterUID uid, int index) {
+               
+               acquireMutex();
+               try {
+                       return getResourceKey(uid, index);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+               
+       }
+
+       public int createClusterKeyByClusterUID(ClusterUID uid) {
+               
+               if(VERIFY) verifyAccess();
+               
+               Integer i = clusterMapping.getRight(uid);
+               if (i == null) {
+                       i = clusterMapping.size() + 1;
+                       clusterMapping.map(uid, i);
+               }
+               return i;
+               
+       }
+
+       public ClusterBase getClusterByClusterUIDOrMake(ClusterUID uid) throws DatabaseException {
+               
+               if(VERIFY) verifyAccess();
+               
+               int key = createClusterKeyByClusterUID(uid);
+               return getClusterByClusterKey(key);
+               
+       }
+
+       public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) {
+
+               if(VERIFY) verifyAccess();
+
+               return createClusterKeyByClusterUID(clusterUID);
+                       
+       }
+       
+       public int getClusterKeyByClusterUIDOrMakeWithoutMutex(ClusterUID clusterUID) {
+               acquireMutex();
+               try {
+                       return getClusterKeyByClusterUIDOrMake(clusterUID);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+       }
+
+       public ClusterBase getClusterByClusterKey(int clusterKey) throws DatabaseException {
+               
+               if(VERIFY) verifyAccess();
+               
+               ClusterUID uid = clusterMapping.getLeft(clusterKey);
+               ClusterInfo info = get(uid, true);
+               info.acquireMutex();
+               try {
+                       return info.getCluster();
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       info.releaseMutex();
+               }
+               
+       }
+
+       public ClusterUID getClusterUIDByResourceKey(int resourceKey)
+                       throws DatabaseException {
+               
+               if(VERIFY) verifyAccess();
+               
+               int clusterKey = resourceKey >> 12;
+               return clusterMapping.getLeft(clusterKey);
+               
+       }
+       
+       public ClusterUID getClusterUIDByResourceKeyWithoutMutex(int resourceKey) throws DatabaseException {
+               acquireMutex();
+               try {
+                       return getClusterUIDByResourceKey(resourceKey);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+       }
+
+       @SuppressWarnings("unchecked")
+       public <T extends ClusterI> T getClusterByClusterUIDOrMakeProxy(ClusterUID uid) throws DatabaseException {
+               return (T) getClusterByClusterUIDOrMake(uid);
+       }
+
+       @SuppressWarnings("unchecked")
+       public <T extends ClusterI> T getClusterProxyByResourceKey(int resourceKey) throws DatabaseException {
+               
+               if(VERIFY) verifyAccess();
+
+               return (T) getClusterByClusterKey(resourceKey >> 12);
+               
+       }
+
+       public int getClusterKeyByUID(long id1, long id2) throws DatabaseException {
+               
+               if(VERIFY) verifyAccess();
+
+               return getClusterKeyByClusterUIDOrMake(ClusterUID.make(id1, id2));
+               
+       }
+       
+       public int getClusterKeyByUIDWithoutMutex(long id1, long id2) throws DatabaseException {
+
+               acquireMutex();
+               try {
+                       return getClusterKeyByClusterUIDOrMake(ClusterUID.make(id1, id2));
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+
+       }
+
+       
+       public static void main(String[] args) throws Exception {
+               
+               long start = System.nanoTime();
+               
+               final TIntIntHashMap map = new TIntIntHashMap(0, 0.9f);
+
+               AtomicInteger counter = new AtomicInteger(0);
+               AtomicBoolean written = new AtomicBoolean(false);
+               
+               //final Semaphore ws = new Semaphore(1);
+               
+               Thread write = new Thread() {
+                       
+                       @Override
+                       public void run() {
+                               try {
+                                       for(int i=0;i<100000000;i++) {
+                                               synchronized(map) {
+//                                             ws.acquire();
+                                               map.put(i, i);
+//                                             ws.release();
+                                               }
+                                               //if((i & 0xfffff) == 0) System.err.println("Write " + i);
+                                               counter.incrementAndGet();
+                                       }
+                                       written.set(true);
+                               } catch (Throwable e) {
+                                       e.printStackTrace();
+                               }
+                       }
+                       
+               };
+               write.start();
+               
+               Thread read = new Thread() {
+                       
+                       @Override
+                       public void run() {
+                               try {
+                                       while(!written.get()) {
+                                               double r = Math.random();
+                                               double max = counter.get();
+                                               int key = (int)(max*r);
+                                               int value = map.get(key);
+                                               if(key != value) {
+                                                       //System.err.println("Read failed " + key + " vs. " + value);
+                                                       //ws.acquire();
+                                                       synchronized(map) {
+                                                               value = map.get(key);
+                                                               if(key != value) {
+                                                                       System.err.println("Read failed for real " + key + " vs. " + value);
+                                                               }
+                                                               //ws.release();
+                                                       }
+                                               }
+                                               //if((key & 0xfffff) == 0) System.err.println("Read " + key);
+                                       }
+                               } catch (Throwable e) {
+                                       e.printStackTrace();
+                               }
+                       }
+                       
+               };
+               read.start();
+               
+               write.join();
+               read.join();
+               
+               long duration = System.nanoTime() - start;
+               System.err.println("took " + 1e-9*duration + "s.");
+               
+       }
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterStreamChunk.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterStreamChunk.java
new file mode 100644 (file)
index 0000000..23cbfb1
--- /dev/null
@@ -0,0 +1,302 @@
+package org.simantics.acorn.lru;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
+import java.util.ArrayList;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.Persistable;
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.internal.UndoClusterUpdateProcessor;
+import org.simantics.compressions.CompressionCodec;
+import org.simantics.compressions.Compressions;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.service.Bytes;
+import org.simantics.utils.datastructures.Pair;
+
+import gnu.trove.list.array.TByteArrayList;
+
+public class ClusterStreamChunk extends LRUObject<String, ClusterStreamChunk> implements Persistable {
+
+       // Chunks are committed automatically once they grow past 500 kB (see isCommitted)
+       private static final int MAX_CHUNK_SIZE = 500*1024;
+
+       int size = 0;
+       final private ClusterManager manager;
+       private boolean committed = false;
+       
+       public int nextToProcess = 0;
+       
+       public ArrayList<ClusterUpdateOperation> operations = new ArrayList<ClusterUpdateOperation>();
+       
+       // Stub
+       public ClusterStreamChunk(ClusterManager manager, LRU<String, ClusterStreamChunk> LRU, Path readDir, String id, int offset, int length) {
+               super(LRU, id, readDir, "clusterStream", offset, length, false, false);
+               this.manager = manager;
+               LRU.map(this);
+       }
+       
+       // Creation
+       public ClusterStreamChunk(ClusterManager manager, LRU<String, ClusterStreamChunk> LRU, String id) {
+               super(LRU, id, LRU.getDirectory(), "clusterStream", true, true);
+               this.manager = manager;
+               LRU.insert(this, accessTime);
+       }
+       
+       public UndoClusterUpdateProcessor getUndoProcessor(ClusterManager clusters, int chunkOffset, String ccsId) throws DatabaseException {
+               
+               if(VERIFY) verifyAccess();
+               
+               makeResident(true);
+
+               ClusterUpdateOperation op = operations.get(chunkOffset);
+               if(op == null) throw new IllegalStateException("Cluster Update Operation " + ccsId + " was not found.");
+               if(op.ccs == null) throw new IllegalStateException("Cluster ChangeSet " + ccsId + " was not found.");
+
+               UndoClusterUpdateProcessor proc = new UndoClusterUpdateProcessor(clusters, this, op.ccs);
+               if(proc.version != ClusterChange.VERSION)
+                       return null;
+
+               // This cluster and CCS can still be under preparation => wait
+               clusters.clusterLRU.ensureUpdates(proc.getClusterUID());
+
+               proc.process();
+
+               cancelForceResident();
+               
+               return proc;
+               
+       }
+       
+       public void addOperation(ClusterUpdateOperation op) {
+               if(committed)
+                       throw new IllegalStateException();
+               operations.add(op);
+               size += op.data.length;
+//             if(isCommitted()) {
+//                     LRU.refresh(this);
+//             }
+       }
+       
+       public byte[] getOperation(int index) {
+               return operations.get(index).data;
+       }
+       
+       public void commit() {
+               committed = true;
+       }
+       
+       public boolean isCommitted() {
+               if(size > MAX_CHUNK_SIZE) committed = true;
+               return committed;
+       }
+       
+       @Override
+       public boolean canBePersisted() {
+               if(!super.canBePersisted()) return false;
+               if(!isCommitted()) return false;
+               for(ClusterUpdateOperation op : operations) {
+                       if(!op.finished) return false;
+               }
+               return true;
+       }
+
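+       /*
+        * Little-endian encoders that append to a growable TByteArrayList,
+        * mirroring Bytes.writeLE / Bytes.writeLE8 for fixed-size arrays.
+        */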
+       private static void writeLE(TByteArrayList bytes, int value) {
+               
+               bytes.add( (byte) (value & 0xFF));
+               bytes.add((byte) ((value >>> 8) & 0xFF));
+               bytes.add((byte) ((value >>> 16) & 0xFF));
+               bytes.add((byte) ((value >>> 24) & 0xFF));
+
+       }
+
+       final public static void writeLE8(TByteArrayList bytes, long value) {
+               
+               bytes.add( (byte) (value & 0xFF));
+               bytes.add((byte) ((value >>> 8) & 0xFF));
+               bytes.add((byte) ((value >>> 16) & 0xFF));
+               bytes.add((byte) ((value >>> 24) & 0xFF));
+               bytes.add((byte) ((value >>> 32) & 0xFF));
+               bytes.add((byte) ((value >>> 40) & 0xFF));
+               bytes.add((byte) ((value >>> 48) & 0xFF));
+               bytes.add((byte) ((value >>> 56) & 0xFF));
+               
+       }
+       
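+       /*
+        * Serialized chunk layout (as read back in fromFile):
+        *   [opCount: LE4] and then per operation
+        *   [dataLen: LE4][data]
+        *   [statementMaskLen: LE4][statementMask]
+        *   [oldValueExLen: LE4][oldValueEx]
+        *   [oldValueCount: LE4], per old value [len: LE4, -1 for null][bytes]
+        * The payload is LZ4-compressed and the inflated size is appended as a
+        * trailing LE4.
+        */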
+       @Override
+       protected Pair<byte[], Integer> toBytes() {
+               
+               assert(isCommitted());
+               
+               TByteArrayList raw = new TByteArrayList();
+               
+               writeLE(raw, operations.size());
+               
+               for(ClusterUpdateOperation op : operations) {
+                       
+                       writeLE(raw, op.data.length);
+                       raw.add(op.data);
+                       op.data = null;
+                       
+                       writeLE(raw, op.ccs.statementMask.size());
+                       raw.add(op.ccs.statementMask.toArray());
+                       writeLE(raw, op.ccs.oldValueEx.size());
+                       raw.add(op.ccs.oldValueEx.toArray());
+                       writeLE(raw, op.ccs.oldValues.size());
+                       
+                       for(byte[] oldValue : op.ccs.oldValues) {
+                               int len = (oldValue != null ? oldValue.length : -1);
+                               writeLE(raw, len);
+                               if(oldValue != null) {
+                                       raw.add(oldValue);
+                               }
+                       }
+                       
+               }
+               
+               byte[] raw_ = raw.toArray();
+               CompressionCodec codec = Compressions.get(Compressions.LZ4);
+               ByteBuffer input = ByteBuffer.wrap(raw_);
+               ByteBuffer output = ByteBuffer.allocate(raw_.length + raw_.length/8);
+               int compressedSize = codec.compressBuffer(input, 0, raw_.length, output, 0);
+
+               // We append inflated size - cannot prepend since decompression cannot handle offsets in input
+               final byte[] rawOutput = new byte[compressedSize+4];
+               output.get(rawOutput,0,compressedSize);
+               Bytes.writeLE(rawOutput, compressedSize, raw_.length);
+
+               release();
+               
+               return Pair.make(rawOutput, rawOutput.length);
+               
+       }
+       
+       @Override
+       void release() {
+               
+               for(ClusterUpdateOperation op : operations) {
+                       op.data = null;
+                       op.ccs = null;
+               }
+
+       }
+
+       static class StreamDecompressor {
+               
+//             byte[] decompressBuffer = new byte[1024*1024];
+
+               public synchronized byte[] decompressBuffer(byte[] compressed) throws IOException {
+                       
+                       int deflatedSize = Bytes.readLE4(compressed, compressed.length-4);
+                       
+                       byte[] result = new byte[deflatedSize];
+                       
+//                     if(decompressBuffer.length < deflatedSize)
+//                             decompressBuffer = new byte[Math.max(3*decompressBuffer.length / 2, deflatedSize)];
+                       
+                       CompressionCodec codec = Compressions.get(Compressions.LZ4);
+                       
+                       ByteBuffer input = ByteBuffer.wrap(compressed);
+                       ByteBuffer output = ByteBuffer.wrap(result);
+
+                       int decompressedSize = codec.decompressBuffer(input, 0, compressed.length-4, output, 0, result.length);
+                       assert(decompressedSize == deflatedSize);
+                       
+                       return result;
+                       
+               }
+
+               
+       }
+       
+       private static StreamDecompressor decompressor = new StreamDecompressor();
+       
+       @Override
+       public void fromFile(byte[] data_) {
+
+               try {
+                       
+                       byte[] data = decompressor.decompressBuffer(data_);
+                       
+                       operations = new ArrayList<ClusterUpdateOperation>();
+                       
+                       int offset = 0;
+                       int opLen = Bytes.readLE4(data, offset);
+                       offset += 4;
+                       
+                       for(int i=0;i<opLen;i++) {
+                               
+                               int len = Bytes.readLE4(data, offset);
+                               offset += 4;
+                               byte[] bytes  = new byte[len];
+                               System.arraycopy(data, offset, bytes, 0, len);
+                               offset += len;
+                               
+                               ClusterUpdateOperation op = new ClusterUpdateOperation(manager, bytes);
+                               
+                               String ccsKey = getKey() + "." + i;
+                               
+                               op.ccs = new ClusterChangeSet(ccsKey, op.uid);
+                               op.chunk = this;
+                               
+                               int statementMaskLen = Bytes.readLE4(data, offset);
+                               offset += 4;
+                               op.ccs.statementMask = new TByteArrayList(statementMaskLen);
+                               for(int j=0;j<statementMaskLen;j++)
+                                       op.ccs.statementMask.add(data[offset++]);
+                               
+                               int oldValueExLen = Bytes.readLE4(data, offset);
+                               offset += 4;
+                               op.ccs.oldValueEx = new TByteArrayList(oldValueExLen);
+                               for(int j=0;j<oldValueExLen;j++)
+                                       op.ccs.oldValueEx.add(data[offset++]);
+                               
+                               int oldValuesSize = Bytes.readLE4(data, offset);
+                               offset += 4;
+                               
+                               op.ccs.oldValues = new ArrayList<>(oldValuesSize);
+                               for(int j=0;j<oldValuesSize;j++) {
+
+                                       int oldValueSize = Bytes.readLE4(data, offset);
+                                       offset += 4;
+
+                                       if(oldValueSize == -1) {
+                                               op.ccs.oldValues.add(null);
+                                       } else {
+                                               byte[] oldValue = new byte[oldValueSize];
+                                               System.arraycopy(data, offset, oldValue, 0, oldValueSize);
+                                               offset += oldValueSize;
+                                               op.ccs.oldValues.add(oldValue);
+                                       }
+
+                               }
+                               
+                               operations.add(op);
+                               
+                       }
+                       
+               } catch (IOException e) {
+                       
+                       throw new IllegalStateException(e);
+                       
+               }
+               
+       }
+
+       @Override
+       String getExtension() {
+               return "stream";
+       }
+       
+       @Override
+       public String toString() {
+               return "ClusterStreamChunk " + getKey();
+       }
+       
+       @Override
+       protected boolean overwrite() {
+               return false;
+       }
+       
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterUpdateOperation.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterUpdateOperation.java
new file mode 100644 (file)
index 0000000..40a44bc
--- /dev/null
@@ -0,0 +1,91 @@
+package org.simantics.acorn.lru;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.cluster.ClusterImpl;
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.internal.ClusterChange2;
+import org.simantics.acorn.internal.ClusterUpdateProcessor;
+import org.simantics.acorn.internal.ClusterUpdateProcessor2;
+import org.simantics.db.service.Bytes;
+import org.simantics.db.service.ClusterUID;
+
+final public class ClusterUpdateOperation {
+       
+       final public ClusterUID uid;
+       final protected ClusterManager manager;
+       final protected ClusterInfo info;
+
+       public byte[] data;
+       
+       public ClusterStreamChunk chunk;
+       public ClusterChangeSet ccs;
+       boolean finished = false;
+
+       public ClusterUpdateOperation(ClusterManager manager, byte[] data) {
+               
+               long cuid1 = Bytes.readLE8(data, 8);
+               long cuid2 = Bytes.readLE8(data, 16);
+
+               this.manager = manager;
+               this.uid = ClusterUID.make(cuid1, cuid2);
+               this.data = data;
+               this.info = manager.clusterLRU.getOrCreate(uid, true);
+                               
+       }
+       
+       public void finish() {
+               finished = true;
+       }
+       
+       public void scheduled(String ccsInfoId) {
+               ccs = new ClusterChangeSet(ccsInfoId, uid);
+               chunk = ccs.getChunk(manager);
+               manager.addIntoCurrentChangeSet(ccsInfoId);
+       }
+       
+       public void run() {
+               ClusterUpdateOperation op = null;
+               byte[] data = null;
+               chunk.acquireMutex();
+               try {
+                       chunk.makeResident();
+                       op = chunk.operations.get(ccs.chunkOffset);
+                       data = op.data;
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       chunk.releaseMutex();
+               }
+               op.runWithData(data);
+       }
+       
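+       /*
+        * The update payload carries a version tag at bytes 4..7 (LE) and the
+        * ClusterUID at bytes 8..23 (two LE8 longs, cf. the constructor); the
+        * version selects between the two update processor implementations.
+        */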
+       public void runWithData(byte[] data) {
+               
+               try {
+                       int version = Bytes.readLE4(data, 4);
+                       if(version == ClusterChange.VERSION) {
+                               ClusterUpdateProcessor processor = new ClusterUpdateProcessor(manager, manager.support, data, this);
+                               ClusterImpl cluster = info.getForUpdate();
+                               cluster = processor.process(cluster);
+                               manager.update(uid, cluster);
+                       } else if (version == ClusterChange2.VERSION) {
+                               ClusterUpdateProcessor2 processor = new ClusterUpdateProcessor2(manager.support, data, this);
+                               ClusterImpl cluster = info.getForUpdate();
+                               processor.process(cluster);
+                               manager.update(uid, cluster);
+                       } else {
+                               throw new IllegalStateException();
+                       }
+               } catch (Throwable t) {
+                       t.printStackTrace();
+               }
+               
+       }
+       
+       @Override
+       public String toString() {
+           StringBuilder sb = new StringBuilder();
+           sb.append("ClusterUpdateOperation [uid=").append(uid).append("] [info=").append(info).append("] [ccs=").append(ccs).append("] [chunk=").append(chunk).append("]");
+           return sb.toString();
+       }
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterUpdateState.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterUpdateState.java
new file mode 100644 (file)
index 0000000..5ce4688
--- /dev/null
@@ -0,0 +1,51 @@
+package org.simantics.acorn.lru;
+
+import java.util.concurrent.Semaphore;
+
+import org.simantics.db.service.ClusterUID;
+
+public class ClusterUpdateState {
+       
+       final ClusterUID uid;
+       final Semaphore lock = new Semaphore(0);
+       int referenceCount = 0;
+       boolean inUpdate = false;
+       
+       ClusterUpdateState(ClusterUID uid) {
+               this.uid = uid;
+       }
+       
+       public void waitForUpdates() {
+               try {
+                       lock.acquire();
+               } catch (InterruptedException e) {
+                       e.printStackTrace();
+               }
+       }
+       
+       public synchronized void beginUpdate() {
+//         System.err.println("ClusterUpdateState.beginUpdate() for " + uid + ", inUpdate=" + inUpdate);
+               assert(!inUpdate);
+               inUpdate = true;
+       }
+       
+       public synchronized void endUpdate() {
+//         System.err.println("ClusterUpdateState.endUpdate() for " + uid + ", inUpdate=" + inUpdate);
+               assert(inUpdate);
+               inUpdate = false;
+       }
+       
+       public synchronized void incRef() {
+               referenceCount++;
+       }
+       
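+       /*
+        * When the reference count drops to zero the semaphore is flooded with
+        * permits so that every current and future waitForUpdates() caller
+        * proceeds without blocking.
+        */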
+       public synchronized ClusterUpdateState decRef() {
+               referenceCount--;
+               if(referenceCount == 0) {
+                       lock.release(Integer.MAX_VALUE);
+                       return null;
+               }
+               return this;
+       }
+       
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/FileInfo.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/FileInfo.java
new file mode 100644 (file)
index 0000000..660f245
--- /dev/null
@@ -0,0 +1,139 @@
+package org.simantics.acorn.lru;
+
+import java.nio.file.Path;
+
+import org.simantics.db.Database.Session.ResourceSegment;
+import org.simantics.db.server.ProCoreException;
+import org.simantics.utils.datastructures.Pair;
+
+import gnu.trove.list.array.TByteArrayList;
+
+public class FileInfo extends LRUObject<String, FileInfo> {
+
+       private TByteArrayList bytes;
+       
+       // Stub
+       public FileInfo(LRU<String, FileInfo> LRU, Path readDir, String id, int offset, int length) {
+               super(LRU, id, readDir, id.toString() + ".extFile", offset, length, false, false);
+               LRU.map(this);
+       }
+       
+       // New
+       public FileInfo(LRU<String, FileInfo> LRU, String id, int size) {
+               super(LRU, id, LRU.getDirectory(), id.toString() + ".extFile", true, true);
+               this.bytes = new TByteArrayList(size);
+               LRU.insert(this, accessTime);
+       }
+
+       public byte[] getResourceFile() {
+               
+               if(VERIFY) verifyAccess();
+               
+               makeResident();
+               return bytes.toArray();
+
+       }
+       
+       
+       public ResourceSegment getResourceSegment(final byte[] clusterUID,
+                       final int resourceIndex, final long segmentOffset, short segmentSize)
+                       throws ProCoreException {
+
+               if(VERIFY) verifyAccess();
+
+               makeResident();
+
+               try {
+
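+                       // segmentSize is interpreted as an unsigned 16-bit length;
+                       // -1 requests the whole value, capped at 65535 bytes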
+                       int segSize = segmentSize;
+                       if (segSize < 0)
+                               segSize += 65536;
+                       if (segmentSize == -1)
+                               segSize = Math.min(65535, bytes.size());
+
+                       final long valueSize = bytes.size();
+
+                       final byte[] segment = bytes.toArray((int) segmentOffset, segSize);
+
+                       return new ResourceSegment() {
+
+                               @Override
+                               public long getValueSize() {
+                                       return valueSize;
+                               }
+
+                               @Override
+                               public byte[] getSegment() {
+                                       return segment;
+                               }
+
+                               @Override
+                               public int getResourceIndex() {
+                                       return resourceIndex;
+                               }
+
+                               @Override
+                               public long getOffset() {
+                                       return segmentOffset;
+                               }
+
+                               @Override
+                               public byte[] getClusterId() {
+                                       return clusterUID;
+                               }
+                       };
+
+               } catch (Throwable t) {
+
+                       t.printStackTrace();
+
+               }
+
+               throw new UnsupportedOperationException();
+
+       }
+       
+       public void updateData(byte[] newBytes, long offset, long pos, long size) {
+
+               if(VERIFY) verifyAccess();
+               makeResident();
+
+               if(size == 0) {
+                       bytes.remove((int)offset, (int)(bytes.size()-offset));
+               } else {
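+                       // fill with fromIndex == toIndex only grows the list,
+                       // zero-padded, to offset + size so the subsequent set fits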
+                       bytes.fill((int) (offset + size), (int) (offset + size), (byte) 0);
+                       bytes.set((int) offset, newBytes, (int) pos, (int) size);
+               }
+               
+               setDirty();
+               
+       }
+       
+       @Override
+       public Pair<byte[], Integer> toBytes() {
+               byte[] result = bytes.toArray();
+               release();
+               return Pair.make(result, result.length);
+       }
+       
+       @Override
+       protected void release() {
+               bytes = null;
+       }
+
+       @Override
+       public void fromFile(byte[] data) {
+               bytes = new TByteArrayList(data);
+       }
+
+       @Override
+       protected String getExtension() {
+               return "extFile";
+       }
+       
+       @Override
+       protected boolean overwrite() {
+               return true;
+       }
+
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRU.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRU.java
new file mode 100644 (file)
index 0000000..508127d
--- /dev/null
@@ -0,0 +1,624 @@
+package org.simantics.acorn.lru;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+
+import org.simantics.acorn.GraphClientImpl2;
+import org.simantics.db.common.utils.Logger;
+
+/*
+ * The order rule of synchronization for LRU and LRUObject is:
+ *   § Always lock LRUObject first!
+ * 
+ */
+
+public class LRU<MapKey,MapValue extends LRUObject<MapKey, MapValue>> {
+       
+       public static boolean VERIFY = true;
+
+       final private long swapTime = 5L*1000000000L;
+       final private int swapSize = 200;
+
+       final private HashMap<MapKey, MapValue> map = new HashMap<MapKey, MapValue>();
+       final private TreeMap<Long, MapKey> priorityQueue = new TreeMap<Long, MapKey>();
+       
+       final private Semaphore mutex = new Semaphore(1);
+       final private String identifier;
+       
+       private Path writeDir;
+       
+       private Thread mutexOwner;
+       
+       public Map<String, WriteRunnable> pending = new HashMap<String, WriteRunnable>();
+       
+       public LRU(String identifier, Path writeDir) {
+               this.identifier = identifier;
+               this.writeDir = writeDir;
+               resume();
+       }
+       
+       /*
+        * Public interface
+        */
+       
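+       /*
+        * Mutex acquisition retries in 3-second slices, logging the current
+        * owner (tracked only when VERIFY is set) to help diagnose stuck locks.
+        */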
+       public void acquireMutex() {
+               
+               try {
+                       
+                       while(!mutex.tryAcquire(3, TimeUnit.SECONDS)) {
+                               System.err.println("Mutex is taking a long time to acquire - owner is " + mutexOwner);
+                       }
+                               
+                       if(VERIFY)
+                               mutexOwner = Thread.currentThread();
+                               
+               } catch (InterruptedException e) {
+                       throw new IllegalStateException(e);
+               }
+       }
+       
+       public void releaseMutex() {
+               mutex.release();
+               mutexOwner = null;
+       }
+
+       public void shutdown() {
+               if (GraphClientImpl2.DEBUG)
+                       System.err.println("Shutting down LRU writers " + writers);
+               writers.shutdown();
+               try {
+                       writers.awaitTermination(60, TimeUnit.SECONDS);
+               } catch (InterruptedException e) {
+                       e.printStackTrace();
+               }
+
+       }
+       
+       public void resume() {
+               writers = new ScheduledThreadPoolExecutor(2, new ThreadFactory() {
+                       
+                       @Override
+                       public Thread newThread(Runnable r) {
+                               return new Thread(r, identifier + " File Writer");
+                       }
+                       
+               });
+               if (GraphClientImpl2.DEBUG)
+                   System.err.println("Resuming LRU writers " + writers);
+       }
+
+       /*
+        * This method violates the synchronization order rule between LRU and MapValue.
+        * External synchronization is used to ensure correct operation.
+        */
+       public void persist(ArrayList<String> state) {
+       
+               acquireMutex();
+               
+               try {
+               
+                       for (MapValue value : values()) {
+                               value.acquireMutex();
+                               // for debugging purposes
+                               boolean persisted = false;
+                               try {
+                                       // Persist the value if needed
+                                       persisted = value.persist();
+                               } finally {
+                                       // WriteRunnable may need to take the mutex, so release it
+                                       // before waiting for the pending write
+                                       value.releaseMutex();
+                               }
+                               // Wait for the pending write if the value was actually persisted
+                               waitPending(value, false);
+                               // Take lock again
+                               value.acquireMutex();
+                               try {
+                                       // Record the value
+                                       state.add(value.getStateKey());
+                               } catch (Throwable t) {
+                                       throw new IllegalStateException(t);
+                               } finally {
+                                       value.releaseMutex();
+                               }
+                       }
+                       
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+
+       }
+
+       public MapValue getWithoutMutex(MapKey key) {
+               
+               acquireMutex();
+               try {
+                       return get(key);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+               
+       }
+
+       public MapValue get(MapKey key) {
+               
+               if(VERIFY) verifyAccess();
+               
+               return map.get(key);
+               
+       }
+
+       public void map(MapValue info) {
+               
+               if(VERIFY) verifyAccess();
+               
+               map.put(info.getKey(), info);
+               
+       }
+
+       public Collection<MapValue> values() {
+               
+               if(VERIFY) verifyAccess();
+               
+               return map.values();
+               
+       }
+
+       public boolean swapForced() {
+               
+               acquireMutex();
+               
+               try {
+                       return swap(0, 0, null);
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+                       releaseMutex();
+               }
+               
+       }
+
+       public boolean swap(long lifeTime, int targetSize) {
+               
+               if(VERIFY) verifyAccess();
+
+               return swap(lifeTime, targetSize, null);
+               
+       }
+
+       /*
+        * This is called under global lock
+        */
+       public void setWriteDir(Path dir) {
+               
+               this.writeDir = dir;
+               
+       }
+
+
+       /*
+        * Package access
+        */
+
+       void insert(MapValue info, long accessTime) {
+               
+               if(VERIFY) verifyAccess();
+
+               map.put(info.getKey(), info);
+               priorityQueue.put(accessTime, info.getKey());
+               
+       }
+
+       /*
+        * We have access to ClusterLRU - try to refresh value if available
+        */
+       boolean tryRefresh(MapValue info) {
+
+               if(VERIFY) verifyAccess();
+               
+               if(!info.tryAcquireMutex())
+                       return false;
+
+               try {
+
+                       priorityQueue.remove(info.getLastAccessTime());
+                       info.accessed();
+                       map.put(info.getKey(), info);
+                       priorityQueue.put(info.getLastAccessTime(), info.getKey());
+                       
+                       return true;
+
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+
+                       info.releaseMutex();
+                       
+               }
+               
+       }
+
+       /*
+        * We have access to MapValue and no access to clusterLRU
+        */
+       void refresh(MapValue info, boolean needMutex) {
+               
+               if(VERIFY) {
+                       if(!needMutex) verifyAccess();
+                       info.verifyAccess();
+               }
+               
+               if(needMutex)
+                       acquireMutex();
+
+               try {
+
+                       priorityQueue.remove(info.getLastAccessTime());
+                       info.accessed();
+                       map.put(info.getKey(), info);
+                       priorityQueue.put(info.getLastAccessTime(), info.getKey());
+
+               } catch (Throwable t) {
+                       throw new IllegalStateException(t);
+               } finally {
+
+                       if(needMutex)
+                               releaseMutex();
+                       
+               }
+               
+       }
+
+       /*
+        * Private implementation
+        */
+
+       public int size() {
+               
+               if(VERIFY) verifyAccess();
+
+               return priorityQueue.size();
+               
+       }
+
+       boolean swap(MapKey excluded) {
+               
+               if(VERIFY) verifyAccess();
+
+               return swap(swapTime, swapSize, excluded);
+               
+       }
+
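+       /*
+        * Tries to evict a single value: picks the least recently used candidate
+        * (skipping 'excluded') and persists it if its mutex is free and it can
+        * currently be persisted.
+        */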
+       boolean swap(long lifeTime, int targetSize, MapKey excluded) {
+
+               if(VERIFY) verifyAccess();
+
+               MapValue valueToSwap = getValueToSwap(lifeTime, targetSize, excluded);
+               if(valueToSwap != null) {
+                       
+                       if(valueToSwap.tryAcquireMutex()) {
+                               
+                               try {
+                                       
+                                       if(valueToSwap.canBePersisted()) {
+                                               valueToSwap.persist();
+                                               return true;
+                                       }
+                                       
+                               } catch (Throwable t) {
+                                       throw new IllegalStateException(t);
+                               } finally {
+                                       valueToSwap.releaseMutex();
+                               }
+                       }
+                       
+               }
+               
+               return false;
+
+       }
+
+       
+       private MapValue getValueToSwap1(long lifeTime, int targetSize, MapKey excluded) {
+
+               if(VERIFY) verifyAccess();
+
+               for(int i=0;i<10;i++) {
+
+                       long candidate = getSwapCandidate(lifeTime, targetSize);
+                       if(candidate == 0) return null;
+                       
+                       MapKey key = priorityQueue.remove(candidate);
+                       if(key.equals(excluded)) {
+                               tryRefresh(map.get(key));
+                               continue;
+                       }
+                       
+                       return map.get(key);
+                       
+               }
+               
+               return null;
+
+       }
+       
+       
+       private MapValue getValueToSwap(long lifeTime, int targetSize, MapKey excluded) {
+
+               if(VERIFY) verifyAccess();
+
+               for(int i=0;i<10;i++) {
+                       
+                       // Lock LRU and get a candidate
+                       MapValue value = getValueToSwap1(lifeTime, targetSize, excluded);
+                       if(value == null) return null;
+                       
+                       if(value.tryAcquireMutex()) {
+
+                               try {
+                                       
+                                       // This may lock the object
+                                       if(value.canBePersisted()) return value;
+                                       // Insert back the value
+                                       refresh(value, false);
+       
+                               } catch (Throwable t) {
+                                       throw new IllegalStateException(t);
+                               } finally {
+                                       
+                                       value.releaseMutex();
+                                       
+                               }
+                               
+                       }
+                       
+               }
+               
+               return null;
+               
+       }
+       
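+       /*
+        * Returns the access time of the least recently used entry if it is older
+        * than lifeTime or the queue has grown past targetSize, 0 if nothing
+        * should be swapped out.
+        */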
+       private long getSwapCandidate(long lifeTime, int targetSize) {
+               
+               if(VERIFY) verifyAccess();
+
+               if(priorityQueue.isEmpty()) return 0;
+
+               long currentTime = System.nanoTime();
+               Long lowest = priorityQueue.firstKey();
+
+               if(currentTime - lowest > lifeTime || priorityQueue.size() > targetSize) {
+                       return lowest;
+               }
+
+               return 0;
+
+       }
+       
+       /*
+        * Tries to persist this object. Can fail if the object cannot be persisted at this time.
+        * 
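+        * On success the actual write is handed off to the writers pool as a
+        * WriteRunnable and the object is immediately marked non-resident and clean.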
+        */
+       boolean persist(Object object_) {
+               
+               MapValue object = (MapValue)object_;
+               
+               if(VERIFY) object.verifyAccess();
+               
+               if(object.isDirty()) {
+
+                       // It is possible that this just became unpersistable. Fail here in this case.
+                       if(!object.canBePersisted()) {
+                               return false;
+                       }
+
+                       assert(object.isResident());
+
+                       Path f = writeDir.resolve(object.getFileName());
+
+                       WriteRunnable runnable = new WriteRunnable(f, object);
+                       
+                       synchronized(pending) {
+                               WriteRunnable existing = pending.put(object.getKey().toString(), runnable);
+                               assert(existing == null);
+                       }
+
+                       writers.execute(runnable);
+
+                       object.setResident(false);
+                       object.setDirty(false);
+
+                       return true;
+
+               } else if(object.isResident()) {
+
+                       object.release();
+                       object.setResident(false);
+                       return false;
+
+               }
+
+               return false;
+               
+       }
+       
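+       /*
+        * Ensures the value is resident: completes (or waits on) any pending write
+        * first, then reads the bytes back from file and refreshes the value in the
+        * LRU, possibly swapping another value out. Returns the number of bytes
+        * read from disk, or 0 if the value was already resident or loading failed.
+        */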
+       int makeResident(Object object_, boolean keepResident) {
+
+               MapValue object = (MapValue)object_;
+
+               if(VERIFY) object.verifyAccess();
+
+               try {
+
+                       object.setForceResident(keepResident);
+
+                       if(object.isResident()) {
+                               refresh(object, true);
+                               return 0;
+                       }
+
+                       waitPending(object, true);
+
+                       byte[] data = object.readFile();
+
+                       object.fromFile(data);
+                       object.setResident(true);
+
+                       acquireMutex();
+                       try {
+                               refresh(object, false);
+                               swap(swapTime, swapSize, object.getKey());
+                       } catch (Throwable t) {
+                               throw new IllegalStateException(t);
+                       } finally {
+                               releaseMutex();
+                       }
+
+                       return data.length;
+
+               } catch (IOException e) {
+                       
+                       e.printStackTrace();
+                       
+               }
+               
+               return 0;
+               
+       }
+
+       static int readCounter = 0;
+       static int writeCounter = 0;
+       
+       ScheduledThreadPoolExecutor writers;
+       
+       void waitPending(MapValue value, boolean hasMutex) {
+               
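+               // Either claim the queued write and run it on this thread, or wait for a
+               // write that is already in progress; the 'committed' flag marks a write
+               // as claimed so it is performed exactly once.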
+               WriteRunnable r = null;
+               boolean inProgress = false;
+               synchronized(pending) {
+                       r = pending.get(value.getKey().toString());
+                       if(r != null) {
+                               synchronized(r) {
+                                       if(r.committed) {
+                                               // just being written - just need to wait
+                                               inProgress = true;
+                                       } else {
+                                               r.committed = true;
+                                               // we do the writing
+                                       }
+                               }
+                       }
+               }
+               if(r != null) {
+                       if(inProgress) {
+//                             System.err.println("reader waits for WriteRunnable to finish");
+                               try {
+                                       r.s.acquire();
+                               } catch (InterruptedException e) {
+                                       e.printStackTrace();
+                               }
+                       } else {
+//                             System.err.println("reader took WriteRunnable");
+                               try {
+                                       r.runReally(hasMutex);
+                               } catch (Throwable e) {
+                                       e.printStackTrace();
+                                       Logger.defaultLogError(e);
+                               }
+                       }
+               }
+               
+       }
+       
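+       /*
+        * Persists 'impl' into the target file. The semaphore is released with
+        * Integer.MAX_VALUE permits once the write completes so that every thread
+        * blocked in waitPending can proceed.
+        */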
+       public class WriteRunnable implements Runnable {
+
+               Path bytes;
+               MapValue impl;
+               boolean committed = false;
+               private Semaphore s = new Semaphore(0);
+               
+               WriteRunnable(Path bytes, MapValue impl) {
+                       this.bytes = bytes;
+                       this.impl = impl;
+               }
+               
+               @Override
+               public void run() {
+                       synchronized(impl) {
+
+                               synchronized(this) {
+                               
+                                       if(committed) return;
+                                       
+                                       committed = true;
+                               
+                               }
+                               try {
+                                       runReally(false);
+                               } catch (Throwable e) {
+                                       e.printStackTrace();
+                                       Logger.defaultLogError(e);
+                               }
+                       }
+               }
+
+               public void runReally(boolean hasMutex) throws IOException {
+               
+                       if(!hasMutex)
+                               impl.acquireMutex();
+                       
+                       try {
+                               
+                               // These have been set in method persist
+                               assert(!impl.isResident());
+                               assert(!impl.isDirty());
+                               
+                               impl.toFile(bytes);
+
+                               synchronized(pending) {
+                                       pending.remove(impl.getKey().toString());
+                                       s.release(Integer.MAX_VALUE);
+                               }
+                       } finally {
+                               if(!hasMutex)
+                                       impl.releaseMutex();
+                       }
+                       
+               }
+       
+       }
+       
+       public Path getDirectory() {
+               return writeDir;
+       }
+       
+       /*
+        * Protected implementation 
+        * 
+        */
+       
+       protected void verifyAccess() {
+//         assert (mutex.availablePermits() == 0);
+               if (mutex.availablePermits() != 0)
+                       throw new IllegalStateException("identifier=" + identifier + " mutex has " + mutex.availablePermits() + " available permits, should be 0! Current mutexOwner is " + mutexOwner);
+       }
+       
+       /*
+        * Private implementation 
+        * 
+        */
+       
+       
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRUObject.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRUObject.java
new file mode 100644 (file)
index 0000000..1079bf5
--- /dev/null
@@ -0,0 +1,236 @@
+package org.simantics.acorn.lru;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+
+import org.simantics.acorn.FileIO;
+import org.simantics.acorn.Persistable;
+import org.simantics.utils.datastructures.Pair;
+
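+/*
+ * Base class for values managed by LRU. Tracks residency, dirtiness and the
+ * position (offset/length) of the value's persisted bytes within its file.
+ */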
+public abstract class LRUObject<MapKey, MapValue extends LRUObject<MapKey, MapValue>> implements Persistable {
+       
+       public static boolean VERIFY = true;
+       
+       // Final stuff
+       final protected LRU<MapKey, MapValue> LRU;
+       final private Semaphore mutex = new Semaphore(1);
+       final private MapKey key;
+       final private String fileName;
+       
+       // Mutable stuff
+       protected long accessTime = AccessTime.getInstance().getAccessTime();
+       private int offset;
+       private int length;
+       private boolean resident = true;
+       private boolean dirty = true;
+       private boolean forceResident = false;
+       
+       // DEBUG
+//     private boolean isForceResidentSetAfterLastGet = false;
+       
+       private Path readDirectory;
+
+       private Thread mutexOwner;
+
+       // for loading
+       public LRUObject(LRU<MapKey, MapValue> LRU, MapKey key, Path readDirectory, String fileName, int offset, int length, boolean dirty, boolean resident) {
+               this.LRU = LRU;
+               this.key = key;
+               this.fileName = fileName;
+               this.offset = offset;
+               this.length = length;
+               this.readDirectory = readDirectory;
+               this.dirty = dirty;
+               this.resident = resident;
+       }
+
+       // for creating
+       public LRUObject(LRU<MapKey, MapValue> LRU, MapKey key, Path readDirectory, String fileName, boolean dirty, boolean resident) {
+               this(LRU, key, readDirectory, fileName, -1, -1, dirty, resident);
+       }
+
+       /*
+        * Public interface
+        */
+       public MapKey getKey() {
+               // This can be called without mutex
+               return key;
+       }
+       
+       public void acquireMutex() {
+               
+               try {
+
+                       while(!mutex.tryAcquire(3, TimeUnit.SECONDS)) {
+                               System.err.println("Mutex is taking a long time to acquire - owner is " + mutexOwner);
+                       }
+                       
+                       if(VERIFY)
+                               mutexOwner = Thread.currentThread();
+
+               } catch (InterruptedException e) {
+                       throw new IllegalStateException(e);
+               }
+       }
+       
+       public boolean tryAcquireMutex() {
+               return mutex.tryAcquire();
+       }
+       
+       public void releaseMutex() {
+               mutex.release();
+       }
+
+       @Override
+       public void toFile(Path bytes) throws IOException {
+               if(VERIFY) verifyAccess();
+               Pair<byte[],Integer> pair = toBytes();
+               byte[] data = pair.first;
+               int length = pair.second;
+               FileIO fio = FileIO.get(bytes);
+               int offset = fio.saveBytes(data, length, overwrite());
+               setPosition(offset, length);
+       }
+       
+       public int makeResident() {
+               if(VERIFY) verifyAccess();
+               return LRU.makeResident(this, false);
+       }
+
+       public int makeResident(boolean keepResident) {
+               if(VERIFY) verifyAccess();
+               return LRU.makeResident(this, keepResident);
+       }
+
+       /*
+        * Package implementation details
+        */
+
+       abstract void release();
+       abstract String getExtension();
+       
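+       /*
+        * State key format: <key>#<directory name>#<offset>#<length>.
+        * Requires a valid persisted position (offset != -1).
+        */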
+       String getStateKey() {
+               String result = getKey().toString() + "#" + getDirectory().getFileName() + "#" + getOffset() + "#" + getLength();
+               if(offset == -1)
+                       throw new IllegalStateException(result);
+               return result;
+       }
+
+       long getLastAccessTime() {
+               if(VERIFY) verifyAccess();
+               return accessTime;
+       }
+       
+       void accessed() {
+               if(VERIFY) verifyAccess();
+               accessTime = AccessTime.getInstance().getAccessTime();
+       }
+       
+       boolean persist() {
+               if(VERIFY) verifyAccess();
+               if(LRU.persist(this)) {
+                       readDirectory = LRU.getDirectory();
+                       return true;
+               } else {
+                       return false;
+               }
+       }
+       
+       void setForceResident(boolean value) {
+               if(VERIFY) verifyAccess();
+               forceResident = value;
+//             isForceResidentSetAfterLastGet = true;
+       }
+       
+       boolean canBePersisted() {
+               if(VERIFY) verifyAccess();
+//             isForceResidentSetAfterLastGet = false;
+               return !forceResident;
+       }
+       
+       boolean isDirty() {
+               if(VERIFY) verifyAccess();
+               return dirty;
+       }
+       
+       boolean isResident() {
+               if(VERIFY) verifyAccess();
+               return resident;
+       }
+       
+       String getFileName() {
+               if(VERIFY) verifyAccess();
+               return fileName;
+       }
+
+       void setResident(boolean value) {
+               if(VERIFY) verifyAccess();
+               resident = value;
+       }
+       
+       void setDirty(boolean value) {
+               if(VERIFY) verifyAccess();
+               dirty = value;
+       }
+       
+       byte[] readFile() throws IOException {
+               if(VERIFY) verifyAccess();
+               Path dir = getDirectory();
+               Path f = dir.resolve(getFileName());
+               FileIO fio = FileIO.get(f);
+               return fio.readBytes(getOffset(), getLength());
+       }
+       
+       /*
+        * Protected implementation details
+        */
+
+       abstract protected boolean overwrite();
+       
+       abstract protected Pair<byte[],Integer> toBytes();
+       
+       protected void setDirty() {
+               if(VERIFY) verifyAccess();
+               dirty = true;
+       }
+       
+       protected void verifyAccess() {
+               assert(mutex.availablePermits() == 0);
+       }
+
+       protected synchronized void cancelForceResident() {
+               setForceResident(false);
+       }
+       
+       /*
+        * Private implementation details
+        */
+       
+       private int getOffset() {
+               if(VERIFY) verifyAccess();
+               return offset;
+       }
+       
+       private int getLength() {
+               if(VERIFY) verifyAccess();
+               return length;
+       }
+       
+       private void setPosition(int offset, int length) {
+               if(VERIFY) verifyAccess();
+               if(offset == -1)
+                   throw new IllegalStateException();
+               this.offset = offset;
+               this.length = length;
+               if(overwrite() && offset > 0)
+                       throw new IllegalStateException();
+       }
+       
+       private Path getDirectory() {
+               if(VERIFY) verifyAccess();
+               return readDirectory;
+       }
+       
+}
\ No newline at end of file
diff --git a/bundles/org.simantics.acorn/src/org/simantics/db/javacore/HeadState.java b/bundles/org.simantics.acorn/src/org/simantics/db/javacore/HeadState.java
new file mode 100644 (file)
index 0000000..5a96be2
--- /dev/null
@@ -0,0 +1,73 @@
+package org.simantics.db.javacore;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.Serializable;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import org.simantics.acorn.InvalidHeadStateException;
+
+public class HeadState implements Serializable {
+
+    private static final long serialVersionUID = -4135031566499790077L;
+
+    public int headChangeSetId = 0;
+    public long transactionId = 1;
+    public long reservedIds = 3;
+
+    public ArrayList<String> clusters = new ArrayList<>();
+    public ArrayList<String> files = new ArrayList<>();
+    public ArrayList<String> stream = new ArrayList<>();
+    public ArrayList<String> cs = new ArrayList<>();
+//    public ArrayList<String> ccs = new ArrayList<String>();
+
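+    // head.state layout, as read below: the first digestLength bytes are the
+    // SHA-1 digest of the payload; the rest is the Java-serialized HeadState.
+    // A missing or unreadable file falls back to a fresh default state.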
+    public static HeadState load(Path directory) throws InvalidHeadStateException {
+        Path f = directory.resolve("head.state");
+        try {
+            byte[] bytes = Files.readAllBytes(f);
+            MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
+            int digestLength = sha1.getDigestLength();
+            sha1.update(bytes, digestLength, bytes.length - digestLength);
+            byte[] newChecksum = sha1.digest();
+            if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) {
+                throw new InvalidHeadStateException(
+                        "Checksum " + Arrays.toString(newChecksum) + " does not match excpected "
+                                + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + f.toAbsolutePath());
+            }
+            try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes, digestLength, bytes.length - digestLength))) {
+                HeadState state = (HeadState) ois.readObject();
+                return state;
+            }
+        } catch (IOException i) {
+            return new HeadState();
+        } catch (ClassNotFoundException c) {
+//            throw new Error("HeadState class not found", c);
+            return new HeadState();
+        } catch (NoSuchAlgorithmException e) {
+            throw new Error("SHA-1 Algorithm not found", e);
+        }
+    }
+
+    public static void validateHeadStateIntegrity(Path headState) throws InvalidHeadStateException, IOException {
+        try {
+            byte[] bytes = Files.readAllBytes(headState);
+            MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
+            int digestLength = sha1.getDigestLength();
+            sha1.update(bytes, digestLength, bytes.length - digestLength);
+            byte[] newChecksum = sha1.digest();
+            if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) {
+                throw new InvalidHeadStateException(
+                        "Checksum " + Arrays.toString(newChecksum) + " does not match excpected "
+                                + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + headState.toAbsolutePath());
+            }
+        } catch (NoSuchAlgorithmException e) {
+            throw new Error("SHA-1 digest not found, should not happen", e);
+        }
+    }
+}
index f33c2aa2c88c6d4f2834d4c9750d008801d9398a..4ce75ff74df146bc66ae9f010e2d444a02fa6869 100644 (file)
Binary files a/bundles/org.simantics.action.ontology/graph.tg and b/bundles/org.simantics.action.ontology/graph.tg differ
index c4260ad42fef8e740a2d5ec4015c49968167cb11..2a28024a302837e5e154a0d77475359f2025c8d5 100644 (file)
Binary files a/bundles/org.simantics.annotation.ontology/graph.tg and b/bundles/org.simantics.annotation.ontology/graph.tg differ
index 022de1735b3f377a077bbc56b320d9bf79fb835c..ecdc7c302aa463b9939d114ba5cd6b88ce6f02a1 100644 (file)
@@ -1,6 +1,5 @@
 source.. = src/\r
 output.. = bin/\r
-bin.includes = plugin.xml,\\r
-               META-INF/,\\r
+bin.includes = META-INF/,\\r
                .,\\r
                graph.tg\r
index bcb6d85622b9a98b77b9f2afc83ec82e386dc77f..db70f2a0ed037dbf285ff685a32fb952bf744703 100644 (file)
Binary files a/bundles/org.simantics.backup.ontology/graph.tg and b/bundles/org.simantics.backup.ontology/graph.tg differ
index e12972894b34f5cc0fed2723a8dd1980a285c522..6dbe35e69abb0ce87e59349b65fb1eb8a8141d22 100644 (file)
Binary files a/bundles/org.simantics.browsing.ui.ontology/graph.tg and b/bundles/org.simantics.browsing.ui.ontology/graph.tg differ
index e5cfc80f5d03aca0e0442c25485b11a494ddcd84..f7506b993e100f5333fd226cce0de20476dfb547 100644 (file)
Binary files a/bundles/org.simantics.charts.ontology/graph.tg and b/bundles/org.simantics.charts.ontology/graph.tg differ
index 8c7987177fb32145fe27d62a0c0a7585d998ab38..9329d83c4328f68398cabc8418e153d802b09246 100644 (file)
@@ -134,16 +134,20 @@ public abstract class DecompressingInputStream extends InputStream {
         return true;\r
     }\r
 \r
-    private static ByteBuffer ensureBufferSize(ByteBuffer buffer, int minCapacity) {\r
+    private ByteBuffer ensureBufferSize(ByteBuffer buffer, int minCapacity) {\r
         int oldCapacity = buffer != null ? buffer.capacity() : 0;\r
         if (buffer == null || oldCapacity < minCapacity) {\r
             int newCapacity = grow(oldCapacity, minCapacity);\r
             //System.out.println("ensureBufferSize(" + oldCapacity + ", " + minCapacity + "), new capacity " + newCapacity);\r
-            buffer = ByteBuffer.allocateDirect(newCapacity);\r
+            buffer = allocateBuffer(newCapacity);\r
         }\r
         return buffer;\r
     }\r
 \r
+    protected ByteBuffer allocateBuffer(int capacity) {\r
+        return ByteBuffer.allocateDirect(capacity);\r
+    }\r
+    \r
     /**\r
      * @param oldCapacity current capacity of a buffer\r
      * @param minCapacity\r
index dde498a2c38bdcadbe3fb025cce664e2e0575c74..a11579f07e774404394e448daf3a4d4799651512 100644 (file)
@@ -45,8 +45,7 @@
 \r
 package org.simantics.databoard.util;\r
 \r
-import java.nio.charset.Charset;\r
-import java.util.ArrayList;\r
+import java.util.Arrays;\r
 import java.util.List;\r
 \r
 \r
@@ -194,13 +193,14 @@ public final class URIStringUtils {
             return name;\r
     }\r
 \r
-    final private static int HTTP_POSITION = "http://".length();\r
+    final private static String HTTP_PREFIX = "http://";\r
+    final private static int HTTP_POSITION = HTTP_PREFIX.length();\r
 \r
     public static String[] splitURI(String uri) {\r
         int nextPathSeparator = uri.lastIndexOf(URIStringUtils.NAMESPACE_PATH_SEPARATOR);\r
         if (nextPathSeparator == -1) return null;\r
         if (nextPathSeparator == HTTP_POSITION - 1) {\r
-            if(uri.startsWith("http://")) return new String[] { "http://", uri.substring(HTTP_POSITION, uri.length()) };\r
+            if(uri.startsWith(HTTP_PREFIX)) return new String[] { HTTP_PREFIX, uri.substring(HTTP_POSITION, uri.length()) };\r
             else return null;\r
         }\r
         return new String[] {\r
@@ -208,12 +208,10 @@ public final class URIStringUtils {
                 uri.substring(nextPathSeparator + 1, uri.length())\r
         };\r
     }\r
-    \r
+\r
     public static List<String> splitURISCL(String uri) {\r
         String[] result = splitURI(uri);\r
-        ArrayList<String> list = new ArrayList<String>(result.length);\r
-        for(String s : result) list.add(s);\r
-        return list;\r
+        return Arrays.asList(result);\r
     }\r
 \r
     /**\r
@@ -263,8 +261,7 @@ public final class URIStringUtils {
     public static String escapeURI(String localName) {\r
         if (localName == null)\r
             throw new NullPointerException("null local name");\r
-        String result = encode(localName);\r
-        return result;\r
+        return encode(localName);\r
     }\r
 \r
     /**\r
@@ -276,8 +273,7 @@ public final class URIStringUtils {
      * @return the joined namespace\r
      */\r
     public static String appendURINamespace(String namespace, String suffix) {\r
-        //return namespace + NAMESPACE_PATH_SEPARATOR + suffix;\r
-        return new StringBuffer(namespace.length() + 1 + suffix.length())\r
+        return new StringBuilder(namespace.length() + 1 + suffix.length())\r
         .append(namespace)\r
         .append(NAMESPACE_PATH_SEPARATOR)\r
         .append(suffix)\r
@@ -293,9 +289,8 @@ public final class URIStringUtils {
      * @return the joined URI\r
      */\r
     public static String makeURI(String namespace, String localName) {\r
-        //return namespace + NAMESPACE_LOCAL_SEPARATOR + escapeURI(localName);\r
         String escapedLocalName = escapeURI(localName);\r
-        return new StringBuffer(namespace.length() + 1 + escapedLocalName.length())\r
+        return new StringBuilder(namespace.length() + 1 + escapedLocalName.length())\r
         .append(namespace)\r
         .append(NAMESPACE_LOCAL_SEPARATOR)\r
         .append(escapedLocalName)\r
@@ -332,94 +327,59 @@ public final class URIStringUtils {
     }\r
 \r
 \r
-    final private static Charset UTF8 = Charset.forName("UTF-8");\r
-    final private static Charset ASCII = Charset.forName("US-ASCII");\r
-\r
-    /* Copied and modified from Jena 2.4 com.hp.hpl.jena.util.URIref */\r
-    private static String encode(String unicode) {\r
-        boolean needsEscapes = needsEscaping(unicode);\r
-        if (!needsEscapes)\r
-            return unicode;\r
-\r
-        byte utf8[] = unicode.getBytes(UTF8);\r
-        byte rsltAscii[] = new byte[utf8.length * 6];\r
-        int in = 0;\r
-        int out = 0;\r
-        while (in < utf8.length) {\r
-            switch (utf8[in]) {\r
-                case (byte)'a': case (byte)'b': case (byte)'c': case (byte)'d': case (byte)'e': case (byte)'f': case (byte)'g': case (byte)'h': case (byte)'i': case (byte)'j': case (byte)'k': case (byte)'l': case (byte)'m': case (byte)'n': case (byte)'o': case (byte)'p': case (byte)'q': case (byte)'r': case (byte)'s': case (byte)'t': case (byte)'u': case (byte)'v': case (byte)'w': case (byte)'x': case (byte)'y': case (byte)'z':\r
-                case (byte)'A': case (byte)'B': case (byte)'C': case (byte)'D': case (byte)'E': case (byte)'F': case (byte)'G': case (byte)'H': case (byte)'I': case (byte)'J': case (byte)'K': case (byte)'L': case (byte)'M': case (byte)'N': case (byte)'O': case (byte)'P': case (byte)'Q': case (byte)'R': case (byte)'S': case (byte)'T': case (byte)'U': case (byte)'V': case (byte)'W': case (byte)'X': case (byte)'Y': case (byte)'Z':\r
-                case (byte)'0': case (byte)'1': case (byte)'2': case (byte)'3': case (byte)'4': case (byte)'5': case (byte)'6': case (byte)'7': case (byte)'8': case (byte)'9':\r
-                case (byte)';': case (byte)'?': case (byte)':': case (byte)'@': case (byte)'=': case (byte)'+': case (byte)'$': case (byte)',':\r
-                case (byte)'-': case (byte)'_': case (byte)'.': case (byte)'!': case (byte)'~': case (byte)'*': case (byte)'\'': case (byte)'(': case (byte)')':\r
-                case (byte)'[': case (byte)']':\r
-                    rsltAscii[out] = utf8[in];\r
-                    out++;\r
-                    in++;\r
-                    break;\r
-                case (byte)' ':\r
-                    rsltAscii[out++] = (byte) '%';\r
-                    rsltAscii[out++] = '2';\r
-                    rsltAscii[out++] = '0';\r
-                    in++;\r
-                    break;\r
-                case (byte) '%':\r
-                    // [lehtonen] NOTE: all input needs to be escaped, i.e. "%01" should result in "%2501", not "%01".\r
-                    // escape+unescape is a bijection, not an idempotent operation. \r
-                    // Fall through to to escape '%' as '%25'\r
-                case (byte) '#':\r
-                case (byte) '/':\r
-                    // Fall through to escape '/'\r
-                case (byte)'&':\r
-                    // Fall through to escape '&' characters to avoid them\r
-                    // being interpreted as SGML entities.\r
-                default:\r
-                    rsltAscii[out++] = (byte) '%';\r
-                    // Get rid of sign ...\r
-                    int c = (utf8[in]) & 255;\r
-                    rsltAscii[out++] = hexEncode(c / 16);\r
-                    rsltAscii[out++] = hexEncode(c % 16);\r
-                    in++;\r
-                    break;\r
-            }\r
-        }\r
-        return new String(rsltAscii, 0, out, ASCII);\r
-    }\r
-\r
     /*\r
      * RFC 3986 section 2.2 Reserved Characters (January 2005)\r
      * !*'();:@&=+$,/?#[]\r
      */\r
-    private static boolean needsEscaping(String unicode) {\r
+    private static boolean[] ESCAPED_US_ASCII_CHARS = new boolean[128];\r
+\r
+    static {\r
+        ESCAPED_US_ASCII_CHARS[' '] = true;\r
+        // IMPORTANT NOTE: every time escape is invoked, all input needs to be escaped,\r
+        // i.e. escape("%01") should result in "%2501", not "%01".\r
+        // escape and unescape form a bijection, where neither\r
+        // of them is an idempotent operation. \r
+        ESCAPED_US_ASCII_CHARS['%'] = true;\r
+        // '#' and '/' are URL segment/fragment delimiters, need to be escaped in names.\r
+        ESCAPED_US_ASCII_CHARS['#'] = true;\r
+        ESCAPED_US_ASCII_CHARS['/'] = true;\r
+        // Escape '&' characters to avoid them being interpreted as SGML entities.\r
+        ESCAPED_US_ASCII_CHARS['&'] = true;\r
+    }\r
+\r
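+    // Returns the number of characters in 'unicode' that need escaping;\r
+    // 0 means the string can be returned as-is.\r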
+    private static int needsEscaping(String unicode) {\r
         int len = unicode.length();\r
+        int escapeCount = 0;\r
         for (int i = 0; i < len; ++i) {\r
-            switch (unicode.charAt(i)) {\r
-                case (byte)'!':\r
-                case (byte)'*':\r
-                case (byte)'\'':\r
-                case (byte)'(':\r
-                case (byte)')':\r
-                case (byte)';':\r
-                case (byte)':':\r
-                case (byte)'@':\r
-                case (byte)'=': \r
-                case (byte)'+':\r
-                case (byte)'$':\r
-                case (byte)',':\r
-                case (byte)'?':\r
-                case (byte)'~':\r
-                case (byte)'[':\r
-                case (byte)']':\r
-                    break;\r
-                case (byte)' ':\r
-                case (byte) '#':\r
-                case (byte) '%':\r
-                case (byte) '/':\r
-                case (byte)'&':\r
-                    return true;\r
+            char ch = unicode.charAt(i);\r
+            if (ch < 128 && ESCAPED_US_ASCII_CHARS[ch])\r
+                ++escapeCount;\r
+        }\r
+        return escapeCount;\r
+    }\r
+\r
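+    // For example, matching the test cases below:\r
+    // encode("Something / Else") returns "Something%20%2F%20Else".\r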
+    private static String encode(String unicode) {\r
+        int needsEscapes = needsEscaping(unicode);\r
+        if (needsEscapes == 0)\r
+            return unicode;\r
+\r
+        int len = unicode.length();\r
+        char result[] = new char[(len - needsEscapes) + needsEscapes * 3];\r
+        int in = 0;\r
+        int out = 0;\r
+        while (in < len) {\r
+            char inCh = unicode.charAt(in++);\r
+            if (inCh >= 128 || !ESCAPED_US_ASCII_CHARS[inCh]) {\r
+                result[out++] = inCh;\r
+            } else {\r
+                // Only selected 7-bit US-ASCII characters are escaped\r
+                int c = inCh & 255;\r
+                result[out++] = '%';\r
+                result[out++] = (char) hexEncode(c / 16);\r
+                result[out++] = (char) hexEncode(c % 16);\r
             }\r
         }\r
-        return false;\r
+        return new String(result, 0, out);\r
     }\r
 \r
     private static boolean needsUnescaping(String unicode) {\r
@@ -427,13 +387,12 @@ public final class URIStringUtils {
     }\r
 \r
     /**\r
-     * Convert a URI, in US-ASCII, with escaped characters taken from UTF-8, to\r
-     * the corresponding Unicode string. On ill-formed input the results are\r
-     * undefined, specifically if the unescaped version is not a UTF-8 String,\r
-     * some String will be returned.\r
+     * Convert a URI, in UTF-16 with escaped characters taken from US-ASCII, to\r
+     * the corresponding unescaped Unicode string. On ill-formed input the results are\r
+     * undefined.\r
      * \r
      * @param uri the uri, in characters specified by RFC 2396 + '#'.\r
-     * @return the corresponding Unicode String.\r
+     * @return the corresponding unescaped Unicode String.\r
      * @exception IllegalArgumentException if a % hex sequence is ill-formed.\r
      */\r
     public static String unescape(String uri) {\r
@@ -441,26 +400,29 @@ public final class URIStringUtils {
             if (!needsUnescaping(uri))\r
                 return uri;\r
 \r
-            byte ascii[] = uri.getBytes("US-ASCII");\r
-            byte utf8[] = new byte[ascii.length];\r
+            int len = uri.length();\r
+            String unicode = uri;\r
+            char result[] = new char[len];\r
             int in = 0;\r
             int out = 0;\r
-            while ( in < ascii.length ) {\r
-                if (ascii[in] == (byte) '%') {\r
-                    in++;\r
-                    utf8[out++] = (byte) (hexDecode(ascii[in]) * 16 | hexDecode(ascii[in + 1]));\r
+            while (in < len) {\r
+                char inCh = unicode.charAt(in++);\r
+                if (inCh == '%') {\r
+                    char d1 = unicode.charAt(in);\r
+                    char d2 = unicode.charAt(in+1);\r
+                    if (d1 > 127 || d2 > 127)\r
+                        throw new IllegalArgumentException("Invalid hex digit escape sequence in " + uri + " at " + in);\r
+                    result[out++] = (char) (hexDecode((byte) d1) * 16 | hexDecode((byte) d2));\r
                     in += 2;\r
                 } else {\r
-                    utf8[out++] = ascii[in++];\r
+                    result[out++] = inCh;\r
                 }\r
             }\r
-            return new String(utf8, 0, out, "UTF-8");\r
+            return new String(result, 0, out);\r
         } catch (IllegalArgumentException e) {\r
             throw new IllegalArgumentException("Problem while unescaping string: " + uri, e);\r
-        } catch (java.io.UnsupportedEncodingException e) {\r
-            throw new Error("The JVM is required to support UTF-8 and US-ASCII encodings.");\r
-        } catch (ArrayIndexOutOfBoundsException ee) {\r
-            throw new IllegalArgumentException("Incomplete Hex escape sequence in " + uri);\r
+        } catch (IndexOutOfBoundsException ee) {\r
+            throw new IllegalArgumentException("Incomplete hex digit escape sequence in " + uri);\r
         }\r
     }\r
 \r
@@ -491,38 +453,36 @@ public final class URIStringUtils {
      * @param args\r
      */\r
     public static void main(String[] args) {\r
-        String s;\r
-        s = "http://www.vtt.fi%2FSome- %25 Namespace/Jotain";\r
-        System.out.println(String.format("escape+unescape: %s -> %s -> %s", s, escape(s), unescape(escape(s))));\r
-        s = "http://www.vtt.fi%2FPSK";\r
-        System.out.println(String.format("unescape: %s -> %s", s, unescape(s)));\r
-        s = "http://www.vtt.fi%2FSome-Namespace/Jotain / Muuta";\r
-        System.out.println(String.format("escape: %s -> %s", s, escape(s)));\r
-        s = "Jotain / Muuta";\r
-        System.out.println(String.format("escape: %s -> %s", s, escape(s)));\r
-\r
-        System.out.println("escapeURI: " + escapeURI("foo/bar/org%2Fnet"));\r
-        System.out.println("escapeURI('...#...'): " + escapeURI("foo/bar#org%2Fnet"));\r
-        s = makeURI("http://foo.bar.com/foo/bar", "baz/guuk/org%2Fnet");\r
+        String s = makeURI("http://foo.bar.com/foo/bar", "baz/guuk/org%2Fnet");\r
         System.out.println("escapeURI: " + s);\r
         System.out.println("getNamespace: " + getNamespace(s));\r
         System.out.println("getLocalName: " + getLocalName(s));\r
 \r
+        System.out.println("escapeURI: " + escapeURI("foo/bar/org%2Fnet"));\r
+        System.out.println("escapeURI('...#...'): " + escapeURI("foo/bar#org%2Fnet"));\r
+\r
         testEscape("/", "%2F");\r
         testEscape("#", "%23");\r
         testEscape("%", "%25");\r
         testEscape("%01", "%2501");\r
         testEscape("%GG", "%25GG");\r
+        testEscape("säätö venttiili", "säätö%20venttiili");\r
+        testEscape("säätö", "säätö");\r
+        testEscape("Something / Else", "Something%20%2F%20Else");\r
+        testEscape("http://www.vtt.fi%2FSome- %25 Namespace/Something", "http:%2F%2Fwww.vtt.fi%252FSome-%20%2525%20Namespace%2FSomething");\r
+        testEscape("http://www.vtt.fi/PSK", "http:%2F%2Fwww.vtt.fi%2FPSK");\r
+        testEscape("http://www.vtt.fi%2FSome-Namespace/Something / Else", "http:%2F%2Fwww.vtt.fi%252FSome-Namespace%2FSomething%20%2F%20Else");\r
     }\r
 \r
     private static void testEscape(String unescaped, String expectedEscaped) {\r
         String esc = escape(unescaped);\r
         String unesc = unescape(esc);\r
-        System.out.format("escape('%s')='%s', unescape('%s')='%s'\n", unescaped, esc, esc, unesc);\r
+        System.out.format("escape('%s') -> '%s', unescape('%s') -> '%s'", unescaped, esc, esc, unesc);\r
         if (!esc.equals(expectedEscaped))\r
             throw new AssertionError("escape('" + unescaped + "') was expected to return '" + expectedEscaped + "' but returned '" + esc + "'");\r
         if (!unesc.equals(unescaped))\r
             throw new AssertionError("unescape(escape('" + unescaped + "'))=unescape(" + esc + ") was expected to return '" + unescaped + "' but returned '" + unesc + "'");\r
+        System.out.println(" OK");\r
     }\r
 \r
 }\r
index df7545aff2d4c674bde9f34ea4e117051f754909..d4a8ba78f9ac0f0e21123af4aecdc4687160f3aa 100644 (file)
Binary files a/bundles/org.simantics.datatypes.ontology/graph.tg and b/bundles/org.simantics.datatypes.ontology/graph.tg differ
index d74cdea3664d0d48d24f5f739c10d1c00315c80e..f86c1499ddee0458013e7207a7109ca56ae6c86d 100644 (file)
@@ -6,7 +6,9 @@ Bundle-Version: 1.1.0.qualifier
 Bundle-Vendor: VTT Technical Research Centre of Finland\r
 Require-Bundle: org.eclipse.core.runtime,\r
  org.simantics.db.layer0;bundle-version="0.8.0",\r
- org.apache.lucene4;bundle-version="4.9.0",\r
+ org.apache.lucene4.core;bundle-version="4.9.0",\r
+ org.apache.lucene4.queryparser;bundle-version="4.9.0",\r
+ org.apache.lucene4.analyzers-common;bundle-version="4.9.0",\r
  org.simantics.db.services;bundle-version="0.8.0"\r
 Bundle-RequiredExecutionEnvironment: JavaSE-1.8\r
 Export-Package: org.simantics.db.indexing\r
index 54b892070e70e4e0c28f534fe3ec98e5e13018eb..9b0438042adf439991a5ffb939a2458563cedb12 100644 (file)
@@ -8,38 +8,38 @@ import org.simantics.db.procore.ProCoreDriver;
 \r
 public final class ProCoreUserAgent implements DatabaseUserAgent {\r
     private static Shell getShell() {\r
-        Shell shell = null;\r
-        Display d = getDisplay();\r
-        if (d == null)\r
-            return null;\r
-        shell = d.getActiveShell();\r
-        if (null == shell) {\r
-            Shell[] shells = d.getShells();\r
-            if (null != shells && shells.length > 0)\r
-                shell = shells[0];\r
-        }\r
-        return shell;\r
+       Shell shell = null;\r
+       Display d = getDisplay();\r
+       if (d == null)\r
+           return null;\r
+       shell = d.getActiveShell();\r
+       if (null == shell) {\r
+           Shell[] shells = d.getShells();\r
+           if (null != shells && shells.length > 0)\r
+               shell = shells[0];\r
+       }\r
+       return shell;\r
     }\r
     private static Display getDisplay() {\r
-        Display d = Display.getCurrent();\r
-        if (d == null)\r
-            d = Display.getDefault();\r
-        return d;\r
+       Display d = Display.getCurrent();\r
+       if (d == null)\r
+           d = Display.getDefault();\r
+       return d;\r
     }\r
-    @Override\r
-    public boolean handleStart(InternalException exception) {\r
-        Shell shell = getShell();\r
-        if (null == shell)\r
-            return false; // no can do\r
-        try {\r
-            return Auxiliary.handleStart(shell, exception);\r
-        } catch (InternalException e) {\r
-            return false; // no could do\r
-        }\r
-    }\r
-    \r
-    @Override\r
-    public String getId() {\r
-        return ProCoreDriver.ProCoreDriverName;\r
-    }\r
-}
\ No newline at end of file
+        @Override\r
+       public boolean handleStart(InternalException exception) {\r
+           Shell shell = getShell();\r
+           if (null == shell)\r
+               return false; // no can do\r
+           try {\r
+               return Auxiliary.handleStart(shell, exception);\r
+           } catch (InternalException e) {\r
+               return false; // no could do\r
+           }\r
+       }\r
+\r
+        @Override\r
+       public String getId() {\r
+           return ProCoreDriver.ProCoreDriverName;\r
+       }\r
+}\r
index 02d354a200add9f6f65a34d7c193934534d0089a..33a3871fc992a5906d0066b237bca8b2a6dec512 100644 (file)
Binary files a/bundles/org.simantics.desktop.ui.ontology/graph.tg and b/bundles/org.simantics.desktop.ui.ontology/graph.tg differ
index 0a440442c852641a211991fcb03ae32a57948373..eda90ce84ae96db74f5a8ad3256925185924be75 100644 (file)
Binary files a/bundles/org.simantics.diagram.ontology/graph.tg and b/bundles/org.simantics.diagram.ontology/graph.tg differ
index f98ba9855cd771ae8a9bb2c6abf3e623985a67c7..c321d212ce1dec3ce2362c7fcce3f2b035766e45 100644 (file)
Binary files a/bundles/org.simantics.document.base.ontology/graph.tg and b/bundles/org.simantics.document.base.ontology/graph.tg differ
index 626013f6a5d172b72a5f17d13b8d543fe6b82912..0a92a9653ccf8a0131436a5ac62a76761eed7110 100644 (file)
Binary files a/bundles/org.simantics.document.linking.ontology/graph.tg and b/bundles/org.simantics.document.linking.ontology/graph.tg differ
index 9c1b455d7f5e920431e54b54d7243bc385d845be..aadabaa9a92d5782509edce10315771b1e9dbcf2 100644 (file)
Binary files a/bundles/org.simantics.document.ontology/graph.tg and b/bundles/org.simantics.document.ontology/graph.tg differ
index e54faf72706b39259fbe810d0fcf242cc3d49751..fb597cc1cac5634dce3932cbda5b52645e80d882 100644 (file)
Binary files a/bundles/org.simantics.document.swt.ontology/graph.tg and b/bundles/org.simantics.document.swt.ontology/graph.tg differ
index 9153239bf0f77ec48ca6ba8c58abb412129e0dc3..357a686bda6c0ff3a1020104ec07593db5499560 100644 (file)
Binary files a/bundles/org.simantics.dublincore.ontology/graph.tg and b/bundles/org.simantics.dublincore.ontology/graph.tg differ
index 299e8e19c0750ea42593758624e4a0ea1ff0fd03..c7d1057251e080430afe0c4c1f707d401188461a 100644 (file)
@@ -24,38 +24,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\r
-THE SOFTWARE.\r
-\r
-\r
-LZ4  native/lz4{,hc}.{c,h}\r
----------------------------------------------------------------------\r
-   LZ4 - Fast LZ compression algorithm\r
-   Copyright (C) 2011-2012, Yann Collet.\r
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\r
-\r
-   Redistribution and use in source and binary forms, with or without\r
-   modification, are permitted provided that the following conditions are\r
-   met:\r
-\r
-       * Redistributions of source code must retain the above copyright\r
-   notice, this list of conditions and the following disclaimer.\r
-       * Redistributions in binary form must reproduce the above\r
-   copyright notice, this list of conditions and the following disclaimer\r
-   in the documentation and/or other materials provided with the\r
-   distribution.\r
-\r
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
-\r
-   You can contact the author at :\r
-   - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html\r
-   - LZ4 source repository : http://code.google.com/p/lz4/\r
+THE SOFTWARE.
\ No newline at end of file
index 134b4e1115c0fa6fbdded421502aeeb5f85f5179..8f9e06cd31bd008d4727a44070032a9c6a098b67 100644 (file)
@@ -1,12 +1,12 @@
 The native/ -directory contains the sources for the\r
-native parts of the FastLZ and LZ4 compressions algorithms.\r
+native parts of the FastLZ compression algorithm.\r
 \r
 To compile in the native libraries follow these instructions:\r
 \r
 == Windows ==\r
 \r
-* Install MS Visual Studio 2010 with C++ support and Microsoft SDK 7.1 to get 64-bit compiler\r
-* Open native/vs2010/fastlz.sln in visual studio\r
+* Install MS Visual Studio 2012 with C++ support and Microsoft Platform SDK for Windows Server 2003 R2 to get 64-bit compiler\r
+* Open native/vs2012/fastlz.sln in Visual Studio 2012\r
 * Select ''Batch Build'' from the solution context menu, select Win32 + x64 Release and press ''Rebuild''\r
 * The build will copy the resulting fastlz-windows-{x86,x86_64}.dll into src/\r
 \r
diff --git a/bundles/org.simantics.fastlz/native/Makefile b/bundles/org.simantics.fastlz/native/Makefile
deleted file mode 100644 (file)
index 8a2696a..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-OS := $(shell uname)
-
-ifeq ($(OS),Linux)
-  OUTPUT32  = lz4demo32
-  OUTPUT64  = lz4demo64
-else
-  OUTPUT32  = LZ4Demo32.exe
-  OUTPUT64  = LZ4Demo64.exe
-endif
-
-all: lz4demo64 lz4demo32 
-
-lz4demo64: lz4.c lz4.h lz4hc.c lz4hc.h bench.c lz4demo.c
-       gcc      -O3 -I. -std=c99 -Wall -W -Wundef -Wno-implicit-function-declaration lz4hc.c lz4.c bench.c lz4demo.c -o $(OUTPUT64)
-
-lz4demo32: lz4.c lz4.h lz4hc.c lz4hc.h bench.c lz4demo.c
-       gcc -m32 -Os -march=native -I. -std=c99 -Wall -W -Wundef -Wno-implicit-function-declaration lz4hc.c lz4.c bench.c lz4demo.c -o $(OUTPUT32)
-
-clean:
-       rm -f core *.o $(OUTPUT32) $(OUTPUT64)
index c8628b88af424fb2a202a57ed14abc919f9371b7..2eb226eb099ace386eb5742975f98ba38b3c7e01 100644 (file)
@@ -11,6 +11,6 @@
 @rem ***************************************************************************\r
 @echo off\r
 \r
-cl /O2 /Oi /GL /I "%JAVA_HOME%/include/win32" /I "%JAVA_HOME%/include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_USRDLL" /D "VC64_EXPORTS" /D "_UNICODE" /D "UNICODE" /D "_WINDLL" /EHsc /LD /Gy /GS- /W3 /nologo /c /Zi /TC /errorReport:prompt fastlz.c lz4.c lz4hc.c jniWrapper.c\r
+cl /O2 /Oi /GL /I "%JAVA_HOME%/include/win32" /I "%JAVA_HOME%/include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_USRDLL" /D "VC64_EXPORTS" /D "_UNICODE" /D "UNICODE" /D "_WINDLL" /EHsc /LD /Gy /GS- /W3 /nologo /c /Zi /TC /errorReport:prompt fastlz.c jniWrapper.c\r
 \r
-link /OUT:"..\src\fastlz-windows-x86_64.dll" /INCREMENTAL:NO /NOLOGO /DLL /MANIFEST /MANIFESTFILE:"..\fastlz-windows-x86_64.dll.intermediate.manifest" /MANIFESTUAC:"level='asInvoker' uiAccess='false'" /PDB:"fastlz-windows-x86_64.pdb" /SUBSYSTEM:WINDOWS /OPT:REF /OPT:ICF /LTCG /DYNAMICBASE /NXCOMPAT /MACHINE:X64 /ERRORREPORT:PROMPT kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib fastlz.obj lz4.obj lz4hc.obj jniWrapper.obj \r
+link /OUT:"..\src\fastlz-windows-x86_64.dll" /INCREMENTAL:NO /NOLOGO /DLL /MANIFEST /MANIFESTFILE:"..\fastlz-windows-x86_64.dll.intermediate.manifest" /MANIFESTUAC:"level='asInvoker' uiAccess='false'" /PDB:"fastlz-windows-x86_64.pdb" /SUBSYSTEM:WINDOWS /OPT:REF /OPT:ICF /LTCG /DYNAMICBASE /NXCOMPAT /MACHINE:X64 /ERRORREPORT:PROMPT kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib fastlz.obj jniWrapper.obj \r
index 0e95707863877681211fcbf57b436b736b11fa7e..0141073b7b87f2840f3187f50b7d117201efcfd0 100644 (file)
@@ -11,6 +11,6 @@
 @rem ***************************************************************************\r
 @echo off\r
 \r
-cl /O2 /Oi /GL /I "%JAVA_HOME%/include/win32" /I "%JAVA_HOME%/include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_USRDLL" /D "_UNICODE" /D "UNICODE" /D "_WINDLL" /EHsc /LD /Gy /GS- /W3 /nologo /c /Zi /TC /errorReport:prompt fastlz.c lz4.c lz4hc.c jniWrapper.c\r
+cl /O2 /Oi /GL /I "%JAVA_HOME%/include/win32" /I "%JAVA_HOME%/include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_USRDLL" /D "_UNICODE" /D "UNICODE" /D "_WINDLL" /EHsc /LD /Gy /GS- /W3 /nologo /c /Zi /TC /errorReport:prompt fastlz.c jniWrapper.c\r
 \r
-link /OUT:"..\src\fastlz-windows-x86.dll" /INCREMENTAL:NO /NOLOGO /DLL /MANIFEST /MANIFESTFILE:"..\fastlz-windows-x86.dll.intermediate.manifest" /MANIFESTUAC:"level='asInvoker' uiAccess='false'" /PDB:"fastlz-windows-x86.pdb" /SUBSYSTEM:WINDOWS /OPT:REF /OPT:ICF /LTCG /DYNAMICBASE /NXCOMPAT /MACHINE:X86 /ERRORREPORT:PROMPT kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib fastlz.obj lz4.obj lz4hc.obj jniWrapper.obj \r
+link /OUT:"..\src\fastlz-windows-x86.dll" /INCREMENTAL:NO /NOLOGO /DLL /MANIFEST /MANIFESTFILE:"..\fastlz-windows-x86.dll.intermediate.manifest" /MANIFESTUAC:"level='asInvoker' uiAccess='false'" /PDB:"fastlz-windows-x86.pdb" /SUBSYSTEM:WINDOWS /OPT:REF /OPT:ICF /LTCG /DYNAMICBASE /NXCOMPAT /MACHINE:X86 /ERRORREPORT:PROMPT kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib fastlz.obj jniWrapper.obj \r
index 22968b87cc2b8d3bc1e29ec2d6b48b59e7c7999c..7904b51986172cf83527f63f3c2320ed31666fdd 100644 (file)
@@ -9,6 +9,6 @@
 @rem Contributors:\r
 @rem     VTT Technical Research Centre of Finland - initial API and implementation\r
 @rem ***************************************************************************\r
-rem gcc -mno-cygwin "-I$jdk/include" "-I$jdk$jdk/include/win32" -Wl,--add-stdcall-alias -shared -o ../src/fastlz-windows-x86.dll fastlz.c lz4.c lz4hc.c jniWrapper.c\r
-gcc -mno-cygwin "-I%JAVA_HOME%/include" "-I%JAVA_HOME%/include/win32" -Wl,--add-stdcall-alias -shared -o ../src/fastlz-windows-x86.dll fastlz.c lz4.c lz4hc.c jniWrapper.c\r
+rem gcc -mno-cygwin "-I$jdk/include" "-I$jdk/include/win32" -Wl,--add-stdcall-alias -shared -o ../src/fastlz-windows-x86.dll fastlz.c jniWrapper.c\r
+gcc -mno-cygwin "-I%JAVA_HOME%/include" "-I%JAVA_HOME%/include/win32" -Wl,--add-stdcall-alias -shared -o ../src/fastlz-windows-x86.dll fastlz.c jniWrapper.c\r
 gcc -o fastlz_test.exe fastlz.c fastlz_read.c fastlz_write.c fastlz_test.c\r
index 05aac11f921d6002be1d1903f9ee984dd35adb09..21899d5dda761c5123b1539cb6b00b9241ab0a51 100755 (executable)
@@ -54,7 +54,7 @@ output="../src/libfastlz-${kernel}-${arch}"
 
 case $kernel in
     darwin*)
-       output="${output}.jnilib"
+       output="${output}.dylib"
        ;;
     *)
        output="${output}.so"
@@ -65,7 +65,7 @@ echo "Architecture: $arch"
 echo "Output library: $output"
 echo "Compiler options: $options"
 
-gcc ${options} -o ${output} fastlz.c lz4.c lz4hc.c jniWrapper.c
+gcc ${options} -o ${output} fastlz.c jniWrapper.c
 
 size=`ls -l $output | cut -d " " -f 5`
 echo "library size before stripping: $size"
index 0c696e1f5ae5b83d73b3c43670b67b9886c93ab1..6f497627a2907606121c3e701714d46831dfcf18 100644 (file)
@@ -87,71 +87,3 @@ JNIEXPORT jint JNICALL Java_org_simantics_fastlz_FastLZ_decompressCluster(JNIEnv
 \r
 }\r
 \r
-JNIEXPORT jint JNICALL Java_org_simantics_fastlz_LZ4_compress(JNIEnv* env, jclass clazz, \r
-                                                              jobject input, jint inputOffset, jint length, \r
-                                                              jobject output, jint outputOffset) {\r
-    void* inputAddress = (char*)(*env)->GetDirectBufferAddress(env, input) + inputOffset;\r
-    void* outputAddress = (char*)(*env)->GetDirectBufferAddress(env, output) + outputOffset;\r
-    return LZ4_compress(inputAddress, outputAddress, length);\r
-}\r
-\r
-JNIEXPORT jint JNICALL Java_org_simantics_fastlz_LZ4_decompress(JNIEnv* env, jclass clazz, \r
-                                                                jobject input, jint inputOffset, jint length, \r
-                                                                jobject output, jint outputOffset, jint maxout) {\r
-    void* inputAddress = (char*)(*env)->GetDirectBufferAddress(env, input) + inputOffset;\r
-    void* outputAddress = (char*)(*env)->GetDirectBufferAddress(env, output) + outputOffset;\r
-       return LZ4_uncompress_unknownOutputSize(inputAddress, outputAddress, length, maxout);\r
-}\r
-\r
-JNIEXPORT jint JNICALL Java_org_simantics_fastlz_LZ4_decompressCluster(JNIEnv* env, jclass clazz, jobject deflated, jint deflatedSize, jint inflatedSize, jobjectArray arrays) {\r
-\r
-       static char *inflateBuffer = 0;\r
-       static int inflateBufferSize = 0;\r
-\r
-       int ll, il, bl;\r
-\r
-       jlongArray longs;\r
-       jintArray ints;\r
-       jbyteArray bytes;\r
-\r
-       char *input = (char*)(*env)->GetDirectBufferAddress(env, deflated);\r
-       char *address;\r
-\r
-       if(inflateBufferSize < inflatedSize) {\r
-               if(!inflateBuffer) {\r
-                       if(inflatedSize < INITIAL_SIZE) inflatedSize = INITIAL_SIZE;\r
-                       inflateBuffer = malloc(inflatedSize);\r
-                       inflateBufferSize = inflatedSize;\r
-               } else {\r
-                       if(inflateBuffer) free(inflateBuffer);\r
-                       inflateBuffer = malloc(inflatedSize);\r
-                       inflateBufferSize = inflatedSize;\r
-               }\r
-       }\r
-\r
-       address = inflateBuffer;\r
-\r
-       LZ4_uncompress_unknownOutputSize(input, inflateBuffer, deflatedSize, inflateBufferSize);\r
-\r
-       ll = *(int *)address;\r
-       longs = (*env)->NewLongArray(env, ll);\r
-       (*env)->SetLongArrayRegion(env, longs, 0, ll, (const jlong *)(address + 4));\r
-       (*env)->SetObjectArrayElement(env, arrays, 0, longs);\r
-\r
-       address += 4 + 8 * ll;\r
-\r
-       il = *(int *)address;\r
-       ints = (*env)->NewIntArray(env, il);\r
-       (*env)->SetIntArrayRegion(env, ints, 0, il, (const jint *)(address + 4));\r
-       (*env)->SetObjectArrayElement(env, arrays, 1, ints);\r
-\r
-       address += 4 * il + 4;\r
-\r
-       bl = *(int *)address;\r
-       bytes = (*env)->NewByteArray(env, bl);\r
-       (*env)->SetByteArrayRegion(env, bytes, 0, bl, (const jbyte *)(address + 4));\r
-       (*env)->SetObjectArrayElement(env, arrays, 2, bytes);\r
-\r
-       return 0;\r
-\r
-}\r
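
For reference, the removed Java_org_simantics_fastlz_LZ4_decompressCluster above parsed the inflated cluster image as three length-prefixed arrays. A minimal standalone sketch of that layout in C follows; ClusterView and viewCluster are hypothetical names, and it assumes the same 4-byte native-endian length prefixes the deleted code read:

#include <stdint.h>
#include <string.h>

/* Cluster image layout, as read by the deleted code above:
   [int32 nLongs][nLongs x int64][int32 nInts][nInts x int32][int32 nBytes][nBytes x int8] */
typedef struct {
    int32_t nLongs; const int64_t *longs;
    int32_t nInts;  const int32_t *ints;
    int32_t nBytes; const int8_t  *bytes;
} ClusterView;

/* Points the view into an inflated buffer without copying. The length fields are
   read via memcpy; the array pointers alias the buffer directly, unaligned just as
   in the original. The buffer must outlive the view. */
static void viewCluster(const char *buf, ClusterView *v) {
    memcpy(&v->nLongs, buf, 4);
    v->longs = (const int64_t *)(buf + 4);
    buf += 4 + 8 * (size_t)v->nLongs;
    memcpy(&v->nInts, buf, 4);
    v->ints = (const int32_t *)(buf + 4);
    buf += 4 + 4 * (size_t)v->nInts;
    memcpy(&v->nBytes, buf, 4);
    v->bytes = (const int8_t *)(buf + 4);
}
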
diff --git a/bundles/org.simantics.fastlz/native/lz4.c b/bundles/org.simantics.fastlz/native/lz4.c
deleted file mode 100644 (file)
index 06e2829..0000000
+++ /dev/null
@@ -1,819 +0,0 @@
-/*\r
-   LZ4 - Fast LZ compression algorithm\r
-   Copyright (C) 2011-2012, Yann Collet.\r
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\r
-\r
-   Redistribution and use in source and binary forms, with or without\r
-   modification, are permitted provided that the following conditions are\r
-   met:\r
-\r
-       * Redistributions of source code must retain the above copyright\r
-   notice, this list of conditions and the following disclaimer.\r
-       * Redistributions in binary form must reproduce the above\r
-   copyright notice, this list of conditions and the following disclaimer\r
-   in the documentation and/or other materials provided with the\r
-   distribution.\r
-\r
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
-\r
-   You can contact the author at :\r
-   - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html\r
-   - LZ4 source repository : http://code.google.com/p/lz4/\r
-*/\r
-\r
-//**************************************\r
-// Tuning parameters\r
-//**************************************\r
-// COMPRESSIONLEVEL :\r
-// Increasing this value improves compression ratio\r
-// Lowering this value reduces memory usage\r
-// Reduced memory usage typically improves speed, due to cache effect (ex : L1 32KB for Intel, L1 64KB for AMD)\r
-// Memory usage formula : N->2^(N+2) Bytes (examples : 12 -> 16KB ; 17 -> 512KB)\r
-#define COMPRESSIONLEVEL 12\r
-\r
-// NOTCOMPRESSIBLE_CONFIRMATION :\r
-// Decreasing this value will make the algorithm skip faster over data segments considered "incompressible"\r
-// This may decrease compression ratio dramatically, but will be faster on incompressible data\r
-// Increasing this value will make the algorithm search more before declaring a segment "incompressible"\r
-// This could improve compression a bit, but will be slower on incompressible data\r
-// The default value (6) is recommended\r
-#define NOTCOMPRESSIBLE_CONFIRMATION 6\r
-\r
-// LZ4_COMPRESSMIN :\r
-// Compression function will *fail* if it is not successful at compressing input by at least LZ4_COMPRESSMIN bytes\r
-// Since the compression function stops working prematurely, it results in a speed gain\r
-// The output however is unusable. Compression function result will be zero.\r
-// Default : 0 = disabled\r
-#define LZ4_COMPRESSMIN 0\r
-\r
-// BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :\r
-// This will provide a performance boost for big-endian CPUs, but the resulting compressed stream will be incompatible with little-endian CPUs.\r
-// You can set this option to 1 in situations where data will stay within a closed environment\r
-// This option is useless on little-endian CPUs (such as x86)\r
-//#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1\r
-\r
-\r
-\r
-//**************************************\r
-// CPU Feature Detection\r
-//**************************************\r
-// 32 or 64 bits ?\r
-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || defined(__LP64__) || defined(_LP64) )   // Detects 64 bits mode\r
-#  define LZ4_ARCH64 1\r
-#else\r
-#  define LZ4_ARCH64 0\r
-#endif\r
-\r
-// Little Endian or Big Endian ?\r
-// Note : overwrite the below #define if you know your architecture endianness\r
-#if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || defined(__PPC) || defined(PPC) || defined(__powerpc__) || defined(__powerpc) || defined(powerpc) || ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))) )\r
-#  define LZ4_BIG_ENDIAN 1\r
-#else\r
-// Little Endian assumed. PDP Endian and other very rare endian formats are unsupported.\r
-#endif\r
-\r
-// Unaligned memory access is automatically enabled for "common" CPU, such as x86.\r
-// For other CPUs, the compiler will be more cautious, and insert extra code to ensure aligned access is respected\r
-// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance\r
-#if defined(__ARM_FEATURE_UNALIGNED)\r
-#  define LZ4_FORCE_UNALIGNED_ACCESS 1\r
-#endif\r
-\r
-// Define this parameter if your target system or compiler does not support hardware bit count\r
-#if defined(_MSC_VER) && defined(_WIN32_WCE)            // Visual Studio for Windows CE does not support Hardware bit count\r
-#  define LZ4_FORCE_SW_BITCOUNT\r
-#endif\r
-\r
-\r
-//**************************************\r
-// Compiler Options\r
-//**************************************\r
-#if __STDC_VERSION__ >= 199901L // C99\r
-/* "restrict" is a known keyword */\r
-#else\r
-#  define restrict // Disable restrict\r
-#endif\r
-\r
-#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)\r
-\r
-#ifdef _MSC_VER  // Visual Studio\r
-#  define inline __forceinline           // Visual is not C99, but supports some kind of inline\r
-#  if LZ4_ARCH64       // 64-bit\r
-#    pragma intrinsic(_BitScanForward64) // For Visual 2005\r
-#    pragma intrinsic(_BitScanReverse64) // For Visual 2005\r
-#  else\r
-#    pragma intrinsic(_BitScanForward)   // For Visual 2005\r
-#    pragma intrinsic(_BitScanReverse)   // For Visual 2005\r
-#  endif\r
-#endif\r
-\r
-#ifdef _MSC_VER\r
-#  define lz4_bswap16(x) _byteswap_ushort(x)\r
-#else\r
-#  define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))\r
-#endif\r
-\r
-#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)\r
-#  define expect(expr,value)    (__builtin_expect ((expr),(value)) )\r
-#else\r
-#  define expect(expr,value)    (expr)\r
-#endif\r
-\r
-#define likely(expr)     expect((expr) != 0, 1)\r
-#define unlikely(expr)   expect((expr) != 0, 0)\r
-\r
-\r
-//**************************************\r
-// Includes\r
-//**************************************\r
-#include <stdlib.h>   // for malloc\r
-#include <string.h>   // for memset\r
-#include "lz4.h"\r
-\r
-\r
-//**************************************\r
-// Basic Types\r
-//**************************************\r
-#if defined(_MSC_VER)    // Visual Studio does not support 'stdint' natively\r
-#  define BYTE unsigned __int8\r
-#  define U16          unsigned __int16\r
-#  define U32          unsigned __int32\r
-#  define S32          __int32\r
-#  define U64          unsigned __int64\r
-#else\r
-#  include <stdint.h>\r
-#  define BYTE uint8_t\r
-#  define U16          uint16_t\r
-#  define U32          uint32_t\r
-#  define S32          int32_t\r
-#  define U64          uint64_t\r
-#endif\r
-\r
-#ifndef LZ4_FORCE_UNALIGNED_ACCESS\r
-#  pragma pack(push, 1)\r
-#endif\r
-\r
-typedef struct _U16_S { U16 v; } U16_S;\r
-typedef struct _U32_S { U32 v; } U32_S;\r
-typedef struct _U64_S { U64 v; } U64_S;\r
-\r
-#ifndef LZ4_FORCE_UNALIGNED_ACCESS\r
-#  pragma pack(pop)\r
-#endif\r
-\r
-#define A64(x) (((U64_S *)(x))->v)\r
-#define A32(x) (((U32_S *)(x))->v)\r
-#define A16(x) (((U16_S *)(x))->v)\r
-\r
-\r
-//**************************************\r
-// Constants\r
-//**************************************\r
-#define MINMATCH 4\r
-\r
-#define HASH_LOG COMPRESSIONLEVEL\r
-#define HASHTABLESIZE (1 << HASH_LOG)\r
-#define HASH_MASK (HASHTABLESIZE - 1)\r
-\r
-#define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION>2?NOTCOMPRESSIBLE_CONFIRMATION:2)\r
-#define STACKLIMIT 13\r
-#define HEAPMODE (HASH_LOG>STACKLIMIT)  // Defines if memory is allocated into the stack (local variable), or into the heap (malloc()).\r
-#define COPYLENGTH 8\r
-#define LASTLITERALS 5\r
-#define MFLIMIT (COPYLENGTH+MINMATCH)\r
-#define MINLENGTH (MFLIMIT+1)\r
-\r
-#define MAXD_LOG 16\r
-#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)\r
-\r
-#define ML_BITS 4\r
-#define ML_MASK ((1U<<ML_BITS)-1)\r
-#define RUN_BITS (8-ML_BITS)\r
-#define RUN_MASK ((1U<<RUN_BITS)-1)\r
-\r
-\r
-//**************************************\r
-// Architecture-specific macros\r
-//**************************************\r
-#if LZ4_ARCH64 // 64-bit\r
-#  define STEPSIZE 8\r
-#  define UARCH U64\r
-#  define AARCH A64\r
-#  define LZ4_COPYSTEP(s,d)            A64(d) = A64(s); d+=8; s+=8;\r
-#  define LZ4_COPYPACKET(s,d)          LZ4_COPYSTEP(s,d)\r
-#  define LZ4_SECURECOPY(s,d,e)        if (d<e) LZ4_WILDCOPY(s,d,e)\r
-#  define HTYPE U32\r
-#  define INITBASE(base)                       const BYTE* const base = ip\r
-#else          // 32-bit\r
-#  define STEPSIZE 4\r
-#  define UARCH U32\r
-#  define AARCH A32\r
-#  define LZ4_COPYSTEP(s,d)            A32(d) = A32(s); d+=4; s+=4;\r
-#  define LZ4_COPYPACKET(s,d)          LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);\r
-#  define LZ4_SECURECOPY                       LZ4_WILDCOPY\r
-#  define HTYPE const BYTE*\r
-#  define INITBASE(base)                       const int base = 0\r
-#endif\r
-\r
-#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))\r
-#  define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }\r
-#  define LZ4_WRITE_LITTLEENDIAN_16(p,i)  { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }\r
-#else          // Little Endian\r
-#  define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }\r
-#  define LZ4_WRITE_LITTLEENDIAN_16(p,v)  { A16(p) = v; p+=2; }\r
-#endif\r
-\r
-\r
-//**************************************\r
-// Local structures\r
-//**************************************\r
-struct refTables\r
-{\r
-       HTYPE hashTable[HASHTABLESIZE];\r
-};\r
-\r
-\r
-//**************************************\r
-// Macros\r
-//**************************************\r
-#define LZ4_HASH_FUNCTION(i)   (((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG))\r
-#define LZ4_HASH_VALUE(p)              LZ4_HASH_FUNCTION(A32(p))\r
-#define LZ4_WILDCOPY(s,d,e)            do { LZ4_COPYPACKET(s,d) } while (d<e);\r
-#define LZ4_BLINDCOPY(s,d,l)   { BYTE* e=(d)+l; LZ4_WILDCOPY(s,d,e); d=e; }\r
-\r
-\r
-//****************************\r
-// Private functions\r
-//****************************\r
-#if LZ4_ARCH64\r
-\r
-inline static int LZ4_NbCommonBytes (register U64 val)\r
-{\r
-#if defined(LZ4_BIG_ENDIAN)\r
-    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)\r
-    unsigned long r = 0;\r
-    _BitScanReverse64( &r, val );\r
-    return (int)(r>>3);\r
-    #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)\r
-    return (__builtin_clzll(val) >> 3);\r
-    #else\r
-       int r;\r
-       if (!(val>>32)) { r=4; } else { r=0; val>>=32; }\r
-       if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }\r
-       r += (!val);\r
-       return r;\r
-    #endif\r
-#else\r
-    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)\r
-    unsigned long r = 0;\r
-    _BitScanForward64( &r, val );\r
-    return (int)(r>>3);\r
-    #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)\r
-    return (__builtin_ctzll(val) >> 3);\r
-    #else\r
-       static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };\r
-       return DeBruijnBytePos[((U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58];\r
-    #endif\r
-#endif\r
-}\r
-\r
-#else\r
-\r
-inline static int LZ4_NbCommonBytes (register U32 val)\r
-{\r
-#if defined(LZ4_BIG_ENDIAN)\r
-    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)\r
-    unsigned long r = 0;\r
-    _BitScanReverse( &r, val );\r
-    return (int)(r>>3);\r
-    #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)\r
-    return (__builtin_clz(val) >> 3);\r
-    #else\r
-       int r;\r
-       if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }\r
-       r += (!val);\r
-       return r;\r
-    #endif\r
-#else\r
-    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)\r
-    unsigned long r = 0;\r
-    _BitScanForward( &r, val );\r
-    return (int)(r>>3);\r
-    #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)\r
-    return (__builtin_ctz(val) >> 3);\r
-    #else\r
-       static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };\r
-       return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];\r
-    #endif\r
-#endif\r
-}\r
-\r
-#endif\r
-\r
-\r
-//****************************\r
-// Public functions\r
-//****************************\r
-\r
-int LZ4_compressBound(int isize)\r
-{\r
-       return (isize + (isize/255) + 16);\r
-}\r
-\r
-\r
-\r
-//******************************\r
-// Compression functions\r
-//******************************\r
-\r
-int LZ4_compressCtx(void** ctx,\r
-                                const char* source,\r
-                                char* dest,\r
-                                int isize)\r
-{\r
-#if HEAPMODE\r
-       struct refTables *srt = (struct refTables *) (*ctx);\r
-       HTYPE* HashTable;\r
-#else\r
-       HTYPE HashTable[HASHTABLESIZE] = {0};\r
-#endif\r
-\r
-       const BYTE* ip = (BYTE*) source;\r
-       INITBASE(base);\r
-       const BYTE* anchor = ip;\r
-       const BYTE* const iend = ip + isize;\r
-       const BYTE* const mflimit = iend - MFLIMIT;\r
-#define matchlimit (iend - LASTLITERALS)\r
-\r
-       BYTE* op = (BYTE*) dest;\r
-\r
-       int len, length;\r
-       const int skipStrength = SKIPSTRENGTH;\r
-       U32 forwardH;\r
-\r
-\r
-       // Init\r
-       if (isize<MINLENGTH) goto _last_literals;\r
-#if HEAPMODE\r
-       if (*ctx == NULL)\r
-       {\r
-               srt = (struct refTables *) malloc ( sizeof(struct refTables) );\r
-               *ctx = (void*) srt;\r
-       }\r
-       HashTable = (HTYPE*)(srt->hashTable);\r
-       memset((void*)HashTable, 0, sizeof(srt->hashTable));\r
-#else\r
-       (void) ctx;\r
-#endif\r
-\r
-\r
-       // First Byte\r
-       HashTable[LZ4_HASH_VALUE(ip)] = ip - base;\r
-       ip++; forwardH = LZ4_HASH_VALUE(ip);\r
-\r
-       // Main Loop\r
-    for ( ; ; )\r
-       {\r
-               int findMatchAttempts = (1U << skipStrength) + 3;\r
-               const BYTE* forwardIp = ip;\r
-               const BYTE* ref;\r
-               BYTE* token;\r
-\r
-               // Find a match\r
-               do {\r
-                       U32 h = forwardH;\r
-                       int step = findMatchAttempts++ >> skipStrength;\r
-                       ip = forwardIp;\r
-                       forwardIp = ip + step;\r
-\r
-                       if unlikely(forwardIp > mflimit) { goto _last_literals; }\r
-\r
-                       forwardH = LZ4_HASH_VALUE(forwardIp);\r
-                       ref = base + HashTable[h];\r
-                       HashTable[h] = ip - base;\r
-\r
-               } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));\r
-\r
-               // Catch up\r
-               while ((ip>anchor) && (ref>(BYTE*)source) && unlikely(ip[-1]==ref[-1])) { ip--; ref--; }\r
-\r
-               // Encode Literal length\r
-               length = ip - anchor;\r
-               token = op++;\r
-               if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; }\r
-               else *token = (length<<ML_BITS);\r
-\r
-               // Copy Literals\r
-               LZ4_BLINDCOPY(anchor, op, length);\r
-\r
-_next_match:\r
-               // Encode Offset\r
-               LZ4_WRITE_LITTLEENDIAN_16(op,ip-ref);\r
-\r
-               // Start Counting\r
-               ip+=MINMATCH; ref+=MINMATCH;   // MinMatch verified\r
-               anchor = ip;\r
-               while likely(ip<matchlimit-(STEPSIZE-1))\r
-               {\r
-                       UARCH diff = AARCH(ref) ^ AARCH(ip);\r
-                       if (!diff) { ip+=STEPSIZE; ref+=STEPSIZE; continue; }\r
-                       ip += LZ4_NbCommonBytes(diff);\r
-                       goto _endCount;\r
-               }\r
-               if (LZ4_ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }\r
-               if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }\r
-               if ((ip<matchlimit) && (*ref == *ip)) ip++;\r
-_endCount:\r
-\r
-               // Encode MatchLength\r
-               len = (ip - anchor);\r
-               if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; }\r
-               else *token += len;\r
-\r
-               // Test end of chunk\r
-               if (ip > mflimit) { anchor = ip;  break; }\r
-\r
-               // Fill table\r
-               HashTable[LZ4_HASH_VALUE(ip-2)] = ip - 2 - base;\r
-\r
-               // Test next position\r
-               ref = base + HashTable[LZ4_HASH_VALUE(ip)];\r
-               HashTable[LZ4_HASH_VALUE(ip)] = ip - base;\r
-               if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; }\r
-\r
-               // Prepare next loop\r
-               anchor = ip++;\r
-               forwardH = LZ4_HASH_VALUE(ip);\r
-       }\r
-\r
-_last_literals:\r
-       // Encode Last Literals\r
-       {\r
-               int lastRun = iend - anchor;\r
-               if ((LZ4_COMPRESSMIN>0) && (((op - (BYTE*)dest) + lastRun + 1 + ((lastRun-15)/255)) > isize - LZ4_COMPRESSMIN)) return 0;\r
-               if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }\r
-               else *op++ = (lastRun<<ML_BITS);\r
-               memcpy(op, anchor, iend - anchor);\r
-               op += iend-anchor;\r
-       }\r
-\r
-       // End\r
-       return (int) (((char*)op)-dest);\r
-}\r
-\r
-\r
-\r
-// Note : this function is valid only if isize < LZ4_64KLIMIT\r
-#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT-1))\r
-#define HASHLOG64K (HASH_LOG+1)\r
-#define HASH64KTABLESIZE (1U<<HASHLOG64K)\r
-#define LZ4_HASH64K_FUNCTION(i)        (((i) * 2654435761U) >> ((MINMATCH*8)-HASHLOG64K))\r
-#define LZ4_HASH64K_VALUE(p)   LZ4_HASH64K_FUNCTION(A32(p))\r
-int LZ4_compress64kCtx(void** ctx,\r
-                                const char* source,\r
-                                char* dest,\r
-                                int isize)\r
-{\r
-#if HEAPMODE\r
-       struct refTables *srt = (struct refTables *) (*ctx);\r
-       U16* HashTable;\r
-#else\r
-       U16 HashTable[HASH64KTABLESIZE] = {0};\r
-#endif\r
-\r
-       const BYTE* ip = (BYTE*) source;\r
-       const BYTE* anchor = ip;\r
-       const BYTE* const base = ip;\r
-       const BYTE* const iend = ip + isize;\r
-       const BYTE* const mflimit = iend - MFLIMIT;\r
-#define matchlimit (iend - LASTLITERALS)\r
-\r
-       BYTE* op = (BYTE*) dest;\r
-\r
-       int len, length;\r
-       const int skipStrength = SKIPSTRENGTH;\r
-       U32 forwardH;\r
-\r
-\r
-       // Init\r
-       if (isize<MINLENGTH) goto _last_literals;\r
-#if HEAPMODE\r
-       if (*ctx == NULL)\r
-       {\r
-               srt = (struct refTables *) malloc ( sizeof(struct refTables) );\r
-               *ctx = (void*) srt;\r
-       }\r
-       HashTable = (U16*)(srt->hashTable);\r
-       memset((void*)HashTable, 0, sizeof(srt->hashTable));\r
-#else\r
-       (void) ctx;\r
-#endif\r
-\r
-\r
-       // First Byte\r
-       ip++; forwardH = LZ4_HASH64K_VALUE(ip);\r
-\r
-       // Main Loop\r
-    for ( ; ; )\r
-       {\r
-               int findMatchAttempts = (1U << skipStrength) + 3;\r
-               const BYTE* forwardIp = ip;\r
-               const BYTE* ref;\r
-               BYTE* token;\r
-\r
-               // Find a match\r
-               do {\r
-                       U32 h = forwardH;\r
-                       int step = findMatchAttempts++ >> skipStrength;\r
-                       ip = forwardIp;\r
-                       forwardIp = ip + step;\r
-\r
-                       if (forwardIp > mflimit) { goto _last_literals; }\r
-\r
-                       forwardH = LZ4_HASH64K_VALUE(forwardIp);\r
-                       ref = base + HashTable[h];\r
-                       HashTable[h] = ip - base;\r
-\r
-               } while (A32(ref) != A32(ip));\r
-\r
-               // Catch up\r
-               while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; }\r
-\r
-               // Encode Literal length\r
-               length = ip - anchor;\r
-               token = op++;\r
-               if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; }\r
-               else *token = (length<<ML_BITS);\r
-\r
-               // Copy Literals\r
-               LZ4_BLINDCOPY(anchor, op, length);\r
-\r
-_next_match:\r
-               // Encode Offset\r
-               LZ4_WRITE_LITTLEENDIAN_16(op,ip-ref);\r
-\r
-               // Start Counting\r
-               ip+=MINMATCH; ref+=MINMATCH;   // MinMatch verified\r
-               anchor = ip;\r
-               while (ip<matchlimit-(STEPSIZE-1))\r
-               {\r
-                       UARCH diff = AARCH(ref) ^ AARCH(ip);\r
-                       if (!diff) { ip+=STEPSIZE; ref+=STEPSIZE; continue; }\r
-                       ip += LZ4_NbCommonBytes(diff);\r
-                       goto _endCount;\r
-               }\r
-               if (LZ4_ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }\r
-               if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }\r
-               if ((ip<matchlimit) && (*ref == *ip)) ip++;\r
-_endCount:\r
-\r
-               // Encode MatchLength\r
-               len = (ip - anchor);\r
-               if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; }\r
-               else *token += len;\r
-\r
-               // Test end of chunk\r
-               if (ip > mflimit) { anchor = ip;  break; }\r
-\r
-               // Fill table\r
-               HashTable[LZ4_HASH64K_VALUE(ip-2)] = ip - 2 - base;\r
-\r
-               // Test next position\r
-               ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];\r
-               HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;\r
-               if (A32(ref) == A32(ip)) { token = op++; *token=0; goto _next_match; }\r
-\r
-               // Prepare next loop\r
-               anchor = ip++;\r
-               forwardH = LZ4_HASH64K_VALUE(ip);\r
-       }\r
-\r
-_last_literals:\r
-       // Encode Last Literals\r
-       {\r
-               int lastRun = iend - anchor;\r
-               if ((LZ4_COMPRESSMIN>0) && (((op - (BYTE*)dest) + lastRun + 1 + ((lastRun-15)/255)) > isize - LZ4_COMPRESSMIN)) return 0;\r
-               if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }\r
-               else *op++ = (lastRun<<ML_BITS);\r
-               memcpy(op, anchor, iend - anchor);\r
-               op += iend-anchor;\r
-       }\r
-\r
-       // End\r
-       return (int) (((char*)op)-dest);\r
-}\r
-\r
-\r
-\r
-int LZ4_compress(const char* source,\r
-                                char* dest,\r
-                                int isize)\r
-{\r
-#if HEAPMODE\r
-       void* ctx = malloc(sizeof(struct refTables));\r
-       int result;\r
-       if (isize < LZ4_64KLIMIT)\r
-               result = LZ4_compress64kCtx(&ctx, source, dest, isize);\r
-       else result = LZ4_compressCtx(&ctx, source, dest, isize);\r
-       free(ctx);\r
-       return result;\r
-#else\r
-       if (isize < (int)LZ4_64KLIMIT) return LZ4_compress64kCtx(NULL, source, dest, isize);\r
-       return LZ4_compressCtx(NULL, source, dest, isize);\r
-#endif\r
-}\r
-\r
-\r
-\r
-\r
-//****************************\r
-// Decompression functions\r
-//****************************\r
-\r
-// Note : The decoding functions LZ4_uncompress() and LZ4_uncompress_unknownOutputSize()\r
-//             are safe against "buffer overflow" attack type.\r
-//             They will never write nor read outside of the provided output buffers.\r
-//      LZ4_uncompress_unknownOutputSize() also ensures that it will never read outside of the input buffer.\r
-//             A corrupted input will produce an error result, a negative int, indicating the position of the error within the input stream.\r
-\r
-int LZ4_uncompress(const char* source,\r
-                                char* dest,\r
-                                int osize)\r
-{\r
-       // Local Variables\r
-       const BYTE* restrict ip = (const BYTE*) source;\r
-       const BYTE* restrict ref;\r
-\r
-       BYTE* restrict op = (BYTE*) dest;\r
-       BYTE* const oend = op + osize;\r
-       BYTE* cpy;\r
-\r
-       BYTE token;\r
-\r
-       int     len, length;\r
-       size_t dec[] ={0, 3, 2, 3, 0, 0, 0, 0};\r
-\r
-\r
-       // Main Loop\r
-       while (1)\r
-       {\r
-               // get runlength\r
-               token = *ip++;\r
-               if ((length=(token>>ML_BITS)) == RUN_MASK)  { for (;(len=*ip++)==255;length+=255){} length += len; }\r
-\r
-               // copy literals\r
-               cpy = op+length;\r
-               if unlikely(cpy>oend-COPYLENGTH)\r
-               {\r
-                       if (cpy > oend) goto _output_error;          // Error : request to write beyond destination buffer\r
-                       memcpy(op, ip, length);\r
-                       ip += length;\r
-                       break;    // Necessarily EOF\r
-               }\r
-               LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;\r
-\r
-               // get offset\r
-               LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;\r
-               if (ref < (BYTE* const)dest) goto _output_error;   // Error : offset create reference outside destination buffer\r
-\r
-               // get matchlength\r
-               if ((length=(token&ML_MASK)) == ML_MASK) { for (;*ip==255;length+=255) {ip++;} length += *ip++; }\r
-\r
-               // copy repeated sequence\r
-               if unlikely(op-ref<STEPSIZE)\r
-               {\r
-#if LZ4_ARCH64\r
-                       size_t dec2table[]={0, 0, 0, -1, 0, 1, 2, 3};\r
-                       size_t dec2 = dec2table[op-ref];\r
-#else\r
-                       const int dec2 = 0;\r
-#endif\r
-                       *op++ = *ref++;\r
-                       *op++ = *ref++;\r
-                       *op++ = *ref++;\r
-                       *op++ = *ref++;\r
-                       ref -= dec[op-ref];\r
-                       A32(op)=A32(ref); op += STEPSIZE-4;\r
-                       ref -= dec2;\r
-               } else { LZ4_COPYSTEP(ref,op); }\r
-               cpy = op + length - (STEPSIZE-4);\r
-               if (cpy>oend-COPYLENGTH)\r
-               {\r
-                       if (cpy > oend) goto _output_error;             // Error : request to write beyond destination buffer\r
-                       LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));\r
-                       while(op<cpy) *op++=*ref++;\r
-                       op=cpy;\r
-                       if (op == oend) break;    // Check EOF (should never happen, since last 5 bytes are supposed to be literals)\r
-                       continue;\r
-               }\r
-               LZ4_SECURECOPY(ref, op, cpy);\r
-               op=cpy;         // correction\r
-       }\r
-\r
-       // end of decoding\r
-       return (int) (((char*)ip)-source);\r
-\r
-       // write overflow error detected\r
-_output_error:\r
-       return (int) (-(((char*)ip)-source));\r
-}\r
-\r
-\r
-int LZ4_uncompress_unknownOutputSize(\r
-                               const char* source,\r
-                               char* dest,\r
-                               int isize,\r
-                               int maxOutputSize)\r
-{\r
-       // Local Variables\r
-       const BYTE* restrict ip = (const BYTE*) source;\r
-       const BYTE* const iend = ip + isize;\r
-       const BYTE* restrict ref;\r
-\r
-       BYTE* restrict op = (BYTE*) dest;\r
-       BYTE* const oend = op + maxOutputSize;\r
-       BYTE* cpy;\r
-\r
-       size_t dec[] ={0, 3, 2, 3, 0, 0, 0, 0};\r
-\r
-\r
-       // Main Loop\r
-       while (ip<iend)\r
-       {\r
-               BYTE token;\r
-               int length;\r
-\r
-               // get runlength\r
-               token = *ip++;\r
-               if ((length=(token>>ML_BITS)) == RUN_MASK) { int s=255; while ((ip<iend) && (s==255)) { s=*ip++; length += s; } }\r
-\r
-               // copy literals\r
-               cpy = op+length;\r
-               if ((cpy>oend-COPYLENGTH) || (ip+length>iend-COPYLENGTH))\r
-               {\r
-                       if (cpy > oend) goto _output_error;          // Error : request to write beyond destination buffer\r
-                       if (ip+length > iend) goto _output_error;    // Error : request to read beyond source buffer\r
-                       memcpy(op, ip, length);\r
-                       op += length;\r
-                       ip += length;\r
-                       if (ip<iend) goto _output_error;             // Error : LZ4 format violation\r
-                       break;    // Necessarily EOF, due to parsing restrictions\r
-               }\r
-               LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;\r
-\r
-               // get offset\r
-               LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;\r
-               if (ref < (BYTE* const)dest) goto _output_error;   // Error : offset creates reference outside of destination buffer\r
-\r
-               // get matchlength\r
-               if ((length=(token&ML_MASK)) == ML_MASK) { while (ip<iend) { int s = *ip++; length +=s; if (s==255) continue; break; } }\r
-\r
-               // copy repeated sequence\r
-               if unlikely(op-ref<STEPSIZE)\r
-               {\r
-#if LZ4_ARCH64\r
-                       size_t dec2table[]={0, 0, 0, -1, 0, 1, 2, 3};\r
-                       size_t dec2 = dec2table[op-ref];\r
-#else\r
-                       const int dec2 = 0;\r
-#endif\r
-                       *op++ = *ref++;\r
-                       *op++ = *ref++;\r
-                       *op++ = *ref++;\r
-                       *op++ = *ref++;\r
-                       ref -= dec[op-ref];\r
-                       A32(op)=A32(ref); op += STEPSIZE-4;\r
-                       ref -= dec2;\r
-               } else { LZ4_COPYSTEP(ref,op); }\r
-               cpy = op + length - (STEPSIZE-4);\r
-               if (cpy>oend-COPYLENGTH)\r
-               {\r
-                       if (cpy > oend) goto _output_error;           // Error : request to write outside of destination buffer\r
-                       LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));\r
-                       while(op<cpy) *op++=*ref++;\r
-                       op=cpy;\r
-                       if (op == oend) break;    // Check EOF (should never happen, since last 5 bytes are supposed to be literals)\r
-                       continue;\r
-               }\r
-               LZ4_SECURECOPY(ref, op, cpy);\r
-               op=cpy;         // correction\r
-       }\r
-\r
-       // end of decoding\r
-       return (int) (((char*)op)-dest);\r
-\r
-       // write overflow error detected\r
-_output_error:\r
-       return (int) (-(((char*)ip)-source));\r
-}\r
-\r
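
The ctx plumbing deleted above exists so that, when HEAPMODE is enabled, the hash table is malloc'ed once and reused across calls instead of being re-allocated for every segment. A minimal sketch of that usage pattern, assuming the declarations from the removed lz4.h; compressMany and the buffer-sizing convention are illustrative, not part of the removed API:

#include <stdlib.h>

/* Declarations as in the removed lz4.h */
int LZ4_compressCtx(void **ctx, const char *source, char *dest, int isize);
int LZ4_compressBound(int isize);

/* Compresses n segments back to back while reusing one context allocation:
   *ctx starts as NULL, is allocated internally on first use (in HEAPMODE),
   and the same pointer is passed to every subsequent call (one ctx per thread). */
static int compressMany(const char *segs[], const int sizes[], int n, char *out) {
    void *ctx = NULL;
    char *op = out;  /* caller sized 'out' for the sum of LZ4_compressBound(sizes[i]) */
    for (int i = 0; i < n; i++)
        op += LZ4_compressCtx(&ctx, segs[i], op, sizes[i]);
    free(ctx);       /* freeing the internally malloc'ed table is the caller's job */
    return (int)(op - out);
}
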
diff --git a/bundles/org.simantics.fastlz/native/lz4.h b/bundles/org.simantics.fastlz/native/lz4.h
deleted file mode 100644 (file)
index ebd62b6..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-/*\r
-   LZ4 - Fast LZ compression algorithm\r
-   Header File\r
-   Copyright (C) 2011-2012, Yann Collet.\r
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\r
-\r
-   Redistribution and use in source and binary forms, with or without\r
-   modification, are permitted provided that the following conditions are\r
-   met:\r
-\r
-       * Redistributions of source code must retain the above copyright\r
-   notice, this list of conditions and the following disclaimer.\r
-       * Redistributions in binary form must reproduce the above\r
-   copyright notice, this list of conditions and the following disclaimer\r
-   in the documentation and/or other materials provided with the\r
-   distribution.\r
-\r
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
-\r
-   You can contact the author at :\r
-   - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html\r
-   - LZ4 source repository : http://code.google.com/p/lz4/\r
-*/\r
-#pragma once\r
-\r
-#if defined (__cplusplus)\r
-extern "C" {\r
-#endif\r
-\r
-\r
-//****************************\r
-// Simple Functions\r
-//****************************\r
-\r
-int LZ4_compress   (const char* source, char* dest, int isize);\r
-int LZ4_uncompress (const char* source, char* dest, int osize);\r
-\r
-/*\r
-LZ4_compress() :\r
-       isize  : is the input size. Max supported value is ~1.9GB\r
-       return : the number of bytes written in buffer dest\r
-                        or 0 if the compression fails (if LZ4_COMPRESSMIN is set)\r
-       note : destination buffer must be already allocated.\r
-               destination buffer must be sized to handle worst-case situations (input data not compressible)\r
-               worst case size evaluation is provided by function LZ4_compressBound()\r
-\r
-LZ4_uncompress() :\r
-       osize  : is the output size, therefore the original size\r
-       return : the number of bytes read in the source buffer\r
-                        If the source stream is malformed, the function will stop decoding and return a negative result, indicating the byte position of the faulty instruction\r
-                        This function never writes beyond dest + osize, and is therefore protected against malicious data packets\r
-       note : destination buffer must be already allocated\r
-*/\r
-\r
-\r
-//****************************\r
-// Advanced Functions\r
-//****************************\r
-\r
-int LZ4_compressBound(int isize);\r
-\r
-/*\r
-LZ4_compressBound() :\r
-       Provides the maximum size that LZ4 may output in a "worst case" scenario (input data not compressible)\r
-       primarily useful for memory allocation of output buffer.\r
-\r
-       isize  : is the input size. Max supported value is ~1.9GB\r
-       return : maximum output size in a "worst case" scenario\r
-       note : this function is limited by "int" range (2^31-1)\r
-*/\r
-\r
-\r
-int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);\r
-\r
-/*\r
-LZ4_uncompress_unknownOutputSize() :\r
-       isize  : is the input size, therefore the compressed size\r
-       maxOutputSize : is the size of the destination buffer (which must be already allocated)\r
-       return : the number of bytes decoded in the destination buffer (necessarily <= maxOutputSize)\r
-                        If the source stream is malformed, the function will stop decoding and return a negative result, indicating the byte position of the faulty instruction\r
-                        This function never writes beyond dest + maxOutputSize, and is therefore protected against malicious data packets\r
-       note   : Destination buffer must be already allocated.\r
-                This version is slightly slower than LZ4_uncompress()\r
-*/\r
-\r
-\r
-int LZ4_compressCtx(void** ctx, const char* source,  char* dest, int isize);\r
-int LZ4_compress64kCtx(void** ctx, const char* source,  char* dest, int isize);\r
-\r
-/*\r
-LZ4_compressCtx() :\r
-       This function explicitly handles the CTX memory structure.\r
-       It avoids allocating/deallocating memory between each call, improving performance when malloc is heavily invoked.\r
-       This function is only useful when memory is allocated into the heap (HASH_LOG value beyond STACKLIMIT)\r
-       Performance difference will be noticeable only when repeatedly calling the compression function over many small segments.\r
-       Note : by default, memory is allocated into the stack, therefore "malloc" is not invoked.\r
-LZ4_compress64kCtx() :\r
-       Same as LZ4_compressCtx(), but specific to small inputs (<64KB).\r
-       isize *Must* be <64KB, otherwise the output will be corrupted.\r
-\r
-       On first call : provide a *ctx=NULL; It will be automatically allocated.\r
-       On next calls : reuse the same ctx pointer.\r
-       Use different pointers for different threads when doing multi-threading.\r
-\r
-*/\r
-\r
-\r
-#if defined (__cplusplus)\r
-}\r
-#endif\r
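
Read together, the declarations above amount to a simple round-trip contract: size the destination with LZ4_compressBound(), remember the original size, and check for a negative return on decode. A minimal usage sketch under those assumptions (the input string and variable names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int LZ4_compress(const char *source, char *dest, int isize);
int LZ4_uncompress(const char *source, char *dest, int osize);
int LZ4_compressBound(int isize);

int main(void) {
    const char src[] = "yada yada yada yada yada yada yada yada";
    int isize = (int)sizeof src;                    /* original size, kept by the caller */
    char *comp = malloc(LZ4_compressBound(isize));  /* worst case: isize + isize/255 + 16 */
    char *back = malloc(isize);

    int csize = LZ4_compress(src, comp, isize);     /* > 0 when LZ4_COMPRESSMIN is 0 */
    int read  = LZ4_uncompress(comp, back, isize);  /* osize == the known original size */
    if (read < 0)
        printf("malformed stream at input byte %d\n", -read);
    else
        printf("%d -> %d -> %d bytes, match=%d\n",
               isize, csize, isize, memcmp(src, back, isize) == 0);

    free(comp); free(back);
    return 0;
}
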
diff --git a/bundles/org.simantics.fastlz/native/lz4_format_description.txt b/bundles/org.simantics.fastlz/native/lz4_format_description.txt
deleted file mode 100644 (file)
index a170dde..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-LZ4 Format Description\r
-Last revised: 2012-02-27\r
-Author : Y. Collet\r
-\r
-\r
-\r
-This small specification intends to provide enough information\r
-to anyone willing to produce LZ4-compatible compressed streams\r
-using any programming language.\r
-\r
-LZ4 is an LZ77-type compressor with a fixed, byte-oriented encoding.\r
-The most important design principle behind LZ4 is simplicity.\r
-It helps to create source code that is easy to read and maintain.\r
-It also helps later on with optimisations, compactness, and speed.\r
-There is no entropy encoder backend nor framing layer.\r
-The latter is assumed to be handled by other parts of the system.\r
-\r
-This document only describes the format,\r
-not how the LZ4 compressor nor decompressor actually work.\r
-The correctness of the decompressor should not depend\r
-on implementation details of the compressor, and vice versa.\r
-\r
-\r
-\r
--- Compressed stream format --\r
-\r
-An LZ4 compressed stream is composed of sequences.\r
-Schematically, a sequence is a suite of literals, followed by a match copy.\r
-\r
-Each sequence starts with a token.\r
-The token is a one-byte value, separated into two 4-bit fields.\r
-Therefore each field ranges from 0 to 15.\r
-\r
-\r
-The first field uses the 4 high bits of the token.\r
-It provides the length of the literals to follow.\r
-(Note : a literal is an uncompressed byte).\r
-If the field value is 0, then there is no literal.\r
-If it is 15, then we need to add some more bytes to indicate the full length.\r
-Each additional byte then represents a value from 0 to 255,\r
-which is added to the previous value to produce a total length.\r
-When the byte value is 255, another byte is output.\r
-There can be any number of bytes following the token. There is no "size limit".\r
-(Side note: this is why a non-compressible input stream is expanded by 0.4%).\r
-\r
-Example 1 : A length of 48 will be represented as :\r
-- 15 : value for the 4-bit high field\r
-- 33 : (=48-15) remaining length to reach 48\r
-\r
-Example 2 : A length of 280 will be represented as :\r
-- 15  : value for the 4-bit high field\r
-- 255 : following byte is maxed, since 280-15 >= 255\r
-- 10  : (=280 - 15 - 255) remaining length to reach 280\r
-\r
-Example 3 : A length of 15 will be represented as :\r
-- 15 : value for the 4-bit high field\r
-- 0  : (=15-15) yes, the zero must be output\r
-\r
-Following the token and optional length bytes are the literals themselves.\r
-They are exactly as numerous as previously decoded (length of literals).\r
-It's possible that there are zero literals.\r
-\r
-\r
-Following the literals is the match copy operation.\r
-\r
-It starts with the offset.\r
-This is a 2-byte value, in little-endian format :\r
-the lower byte is the first one in the stream.\r
-\r
-The offset represents the position of the match to be copied from.\r
-1 means "current position - 1 byte".\r
-The maximum offset value is 65535; 65536 cannot be coded.\r
-Note that 0 is an invalid value and is never used.\r
-\r
-Then we need to extract the match length.\r
-For this, we use the second token field, the low 4-bits.\r
-Value, obviously, ranges from 0 to 15.\r
-However here, 0 means that the copy operation will be minimal.\r
-The minimum length of a match, called minmatch, is 4. \r
-As a consequence, a 0 value means 4 bytes, and a value of 15 means 19+ bytes.\r
-Similar to literal length, on reaching the highest possible value (15), \r
-we output additional bytes, one at a time, with values ranging from 0 to 255.\r
-They are added to the total to provide the final match length.\r
-A 255 value means there is another byte to read and add.\r
-There is no limit to the number of optional bytes that can be output this way.\r
-(This points towards a maximum achievable compression ratio of ~250).\r
-\r
-With the offset and the matchlength,\r
-the decoder can now proceed to copy the data from the already decoded buffer.\r
-On decoding the matchlength, we reach the end of the compressed sequence,\r
-and therefore start another one.\r
-\r
-\r
--- Parsing restrictions --\r
-\r
-There are specific parsing rules to respect in order to remain compatible\r
-with assumptions made by the decoder :\r
-1) The last 5 bytes are always literals\r
-2) The last match must start at least 12 bytes before end of stream\r
-Consequently, a file with fewer than 13 bytes cannot be compressed.\r
-These rules are in place to ensure that the decoder\r
-will never read beyond the input buffer, nor write beyond the output buffer.\r
-\r
-Note that the last sequence is also incomplete,\r
-and stops right after the literals.\r
-\r
-\r
--- Additional notes --\r
-\r
-There are no assumptions or limits on the way the compressor\r
-searches and selects matches within the source stream.\r
-It could be a fast scan, a multi-probe, a full search using BST,\r
-standard hash chains or MMC, or anything else.\r
-\r
-Advanced parsing strategies can also be implemented, such as lazy match,\r
-or full optimal parsing.\r
-\r
-All these trade-offs offer distinct speed/memory/compression advantages.\r
-Whatever the method used by the compressor, its result will be decodable\r
-by any LZ4 decoder if it follows the format specification described above.\r
-\r
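
The length coding above (a 4-bit field, then 0..255 continuation bytes, where each 255 means one more byte follows) is the part implementers most often get wrong by one. A minimal sketch of the literal-length side in C, reproducing examples 1-3; writeLiteralLength is an illustrative name, not part of the sources above:

#include <stdio.h>

/* Emits the literal-length field of a token plus continuation bytes, per the
   rules above: field = min(len, 15); if 15, (len - 15) follows as a run of
   0..255 bytes, where each 255 means another byte follows (a final 0 is valid). */
static unsigned char *writeLiteralLength(unsigned char *op, int len) {
    if (len < 15) {
        *op++ = (unsigned char)(len << 4);  /* high 4 bits of the token */
    } else {
        *op++ = 15 << 4;
        len -= 15;
        while (len >= 255) { *op++ = 255; len -= 255; }
        *op++ = (unsigned char)len;         /* 0 is legitimate, as in example 3 */
    }
    return op;
}

int main(void) {
    unsigned char buf[8];
    unsigned char *end = writeLiteralLength(buf, 280);  /* example 2 */
    printf("%d bytes:", (int)(end - buf));              /* expected: 3 bytes: F0 FF 0A */
    for (unsigned char *p = buf; p < end; p++) printf(" %02X", *p);
    printf("\n");
    return 0;
}
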
diff --git a/bundles/org.simantics.fastlz/native/lz4hc.c b/bundles/org.simantics.fastlz/native/lz4hc.c
deleted file mode 100644 (file)
index cca755c..0000000
+++ /dev/null
@@ -1,663 +0,0 @@
-/*\r
-   LZ4 HC - High Compression Mode of LZ4\r
-   Copyright (C) 2011-2012, Yann Collet.\r
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\r
-\r
-   Redistribution and use in source and binary forms, with or without\r
-   modification, are permitted provided that the following conditions are\r
-   met:\r
-\r
-       * Redistributions of source code must retain the above copyright\r
-   notice, this list of conditions and the following disclaimer.\r
-       * Redistributions in binary form must reproduce the above\r
-   copyright notice, this list of conditions and the following disclaimer\r
-   in the documentation and/or other materials provided with the\r
-   distribution.\r
-\r
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
-\r
-   You can contact the author at :\r
-   - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html\r
-   - LZ4 source repository : http://code.google.com/p/lz4/\r
-*/\r
-\r
-\r
-//**************************************\r
-// CPU Feature Detection\r
-//**************************************\r
-// 32 or 64 bits ?\r
-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || defined(__LP64__) || defined(_LP64) )   // Detects 64 bits mode\r
-#define LZ4_ARCH64 1\r
-#else\r
-#define LZ4_ARCH64 0\r
-#endif\r
-\r
-// Little Endian or Big Endian ? \r
-#if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || defined(__PPC) || defined(PPC) || defined(__powerpc__) || defined(__powerpc) || defined(powerpc) || ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))) )\r
-#define LZ4_BIG_ENDIAN 1\r
-#else\r
-// Little Endian assumed. PDP Endian and other very rare endian formats are unsupported.\r
-#endif\r
-\r
-// Unaligned memory access is automatically enabled for "common" CPU, such as x86.\r
-// For other CPUs, the compiler will be more cautious, and insert extra code to ensure aligned access is respected\r
-// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance\r
-#if defined(__ARM_FEATURE_UNALIGNED)\r
-#define LZ4_FORCE_UNALIGNED_ACCESS 1\r
-#endif\r
-\r
-\r
-//**************************************\r
-// Compiler Options\r
-//**************************************\r
-#if __STDC_VERSION__ >= 199901L    // C99\r
-  /* "restrict" is a known keyword */\r
-#else\r
-#define restrict  // Disable restrict\r
-#endif\r
-\r
-#ifdef _MSC_VER\r
-#define inline __forceinline    // Visual is not C99, but supports some kind of inline\r
-#endif\r
-\r
-#ifdef _MSC_VER  // Visual Studio\r
-#define bswap16(x) _byteswap_ushort(x)\r
-#else\r
-#define bswap16(x)  ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))\r
-#endif\r
-\r
-\r
-//**************************************\r
-// Includes\r
-//**************************************\r
-#include <stdlib.h>   // calloc, free\r
-#include <string.h>   // memset, memcpy\r
-#include "lz4hc.h"\r
-\r
-#define ALLOCATOR(s) calloc(1,s)\r
-#define FREEMEM free\r
-#define MEM_INIT memset\r
-\r
-\r
-//**************************************\r
-// Basic Types\r
-//**************************************\r
-#if defined(_MSC_VER)    // Visual Studio does not support 'stdint' natively\r
-#define BYTE   unsigned __int8\r
-#define U16            unsigned __int16\r
-#define U32            unsigned __int32\r
-#define S32            __int32\r
-#define U64            unsigned __int64\r
-#else\r
-#include <stdint.h>\r
-#define BYTE   uint8_t\r
-#define U16            uint16_t\r
-#define U32            uint32_t\r
-#define S32            int32_t\r
-#define U64            uint64_t\r
-#endif\r
-\r
-#ifndef LZ4_FORCE_UNALIGNED_ACCESS\r
-#pragma pack(push, 1) \r
-#endif\r
-\r
-typedef struct _U16_S { U16 v; } U16_S;\r
-typedef struct _U32_S { U32 v; } U32_S;\r
-typedef struct _U64_S { U64 v; } U64_S;\r
-\r
-#ifndef LZ4_FORCE_UNALIGNED_ACCESS\r
-#pragma pack(pop) \r
-#endif\r
-\r
-#define A64(x) (((U64_S *)(x))->v)\r
-#define A32(x) (((U32_S *)(x))->v)\r
-#define A16(x) (((U16_S *)(x))->v)\r
-\r
-\r
-//**************************************\r
-// Constants\r
-//**************************************\r
-#define MINMATCH 4\r
-\r
-#define DICTIONARY_LOGSIZE 16\r
-#define MAXD (1<<DICTIONARY_LOGSIZE)\r
-#define MAXD_MASK ((U32)(MAXD - 1))\r
-#define MAX_DISTANCE (MAXD - 1)\r
-\r
-#define HASH_LOG (DICTIONARY_LOGSIZE-1)\r
-#define HASHTABLESIZE (1 << HASH_LOG)\r
-#define HASH_MASK (HASHTABLESIZE - 1)\r
-\r
-#define MAX_NB_ATTEMPTS 256\r
-\r
-#define ML_BITS  4\r
-#define ML_MASK  (size_t)((1U<<ML_BITS)-1)\r
-#define RUN_BITS (8-ML_BITS)\r
-#define RUN_MASK ((1U<<RUN_BITS)-1)\r
-\r
-#define COPYLENGTH 8\r
-#define LASTLITERALS 5\r
-#define MFLIMIT (COPYLENGTH+MINMATCH)\r
-#define MINLENGTH (MFLIMIT+1)\r
-#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)\r
-\r
-\r
-//**************************************\r
-// Architecture-specific macros\r
-//**************************************\r
-#if LZ4_ARCH64 // 64-bit\r
-#define STEPSIZE 8\r
-#define LZ4_COPYSTEP(s,d)              A64(d) = A64(s); d+=8; s+=8;\r
-#define LZ4_COPYPACKET(s,d)            LZ4_COPYSTEP(s,d)\r
-#define UARCH U64\r
-#define AARCH A64\r
-#define HTYPE                                  U32\r
-#define INITBASE(b,s)                  const BYTE* const b = s\r
-#else          // 32-bit\r
-#define STEPSIZE 4\r
-#define LZ4_COPYSTEP(s,d)              A32(d) = A32(s); d+=4; s+=4;\r
-#define LZ4_COPYPACKET(s,d)            LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);\r
-#define UARCH U32\r
-#define AARCH A32\r
-#define HTYPE                                  const BYTE*\r
-#define INITBASE(b,s)              const int b = 0\r
-#endif\r
-\r
-#if defined(LZ4_BIG_ENDIAN)\r
-#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = bswap16(v); d = (s) - v; }\r
-#define LZ4_WRITE_LITTLEENDIAN_16(p,i)  { U16 v = (U16)(i); v = bswap16(v); A16(p) = v; p+=2; }\r
-#else          // Little Endian\r
-#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }\r
-#define LZ4_WRITE_LITTLEENDIAN_16(p,v)  { A16(p) = v; p+=2; }\r
-#endif\r
-\r
-\r
-//************************************************************\r
-// Local Types\r
-//************************************************************\r
-typedef struct \r
-{\r
-       const BYTE* base;\r
-       HTYPE hashTable[HASHTABLESIZE];\r
-       U16 chainTable[MAXD];\r
-       const BYTE* nextToUpdate;\r
-} LZ4HC_Data_Structure;\r
-\r
-\r
-//**************************************\r
-// Macros\r
-//**************************************\r
-#define LZ4_WILDCOPY(s,d,e)            do { LZ4_COPYPACKET(s,d) } while (d<e);\r
-#define LZ4_BLINDCOPY(s,d,l)   { BYTE* e=d+l; LZ4_WILDCOPY(s,d,e); d=e; }\r
-#define HASH_FUNCTION(i)       (((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG))\r
-#define HASH_VALUE(p)          HASH_FUNCTION(*(U32*)(p))\r
-#define HASH_POINTER(p)                (HashTable[HASH_VALUE(p)] + base)\r
-#define DELTANEXT(p)           chainTable[(size_t)(p) & MAXD_MASK] \r
-#define GETNEXT(p)                     ((p) - (size_t)DELTANEXT(p))\r
-#define ADD_HASH(p)                    { size_t delta = (p) - HASH_POINTER(p); if (delta>MAX_DISTANCE) delta = MAX_DISTANCE; DELTANEXT(p) = (U16)delta; HashTable[HASH_VALUE(p)] = (p) - base; }\r
-\r
-\r
-//**************************************\r
-// Private functions\r
-//**************************************\r
-#if LZ4_ARCH64\r
-\r
-inline static int LZ4_NbCommonBytes (register U64 val)\r
-{\r
-#if defined(LZ4_BIG_ENDIAN)\r
-    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)\r
-    unsigned long r = 0;\r
-    _BitScanReverse64( &r, val );\r
-    return (int)(r>>3);\r
-    #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)\r
-    return (__builtin_clzll(val) >> 3); \r
-    #else\r
-       int r;\r
-       if (!(val>>32)) { r=4; } else { r=0; val>>=32; }\r
-       if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }\r
-       r += (!val);\r
-       return r;\r
-    #endif\r
-#else\r
-    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)\r
-    unsigned long r = 0;\r
-    _BitScanForward64( &r, val );\r
-    return (int)(r>>3);\r
-    #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)\r
-    return (__builtin_ctzll(val) >> 3); \r
-    #else\r
-       static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };\r
-       return DeBruijnBytePos[((U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58];\r
-    #endif\r
-#endif\r
-}\r
-\r
-#else\r
-\r
-inline static int LZ4_NbCommonBytes (register U32 val)\r
-{\r
-#if defined(LZ4_BIG_ENDIAN)\r
-    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)\r
-    unsigned long r = 0;\r
-    _BitScanReverse( &r, val );\r
-    return (int)(r>>3);\r
-    #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)\r
-    return (__builtin_clz(val) >> 3); \r
-    #else\r
-       int r;\r
-       if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }\r
-       r += (!val);\r
-       return r;\r
-    #endif\r
-#else\r
-    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)\r
-    unsigned long r = 0;\r
-    _BitScanForward( &r, val );\r
-    return (int)(r>>3);\r
-    #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)\r
-    return (__builtin_ctz(val) >> 3); \r
-    #else\r
-       static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };\r
-       return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];\r
-    #endif\r
-#endif\r
-}\r
-\r
-#endif\r
-\r
-\r
-inline static int LZ4HC_Init (LZ4HC_Data_Structure* hc4, const BYTE* base)\r
-{\r
-       MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable));\r
-       MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));\r
-       hc4->nextToUpdate = base + LZ4_ARCH64;\r
-       hc4->base = base;\r
-       return 1;\r
-}\r
-\r
-\r
-inline static void* LZ4HC_Create (const BYTE* base)\r
-{\r
-       void* hc4 = ALLOCATOR(sizeof(LZ4HC_Data_Structure));\r
-\r
-       LZ4HC_Init (hc4, base);\r
-       return hc4;\r
-}\r
-\r
-\r
-inline static int LZ4HC_Free (void** LZ4HC_Data)\r
-{\r
-       FREEMEM(*LZ4HC_Data);\r
-       *LZ4HC_Data = NULL;\r
-       return (1);\r
-}\r
-\r
-\r
-inline static void LZ4HC_Insert (LZ4HC_Data_Structure* hc4, const BYTE* ip)\r
-{\r
-       U16*   chainTable = hc4->chainTable;\r
-       HTYPE* HashTable  = hc4->hashTable;\r
-       INITBASE(base,hc4->base);\r
-\r
-       while(hc4->nextToUpdate < ip)\r
-       {\r
-               ADD_HASH(hc4->nextToUpdate);\r
-               hc4->nextToUpdate++;\r
-       }\r
-}\r
-\r
-\r
-inline static int LZ4HC_InsertAndFindBestMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* const matchlimit, const BYTE** matchpos)\r
-{\r
-       U16* const chainTable = hc4->chainTable;\r
-       HTYPE* const HashTable = hc4->hashTable;\r
-       const BYTE* ref;\r
-       INITBASE(base,hc4->base);\r
-       int nbAttempts=MAX_NB_ATTEMPTS;\r
-       int ml=0;\r
-\r
-       // HC4 match finder\r
-       LZ4HC_Insert(hc4, ip);\r
-       ref = HASH_POINTER(ip);\r
-       while ((ref > (ip-MAX_DISTANCE)) && (nbAttempts))\r
-       {\r
-               nbAttempts--;\r
-               if (*(ref+ml) == *(ip+ml))\r
-               if (*(U32*)ref == *(U32*)ip)\r
-               {\r
-                       const BYTE* reft = ref+MINMATCH;\r
-                       const BYTE* ipt = ip+MINMATCH;\r
-\r
-                       while (ipt<matchlimit-(STEPSIZE-1))\r
-                       {\r
-                               UARCH diff = AARCH(reft) ^ AARCH(ipt);\r
-                               if (!diff) { ipt+=STEPSIZE; reft+=STEPSIZE; continue; }\r
-                               ipt += LZ4_NbCommonBytes(diff);\r
-                               goto _endCount;\r
-                       }\r
-                       if (LZ4_ARCH64) if ((ipt<(matchlimit-3)) && (A32(reft) == A32(ipt))) { ipt+=4; reft+=4; }\r
-                       if ((ipt<(matchlimit-1)) && (A16(reft) == A16(ipt))) { ipt+=2; reft+=2; }\r
-                       if ((ipt<matchlimit) && (*reft == *ipt)) ipt++;\r
-_endCount:\r
-\r
-                       if (ipt-ip > ml) { ml = ipt-ip; *matchpos = ref; }\r
-               }\r
-               ref = GETNEXT(ref);\r
-       }\r
-\r
-       return ml;\r
-}\r
-\r
-\r
-inline static int LZ4HC_InsertAndGetWiderMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* startLimit, const BYTE* matchlimit, int longest, const BYTE** matchpos, const BYTE** startpos)\r
-{\r
-       U16* const  chainTable = hc4->chainTable;\r
-       HTYPE* const HashTable = hc4->hashTable;\r
-       INITBASE(base,hc4->base);\r
-       const BYTE*  ref;\r
-       int nbAttempts = MAX_NB_ATTEMPTS;\r
-       int delta = ip-startLimit;\r
-\r
-       // First Match\r
-       LZ4HC_Insert(hc4, ip);\r
-       ref = HASH_POINTER(ip);\r
-\r
-       while ((ref > ip-MAX_DISTANCE) && (ref >= hc4->base) && (nbAttempts))\r
-       {\r
-               nbAttempts--;\r
-               if (*(startLimit + longest) == *(ref - delta + longest))\r
-               if (*(U32*)ref == *(U32*)ip)\r
-               {\r
-                       const BYTE* reft = ref+MINMATCH;\r
-                       const BYTE* ipt = ip+MINMATCH;\r
-                       const BYTE* startt = ip;\r
-\r
-                       while (ipt<matchlimit-(STEPSIZE-1))\r
-                       {\r
-                               UARCH diff = AARCH(reft) ^ AARCH(ipt);\r
-                               if (!diff) { ipt+=STEPSIZE; reft+=STEPSIZE; continue; }\r
-                               ipt += LZ4_NbCommonBytes(diff);\r
-                               goto _endCount;\r
-                       }\r
-                       if (LZ4_ARCH64) if ((ipt<(matchlimit-3)) && (A32(reft) == A32(ipt))) { ipt+=4; reft+=4; }\r
-                       if ((ipt<(matchlimit-1)) && (A16(reft) == A16(ipt))) { ipt+=2; reft+=2; }\r
-                       if ((ipt<matchlimit) && (*reft == *ipt)) ipt++;\r
-_endCount:\r
-\r
-                       reft = ref;\r
-                       while ((startt>startLimit) && (reft > hc4->base) && (startt[-1] == reft[-1])) {startt--; reft--;}\r
-\r
-                       if ((ipt-startt) > longest)\r
-                       {\r
-                               longest = ipt-startt;\r
-                               *matchpos = reft;\r
-                               *startpos = startt;\r
-                       }\r
-               }\r
-               ref = GETNEXT(ref);\r
-       }\r
-\r
-       return longest;\r
-}\r
-\r
-\r
-inline static int LZ4_encodeSequence(const BYTE** ip, BYTE** op, const BYTE** anchor, int ml, const BYTE* ref)\r
-{\r
-       int length, len; \r
-       BYTE* token;\r
-\r
-       // Encode Literal length\r
-       length = *ip - *anchor;\r
-       token = (*op)++;\r
-       if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *(*op)++ = 255;  *(*op)++ = (BYTE)len; } \r
-       else *token = (length<<ML_BITS);\r
-\r
-       // Copy Literals\r
-       LZ4_BLINDCOPY(*anchor, *op, length);\r
-\r
-       // Encode Offset\r
-       LZ4_WRITE_LITTLEENDIAN_16(*op,*ip-ref);\r
-\r
-       // Encode MatchLength\r
-       len = (int)(ml-MINMATCH);\r
-       if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *(*op)++ = 255; *(*op)++ = 255; } if (len > 254) { len-=255; *(*op)++ = 255; } *(*op)++ = (BYTE)len; } \r
-       else *token += len;     \r
-\r
-       // Prepare next loop\r
-       *ip += ml;\r
-       *anchor = *ip; \r
-\r
-       return 0;\r
-}\r
-\r
-\r
-//****************************\r
-// Compression CODE\r
-//****************************\r
-\r
-int LZ4_compressHCCtx(LZ4HC_Data_Structure* ctx,\r
-                                const char* source, \r
-                                char* dest,\r
-                                int isize)\r
-{      \r
-       const BYTE* ip = (const BYTE*) source;\r
-       const BYTE* anchor = ip;\r
-       const BYTE* const iend = ip + isize;\r
-       const BYTE* const mflimit = iend - MFLIMIT;\r
-       const BYTE* const matchlimit = (iend - LASTLITERALS);\r
-\r
-       BYTE* op = (BYTE*) dest;\r
-\r
-       int     ml, ml2, ml3, ml0;\r
-       const BYTE* ref=NULL;\r
-       const BYTE* start2=NULL;\r
-       const BYTE* ref2=NULL;\r
-       const BYTE* start3=NULL;\r
-       const BYTE* ref3=NULL;\r
-       const BYTE* start0;\r
-       const BYTE* ref0;\r
-\r
-       ip++;\r
-\r
-       // Main Loop\r
-       while (ip < mflimit)\r
-       {\r
-               ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, (&ref));\r
-               if (!ml) { ip++; continue; }\r
-\r
-               // saved, in case we would skip too much\r
-               start0 = ip;\r
-               ref0 = ref;\r
-               ml0 = ml;\r
-\r
-_Search2:\r
-               if (ip+ml < mflimit)\r
-                       ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, ip + ml - 2, ip + 1, matchlimit, ml, &ref2, &start2);\r
-               else ml2=ml;\r
-\r
-               if (ml2 == ml)  // No better match\r
-               {\r
-                       LZ4_encodeSequence(&ip, &op, &anchor, ml, ref);\r
-                       continue;\r
-               }\r
-\r
-               if (start0 < ip)\r
-               {\r
-                       if (start2 < ip + ml0)   // empirical\r
-                       {\r
-                               ip = start0;\r
-                               ref = ref0;\r
-                               ml = ml0;\r
-                       }\r
-               }\r
-\r
-               // Here, start0==ip\r
-               if ((start2 - ip) < 3)   // First Match too small : removed\r
-               {\r
-                       ml = ml2;\r
-                       ip = start2;\r
-                       ref =ref2;\r
-                       goto _Search2;\r
-               }\r
-\r
-_Search3:\r
-               // Currently we have :\r
-               // ml2 > ml1, and\r
-               // ip1+3 <= ip2 (usually < ip1+ml1)\r
-               if ((start2 - ip) < OPTIMAL_ML)\r
-               {\r
-                       int correction;\r
-                       int new_ml = ml;\r
-                       if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;\r
-                       if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = start2 - ip + ml2 - MINMATCH;\r
-                       correction = new_ml - (start2 - ip);\r
-                       if (correction > 0)\r
-                       {\r
-                               start2 += correction;\r
-                               ref2 += correction;\r
-                               ml2 -= correction;\r
-                       }\r
-               }\r
-               // Now, we have start2 = ip+new_ml, with new_ml=min(ml, OPTIMAL_ML=18)\r
-\r
-               if (start2 + ml2 < mflimit)\r
-                       ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3);\r
-               else ml3=ml2;\r
-\r
-               if (ml3 == ml2) // No better match : 2 sequences to encode\r
-               {\r
-                       // ip & ref are known; Now for ml\r
-                       if (start2 < ip+ml)\r
-                       {\r
-                               if ((start2 - ip) < OPTIMAL_ML)\r
-                               {\r
-                                       int correction;\r
-                                       if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;\r
-                                       if (ip+ml > start2 + ml2 - MINMATCH) ml = start2 - ip + ml2 - MINMATCH;\r
-                                       correction = ml - (start2 - ip);\r
-                                       if (correction > 0)\r
-                                       {\r
-                                               start2 += correction;\r
-                                               ref2 += correction;\r
-                                               ml2 -= correction;\r
-                                       }\r
-                               }\r
-                               else\r
-                               {\r
-                                       ml = start2 - ip;\r
-                               }\r
-                       }\r
-                       // Now, encode 2 sequences\r
-                       LZ4_encodeSequence(&ip, &op, &anchor, ml, ref);\r
-                       ip = start2;\r
-                       LZ4_encodeSequence(&ip, &op, &anchor, ml2, ref2);\r
-                       continue;\r
-               }\r
-\r
-               if (start3 < ip+ml+3) // Not enough space for match 2 : remove it\r
-               {\r
-                       if (start3 >= (ip+ml)) // can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1\r
-                       {\r
-                               if (start2 < ip+ml)\r
-                               {\r
-                                       int correction = (ip+ml) - start2;\r
-                                       start2 += correction;\r
-                                       ref2 += correction;\r
-                                       ml2 -= correction;\r
-                                       if (ml2 < MINMATCH)\r
-                                       {\r
-                                               start2 = start3;\r
-                                               ref2 = ref3;\r
-                                               ml2 = ml3;\r
-                                       }\r
-                               }\r
-\r
-                               LZ4_encodeSequence(&ip, &op, &anchor, ml, ref);\r
-                               ip  = start3;\r
-                               ref = ref3;\r
-                               ml  = ml3;\r
-\r
-                               start0 = start2;\r
-                               ref0 = ref2;\r
-                               ml0 = ml2;\r
-                               goto _Search2;\r
-                       }\r
-\r
-                       start2 = start3;\r
-                       ref2 = ref3;\r
-                       ml2 = ml3;\r
-                       goto _Search3;\r
-               }\r
-\r
-               // OK, now we have 3 ascending matches; let's write at least the first one\r
-               // ip & ref are known; Now for ml\r
-               if (start2 < ip+ml)\r
-               {\r
-                       if ((start2 - ip) < (int)ML_MASK)\r
-                       {\r
-                               int correction;\r
-                               if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;\r
-                               if (ip + ml > start2 + ml2 - MINMATCH) ml = start2 - ip + ml2 - MINMATCH;\r
-                               correction = ml - (start2 - ip);\r
-                               if (correction > 0)\r
-                               {\r
-                                       start2 += correction;\r
-                                       ref2 += correction;\r
-                                       ml2 -= correction;\r
-                               }\r
-                       }\r
-                       else\r
-                       {\r
-                               ml = start2 - ip;\r
-                       }\r
-               }\r
-               LZ4_encodeSequence(&ip, &op, &anchor, ml, ref);\r
-\r
-               ip = start2;\r
-               ref = ref2;\r
-               ml = ml2;\r
-\r
-               start2 = start3;\r
-               ref2 = ref3;\r
-               ml2 = ml3;\r
-\r
-               goto _Search3;\r
-\r
-       }\r
-\r
-       // Encode Last Literals\r
-       {\r
-               int lastRun = iend - anchor;\r
-               if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } \r
-               else *op++ = (lastRun<<ML_BITS);\r
-               memcpy(op, anchor, iend - anchor);\r
-               op += iend-anchor;\r
-       } \r
-\r
-       // End\r
-       return (int) (((char*)op)-dest);\r
-}\r
-\r
-\r
-int LZ4_compressHC(const char* source, \r
-                                char* dest,\r
-                                int isize)\r
-{\r
-       void* ctx = LZ4HC_Create((const BYTE*)source);\r
-       int result = LZ4_compressHCCtx(ctx, source, dest, isize);\r
-       LZ4HC_Free (&ctx);\r
-\r
-       return result;\r
-}\r
-\r
-\r
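The length coding in the deleted file above (a one-byte token carrying the literal run length in its high nibble and the match length in its low nibble, with 255-valued continuation bytes for overflow) can be illustrated standalone. Below is a minimal C sketch — not part of the deleted sources, and `emit_literal_length` is a hypothetical helper name — mirroring the literal-length branch of LZ4_encodeSequence:

```c
#include <stdio.h>

/* Sketch of the literal-length encoding used by LZ4_encodeSequence:
   lengths < RUN_MASK (15) fit in the token's high nibble; longer runs
   store RUN_MASK in the token followed by 255-valued continuation bytes. */
#define ML_BITS  4
#define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1)

static void emit_literal_length(int length)  /* hypothetical helper */
{
    if (length >= (int)RUN_MASK) {
        int len = length - RUN_MASK;
        printf("token nibble: %u\n", (unsigned)RUN_MASK);
        for (; len > 254; len -= 255)
            printf("continuation byte: 255\n");
        printf("final byte: %d\n", len);
    } else {
        printf("token nibble: %d\n", length);
    }
}

int main(void)
{
    emit_literal_length(7);    /* fits in the token: nibble 7        */
    emit_literal_length(300);  /* 15 in the token, then 255, then 30 */
    return 0;
}
```

For length 300 this prints 15, 255 and 30, which sum back to 300 — the same arithmetic the encoder and decoder agree on.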
diff --git a/bundles/org.simantics.fastlz/native/lz4hc.h b/bundles/org.simantics.fastlz/native/lz4hc.h
deleted file mode 100644 (file)
index cb74689..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*\r
-   LZ4 HC - High Compression Mode of LZ4\r
-   Header File\r
-   Copyright (C) 2011-2012, Yann Collet.\r
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\r
-\r
-   Redistribution and use in source and binary forms, with or without\r
-   modification, are permitted provided that the following conditions are\r
-   met:\r
-\r
-       * Redistributions of source code must retain the above copyright\r
-   notice, this list of conditions and the following disclaimer.\r
-       * Redistributions in binary form must reproduce the above\r
-   copyright notice, this list of conditions and the following disclaimer\r
-   in the documentation and/or other materials provided with the\r
-   distribution.\r
-\r
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
-\r
-   You can contact the author at :\r
-   - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html\r
-   - LZ4 source repository : http://code.google.com/p/lz4/\r
-*/\r
-#pragma once\r
-\r
-\r
-#if defined (__cplusplus)\r
-extern "C" {\r
-#endif\r
-\r
-\r
-int LZ4_compressHC (const char* source, char* dest, int isize);\r
-\r
-/*\r
-LZ4_compressHC :\r
-       return : the number of bytes written into the compressed buffer dest\r
-       note : the destination buffer must already be allocated.\r
-               To avoid any problem, size it to handle the worst-case situation (input data not compressible).\r
-               A worst-case size evaluation is provided by the function LZ4_compressBound() (see "lz4.h").\r
-*/\r
-\r
-\r
-/* Note :\r
-Decompression functions are provided within regular LZ4 source code (see "lz4.h") (BSD license)\r
-*/\r
-\r
-\r
-#if defined (__cplusplus)\r
-}\r
-#endif\r
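As the header's note says, the destination buffer must be sized for incompressible input via LZ4_compressBound() from "lz4.h". A minimal caller — a sketch assuming the lz4.h of the same vintage, which declares `int LZ4_compressBound(int isize)` — might look like this:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lz4.h"    /* LZ4_compressBound() */
#include "lz4hc.h"  /* LZ4_compressHC()    */

int main(void)
{
    const char *src = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa highly compressible";
    int isize = (int)strlen(src);

    /* Size the destination for the worst case (incompressible input),
       as the header note recommends. */
    char *dst = malloc((size_t)LZ4_compressBound(isize));
    if (!dst)
        return 1;

    int csize = LZ4_compressHC(src, dst, isize);
    printf("%d bytes -> %d bytes\n", isize, csize);

    free(dst);
    return 0;
}
```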
diff --git a/bundles/org.simantics.fastlz/native/vs2012/fastlz.sln b/bundles/org.simantics.fastlz/native/vs2012/fastlz.sln
new file mode 100644 (file)
index 0000000..61b8965
--- /dev/null
@@ -0,0 +1,26 @@
+\r
+Microsoft Visual Studio Solution File, Format Version 12.00\r
+# Visual Studio Express 2012 for Windows Desktop\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fastlz", "fastlz.vcxproj", "{2C249AD2-A0AE-4A88-8DCD-71F96133690E}"\r
+EndProject\r
+Global\r
+       GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
+               Debug|Win32 = Debug|Win32\r
+               Debug|x64 = Debug|x64\r
+               Release|Win32 = Release|Win32\r
+               Release|x64 = Release|x64\r
+       EndGlobalSection\r
+       GlobalSection(ProjectConfigurationPlatforms) = postSolution\r
+               {2C249AD2-A0AE-4A88-8DCD-71F96133690E}.Debug|Win32.ActiveCfg = Debug|Win32\r
+               {2C249AD2-A0AE-4A88-8DCD-71F96133690E}.Debug|Win32.Build.0 = Debug|Win32\r
+               {2C249AD2-A0AE-4A88-8DCD-71F96133690E}.Debug|x64.ActiveCfg = Debug|x64\r
+               {2C249AD2-A0AE-4A88-8DCD-71F96133690E}.Debug|x64.Build.0 = Debug|x64\r
+               {2C249AD2-A0AE-4A88-8DCD-71F96133690E}.Release|Win32.ActiveCfg = Release|Win32\r
+               {2C249AD2-A0AE-4A88-8DCD-71F96133690E}.Release|Win32.Build.0 = Release|Win32\r
+               {2C249AD2-A0AE-4A88-8DCD-71F96133690E}.Release|x64.ActiveCfg = Release|x64\r
+               {2C249AD2-A0AE-4A88-8DCD-71F96133690E}.Release|x64.Build.0 = Release|x64\r
+       EndGlobalSection\r
+       GlobalSection(SolutionProperties) = preSolution\r
+               HideSolutionNode = FALSE\r
+       EndGlobalSection\r
+EndGlobal\r
diff --git a/bundles/org.simantics.fastlz/native/vs2012/fastlz.vcxproj b/bundles/org.simantics.fastlz/native/vs2012/fastlz.vcxproj
new file mode 100644 (file)
index 0000000..9d1203b
--- /dev/null
@@ -0,0 +1,188 @@
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+  <ItemGroup Label="ProjectConfigurations">\r
+    <ProjectConfiguration Include="Debug|Win32">\r
+      <Configuration>Debug</Configuration>\r
+      <Platform>Win32</Platform>\r
+    </ProjectConfiguration>\r
+    <ProjectConfiguration Include="Debug|x64">\r
+      <Configuration>Debug</Configuration>\r
+      <Platform>x64</Platform>\r
+    </ProjectConfiguration>\r
+    <ProjectConfiguration Include="Release|Win32">\r
+      <Configuration>Release</Configuration>\r
+      <Platform>Win32</Platform>\r
+    </ProjectConfiguration>\r
+    <ProjectConfiguration Include="Release|x64">\r
+      <Configuration>Release</Configuration>\r
+      <Platform>x64</Platform>\r
+    </ProjectConfiguration>\r
+  </ItemGroup>\r
+  <PropertyGroup Label="Globals">\r
+    <ProjectGuid>{2C249AD2-A0AE-4A88-8DCD-71F96133690E}</ProjectGuid>\r
+    <RootNamespace>fastlz</RootNamespace>\r
+    <Keyword>Win32Proj</Keyword>\r
+  </PropertyGroup>\r
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\r
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">\r
+    <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+    <CharacterSet>Unicode</CharacterSet>\r
+    <WholeProgramOptimization>true</WholeProgramOptimization>\r
+    <PlatformToolset>v110</PlatformToolset>\r
+  </PropertyGroup>\r
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">\r
+    <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+    <CharacterSet>Unicode</CharacterSet>\r
+    <PlatformToolset>v110</PlatformToolset>\r
+  </PropertyGroup>\r
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">\r
+    <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+    <CharacterSet>Unicode</CharacterSet>\r
+    <WholeProgramOptimization>true</WholeProgramOptimization>\r
+    <PlatformToolset>v110</PlatformToolset>\r
+  </PropertyGroup>\r
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">\r
+    <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+    <CharacterSet>Unicode</CharacterSet>\r
+    <PlatformToolset>v110</PlatformToolset>\r
+  </PropertyGroup>\r
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />\r
+  <ImportGroup Label="ExtensionSettings">\r
+  </ImportGroup>\r
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">\r
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+  </ImportGroup>\r
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">\r
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+  </ImportGroup>\r
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">\r
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+  </ImportGroup>\r
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">\r
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+  </ImportGroup>\r
+  <PropertyGroup Label="UserMacros" />\r
+  <PropertyGroup>\r
+    <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>\r
+    <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)$(Configuration)\</OutDir>\r
+    <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Configuration)\</IntDir>\r
+    <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</LinkIncremental>\r
+    <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>\r
+    <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Platform)\$(Configuration)\</IntDir>\r
+    <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental>\r
+    <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>\r
+    <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Platform)\$(Configuration)\</IntDir>\r
+    <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental>\r
+    <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>\r
+    <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Platform)\$(Configuration)\</IntDir>\r
+    <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental>\r
+  </PropertyGroup>\r
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+    <ClCompile>\r
+      <Optimization>Disabled</Optimization>\r
+      <AdditionalIncludeDirectories>$(JAVA_HOME)\include;$(JAVA_HOME)\include\win32;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+      <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;FASTLZ_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+      <MinimalRebuild>true</MinimalRebuild>\r
+      <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>\r
+      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+      <PrecompiledHeader>\r
+      </PrecompiledHeader>\r
+      <WarningLevel>Level3</WarningLevel>\r
+      <DebugInformationFormat>EditAndContinue</DebugInformationFormat>\r
+    </ClCompile>\r
+    <Link>\r
+      <GenerateDebugInformation>true</GenerateDebugInformation>\r
+      <SubSystem>Windows</SubSystem>\r
+      <TargetMachine>MachineX86</TargetMachine>\r
+    </Link>\r
+    <PostBuildEvent>\r
+      <Command>copy "$(TargetPath)" "$(ProjectDir)..\..\src\fastlz-windows-x86.dll"</Command>\r
+    </PostBuildEvent>\r
+  </ItemDefinitionGroup>\r
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">\r
+    <Midl>\r
+      <TargetEnvironment>X64</TargetEnvironment>\r
+    </Midl>\r
+    <ClCompile>\r
+      <Optimization>Disabled</Optimization>\r
+      <AdditionalIncludeDirectories>$(JAVA_HOME)\include;$(JAVA_HOME)\include\win32;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+      <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;FASTLZ_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+      <MinimalRebuild>true</MinimalRebuild>\r
+      <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>\r
+      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+      <PrecompiledHeader>\r
+      </PrecompiledHeader>\r
+      <WarningLevel>Level3</WarningLevel>\r
+      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+    </ClCompile>\r
+    <Link>\r
+      <GenerateDebugInformation>true</GenerateDebugInformation>\r
+      <SubSystem>Windows</SubSystem>\r
+      <TargetMachine>MachineX64</TargetMachine>\r
+    </Link>\r
+    <PostBuildEvent>\r
+      <Command>copy "$(TargetPath)" "$(ProjectDir)..\..\src\fastlz-windows-x86_64.dll"</Command>\r
+    </PostBuildEvent>\r
+  </ItemDefinitionGroup>\r
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">\r
+    <ClCompile>\r
+      <Optimization>MaxSpeed</Optimization>\r
+      <IntrinsicFunctions>true</IntrinsicFunctions>\r
+      <AdditionalIncludeDirectories>$(JAVA_HOME)\include;$(JAVA_HOME)\include\win32;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+      <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;FASTLZ_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>\r
+      <FunctionLevelLinking>true</FunctionLevelLinking>\r
+      <PrecompiledHeader>\r
+      </PrecompiledHeader>\r
+      <WarningLevel>Level3</WarningLevel>\r
+      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+    </ClCompile>\r
+    <Link>\r
+      <GenerateDebugInformation>true</GenerateDebugInformation>\r
+      <SubSystem>Windows</SubSystem>\r
+      <OptimizeReferences>true</OptimizeReferences>\r
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+      <TargetMachine>MachineX86</TargetMachine>\r
+    </Link>\r
+    <PostBuildEvent>\r
+      <Command>copy "$(TargetPath)" "$(ProjectDir)..\..\src\fastlz-windows-x86.dll"</Command>\r
+    </PostBuildEvent>\r
+  </ItemDefinitionGroup>\r
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+    <Midl>\r
+      <TargetEnvironment>X64</TargetEnvironment>\r
+    </Midl>\r
+    <ClCompile>\r
+      <Optimization>MaxSpeed</Optimization>\r
+      <IntrinsicFunctions>true</IntrinsicFunctions>\r
+      <AdditionalIncludeDirectories>$(JAVA_HOME)\include;$(JAVA_HOME)\include\win32;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+      <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;FASTLZ_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>\r
+      <FunctionLevelLinking>true</FunctionLevelLinking>\r
+      <PrecompiledHeader>\r
+      </PrecompiledHeader>\r
+      <WarningLevel>Level3</WarningLevel>\r
+      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+    </ClCompile>\r
+    <Link>\r
+      <GenerateDebugInformation>true</GenerateDebugInformation>\r
+      <SubSystem>Windows</SubSystem>\r
+      <OptimizeReferences>true</OptimizeReferences>\r
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+      <TargetMachine>MachineX64</TargetMachine>\r
+    </Link>\r
+    <PostBuildEvent>\r
+      <Command>copy "$(TargetPath)" "$(ProjectDir)..\..\src\fastlz-windows-x86_64.dll"</Command>\r
+    </PostBuildEvent>\r
+  </ItemDefinitionGroup>\r
+  <ItemGroup>\r
+    <ClCompile Include="..\fastlz.c" />\r
+    <ClCompile Include="..\jniWrapper.c" />\r
+  </ItemGroup>\r
+  <ItemGroup>\r
+    <ClInclude Include="..\fastlz.h" />\r
+  </ItemGroup>\r
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r
+  <ImportGroup Label="ExtensionTargets">\r
+  </ImportGroup>\r
+</Project>
\ No newline at end of file
diff --git a/bundles/org.simantics.fastlz/native/vs2012/fastlz.vcxproj.filters b/bundles/org.simantics.fastlz/native/vs2012/fastlz.vcxproj.filters
new file mode 100644 (file)
index 0000000..114e174
--- /dev/null
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+  <ItemGroup>\r
+    <Filter Include="Source Files">\r
+      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>\r
+      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>\r
+    </Filter>\r
+    <Filter Include="Header Files">\r
+      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>\r
+      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>\r
+    </Filter>\r
+    <Filter Include="Resource Files">\r
+      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>\r
+      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav</Extensions>\r
+    </Filter>\r
+  </ItemGroup>\r
+  <ItemGroup>\r
+    <ClCompile Include="..\fastlz.c">\r
+      <Filter>Source Files</Filter>\r
+    </ClCompile>\r
+    <ClCompile Include="..\jniWrapper.c">\r
+      <Filter>Source Files</Filter>\r
+    </ClCompile>\r
+  </ItemGroup>\r
+  <ItemGroup>\r
+    <ClInclude Include="..\fastlz.h">\r
+      <Filter>Resource Files</Filter>\r
+    </ClInclude>\r
+  </ItemGroup>\r
+</Project>
\ No newline at end of file
index d6ac30896efe9e8566d47e52927a19478b764576..290990b8deb0ccccbaea09ae7850ed3e8a266e77 100644 (file)
@@ -43,6 +43,11 @@ public class FastLZJavaInputStream extends DecompressingInputStream {
         super(stream, channel);\r
     }\r
 \r
+    @Override\r
+    protected ByteBuffer allocateBuffer(int capacity) {\r
+        return ByteBuffer.allocate(capacity);\r
+    }\r
+    \r
     @Override\r
     public void decompress(ByteBuffer compressed, int compressedOffset, int compressedSize, ByteBuffer uncompressed,\r
             int uncompressedOffset, int uncompressedSize) throws IOException {\r
index 177c8d1dbc8852f7c970e23b2d7448ed29a7b90f..572ee11890a095a461a46538c5e0c171498c146b 100644 (file)
@@ -60,7 +60,6 @@ public class FastLZBasicTests {
     @Test\r
     public void validateCompress() throws IOException {\r
         validateCompress(testData1);\r
-        validateCompress(new File("grades.snp"));\r
     }\r
 \r
     private void validateCompress(File testData) throws IOException {\r
index 362c08c0753448e2524f8a6fb4865baf0d1efb51..b75d2eb3ac6e587cf58d374f0a03f1a19a3f30a2 100644 (file)
Binary files a/bundles/org.simantics.g2d.ontology/graph.tg and b/bundles/org.simantics.g2d.ontology/graph.tg differ
index 6d785149f1272476c6e306f69d0660f9c5734ced..d64d3d56432796808e56877e2677d28d244fd975 100644 (file)
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: Compiler
 Bundle-SymbolicName: org.simantics.graph.compiler;singleton:=true
-Bundle-Version: 1.1.11.qualifier
+Bundle-Version: 1.1.15.qualifier
 Bundle-RequiredExecutionEnvironment: JavaSE-1.8
 Require-Bundle: org.simantics.graph;bundle-version="1.0.0";visibility:=reexport,
  org.simantics.ltk.antlr;bundle-version="1.0.0",
index 21761a3b156d2e404684a830745a33a5a9c043ad..5f30082a806a537d47a3c6a625a6311ebe2c450f 100644 (file)
@@ -124,7 +124,7 @@ public class GraphCompiler {
                        run(new CreateInverseRelations(graph, store));\r
                        run(new AddConsistsOf(paths, store));\r
                        run(new ConvertPreValues(graph, store, errors));\r
-                       run(new ReportCollisions(errors, store));\r
+                       run(new ReportCollisions(preferences, errors, store));\r
                        if(preferences.validate)\r
                                run(new ValidateGraph(graph, errors, store, preferences));\r
                        \r
index 67b11fbaa7a1abc257a4c7117387036f4ae89758..995960f94b5019322e562cf8accc10a68cd229b5 100644 (file)
@@ -85,7 +85,7 @@ public Token nextToken() {
                emit balancing number of DEDENT tokens.\r
             */\r
             if(iStack.size() <= 1)\r
-                return Token.EOF_TOKEN;\r
+                return getEOFToken();\r
             else {                \r
                 while(iStack.size() > 1) {\r
                     iStack.removeAt(iStack.size()-1);\r
@@ -450,4 +450,4 @@ mapAssignment
     : value '=' value\r
     -> ^(ASSIGNMENT value*) \r
     ;\r
-        
\ No newline at end of file
+        \r
index 98a10ec9769a48ca85a68659ca75acbb247aff50..d75d8cc93f96eb30710baf90a8b0bc1fd258746b 100644 (file)
@@ -108,7 +108,7 @@ public class GraphLexer extends Lexer {
                    emit balancing number of DEDENT tokens.\r
                 */\r
                 if(iStack.size() <= 1)\r
-                    return Token.EOF_TOKEN;\r
+                    return getEOFToken();\r
                 else {                \r
                     while(iStack.size() > 1) {\r
                         iStack.removeAt(iStack.size()-1);\r
@@ -2471,4 +2471,4 @@ public class GraphLexer extends Lexer {
     }\r
  \r
 \r
-}
\ No newline at end of file
+}\r
index e7346e94570c92edd76c0e46e4df56b2b87935d0..277ddc7b16cdb876d1bcef7760467a2e08519430 100644 (file)
@@ -2,22 +2,34 @@ package org.simantics.graph.compiler.internal.validation;
 \r
 import java.util.Collection;\r
 \r
+import org.simantics.graph.compiler.GraphCompilerPreferences;\r
 import org.simantics.graph.compiler.internal.store.LocationStore;\r
 import org.simantics.graph.compiler.internal.templates.TemplateDefinitionStore;\r
+import org.simantics.graph.query.Res;\r
 import org.simantics.graph.store.GraphStore;\r
+import org.simantics.graph.store.StatementCollision;\r
 import org.simantics.ltk.Problem;\r
 \r
 public class ReportCollisions implements Runnable {\r
+    GraphCompilerPreferences preferences;\r
        Collection<Problem> problems;\r
        GraphStore store;\r
 \r
        public ReportCollisions(\r
-                       Collection<Problem> problems,\r
+                       GraphCompilerPreferences preferences, Collection<Problem> problems,\r
                        GraphStore store) {\r
+           this.preferences = preferences;\r
                this.problems = problems;\r
                this.store = store;\r
        }\r
        \r
+       private static String abbreviateURI(Res res) {\r
+           if(res == null)\r
+               return "null";\r
+           String uri = res.toString();\r
+           return uri.replace("http://www.simantics.org/", "");\r
+       }\r
+       \r
        @Override\r
        public void run() {\r
                LocationStore locations = store.getStore(LocationStore.class);\r
@@ -32,7 +44,16 @@ public class ReportCollisions implements Runnable {
                for(int c : store.getStore(TemplateDefinitionStore.class).getCollisions().toArray())\r
                        problems.add(new Problem(\r
                                        locations.getLocation(c), \r
-                                       "Two tempalate definitions are given for the same resource."));\r
+                                       "Two template definitions are given for the same resource."));\r
+               if(preferences.validate)\r
+                   for(StatementCollision collision : store.statements.getCollisions()) {\r
+                       problems.add(new Problem(\r
+                               locations.getLocation(collision.subject), \r
+                               "The same statement is defined " + collision.count + " times: " +\r
+                                       abbreviateURI(store.idToRes(collision.subject)) + ", " +\r
+                                       abbreviateURI(store.idToRes(collision.predicate)) + ", " +\r
+                                       abbreviateURI(store.idToRes(collision.object))));\r
+                   }\r
        }\r
        \r
 }\r
index 448d8556ba68551fe90af862bff6a30fbaad0763..5f4404acc377654f15cb79c95be04028b3fc09a7 100644 (file)
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: Transferable Graph Runtime
 Bundle-SymbolicName: org.simantics.graph
-Bundle-Version: 1.1.11.qualifier
+Bundle-Version: 1.1.15.qualifier
 Bundle-RequiredExecutionEnvironment: JavaSE-1.8
 Require-Bundle: org.simantics.databoard;bundle-version="0.5.1",
  gnu.trove3;bundle-version="3.0.0";visibility:=reexport,
diff --git a/bundles/org.simantics.graph/src/org/simantics/graph/store/StatementCollision.java b/bundles/org.simantics.graph/src/org/simantics/graph/store/StatementCollision.java
new file mode 100644 (file)
index 0000000..3bcd4f0
--- /dev/null
@@ -0,0 +1,16 @@
+package org.simantics.graph.store;\r
+\r
+public class StatementCollision {\r
+    public final int subject;\r
+    public final int predicate;\r
+    public final int object;\r
+    public final int count;\r
+    \r
+    public StatementCollision(int subject, int predicate, int object, int count) {\r
+        this.subject = subject;\r
+        this.predicate = predicate;\r
+        this.object = object;\r
+        this.count = count;\r
+    }\r
+}\r
+\r
index 96b6b1c725ee61a073ac2b7cebf949ff94def4bc..02dfddde81085d06727089863185c4e10345d54f 100644 (file)
@@ -1,5 +1,7 @@
 package org.simantics.graph.store;\r
 \r
+import java.util.ArrayList;\r
+\r
 import gnu.trove.list.array.TIntArrayList;\r
 import gnu.trove.map.hash.TIntIntHashMap;\r
 import gnu.trove.map.hash.TIntObjectHashMap;\r
@@ -10,7 +12,7 @@ import gnu.trove.set.hash.TIntHashSet;
 \r
 /**\r
  * Statement store indexes a set of statements. \r
- * @author Hannu Niemistö\r
+ * @author Hannu Niemistö\r
  */\r
 public class StatementStore implements IStore {\r
        \r
@@ -287,5 +289,64 @@ public class StatementStore implements IStore {
                        }\r
                });\r
                return statements.toArray();\r
+       }\r
+\r
+       private static class CollisionSubjectProcedure implements TIntObjectProcedure<TIntObjectHashMap<TIntArrayList>> {\r
+           CollisionPredicateProcedure predicateProcedure;\r
+\r
+           public CollisionSubjectProcedure(CollisionPredicateProcedure predicateProcedure) {\r
+               this.predicateProcedure = predicateProcedure;\r
+           }\r
+\r
+           @Override\r
+           public boolean execute(int subject, TIntObjectHashMap<TIntArrayList> predicateObjectMap) {\r
+               predicateProcedure.subject = subject;\r
+               predicateObjectMap.forEachEntry(predicateProcedure);\r
+               return true;\r
+           }\r
+\r
+       }\r
+\r
+       private static class CollisionPredicateProcedure implements TIntObjectProcedure<TIntArrayList> {\r
+        ArrayList<StatementCollision> collisions;\r
+           int subject;\r
+           \r
+           public CollisionPredicateProcedure(ArrayList<StatementCollision> collisions) {\r
+            this.collisions = collisions;\r
+        }\r
+           \r
+        @Override\r
+           public boolean execute(int predicate, TIntArrayList objects) {\r
+               if(objects.size() > 1) {\r
+                   objects.sort();\r
+                   int oldObject = objects.get(0);\r
+                   int collisionCount = 1;\r
+                   for(int i=1;i<objects.size();++i) {\r
+                       int curObject = objects.get(i);\r
+                       if(curObject == oldObject) {\r
+                           ++collisionCount;\r
+                       }\r
+                       else {\r
+                           if(collisionCount > 1) {\r
+                               collisions.add(new StatementCollision(subject, predicate, oldObject, collisionCount));\r
+                               collisionCount = 1;\r
+                           }\r
+                           oldObject = curObject;\r
+                       }\r
+                   }\r
+                if(collisionCount > 1)\r
+                    collisions.add(new StatementCollision(subject, predicate, oldObject, collisionCount));\r
+               }\r
+               return true;\r
+           }\r
+\r
+       }\r
+\r
+       public ArrayList<StatementCollision> getCollisions() {\r
+           ArrayList<StatementCollision> collisions = new ArrayList<StatementCollision>();\r
+           CollisionPredicateProcedure predicateProcedure = new CollisionPredicateProcedure(collisions);\r
+           CollisionSubjectProcedure subjectProcedure = new CollisionSubjectProcedure(predicateProcedure);\r
+           statements.forEachEntry(subjectProcedure);\r
+           return collisions;\r
        }       \r
 }\r
index 67308b2c610c2c22bff50f2642ee19e7a84f2fe0..4ef17332aa3054ad4587b64b20b8f13f749fbef3 100644 (file)
Binary files a/bundles/org.simantics.graphfile.ontology/graph.tg and b/bundles/org.simantics.graphfile.ontology/graph.tg differ
index 6cf33fb32e72e613f1756dfb4028b3fa8a989d53..b1dabee38291b97f08cc3a18bee4e65b1e550530 100644 (file)
@@ -1,9 +1,5 @@
 <?xml version="1.0" encoding="UTF-8"?>\r
 <classpath>\r
-       <classpathentry exported="true" kind="lib" path="fontbox-1.8.10.jar"/>\r
-       <classpathentry exported="true" kind="lib" path="jempbox-1.8.10.jar"/>\r
-       <classpathentry exported="true" kind="lib" path="xmpbox-1.8.10.jar"/>\r
-       <classpathentry exported="true" kind="lib" path="pdfbox-1.8.10.jar" sourcepath="pdfbox-1.8.10-src.zip"/>\r
        <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.7"/>\r
        <classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>\r
        <classpathentry kind="src" path="src"/>\r
index d9c6d4585cdfce2d63b0c517e961b8fb8a7fb68e..a20f8e475c2eb875f8c9e223b126e38380ef3122 100644 (file)
@@ -8,11 +8,8 @@ Bundle-Vendor: Semantum Oy
 Require-Bundle: org.eclipse.core.runtime,
  org.eclipse.help.base,
  org.apache.commons.logging;bundle-version="1.0.4",
- org.bouncycastle;bundle-version="1.47.0"
+ org.apache.pdfbox;bundle-version="2.0.2",
+ org.apache.pdfbox.fontbox;bundle-version="2.0.2"
 Bundle-RequiredExecutionEnvironment: JavaSE-1.7
 Bundle-ActivationPolicy: lazy
-Bundle-ClassPath: .,
- xmpbox-1.8.10.jar,
- jempbox-1.8.10.jar,
- fontbox-1.8.10.jar,
- pdfbox-1.8.10.jar
+Bundle-ClassPath: .
index 7904aba265b7750d7c55bb8ed2f9715953d706e5..c125ab4d7c2315faaddc837eda2730b8bc50d894 100644 (file)
@@ -2,9 +2,5 @@ source.. = src/
 output.. = bin/\r
 bin.includes = META-INF/,\\r
                .,\\r
-               plugin.xml,\\r
-               pdfbox-1.8.10.jar,\\r
-               fontbox-1.8.10.jar,\\r
-               jempbox-1.8.10.jar,\\r
-               xmpbox-1.8.10.jar\r
+               plugin.xml\r
 source.. = src/\r
diff --git a/bundles/org.simantics.help.base/fontbox-1.8.10.jar b/bundles/org.simantics.help.base/fontbox-1.8.10.jar
deleted file mode 100644 (file)
index 3284950..0000000
Binary files a/bundles/org.simantics.help.base/fontbox-1.8.10.jar and /dev/null differ
diff --git a/bundles/org.simantics.help.base/jempbox-1.8.10.jar b/bundles/org.simantics.help.base/jempbox-1.8.10.jar
deleted file mode 100644 (file)
index 48cc633..0000000
Binary files a/bundles/org.simantics.help.base/jempbox-1.8.10.jar and /dev/null differ
diff --git a/bundles/org.simantics.help.base/pdfbox-1.8.10-src.zip b/bundles/org.simantics.help.base/pdfbox-1.8.10-src.zip
deleted file mode 100644 (file)
index e05aa63..0000000
Binary files a/bundles/org.simantics.help.base/pdfbox-1.8.10-src.zip and /dev/null differ
diff --git a/bundles/org.simantics.help.base/pdfbox-1.8.10.jar b/bundles/org.simantics.help.base/pdfbox-1.8.10.jar
deleted file mode 100644 (file)
index 87bb9a7..0000000
Binary files a/bundles/org.simantics.help.base/pdfbox-1.8.10.jar and /dev/null differ
index c12e56b4d56e1c76bfa8e788097900dc2dac7c3e..43fac6c7079a114d92e99e690824c46262967969 100644 (file)
@@ -1,14 +1,14 @@
 package org.simantics.help.base.internal;\r
 \r
 import java.io.File;\r
-import java.io.FileInputStream;\r
 import java.io.IOException;\r
 \r
 import org.apache.pdfbox.cos.COSDocument;\r
+import org.apache.pdfbox.io.RandomAccessFile;\r
 import org.apache.pdfbox.pdfparser.PDFParser;\r
 import org.apache.pdfbox.pdmodel.PDDocument;\r
 import org.apache.pdfbox.pdmodel.PDDocumentInformation;\r
-import org.apache.pdfbox.util.PDFTextStripper;\r
+import org.apache.pdfbox.text.PDFTextStripper;\r
 import org.eclipse.help.search.ISearchDocument;\r
 \r
 /**\r
@@ -17,7 +17,7 @@ import org.eclipse.help.search.ISearchDocument;
 public class PDFUtil {\r
 \r
     public static void stripText(File fromPdf, ISearchDocument doc) throws IOException {\r
-        PDFParser parser = new PDFParser(new FileInputStream(fromPdf));\r
+        PDFParser parser = new PDFParser(new RandomAccessFile(fromPdf, "r"));\r
         parser.parse();\r
 \r
         try (COSDocument cosDoc = parser.getDocument()) {\r
diff --git a/bundles/org.simantics.help.base/xmpbox-1.8.10.jar b/bundles/org.simantics.help.base/xmpbox-1.8.10.jar
deleted file mode 100644 (file)
index 7a8a465..0000000
Binary files a/bundles/org.simantics.help.base/xmpbox-1.8.10.jar and /dev/null differ
index 4cb13d035f016dfaef9a978ef8d21619378cd34d..376ab173e1e9a9c5afaaa0058ee2c7a4b57f40ed 100644 (file)
Binary files a/bundles/org.simantics.help.ontology/graph.tg and b/bundles/org.simantics.help.ontology/graph.tg differ
index 94eee91ae2c07c17c3e2ef28b944b2e9d02021be..0eb99caa0b6be6f06fe5d717b8dcdee6ad337bef 100644 (file)
Binary files a/bundles/org.simantics.image.ontology/graph.tg and b/bundles/org.simantics.image.ontology/graph.tg differ
index c6c565455b4e79f9f8aed8d27b1e65ee7b919651..c4ff92401b4085e1f2cac93183306ebb6268e702 100644 (file)
Binary files a/bundles/org.simantics.image2.ontology/graph.tg and b/bundles/org.simantics.image2.ontology/graph.tg differ
index d0fbbf128039fa02dc7b055101984e8005dc6f84..23e0bb966130559958b64a621b58761346a6a198 100644 (file)
Binary files a/bundles/org.simantics.issues.ontology/graph.tg and b/bundles/org.simantics.issues.ontology/graph.tg differ
index 0397e7e78ab360a777927f20fffb53574d5b4c2c..50682c6e64dc576118bb4f8b50784bc41dfe575e 100644 (file)
Binary files a/bundles/org.simantics.issues.ui.ontology/graph.tg and b/bundles/org.simantics.issues.ui.ontology/graph.tg differ
index 9be2e4f0b418e8080fde4723479456172e8a5a4a..b12db40eb5f15f257efd60db944f405b84c7e92a 100644 (file)
Binary files a/bundles/org.simantics.layer0/graph.tg and b/bundles/org.simantics.layer0/graph.tg differ
index 6463535e85d520ba780ffb50c8a1e9d93f3286f2..7a0e38445a4a8138b940d618767600c722c0b268 100644 (file)
Binary files a/bundles/org.simantics.layer0x.ontology/graph.tg and b/bundles/org.simantics.layer0x.ontology/graph.tg differ
index 99da36f06795ffd42c8f6ff21f77eb4bd8706ee5..0be6753252f91fbc3137097b8774f7b1de8bfaad 100644 (file)
@@ -5,5 +5,5 @@ Bundle-SymbolicName: org.simantics.ltk.antlr
 Bundle-Version: 1.1.10.qualifier
 Bundle-RequiredExecutionEnvironment: JavaSE-1.8
 Require-Bundle: org.simantics.ltk;bundle-version="1.0.0";visibility:=reexport,
- org.antlr.runtime;bundle-version="3.2.0";visibility:=reexport
+ org.antlr.runtime;bundle-version="[3.2.0,4.0.0)";visibility:=reexport
 Export-Package: org.simantics.ltk.antlr
index 1295295928ec66b1bfa9f7ad1f3ff3e39407912f..bc546631a437755908396e72624c0454784c3024 100644 (file)
Binary files a/bundles/org.simantics.modeling.ontology/graph.tg and b/bundles/org.simantics.modeling.ontology/graph.tg differ
index 85eb75fef050c776a26b0cea4aa8542b652fcb01..848ede327bb5cfaed5783c55482cc5d2935cd465 100644 (file)
Binary files a/bundles/org.simantics.modeling.template2d.ontology/graph.tg and b/bundles/org.simantics.modeling.template2d.ontology/graph.tg differ
index 10ad43d9a99f44938ef9e44fa0bfb698039fbbea..76aec39f30184755d4f1f67103611eabc95c21f4 100644 (file)
@@ -32,7 +32,7 @@ Require-Bundle: org.simantics.project;bundle-version="1.0.0",
  org.simantics.issues;bundle-version="1.1.0",
  org.simantics.document;bundle-version="1.0.0",
  org.simantics.graph.db;bundle-version="1.1.9",
- org.bouncycastle;bundle-version="1.47.0",
+ org.bouncycastle.bcprov-jdk14;bundle-version="1.38.0",
  org.simantics.image2.ontology;bundle-version="1.1.0",
  org.simantics.scl.compiler;bundle-version="0.4.0",
  org.simantics.scl.compiler.dummy;bundle-version="1.0.0",
index 03b6a0594954ba84532a15ca1ae6cd35c535b4d8..45c711d9cf908a0312c80be0e3585ef25befe1e0 100644 (file)
Binary files a/bundles/org.simantics.platform.ui.ontology/graph.tg and b/bundles/org.simantics.platform.ui.ontology/graph.tg differ
index 85b93158488f7a907fb996f6831f1809e9963dca..8f8ebc45bb597132de2f5fd0259702c0b9111e95 100644 (file)
Binary files a/bundles/org.simantics.project.ontology/graph.tg and b/bundles/org.simantics.project.ontology/graph.tg differ
index bbeb7541e0892e86c1eb45c8a8c9c5a8c168bed5..8957a21679b45607a2daab8d235b7e90545c93c6 100644 (file)
@@ -33,8 +33,8 @@ public class ServerManagerFactory {
     public static ServerManager create(String databaseDriverId, String address) throws IOException, DatabaseException {\r
         Driver driver = Manager.getDriver(databaseDriverId);\r
         if (driver == null)\r
-            throw new IllegalArgumentException("Database driver for ID " + databaseDriverId + " Could not be found!");\r
-        System.out.println("ServerManagerFactory.create called with databaseId=" + databaseDriverId + " and driver is " + driver.toString());\r
+            throw new IllegalArgumentException("Database driver with ID " + databaseDriverId + " could not be found!");\r
+        System.out.println("ServerManagerFactory.create called with id " + databaseDriverId + ", driver is " + driver.toString());\r
         DatabaseUserAgent agent = Manager.getUserAgent(databaseDriverId);\r
         if (agent != null)\r
             driver.setDatabaseUserAgent(address, agent);\r
index 2991c6e55e544fc30bcbfbd99567478c266c2dd0..6033385d45cfab488cede49d2bbc52cdf5e4d9b0 100644 (file)
Binary files a/bundles/org.simantics.scenegraph.ontology/graph.tg and b/bundles/org.simantics.scenegraph.ontology/graph.tg differ
index 70adf524b501592229f21d1f8ed201149f9f7e72..f9b5efc3bd3eb7eb3e68dc148f9f4d0496648a3f 100644 (file)
@@ -49,7 +49,7 @@ public class SubSolver {
         reduceChains();\r
         propagateUpperBounds();\r
         checkLowerBounds();\r
-        errorFromUnsolvedEquations();\r
+        //errorFromUnsolvedEquations();\r
         //System.out.println("--");\r
         //print();\r
     }\r
index 5c8f59bb2dcee005fc5c006f72c4f453fa987990..39f0ac721ee2a2bd32d366a9e17123e6c9ab6e74 100644 (file)
@@ -182,7 +182,7 @@ public class ExpressionEvaluator {
         final Environment environment = runtimeEnvironment.getEnvironment();
         
         // Parse expression
-        if(expressionText != null) {
+        if(expressionText != null && !expressionText.trim().isEmpty()) {
             try {
                 switch(parseMode) {
                 case BLOCK: {
index 950e59b42ff0238825f5f0037490c67dd021d843..52095a10169c88160aac11f4b9d3392cc79806e4 100644 (file)
@@ -7,5 +7,6 @@ Bundle-RequiredExecutionEnvironment: JavaSE-1.7
 Require-Bundle: org.simantics.scl.runtime;bundle-version="0.4.0",
  org.simantics.scl.osgi;bundle-version="1.0.4",
  org.jdom2;bundle-version="2.0.6",
- org.junit;bundle-version="4.12.0";resolution:=optional
+ org.junit;bundle-version="4.12.0";resolution:=optional,
+ com.fasterxml.jackson.core.jackson-core;bundle-version="2.8.2"
 Bundle-ClassPath: .
diff --git a/bundles/org.simantics.scl.data/scl/Data/Json.md b/bundles/org.simantics.scl.data/scl/Data/Json.md
new file mode 100644 (file)
index 0000000..fe8e721
--- /dev/null
@@ -0,0 +1,43 @@
+# Basic functions\r
+\r
+::value[toJsonString, fromJsonString]\r
+\r
+# Supported value types\r
+\r
+This module supports the following value types:\r
+\r
+```\r
+instance Json String\r
+instance Json Boolean\r
+instance Json Short\r
+instance Json Integer\r
+instance Json Long\r
+instance Json Float\r
+instance Json Double\r
+\r
+instance (Json a) => Json [a]\r
+instance (Json a) => Json (Maybe a)\r
+\r
+instance Json ()\r
+instance (Json a, Json b) => Json (a, b)\r
+instance (Json a, Json b, Json c) => Json (a, b, c)\r
+instance (Json a, Json b, Json c, Json d) => Json (a, b, c, d)\r
+instance (Json a, Json b, Json c, Json d, Json e) => Json (a, b, c, d, e) \r
+\r
+instance Json Json\r
+```\r
+\r
+# Generic JSON type\r
+\r
+::data[Json, JsonField]\r
+\r
+# Adding support for additional value types\r
+\r
+::data[JsonGenerator, JsonParser]\r
+::class[Json]\r
+\r
+When adding support for a new value type, it is enough to implement `toJson` and `fromJson`; the class provides default implementations of `writeJson` and `readJson` in terms of them.\r
+\r
+# Undocumented entities\r
+\r
+::undocumented[]
\ No newline at end of file
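Taken together, the two basic functions documented above form a round trip for every supported type. A minimal usage sketch in SCL, modelled on the commented-out test code at the end of Data/Json.scl below (the printed results are assumptions, not output captured from this commit):

```
import "Data/Json"

// Round trip through toJsonString / fromJsonString
demo :: <Proc> ()
demo = do
    print (toJsonString ("a", "b"))       // expected: ["a","b"]
    j = fromJsonString "[1,2,3]" :: Json  // decode into the generic Json type
    print "\(j)"                          // expected: JsonArray [JsonLong 1, JsonLong 2, JsonLong 3]
```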
diff --git a/bundles/org.simantics.scl.data/scl/Data/Json.scl b/bundles/org.simantics.scl.data/scl/Data/Json.scl
new file mode 100644 (file)
index 0000000..fb034cd
--- /dev/null
@@ -0,0 +1,421 @@
+import "StandardLibrary"\r
+import "Data/Writer"\r
+import "JavaBuiltin" as Java\r
+\r
+importJava "com.fasterxml.jackson.core.JsonGenerator" where\r
+    data JsonGenerator\r
+\r
+@private\r
+importJava "com.fasterxml.jackson.core.JsonGenerator" where\r
+    writeNull :: JsonGenerator -> <Proc> ()\r
+    \r
+    writeStartArray :: JsonGenerator -> <Proc> ()\r
+    @JavaName writeStartArray\r
+    writeStartArrayN :: JsonGenerator -> Integer -> <Proc> ()\r
+    writeEndArray :: JsonGenerator -> <Proc> ()\r
+    \r
+    writeStartObject :: JsonGenerator -> <Proc> ()\r
+    writeFieldName :: JsonGenerator -> String -> <Proc> ()\r
+    writeEndObject :: JsonGenerator -> <Proc> ()\r
+\r
+    writeBoolean :: JsonGenerator -> Boolean -> <Proc> ()\r
+    \r
+    writeString :: JsonGenerator -> String -> <Proc> ()\r
+    \r
+    @JavaName writeNumber\r
+    writeNumberDouble :: JsonGenerator -> Double -> <Proc> ()\r
+    @JavaName writeNumber\r
+    writeNumberInteger :: JsonGenerator -> Integer -> <Proc> ()\r
+    @JavaName writeNumber\r
+    writeNumberLong :: JsonGenerator -> Long -> <Proc> ()\r
+    @JavaName writeNumber\r
+    writeNumberShort :: JsonGenerator -> Short -> <Proc> ()\r
+    @JavaName writeNumber\r
+    writeNumberFloat :: JsonGenerator -> Float -> <Proc> ()\r
+    \r
+    @JavaName close\r
+    closeGenerator :: JsonGenerator -> <Proc> ()\r
+\r
+@private\r
+importJava "com.fasterxml.jackson.core.JsonToken" where\r
+    data JsonToken\r
+    END_ARRAY :: JsonToken\r
+    END_OBJECT :: JsonToken\r
+    FIELD_NAME :: JsonToken\r
+    NOT_AVAILABLE :: JsonToken\r
+    START_ARRAY :: JsonToken\r
+    START_OBJECT :: JsonToken\r
+    VALUE_EMBEDDED_OBJECT :: JsonToken\r
+    VALUE_FALSE :: JsonToken\r
+    VALUE_NULL :: JsonToken\r
+    VALUE_NUMBER_FLOAT :: JsonToken\r
+    VALUE_NUMBER_INT :: JsonToken\r
+    VALUE_STRING :: JsonToken\r
+    VALUE_TRUE :: JsonToken\r
+instance Eq JsonToken where\r
+    (==) = Java.equals\r
+\r
+importJava "com.fasterxml.jackson.core.JsonParser" where\r
+    data JsonParser\r
+\r
+@private\r
+importJava "com.fasterxml.jackson.core.JsonParser" where\r
+    nextToken :: JsonParser -> <Proc> JsonToken\r
+    currentToken :: JsonParser -> <Proc> JsonToken\r
+    getDoubleValue :: JsonParser -> <Proc> Double\r
+    getIntValue :: JsonParser -> <Proc> Integer\r
+    getText :: JsonParser -> <Proc> String\r
+    getShortValue :: JsonParser -> <Proc> Short\r
+    getFloatValue :: JsonParser -> <Proc> Float\r
+    getLongValue :: JsonParser -> <Proc> Long\r
+    nextFieldName :: JsonParser -> <Proc> Maybe String\r
+\r
+@private\r
+importJava "com.fasterxml.jackson.core.JsonFactory" where\r
+    data JsonFactory\r
+    \r
+    @JavaName "<init>"\r
+    createJsonFactory :: <Proc> JsonFactory\r
+    \r
+    @JavaName createGenerator\r
+    createWriterGenerator :: JsonFactory -> Writer -> <Proc> JsonGenerator\r
+    \r
+    @JavaName createParser\r
+    createStringParser :: JsonFactory -> String -> <Proc> JsonParser\r
+    \r
+@private\r
+defaultFactory = createJsonFactory\r
+\r
+@private\r
+@inline\r
+assertStartArray :: JsonParser -> <Proc> ()\r
+assertStartArray p = if currentToken p == START_ARRAY\r
+                     then ()\r
+                     else fail "Expected START_ARRAY token."\r
+\r
+@private\r
+@inline\r
+assertEndArray :: JsonParser -> <Proc> ()\r
+assertEndArray p = if nextToken p == END_ARRAY\r
+                   then ()\r
+                   else fail "Expected END_ARRAY token."\r
+\r
+// *** Json type class ********************************************************\r
+\r
+class Json a where\r
+    writeJson :: JsonGenerator -> a -> <Proc> ()\r
+    readJson :: JsonParser -> <Proc> a\r
+    toJson :: a -> Json\r
+    fromJson :: Json -> a\r
+    \r
+    writeJson g v = writeJson g (toJson v)\r
+    readJson p = fromJson (readJson p) \r
+\r
+@private\r
+readNextJson :: Json a => JsonParser -> <Proc> a\r
+readNextJson p = do\r
+    nextToken p\r
+    readJson p\r
+\r
+"""\r
+Converts the value to a string encoded with JSON\r
+"""\r
+toJsonString :: Json a => a -> String\r
+toJsonString v = runProc do\r
+    writer = createStringWriter\r
+    generator = createWriterGenerator defaultFactory (toWriter writer)\r
+    writeJson generator v\r
+    closeGenerator generator\r
+    resultOfStringWriter writer\r
+\r
+"""\r
+Parses a JSON-encoded string into a value\r
+"""\r
+fromJsonString :: Json a => String -> a\r
+fromJsonString str = runProc do\r
+    parser = createStringParser defaultFactory str\r
+    readNextJson parser\r
+\r
+instance Json String where\r
+    writeJson = writeString\r
+    readJson = getText\r
+    toJson = JsonString\r
+    fromJson (JsonString value) = value\r
+\r
+instance Json Boolean where\r
+    writeJson = writeBoolean\r
+    readJson p =\r
+        if currentToken p == VALUE_TRUE\r
+        then True\r
+        else False\r
+    toJson = JsonBoolean\r
+    fromJson (JsonBoolean value) = value\r
+    \r
+instance Json Double where\r
+    writeJson = writeNumberDouble\r
+    readJson = getDoubleValue\r
+    toJson = JsonDouble\r
+    fromJson (JsonDouble value) = value\r
+\r
+instance Json Float where\r
+    writeJson = writeNumberFloat\r
+    readJson = getFloatValue\r
+    toJson = JsonDouble . toDouble\r
+    fromJson (JsonDouble value) = fromDouble value\r
+\r
+instance Json Integer where\r
+    writeJson = writeNumberInteger\r
+    readJson = getIntValue\r
+    toJson = JsonLong . fromInteger\r
+    fromJson (JsonLong value) = Java.l2i value\r
+\r
+instance Json Long where\r
+    writeJson = writeNumberLong\r
+    readJson = getLongValue\r
+    toJson = JsonLong\r
+    fromJson (JsonLong value) = value\r
+\r
+instance Json Short where\r
+    writeJson = writeNumberShort\r
+    readJson = getShortValue\r
+    toJson = JsonLong . Java.i2l . Java.s2i\r
+    fromJson (JsonLong value) = Java.i2s (Java.l2i value) \r
+    \r
+instance (Json a) => Json (Maybe a) where\r
+    writeJson g (Just v) =  writeJson g v\r
+    writeJson g Nothing = writeNull g\r
+    readJson p = \r
+        if currentToken p == VALUE_NULL\r
+        then Nothing\r
+        else Just (readJson p)\r
+    toJson (Just value) = toJson value\r
+    toJson Nothing = JsonNull\r
+    fromJson JsonNull = Nothing\r
+    fromJson json = Just (fromJson json)\r
+\r
+instance (Json a) => Json [a] where\r
+    writeJson g l = do\r
+        writeStartArray g\r
+        iter (writeJson g) l\r
+        writeEndArray g\r
+    readJson p = MList.freeze result\r
+      where\r
+        result = MList.create ()\r
+        assertStartArray p\r
+        while (nextToken p != END_ARRAY)\r
+            (MList.add result $ readJson p)\r
+    toJson l = JsonArray (map toJson l)\r
+    fromJson (JsonArray l) = map fromJson l\r
+\r
+instance Json () where\r
+    writeJson g _ = do\r
+        writeStartArray g\r
+        writeEndArray g\r
+    readJson p = do\r
+        assertStartArray p\r
+        assertEndArray p\r
+        ()\r
+    toJson _ = JsonArray []\r
+    fromJson (JsonArray []) = ()\r
+\r
+instance (Json a, Json b) => Json (a, b) where\r
+    writeJson g (a, b) = do\r
+        writeStartArray g\r
+        writeJson g a\r
+        writeJson g b\r
+        writeEndArray g\r
+    readJson p = (a, b)\r
+      where\r
+        assertStartArray p\r
+        a = readNextJson p\r
+        b = readNextJson p\r
+        assertEndArray p\r
+    toJson (a, b) = JsonArray [toJson a, toJson b]\r
+    fromJson (JsonArray [a, b]) = (fromJson a, fromJson b)        \r
+\r
+instance (Json a, Json b, Json c) => Json (a, b, c) where\r
+    writeJson g (a, b, c) = do\r
+        writeStartArray g\r
+        writeJson g a\r
+        writeJson g b\r
+        writeJson g c\r
+        writeEndArray g\r
+    readJson p = (a, b, c)\r
+      where\r
+        assertStartArray p\r
+        a = readNextJson p\r
+        b = readNextJson p\r
+        c = readNextJson p\r
+        assertEndArray p\r
+    toJson (a, b, c) = JsonArray [toJson a, toJson b, toJson c]\r
+    fromJson (JsonArray [a, b, c]) = (fromJson a, fromJson b, fromJson c)        \r
+\r
+instance (Json a, Json b, Json c, Json d) => Json (a, b, c, d) where\r
+    writeJson g (a, b, c, d) = do\r
+        writeStartArray g\r
+        writeJson g a\r
+        writeJson g b\r
+        writeJson g c\r
+        writeJson g d\r
+        writeEndArray g\r
+    readJson p = (a, b, c, d)\r
+      where\r
+        assertStartArray p\r
+        a = readNextJson p\r
+        b = readNextJson p\r
+        c = readNextJson p\r
+        d = readNextJson p\r
+        assertEndArray p\r
+    toJson (a, b, c, d) = JsonArray [toJson a, toJson b, toJson c, toJson d]\r
+    fromJson (JsonArray [a, b, c, d]) = (fromJson a, fromJson b, fromJson c, fromJson d)        \r
+\r
+instance (Json a, Json b, Json c, Json d, Json e) => Json (a, b, c, d, e) where\r
+    writeJson g (a, b, c, d, e) = do\r
+        writeStartArray g\r
+        writeJson g a\r
+        writeJson g b\r
+        writeJson g c\r
+        writeJson g d\r
+        writeJson g e\r
+        writeEndArray g\r
+    readJson p = (a, b, c, d, e)\r
+      where\r
+        assertStartArray p\r
+        a = readNextJson p\r
+        b = readNextJson p\r
+        c = readNextJson p\r
+        d = readNextJson p\r
+        e = readNextJson p\r
+        assertEndArray p\r
+    toJson (a, b, c, d, e) = JsonArray [toJson a, toJson b, toJson c, toJson d, toJson e]\r
+    fromJson (JsonArray [a, b, c, d, e]) = (fromJson a, fromJson b, fromJson c, fromJson d, fromJson e)        \r
+\r
+data Json =\r
+    JsonString String\r
+  | JsonDouble Double\r
+  | JsonLong Long\r
+  | JsonArray [Json]\r
+  | JsonBoolean Boolean\r
+  | JsonNull\r
+  | JsonObject [JsonField]\r
+data JsonField = JsonField String Json\r
+  \r
+deriving instance Show Json\r
+deriving instance Eq Json\r
+deriving instance Show JsonField\r
+deriving instance Eq JsonField\r
+\r
+instance Json Json where\r
+    writeJson g (JsonString value) = writeString g value\r
+    writeJson g (JsonDouble value) = writeNumberDouble g value\r
+    writeJson g (JsonLong value) = writeNumberLong g value\r
+    writeJson g (JsonBoolean value) = writeBoolean g value\r
+    writeJson g JsonNull = writeNull g\r
+    writeJson g (JsonArray values) = do\r
+        writeStartArray g\r
+        iter (writeJson g) values\r
+        writeEndArray g\r
+    writeJson g (JsonObject fields) = do\r
+        writeStartObject g\r
+        iter (\(JsonField name value) -> do\r
+            writeFieldName g name\r
+            writeJson g value) fields\r
+        writeEndObject g\r
+        \r
+    readJson p = do\r
+        token = currentToken p\r
+        if token == VALUE_STRING\r
+        then JsonString (getText p)\r
+        else if token == VALUE_NUMBER_FLOAT\r
+        then JsonDouble (getDoubleValue p)\r
+        else if token == VALUE_NUMBER_INT\r
+        then JsonLong (getLongValue p)\r
+        else if token == VALUE_TRUE\r
+        then JsonBoolean True\r
+        else if token == VALUE_FALSE\r
+        then JsonBoolean False\r
+        else if token == VALUE_NULL\r
+        then JsonNull\r
+        else if token == START_ARRAY\r
+        then do\r
+            result = MList.create ()\r
+            while (nextToken p != END_ARRAY)\r
+                (MList.add result $ readJson p)\r
+            JsonArray (MList.freeze result)\r
+        else if token == START_OBJECT\r
+        then do\r
+            result = MList.create ()\r
+            readJsonObjectContents result p\r
+            JsonObject (MList.freeze result)\r
+        else fail "Unsupported token type." \r
+    toJson = id\r
+    fromJson = id\r
+\r
+@private\r
+readJsonObjectContents :: MList.T JsonField -> JsonParser -> <Proc> ()\r
+readJsonObjectContents result p =\r
+    match nextFieldName p with\r
+        Just name -> do\r
+            MList.add result $ JsonField name (readNextJson p)\r
+            readJsonObjectContents result p\r
+        Nothing -> ()\r
+\r
+/*\r
+@private\r
+makeTypeEqual :: a -> a -> ()\r
+makeTypeEqual _ _ = ()\r
+\r
+@private\r
+testValue :: Json a => Show a => Eq a => a -> <Proc> ()\r
+testValue v1 = do\r
+    v2 = toJsonString v1\r
+    v3 = fromJsonString v2\r
+    makeTypeEqual v1 v3\r
+    print "\(v1) -> \(v2) -> \(v3)"\r
+    if v1 != v3\r
+    then fail "Values differ"\r
+    else ()\r
+\r
+testGenericJson :: String -> <Proc> ()\r
+testGenericJson v1 = do\r
+    v2 = fromJsonString v1 :: Json\r
+    v3 = toJsonString v2\r
+    print "\(v1) -> \(v2) -> \(v3)"\r
+    if v1 != v3\r
+    then fail "Values differ"\r
+    else ()\r
+\r
+testIt :: <Proc> ()\r
+testIt = do\r
+    testValue "asd"\r
+    testValue True\r
+    testValue False\r
+    testValue (123 :: Short)\r
+    testValue (123 :: Integer)    \r
+    testValue (123 :: Long)\r
+    testValue (123 :: Double)\r
+    testValue (123 :: Float)\r
+    testValue (Nothing :: Maybe String)\r
+    testValue (Just "asd") \r
+    testValue ["a", "b", "c"] \r
+    testValue [[],["a"],["b","c"]]\r
+    testValue ()\r
+    testValue ("a", "b")\r
+    testValue ("a", "b", "c")\r
+    testValue ("a", "b", "c", "d")\r
+    testValue [Just "a", Nothing]\r
+    testValue [("a", "b"), ("c", "d")]\r
+    testValue (("a", "b"), ("c", "d"))\r
+     \r
+    testGenericJson "\"asd\""\r
+    testGenericJson "123"\r
+    testGenericJson "123.0"\r
+    testGenericJson "true"\r
+    testGenericJson "false"\r
+    testGenericJson "null"\r
+    testGenericJson "[1,2,3]"\r
+    testGenericJson "[[1],[2,3],[]]"\r
+    testGenericJson "{}"\r
+    testGenericJson "{\"a\":123,\"b\":[]}"\r
+    testGenericJson "{\"a\":{}}"\r
+*/
\ No newline at end of file
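Because the `Json` class above defaults `writeJson` and `readJson` to go through `toJson` and `fromJson`, a new value type only needs those two. A hypothetical instance, for illustration only (the `Point` type is not part of this commit):

```
import "StandardLibrary"
import "Data/Json"

// Hypothetical type, for illustration only
data Point = Point Double Double

instance Json Point where
    toJson (Point x y) =
        JsonObject [JsonField "x" (toJson x), JsonField "y" (toJson y)]
    // Order-sensitive match, kept short for the sketch; a robust
    // instance would look the fields up by name
    fromJson (JsonObject [JsonField "x" x, JsonField "y" y]) =
        Point (fromJson x) (fromJson y)
```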
diff --git a/bundles/org.simantics.scl.data/scl/Data/Writer.scl b/bundles/org.simantics.scl.data/scl/Data/Writer.scl
new file mode 100644 (file)
index 0000000..4c526f2
--- /dev/null
@@ -0,0 +1,18 @@
+import "JavaBuiltin" as Java\r
+\r
+importJava "java.io.Writer" where\r
+    data Writer\r
+\r
+importJava "java.io.StringWriter" where\r
+    data StringWriter\r
+    \r
+    @JavaName "<init>"\r
+    createStringWriter :: <Proc> StringWriter\r
+\r
+    @JavaName toString\r
+    resultOfStringWriter :: StringWriter -> <Proc> String\r
+\r
+class WriterLike a where\r
+    toWriter :: a -> Writer\r
+instance WriterLike StringWriter where\r
+    toWriter = Java.unsafeCoerce
\ No newline at end of file
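`toJsonString` in Data/Json.scl uses this module in exactly this way: create a `StringWriter`, coerce it with `toWriter`, and read the accumulated text back with `resultOfStringWriter`. The bare pattern, as a sketch (the `collect` helper is hypothetical):

```
import "StandardLibrary"
import "Data/Writer"

// Hypothetical helper: run a writing action against a StringWriter
// and return everything it wrote
collect :: (Writer -> <Proc> ()) -> String
collect f = runProc do
    sw = createStringWriter
    f (toWriter sw)
    resultOfStringWriter sw
```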
diff --git a/bundles/org.simantics.scl.runtime/scl/Expressions/Equations.scl b/bundles/org.simantics.scl.runtime/scl/Expressions/Equations.scl
deleted file mode 100644 (file)
index e695a00..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-effect Equation\r
-    "equation"\r
-    "org.simantics.scl.runtime.equations.EquationContext"\r
-\r
-importJava "org.simantics.scl.runtime.equations.EquationContext" where\r
-    data EquationContext\r
-\r
-    listenEquationVariable :: String -> (a -> <Equation,Proc> ()) -> <Equation,Proc> ()\r
-    setEquationVariable :: String -> a -> <Equation> ()\r
-    applyEquationContext :: (<Equation,Proc> a) -> EquationContext -> <Proc> a \r
-    \r
-importJava "org.simantics.scl.runtime.equations.TestEquationContext" where\r
-    solveEquations :: (<Equation,e> a) -> <e> [(String,String)]
\ No newline at end of file
index b29b04a2fe42025c1538637f10080c921e4bf12f..84f8b91b6ef97751ea1f0a00b053ece6e2e8be63 100644 (file)
@@ -1266,7 +1266,7 @@ instance MonadZero Maybe where
     mzero = Nothing
 
 instance MonadOr Maybe where
-    morelse (Just a) _ = Just a
+    morelse a@(Just _) _ = a
     morelse _ b = b
 
 "`execJust v f` executes the function `f` with parameter value `x`, if `v=Just x`. If `v=Nothing`, the function does nothing."
index 6b43166e3d34a355802ba00da2df0f1f457f36de..76ecece9c5b3e59ce39c63d766d16b0836bcccb2 100644 (file)
@@ -15,7 +15,7 @@ include "MSet" as MSet
 include "MList" as MList
 include "MMultiMap" as MMultiMap
 include "Coercion"
-include "Json2"
+//include "Json2"
 
 include "IterN" as Extra
 
diff --git a/bundles/org.simantics.scl.runtime/src/org/simantics/scl/runtime/equations/EquationContext.java b/bundles/org.simantics.scl.runtime/src/org/simantics/scl/runtime/equations/EquationContext.java
deleted file mode 100644 (file)
index f20dd6f..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-package org.simantics.scl.runtime.equations;\r
-\r
-import org.simantics.scl.runtime.SCLContext;\r
-import org.simantics.scl.runtime.function.Function;\r
-import org.simantics.scl.runtime.tuple.Tuple0;\r
-\r
-public interface EquationContext {\r
-    void listenEquationVariable(String variableName, Function callback);\r
-    void setEquationVariable(String variableName, Object value);\r
-    \r
-    public static Object applyEquationContext(Function f, EquationContext equationContext) {\r
-        SCLContext context = SCLContext.getCurrent();\r
-        Object oldEquationContext = context.put("equation", equationContext);\r
-        try {\r
-            return f.apply(Tuple0.INSTANCE);\r
-        } finally {\r
-            context.put("equation", oldEquationContext);\r
-        }\r
-    }\r
-}\r
diff --git a/bundles/org.simantics.scl.runtime/src/org/simantics/scl/runtime/equations/TestEquationContext.java b/bundles/org.simantics.scl.runtime/src/org/simantics/scl/runtime/equations/TestEquationContext.java
deleted file mode 100644 (file)
index 856ef28..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-package org.simantics.scl.runtime.equations;\r
-\r
-import java.util.ArrayList;\r
-import java.util.Collections;\r
-import java.util.List;\r
-\r
-import org.simantics.scl.runtime.SCLContext;\r
-import org.simantics.scl.runtime.function.Function;\r
-import org.simantics.scl.runtime.tuple.Tuple0;\r
-import org.simantics.scl.runtime.tuple.Tuple2;\r
-\r
-import gnu.trove.map.hash.THashMap;\r
-import gnu.trove.procedure.TObjectObjectProcedure;\r
-\r
-public class TestEquationContext implements EquationContext {\r
-\r
-    public static final boolean TRACE = true;\r
-    \r
-    THashMap<String, Object> values = new THashMap<String, Object>(); \r
-    THashMap<String, ArrayList<Function>> listenerMap = new THashMap<String, ArrayList<Function>>(); \r
-\r
-    @Override\r
-    public void listenEquationVariable(String variableName, Function listener) {\r
-        if(TRACE)\r
-            System.out.println("listenEquationVariable(" + variableName + ", " + listener + ")");\r
-        if(values.containsKey(variableName)) {\r
-            Object value = values.get(variableName);\r
-            if(TRACE)\r
-                System.out.println("    apply " + value);\r
-            listener.apply(value);\r
-        }\r
-        else {\r
-            if(TRACE)\r
-                System.out.println("    add listener");\r
-            ArrayList<Function> listeners = listenerMap.get(variableName);\r
-            if(listeners == null) {\r
-                listeners = new ArrayList<Function>();\r
-                listenerMap.put(variableName, listeners);\r
-            }\r
-            listeners.add(listener);\r
-        }\r
-    }\r
-\r
-    @Override\r
-    public void setEquationVariable(String variableName, Object value) {\r
-        if(TRACE)\r
-            System.out.println("setEquationVariable(" + variableName + ", " + value + ")");\r
-        if(values.containsKey(variableName))\r
-            throw new IllegalStateException("Value for " + variableName + " already defined (oldValue=" + values.get(variableName) + \r
-                    ", newValue=" + value + ").");\r
-        values.put(variableName, value);\r
-        ArrayList<Function> listeners = listenerMap.remove(variableName);\r
-        SCLContext context = SCLContext.getCurrent();\r
-        if(listeners != null) {\r
-            Object oldEquationContex = context.put("equation", this);\r
-            try {\r
-                for(Function listener : listeners) {\r
-                    if(TRACE)\r
-                        System.out.println("    apply " + listener + " " + value);\r
-                    listener.apply(value);\r
-                }\r
-            } finally {\r
-                context.put("equation", oldEquationContex);\r
-            }\r
-        }\r
-    }\r
-    \r
-    public static List<Tuple2> solveEquations(Function f) {\r
-        TestEquationContext equationContext = new TestEquationContext();\r
-        SCLContext context = SCLContext.getCurrent();\r
-        Object oldEquationContext = context.put("equation", equationContext);\r
-        try {\r
-            f.apply(Tuple0.INSTANCE);\r
-        } finally {\r
-            context.put("equation", oldEquationContext);\r
-        }\r
-        ArrayList<Tuple2> result = new ArrayList<Tuple2>(equationContext.values.size());\r
-        equationContext.values.forEachEntry(new TObjectObjectProcedure<String, Object>() {\r
-            @Override\r
-            public boolean execute(String a, Object b) {\r
-                result.add(new Tuple2(a, String.valueOf(b)));\r
-                return true;\r
-            }\r
-        });\r
-        Collections.sort(result, (t1, t2) -> {\r
-            return ((String)t1.c0).compareTo((String)t2.c0);\r
-        });\r
-        return result;\r
-    }\r
-    \r
-    public THashMap<String, Object> getValues() {\r
-        return values;\r
-    }\r
-\r
-}\r
index 5ba15703ccd5f6bb0b4a082fbae24848f43b3671..a0f65152b08e65a3f0c9dc9bf89a485673d0f9a6 100644 (file)
Binary files a/bundles/org.simantics.selectionview.ui.ontology/graph.tg and b/bundles/org.simantics.selectionview.ui.ontology/graph.tg differ
index c54a35e394e64a41290888b12ed426caca1f94c4..d27b280711409f9457cfbab07aa1158e67904e7c 100644 (file)
Binary files a/bundles/org.simantics.silk.ontology/graph.tg and b/bundles/org.simantics.silk.ontology/graph.tg differ
index b16b4cf7336756f7ef9f19958b812b8a7e3c6e98..b818692608a3a85eece13d2ff9f0e39504106f6b 100644 (file)
Binary files a/bundles/org.simantics.simulation.ontology/graph.tg and b/bundles/org.simantics.simulation.ontology/graph.tg differ
index 094d9f6156e352dd1d2138277909dd9c98371fa4..35f47e6bdacca642183ae60c65b54a20fb1a019a 100644 (file)
Binary files a/bundles/org.simantics.softwareconfiguration.ontology/graph.tg and b/bundles/org.simantics.softwareconfiguration.ontology/graph.tg differ
index 967272c074c03e5dd6f6624d5ed6a3fc60373b2f..a5eef2fb2265eaec3cb1921bface8a1e4a103578 100644 (file)
@@ -21,7 +21,8 @@ Require-Bundle: org.simantics.layer0.utils,
  org.simantics.datatypes.ontology;bundle-version="1.1.0",
  org.simantics.document.server.io;bundle-version="0.0.1",
  org.simantics.simulator.variable,
- org.apache.poi;bundle-version="3.10.0",
+ org.apache.poi;bundle-version="3.14.0",
+ org.apache.poi.ooxml;bundle-version="3.14.0",
  it.unimi.dsi.fastutil,
  org.simantics.structural.synchronization.client,
  org.simantics.db.indexing,
index 81aa67eaae38d20a9b7421d713414a874c416872..444054c8b23e088da2a1840a057aa4e2b29bc118 100644 (file)
@@ -398,7 +398,7 @@ public class ExcelImport {
             XSSFColor fontColor = xssfFont.getXSSFColor();\r
             if (fontColor != null) {\r
                 \r
-                byte[] rgb = fontColor.getRgbWithTint();\r
+                byte[] rgb = fontColor.getRGBWithTint();\r
                 \r
                 String ix = fontColor.getARGBHex();\r
                 RGB.Integer s = hex2Rgb(ix);\r
index 435b714aad59ba7566a8f450d01d2598bc7abf53..8ad7b0843382d31d31c47d0c02f35187d03d19d3 100644 (file)
Binary files a/bundles/org.simantics.spreadsheet.ontology/graph.tg and b/bundles/org.simantics.spreadsheet.ontology/graph.tg differ
index 28ffbc4654818328379e6005ad74d05ed3458035..fbe043eabb42745f08acf3d0de55a72a1e04defd 100644 (file)
Binary files a/bundles/org.simantics.structural.ontology/graph.tg and b/bundles/org.simantics.structural.ontology/graph.tg differ
index 5196e4a496e0c81dbe6ad8b3c83413af275fa094..59c26d0fef331293fa766997e2fd8e9744860d06 100644 (file)
Binary files a/bundles/org.simantics.user.ontology/graph.tg and b/bundles/org.simantics.user.ontology/graph.tg differ
index accbb69ba407abbf8699726a1c9749e5451406b3..05d60ae9b66d7635346e187c9b81cf3276d01a23 100644 (file)
@@ -12,4 +12,4 @@
 source.. = src/\r
 output.. = bin/\r
 bin.includes = META-INF/,\\r
-               .
\ No newline at end of file
+               .
index 3de5dabca89017ca91e4446d5885baef7208225e..d3163ee7b1ffe3c367ff1a474087a2733915c45d 100644 (file)
Binary files a/bundles/org.simantics.viewpoint.ontology/graph.tg and b/bundles/org.simantics.viewpoint.ontology/graph.tg differ
index a2ec73d89334f9a5a1ca4662daae6a2fc46acf23..fcd56da6f7dfd48aed01a3cb518377583062130d 100644 (file)
Binary files a/bundles/org.simantics.views.ontology/graph.tg and b/bundles/org.simantics.views.ontology/graph.tg differ
index 22afc5f43389c6eaa03a2ad45e79835230099b67..68a1b4c50e986f6a0f9d559ea6aeeda3b3f5cc4c 100644 (file)
Binary files a/bundles/org.simantics.workbench.ontology/graph.tg and b/bundles/org.simantics.workbench.ontology/graph.tg differ
index 4641096f897c74ba92b63b4fac16e5afff9ffd03..54ab690db0c2627a056ef9c6d2c03a78810608cf 100644 (file)
         <module>com.famfamfam.silk</module>\r
         <module>org.simantics</module>\r
         <module>org.simantics.action.ontology</module>\r
+        <module>org.simantics.acorn</module>\r
         <module>org.simantics.annotation.ontology</module>\r
         <module>org.simantics.annotation.ui</module>\r
         <module>org.simantics.application</module>\r
+        <module>org.simantics.backup</module>\r
+        <module>org.simantics.backup.db</module>\r
+        <module>org.simantics.backup.ontology</module>\r
         <module>org.simantics.basicexpression</module>\r
         <module>org.simantics.browsing.ui</module>\r
         <module>org.simantics.browsing.ui.common</module>\r
         <module>org.simantics.export.core</module>\r
         <module>org.simantics.export.ui</module>\r
         <module>org.simantics.fastlz</module>\r
+        <module>org.simantics.fileimport</module>\r
+        <module>org.simantics.fileimport.ui</module>\r
         <module>org.simantics.g2d</module>\r
         <module>org.simantics.g2d.ontology</module>\r
         <module>org.simantics.graph</module>\r
index 46281dc83d12bb22762d3d5e877aac30c590ae6e..c4130baed5fd59eff0d3ac20d556edc8d70a37d2 100644 (file)
@@ -1 +1 @@
-/target/**\r
+/target/**
index 46281dc83d12bb22762d3d5e877aac30c590ae6e..c4130baed5fd59eff0d3ac20d556edc8d70a37d2 100644 (file)
@@ -1 +1 @@
-/target/**\r
+/target/**
index 65acfa3903b2059fb9554576fc578421bffa8a98..32d412a74fb649d291c06cf0f5287cb624db1280 100644 (file)
       [Enter License Description here.]\r
    </license>\r
 \r
-   <includes\r
-         id="org.jfree"\r
-         version="0.0.0"/>\r
-\r
    <includes\r
          id="org.simantics.g2d"\r
          version="0.0.0"/>\r
 \r
    <includes\r
-         id="org.simantics.event"\r
+         id="org.simantics.event.feature"\r
          version="0.0.0"/>\r
 \r
    <plugin\r
index d6101139cacbbb1c5df0ad01a1c0990d71b4e387..14ecb0bf65a4112810ed8297ecb8f571d789dad5 100644 (file)
          install-size="0"\r
          version="0.0.0"/>\r
 \r
+   <plugin\r
+         id="org.simantics.acorn"\r
+         download-size="0"\r
+         install-size="0"\r
+         version="0.0.0"\r
+         unpack="false"/>\r
+\r
    <plugin\r
          id="org.simantics.databoard"\r
          download-size="0"\r
          version="0.0.0"\r
          unpack="false"/>\r
 \r
+   <plugin\r
+         id="org.simantics.backup"\r
+         download-size="0"\r
+         install-size="0"\r
+         version="0.0.0"\r
+         unpack="false"/>\r
+\r
 </feature>\r
index d29ccac34a20301c44f3f992b24370c7734352e5..ee8db3761bf14dc5390f6b5f49166d5b4c6655e9 100644 (file)
@@ -26,7 +26,7 @@
          version="0.0.0"/>\r
 \r
    <includes\r
-         id="org.simantics.issues"\r
+         id="org.simantics.issues.feature"\r
          version="0.0.0"/>\r
 \r
    <includes\r
@@ -34,7 +34,7 @@
          version="0.0.0"/>\r
 \r
    <includes\r
-         id="org.simantics.issues.ui"\r
+         id="org.simantics.issues.ui.feature"\r
          version="0.0.0"/>\r
 \r
    <includes\r
diff --git a/features/org.simantics.eclipsec.launcher.feature/bin/win32/win32/x86/eclipsec.exe b/features/org.simantics.eclipsec.launcher.feature/bin/win32/win32/x86/eclipsec.exe
new file mode 100644 (file)
index 0000000..e5c6f3b
Binary files /dev/null and b/features/org.simantics.eclipsec.launcher.feature/bin/win32/win32/x86/eclipsec.exe differ
diff --git a/features/org.simantics.eclipsec.launcher.feature/bin/win32/win32/x86_64/eclipsec.exe b/features/org.simantics.eclipsec.launcher.feature/bin/win32/win32/x86_64/eclipsec.exe
new file mode 100644 (file)
index 0000000..ffc52a8
Binary files /dev/null and b/features/org.simantics.eclipsec.launcher.feature/bin/win32/win32/x86_64/eclipsec.exe differ
index 30e90c89299f42723c66e63b8693d53c51f45b19..ef8dccc6329e0e470c7905e8f95c16e5cf188e11 100644 (file)
@@ -12,4 +12,4 @@
 ###############################################################################\r
 bin.includes=feature.xml\r
 root.win32.win32.x86=bin/win32/win32/x86\r
-root.win32.win32.x86_64=bin/win32/win32/x86_64
\ No newline at end of file
+root.win32.win32.x86_64=bin/win32/win32/x86_64\r
index 5d129d61925875875f7fdbd8750a6a0dfc22c20e..54796e239f926ab2dbb96b225c3e1842f3a64083 100644 (file)
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>\r
 <feature\r
-      id="org.simantics.event"\r
+      id="org.simantics.event.feature"\r
       label="Event Log Feature"\r
       version="0.0.1.qualifier"\r
       provider-name="Semantum Oy">\r
index 060e326e097b2f46caafed78a6e7634339c0d462..e3e9eb0d30951352c6f82dd0c54ce88dafea4a88 100644 (file)
          version="0.0.0"/>\r
 \r
    <includes\r
-         id="org.jfree"\r
+         id="org.simantics.views.swt.feature"\r
          version="0.0.0"/>\r
 \r
    <includes\r
-         id="org.simantics.views.swt"\r
+         id="org.jfree"\r
          version="0.0.0"/>\r
 \r
    <plugin\r
          version="0.0.0"\r
          unpack="false"/>\r
 \r
-   <plugin\r
-         id="net.sf.cglib"\r
-         download-size="0"\r
-         install-size="0"\r
-         version="0.0.0"\r
-         unpack="false"/>\r
-\r
    <plugin\r
          id="org.objectweb.asm"\r
          download-size="0"\r
index 78aa3eed4f5306a5e554ba1175516854387e5f29..fc37ff353ad12076a1630f8e78065a8d441644b0 100644 (file)
@@ -11,7 +11,7 @@
         VTT Technical Research Centre of Finland - initial API and implementation
  -->\r
 <feature\r
-      id="org.simantics.issues"\r
+      id="org.simantics.issues.feature"\r
       label="Issue Subsystem"\r
       version="1.1.0.qualifier"\r
       provider-name="VTT Technical Research Centre of Finland">\r
index 035aa296cc78a0aae720a0080ffd9dc6f5ae8a4b..71b86dfd0f6d8dab17ce351532fffb8aa068865c 100644 (file)
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>\r
 <feature\r
-      id="org.simantics.issues.ui"\r
+      id="org.simantics.issues.ui.feature"\r
       label="Issue UI Feature"\r
       version="1.1.1.qualifier"\r
       provider-name="Semantum Oy">\r
@@ -18,7 +18,7 @@
    </license>\r
 \r
    <includes\r
-         id="org.simantics.issues"\r
+         id="org.simantics.issues.feature"\r
          version="0.0.0"/>\r
 \r
    <plugin\r
index e77581d801a439f5a6b979d12b241063c702639e..fa10579e32adfb7f03ef54e241b23db22999b888 100644 (file)
       Licensed under Eclipse Public License (EPL) 1.0.\r
    </license>\r
 \r
-   <includes\r
-         id="org.jfree"\r
-         version="0.0.0"/>\r
-\r
    <includes\r
          id="com.lowagie.text"\r
          version="0.0.0"/>\r
index c3efa01b5827ad8f45913d35bddd747ca5dac4a0..000aad2c7fb878784bcc02da0ac6cd91addb8b37 100644 (file)
@@ -32,7 +32,7 @@ reusable components implemented on top of the platform.
          version="0.0.0"/>\r
 \r
    <includes\r
-         id="org.simantics.utils.ui"\r
+         id="org.simantics.utils.ui.feature"\r
          version="0.0.0"/>\r
 \r
    <includes\r
@@ -56,7 +56,7 @@ reusable components implemented on top of the platform.
          version="0.0.0"/>\r
 \r
    <includes\r
-         id="org.simantics.wiki.ui"\r
+         id="org.simantics.wiki.ui.feature"\r
          version="0.0.0"/>\r
 \r
    <includes\r
index ee4a899a1b96203e929900913ac656a1e0a93700..ed48ad311a546fd0dc75fbfefff77bcbc22d8e16 100644 (file)
@@ -49,7 +49,7 @@
          version="0.0.0"/>\r
 \r
    <includes\r
-         id="org.simantics.utils.ui"\r
+         id="org.simantics.utils.ui.feature"\r
          version="0.0.0"/>\r
 \r
    <includes\r
          version="0.0.0"/>\r
 \r
    <includes\r
-         id="org.simantics.issues"\r
+         id="org.simantics.issues.feature"\r
          version="0.0.0"/>\r
 \r
    <includes\r
-         id="org.simantics.issues.ui"\r
+         id="org.simantics.issues.ui.feature"\r
+         version="0.0.0"/>\r
+\r
+   <includes\r
+         id="org.simantics.views.swt.client.feature"\r
+         version="0.0.0"/>\r
+\r
+   <includes\r
+         id="org.simantics.views.swt.feature"\r
          version="0.0.0"/>\r
 \r
    <includes\r
          version="0.0.0"/>\r
 \r
    <includes\r
-         id="org.simantics.event"\r
+         id="org.simantics.event.feature"\r
          version="0.0.0"/>\r
 \r
    <includes\r
          version="0.0.0"/>\r
 \r
    <includes\r
-         id="org.simantics.wiki.ui"\r
+         id="org.simantics.wiki.ui.feature"\r
          version="0.0.0"/>\r
 \r
    <includes\r
index 20b434dc5ed85c6653d2dbad0e1cb23dd56bf33b..dd1af072701285d81d48650d84e172bb800ee373 100644 (file)
@@ -121,4 +121,18 @@ This Agreement is governed by the laws of the State of New York and the intellec
          version="0.0.0"\r
          unpack="false"/>\r
 \r
+   <plugin\r
+         id="org.apache.poi.ooxml"\r
+         download-size="0"\r
+         install-size="0"\r
+         version="0.0.0"\r
+         unpack="false"/>\r
+\r
+   <plugin\r
+         id="org.apache.poi.ooxml-schemas"\r
+         download-size="0"\r
+         install-size="0"\r
+         version="0.0.0"\r
+         unpack="false"/>\r
+\r
 </feature>\r
index 3892f7d0865aeac36e372f7cbd783d4af8a46aca..b81e2e567e8fd6337f569dc28878becdae467388 100644 (file)
@@ -1,14 +1,14 @@
 <?xml version="1.0" encoding="UTF-8"?>\r
-<!--
-    Copyright (c) 2007, 2010 Association for Decentralized Information Management
-    in Industry THTH ry.
-    All rights reserved. This program and the accompanying materials
-    are made available under the terms of the Eclipse Public License v1.0
-    which accompanies this distribution, and is available at
-    http://www.eclipse.org/legal/epl-v10.html
-   
-    Contributors:
-        VTT Technical Research Centre of Finland - initial API and implementation
+<!--\r
+    Copyright (c) 2007, 2010 Association for Decentralized Information Management\r
+    in Industry THTH ry.\r
+    All rights reserved. This program and the accompanying materials\r
+    are made available under the terms of the Eclipse Public License v1.0\r
+    which accompanies this distribution, and is available at\r
+    http://www.eclipse.org/legal/epl-v10.html\r
+   \r
+    Contributors:\r
+        VTT Technical Research Centre of Finland - initial API and implementation\r
  -->\r
 <feature\r
       id="org.simantics.utils"\r
       [Enter License Description here.]\r
    </license>\r
 \r
-   <includes\r
-         id="org.apache.commons"\r
-         version="0.0.0"/>\r
-\r
    <requires>\r
       <import plugin="org.eclipse.core.runtime"/>\r
       <import plugin="org.eclipse.core.resources"/>\r
    </requires>\r
 \r
    <plugin\r
-         id="org.simantics.utils"\r
+         id="org.apache.commons.collections"\r
          download-size="0"\r
          install-size="0"\r
          version="0.0.0"\r
          unpack="false"/>\r
 \r
    <plugin\r
-         id="org.simantics.utils.datastructures"\r
+         id="org.apache.commons.io"\r
          download-size="0"\r
          install-size="0"\r
          version="0.0.0"\r
          unpack="false"/>\r
 \r
    <plugin\r
-         id="org.simantics.utils.thread"\r
+         id="org.simantics.utils"\r
          download-size="0"\r
          install-size="0"\r
          version="0.0.0"\r
          unpack="false"/>\r
 \r
    <plugin\r
-         id="xtc.parser.runtime"\r
+         id="org.simantics.utils.datastructures"\r
+         download-size="0"\r
+         install-size="0"\r
+         version="0.0.0"\r
+         unpack="false"/>\r
+\r
+   <plugin\r
+         id="org.simantics.utils.thread"\r
          download-size="0"\r
          install-size="0"\r
          version="0.0.0"\r
index e5cc91c1ea8df99726e171252d931dbb51b75b9f..6924e60acaa672050fa2ac1b029200b448579671 100644 (file)
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>\r
 <feature\r
-      id="org.simantics.utils.ui"\r
+      id="org.simantics.utils.ui.feature"\r
       label="UI Utils Feature"\r
       version="1.25.0.qualifier"\r
       provider-name="VTT Technical Research Centre of Finland">\r
index ae293d67491c2ad6e8924682c448d94dff8fddb0..d8b3cc81f27c88dddb71da2c5d407ae34509221b 100644 (file)
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>\r
 <feature\r
-      id="org.simantics.views.swt.client"\r
+      id="org.simantics.views.swt.client.feature"\r
       label="SWT views client"\r
       version="1.0.1.qualifier"\r
       provider-name="Semantum Oy">\r
index f94557de7183356f2a04380945f159ab921e84dd..e80360cf625ae6c3846b110a09e4fabe92f32976 100644 (file)
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>\r
 <feature\r
-      id="org.simantics.views.swt"\r
+      id="org.simantics.views.swt.feature"\r
       label="SWT views"\r
       version="1.0.1.qualifier"\r
       provider-name="Semantum Oy">\r
       [Enter License Description here.]\r
    </license>\r
 \r
-   <plugin\r
-         id="org.simantics.views"\r
-         download-size="0"\r
-         install-size="0"\r
-         version="0.0.0"\r
-         unpack="false"/>\r
+   <includes\r
+         id="org.simantics.views.swt.client.feature"\r
+         version="0.0.0"/>\r
 \r
    <plugin\r
          id="org.simantics.views.ontology"\r
          version="0.0.0"\r
          unpack="false"/>\r
 \r
-   <plugin\r
-         id="org.simantics.views.swt.client"\r
-         download-size="0"\r
-         install-size="0"\r
-         version="0.0.0"\r
-         unpack="false"/>\r
-\r
 </feature>\r
index 8284fb3c0d7bbb62504577228952f662780c1c48..5569344e7ce964d83a336ed13a3cc10fbfdd9a3d 100644 (file)
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>\r
 <feature\r
-      id="org.simantics.wiki.ui"\r
+      id="org.simantics.wiki.ui.feature"\r
       label="Wiki Documentation Feature"\r
       version="1.25.0.qualifier"\r
       provider-name="Semantum Oy">\r
index 4e17e37653c0c5186996357becc7062c054ce683..361c0cdcdbb430ebd34334a2a5269618720970d6 100644 (file)
                                     <id>net.java.dev.jna:jna-platform:4.2.2</id>\r
                                     <source>true</source>\r
                                 </artifact>\r
+                                <artifact>\r
+                                    <id>com.fasterxml.jackson.core:jackson-core:2.8.2</id>\r
+                                    <source>true</source>\r
+                                </artifact>\r
                             </artifacts>\r
                         </configuration>\r
                     </execution>\r
diff --git a/releng/org.simantics.sdk.build.targetdefinition/org.simantics.sdk.build.targetdefinition-semantum.target b/releng/org.simantics.sdk.build.targetdefinition/org.simantics.sdk.build.targetdefinition-semantum.target
new file mode 100644 (file)
index 0000000..0410ad6
--- /dev/null
@@ -0,0 +1,194 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>\r
+<?pde version="3.8"?><target name="Eclipse Mars.2" sequenceNumber="141">\r
+<locations>\r
+<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
+<unit id="org.eclipse.mylyn.wikitext_feature.feature.group" version="2.7.0.v20151015-1452"/>\r
+<unit id="org.eclipse.emf.sdk.feature.group" version="2.11.2.v20160208-0841"/>\r
+<unit id="org.eclipse.epp.mpc.feature.group" version="1.4.2.v20160210-2005"/>\r
+<repository location="http://dev.simupedia.com/download/mars"/>\r
+</location>\r
+<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
+<unit id="org.eclipse.ecf.core.ssl.feature.feature.group" version="1.1.0.v20151130-0157"/>\r
+<unit id="org.eclipse.rcp.source.feature.group" version="4.5.2.v20160212-1500"/>\r
+<unit id="org.eclipse.e4.core.tools.feature.feature.group" version="4.5.1.v20160129-0959"/>\r
+<unit id="org.eclipse.ecf.filetransfer.source.feature.feature.group" version="3.12.0.v20151130-0157"/>\r
+<unit id="org.eclipse.ecf.filetransfer.ssl.feature.feature.group" version="1.1.0.v20151130-0157"/>\r
+<unit id="org.eclipse.help.feature.group" version="2.1.2.v20160212-1500"/>\r
+<unit id="org.eclipse.ecf.filetransfer.httpclient4.source.feature.feature.group" version="3.12.0.v20151130-0157"/>\r
+<unit id="org.eclipse.e4.core.tools.feature.source.feature.group" version="4.5.1.v20160129-0959"/>\r
+<unit id="org.eclipse.swt.tools.feature.feature.group" version="3.104.2.v20160128-0900"/>\r
+<unit id="org.eclipse.ecf.filetransfer.httpclient4.feature.feature.group" version="3.12.0.v20151130-0157"/>\r
+<unit id="org.eclipse.ecf.filetransfer.ssl.source.feature.feature.group" version="1.1.0.v20151130-0157"/>\r
+<unit id="org.eclipse.platform.feature.group" version="4.5.2.v20160212-1500"/>\r
+<unit id="org.eclipse.ecf.core.ssl.source.feature.feature.group" version="1.1.0.v20151130-0157"/>\r
+<unit id="org.eclipse.rcp.feature.group" version="4.5.2.v20160212-1500"/>\r
+<unit id="org.eclipse.equinox.p2.discovery.feature.feature.group" version="1.0.300.v20150430-1836"/>\r
+<unit id="org.eclipse.ecf.core.feature.feature.group" version="1.2.0.v20151130-0157"/>\r
+<unit id="org.eclipse.ecf.core.source.feature.feature.group" version="1.2.0.v20151130-0157"/>\r
+<unit id="org.eclipse.equinox.sdk.feature.group" version="3.11.2.v20160202-2102"/>\r
+<unit id="org.eclipse.platform.source.feature.group" version="4.5.2.v20160212-1500"/>\r
+<unit id="org.eclipse.releng.tools.feature.group" version="3.6.0.v20150527-0145"/>\r
+<unit id="org.eclipse.ecf.filetransfer.feature.feature.group" version="3.12.0.v20151130-0157"/>\r
+<unit id="org.eclipse.sdk.ide" version="4.5.2.M20160212-1500"/>\r
+<unit id="org.eclipse.ecf.filetransfer.httpclient4.ssl.feature.feature.group" version="1.1.0.v20151130-0157"/>\r
+<unit id="org.eclipse.help.source.feature.group" version="2.1.2.v20160212-1500"/>\r
+<unit id="org.eclipse.ecf.filetransfer.httpclient4.ssl.source.feature.feature.group" version="1.1.0.v20151130-0157"/>\r
+<repository location="http://dev.simupedia.com/download/eclipse/updates/4.5"/>\r
+</location>\r
+<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
+<unit id="org.eclipse.nebula.effects.stw.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.visualization.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.widgets.calendarcombo.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.widgets.gallery.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.cwt.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.widgets.oscilloscope.feature.feature.group" version="1.2.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.widgets.pshelf.css.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.widgets.ganttchart.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.widgets.xviewer.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.widgets.compositetable.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.widgets.grid.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.widgets.cdatetime.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.widgets.pshelf.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.widgets.richtext.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.widgets.tablecombo.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.widgets.paperclips.feature.feature.group" version="2.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.examples.release.feature.feature.group" version="1.0.4.201605182147"/>\r
+<unit id="org.eclipse.nebula.widgets.pgroup.feature.feature.group" version="1.0.0.201605182147"/>\r
+<repository location="http://dev.simupedia.com/download/nebula/Q22016/release/"/>\r
+</location>\r
+<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
+<unit id="org.eclipse.e4.tools.spies.feature.feature.group" version="0.17.0.v20160811-1516"/>\r
+<repository location="http://dev.simupedia.com/download/e4/snapshots/org.eclipse.e4.tools/latest/"/>\r
+</location>\r
+<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
+<unit id="org.eclipse.nebula.incubation.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.examples.incubation.feature.feature.group" version="1.0.0.201605182147"/>\r
+<repository location="http://dev.simupedia.com/download/nebula/Q22016/incubation"/>\r
+</location>\r
+<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
+<unit id="org.eclipse.nebula.widgets.nattable.core.feature.feature.group" version="1.4.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.extension.e4.source.feature.feature.group" version="1.0.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.extension.poi.feature.feature.group" version="1.4.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.extension.glazedlists.feature.feature.group" version="1.4.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.core.source.feature.feature.group" version="1.4.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.extension.glazedlists.source.feature.feature.group" version="1.4.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.extension.nebula.feature.feature.group" version="1.0.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.extension.e4.feature.feature.group" version="1.0.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.extension.nebula.source.feature.feature.group" version="1.0.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.extension.poi.source.feature.feature.group" version="1.4.0.201606011907"/>\r
+<repository location="http://dev.simupedia.com/download/nebula/nattable/releases/1.4.0/repository/"/>\r
+</location>\r
+<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
+<unit id="org.apache.commons.lang.source" version="2.6.0"/>\r
+<unit id="org.antlr.runtime.source" version="3.5.2"/>\r
+<unit id="org.apache.commons.compress" version="1.12.0"/>\r
+<unit id="org.apache.pdfbox.source" version="2.0.2"/>\r
+<unit id="org.bouncycastle.bcmail-jdk14.source" version="1.38.0"/>\r
+<unit id="org.ini4j" version="0.5.4"/>\r
+<unit id="org.jdom2.source" version="2.0.6"/>\r
+<unit id="org.apache.commons.logging" version="1.2.0"/>\r
+<unit id="org.apache.pdfbox.xmpbox.source" version="2.0.2"/>\r
+<unit id="net.ucanaccess" version="3.0.6"/>\r
+<unit id="gnu.trove2" version="2.1.0"/>\r
+<unit id="net.ucanaccess.source" version="3.0.6"/>\r
+<unit id="org.eclipse.collections.eclipse-collections.source" version="7.1.0"/>\r
+<unit id="freemarker" version="2.3.23.stable"/>\r
+<unit id="com.github.virtuald.curvesapi.source" version="1.3.0"/>\r
+<unit id="org.apache.commons.collections.source" version="3.2.2"/>\r
+<unit id="org.apache.pdfbox.fontbox" version="2.0.2"/>\r
+<unit id="org.apache.commons.compress.source" version="1.12.0"/>\r
+<unit id="org.apache.lucene4.queryparser.source" version="4.9.0.b0003"/>\r
+<unit id="org.mozilla.rhino.source" version="1.7.7.1"/>\r
+<unit id="freemarker.source" version="2.3.23.stable"/>\r
+<unit id="jakarta-regexp" version="1.4.0"/>\r
+<unit id="net.jcip.annotations.source" version="1.0.0"/>\r
+<unit id="org.apache.log4j.source" version="1.2.17"/>\r
+<unit id="org.apache.commons.logging.source" version="1.2.0"/>\r
+<unit id="org.apache.commons.io.source" version="1.4.0"/>\r
+<unit id="com.healthmarketscience.jackcess" version="2.1.3"/>\r
+<unit id="org.bouncycastle.bctsp-jdk14.source" version="1.38.0"/>\r
+<unit id="org.eclipse.collections.eclipse-collections-api" version="7.1.0"/>\r
+<unit id="gnu.trove3.source" version="3.0.3"/>\r
+<unit id="it.unimi.dsi.fastutil.source" version="7.0.12"/>\r
+<unit id="org.hsqldb.hsqldb.source" version="2.3.1"/>\r
+<unit id="org.apache.poi.source" version="3.14.0"/>\r
+<unit id="org.apache.lucene4.sandbox.source" version="4.9.0.b0003"/>\r
+<unit id="org.apache.xmlbeans" version="2.6.0"/>\r
+<unit id="org.supercsv" version="2.4.0"/>\r
+<unit id="org.mozilla.rhino" version="1.7.7.1"/>\r
+<unit id="org.apache.pdfbox.xmpbox" version="2.0.2"/>\r
+<unit id="org.apache.commons.codec" version="1.10.0"/>\r
+<unit id="com.healthmarketscience.jackcess.source" version="2.1.3"/>\r
+<unit id="org.eclipse.collections.eclipse-collections-api.source" version="7.1.0"/>\r
+<unit id="org.bouncycastle.bcprov-jdk14.source" version="1.38.0"/>\r
+<unit id="stax.api" version="1.0.1"/>\r
+<unit id="org.apache.pdfbox.fontbox.source" version="2.0.2"/>\r
+<unit id="it.unimi.dsi.fastutil" version="7.0.12"/>\r
+<unit id="com.lowagie.text.source" version="2.1.7.b1"/>\r
+<unit id="net.jcip.annotations" version="1.0.0"/>\r
+<unit id="org.apache.lucene4.queryparser" version="4.9.0.b0003"/>\r
+<unit id="org.bouncycastle.bctsp-jdk14" version="1.38.0"/>\r
+<unit id="org.jfree.jchart.source" version="1.0.19"/>\r
+<unit id="com.lowagie.text" version="2.1.7.b1"/>\r
+<unit id="org.apache.poi.ooxml-schemas" version="3.14.0"/>\r
+<unit id="org.bouncycastle.bcprov-jdk14" version="1.38.0"/>\r
+<unit id="com.sun.jna" version="4.2.2"/>\r
+<unit id="org.apache.poi" version="3.14.0"/>\r
+<unit id="org.jdom2" version="2.0.6"/>\r
+<unit id="org.apache.commons.lang" version="2.6.0"/>\r
+<unit id="org.jfree.jchart" version="1.0.19"/>\r
+<unit id="org.apache.lucene4.sandbox" version="4.9.0.b0003"/>\r
+<unit id="org.apache.commons.codec.source" version="1.10.0"/>\r
+<unit id="org.supercsv.source" version="2.4.0"/>\r
+<unit id="com.github.virtuald.curvesapi" version="1.3.0"/>\r
+<unit id="org.eclipse.collections.eclipse-collections" version="7.1.0"/>\r
+<unit id="org.apache.log4j" version="1.2.17"/>\r
+<unit id="org.apache.lucene4.queries.source" version="4.9.0.b0003"/>\r
+<unit id="com.fasterxml.jackson.core.jackson-core.source" version="2.8.2"/>\r
+<unit id="org.apache.lucene4.core.source" version="4.9.0.b0003"/>\r
+<unit id="com.sun.jna.source" version="4.2.2"/>\r
+<unit id="org.apache.poi.ooxml" version="3.14.0"/>\r
+<unit id="com.sun.jna.platform" version="4.2.2"/>\r
+<unit id="org.apache.lucene4.analyzers-common.source" version="4.9.0.b0003"/>\r
+<unit id="org.apache.pdfbox" version="2.0.2"/>\r
+<unit id="org.bouncycastle.bcmail-jdk14" version="1.38.0"/>\r
+<unit id="org.jfree.jcommon" version="1.0.23"/>\r
+<unit id="org.apache.lucene4.analyzers-common" version="4.9.0.b0003"/>\r
+<unit id="org.apache.commons.collections" version="3.2.2"/>\r
+<unit id="org.apache.poi.ooxml.source" version="3.14.0"/>\r
+<unit id="com.fasterxml.jackson.core.jackson-core" version="2.8.2"/>\r
+<unit id="org.apache.lucene4.core" version="4.9.0.b0003"/>\r
+<unit id="gnu.trove2.source" version="2.1.0"/>\r
+<unit id="gnu.trove3" version="3.0.3"/>\r
+<unit id="org.antlr.runtime" version="3.5.2"/>\r
+<unit id="org.jfree.jcommon.source" version="1.0.23"/>\r
+<unit id="org.hsqldb.hsqldb" version="2.3.1"/>\r
+<unit id="org.apache.commons.io" version="1.4.0"/>\r
+<unit id="bouncycastle.bcmail-jdk14" version="138.0.0"/>\r
+<unit id="org.apache.lucene4.queries" version="4.9.0.b0003"/>\r
+<unit id="com.sun.jna.platform.source" version="4.2.2"/>\r
+<unit id="javax.vecmath" version="1.5.2"/>\r
+<unit id="bouncycastle.bcprov-jdk14" version="138.0.0"/>\r
+<unit id="org.ini4j.source" version="0.5.4"/>\r
+<repository location="http://dev.simupedia.com/download/master/external-components"/>\r
+</location>\r
+<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
+<unit id="org.objectweb.asm.xml.source" version="5.0.1.v201404251740"/>\r
+<unit id="org.objectweb.asm.util.source" version="5.0.1.v201404251740"/>\r
+<unit id="org.objectweb.asm.source" version="5.0.1.v201404251740"/>\r
+<unit id="com.google.guava" version="15.0.0.v201403281430"/>\r
+<unit id="org.objectweb.asm.util" version="5.0.1.v201404251740"/>\r
+<unit id="org.objectweb.asm.xml" version="5.0.1.v201404251740"/>\r
+<unit id="com.google.guava.source" version="15.0.0.v201403281430"/>\r
+<unit id="org.objectweb.asm" version="5.0.1.v201404251740"/>\r
+<unit id="org.objectweb.asm.commons" version="5.0.1.v201404251740"/>\r
+<unit id="org.objectweb.asm.analysis.source" version="5.0.1.v201505121915"/>\r
+<unit id="org.objectweb.asm.commons.source" version="5.0.1.v201404251740"/>\r
+<unit id="org.objectweb.asm.tree.source" version="5.0.1.v201404251740"/>\r
+<unit id="org.objectweb.asm.analysis" version="5.0.1.v201505121915"/>\r
+<unit id="org.objectweb.asm.tree" version="5.0.1.v201404251740"/>\r
+<repository location="http://dev.simupedia.com/download/tools/orbit/downloads/drops/R20160221192158/repository/"/>\r
+</location>\r
+</locations>\r
+</target>\r
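[Editor's note: the hunks above and below all edit Eclipse PDE target-definition files. For orientation, a minimal sketch of the shape of one resolution block follows; the unit id, version, and repository URL are illustrative placeholders, not values from this change.]

<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?pde version="3.8"?><target name="Example" sequenceNumber="1">
<locations>
<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">
<!-- Each unit pins one installable unit to an exact version. -->
<unit id="org.example.bundle" version="1.0.0"/>
<!-- The repository is the p2 site the units in this block are resolved from. -->
<repository location="http://example.org/p2/repository/"/>
</location>
</locations>
</target>

Each location is resolved independently against its own repository, which is why this commit can move a unit between blocks (for example, the lucene4 units above) without changing its version.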
index 34bfa95c2f088ef40d5d6dcd23662ebc9e7c0006..7d5f0d45c036ed959a97428da071eaba51c28739 100644 (file)
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8" standalone="no"?>\r
-<?pde version="3.8"?><target name="Eclipse Mars.2" sequenceNumber="127">\r
+<?pde version="3.8"?><target name="Eclipse Mars.2" sequenceNumber="128">\r
 <locations>\r
 <location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
 <unit id="org.objectweb.asm.xml.source" version="5.0.1.v201404251740"/>\r
 <repository location="http://www.simantics.org/update/nebula/Q22016/release/"/>\r
 </location>\r
 <location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
-<unit id="org.apache.lucene4.queries" version="4.9.0.b0003"/>\r
+<unit id="org.eclipse.e4.tools.spies.feature.feature.group" version="0.17.0.v20160811-1516"/>\r
+<repository location="http://www.simantics.org/update/e4/snapshots/org.eclipse.e4.tools/latest/"/>\r
+</location>\r
+<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
+<unit id="org.eclipse.nebula.incubation.feature.feature.group" version="1.0.0.201605182147"/>\r
+<unit id="org.eclipse.nebula.examples.incubation.feature.feature.group" version="1.0.0.201605182147"/>\r
+<repository location="http://www.simantics.org/update/nebula/Q22016/incubation"/>\r
+</location>\r
+<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
+<unit id="org.eclipse.nebula.widgets.nattable.core.feature.feature.group" version="1.4.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.extension.e4.source.feature.feature.group" version="1.0.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.extension.poi.feature.feature.group" version="1.4.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.extension.glazedlists.feature.feature.group" version="1.4.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.core.source.feature.feature.group" version="1.4.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.extension.glazedlists.source.feature.feature.group" version="1.4.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.extension.nebula.feature.feature.group" version="1.0.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.extension.e4.feature.feature.group" version="1.0.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.extension.nebula.source.feature.feature.group" version="1.0.0.201606011907"/>\r
+<unit id="org.eclipse.nebula.widgets.nattable.extension.poi.source.feature.feature.group" version="1.4.0.201606011907"/>\r
+<repository location="http://www.simantics.org/update/nebula/nattable/releases/1.4.0/repository/"/>\r
+</location>\r
+<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
 <unit id="org.apache.commons.lang.source" version="2.6.0"/>\r
-<unit id="org.apache.lucene4.sandbox.source" version="4.9.0.b0003"/>\r
 <unit id="org.antlr.runtime.source" version="3.5.2"/>\r
 <unit id="org.apache.commons.compress" version="1.12.0"/>\r
 <unit id="org.apache.pdfbox.source" version="2.0.2"/>\r
 <unit id="org.ini4j" version="0.5.4"/>\r
 <unit id="org.jdom2.source" version="2.0.6"/>\r
 <unit id="org.apache.commons.logging" version="1.2.0"/>\r
+<unit id="org.apache.pdfbox.xmpbox.source" version="2.0.2"/>\r
 <unit id="net.ucanaccess" version="3.0.6"/>\r
 <unit id="gnu.trove2" version="2.1.0"/>\r
 <unit id="net.ucanaccess.source" version="3.0.6"/>\r
 <unit id="org.eclipse.collections.eclipse-collections.source" version="7.1.0"/>\r
 <unit id="freemarker" version="2.3.23.stable"/>\r
 <unit id="com.github.virtuald.curvesapi.source" version="1.3.0"/>\r
-<unit id="org.apache.lucene4.analyzers-common" version="4.9.0.b0003"/>\r
 <unit id="org.apache.commons.collections.source" version="3.2.2"/>\r
 <unit id="org.apache.pdfbox.fontbox" version="2.0.2"/>\r
 <unit id="org.apache.commons.compress.source" version="1.12.0"/>\r
+<unit id="org.apache.lucene4.queryparser.source" version="4.9.0.b0003"/>\r
 <unit id="org.mozilla.rhino.source" version="1.7.7.1"/>\r
-<unit id="org.apache.lucene4.queries.source" version="4.9.0.b0003"/>\r
 <unit id="freemarker.source" version="2.3.23.stable"/>\r
 <unit id="jakarta-regexp" version="1.4.0"/>\r
 <unit id="net.jcip.annotations.source" version="1.0.0"/>\r
 <unit id="org.apache.log4j.source" version="1.2.17"/>\r
-<unit id="org.apache.lucene4.sandbox" version="4.9.0.b0003"/>\r
 <unit id="org.apache.commons.logging.source" version="1.2.0"/>\r
 <unit id="org.apache.commons.io.source" version="1.4.0"/>\r
-<unit id="org.apache.lucene4.core" version="4.9.0.b0003"/>\r
 <unit id="com.healthmarketscience.jackcess" version="2.1.3"/>\r
 <unit id="org.bouncycastle.bctsp-jdk14.source" version="1.38.0"/>\r
 <unit id="org.eclipse.collections.eclipse-collections-api" version="7.1.0"/>\r
 <unit id="it.unimi.dsi.fastutil.source" version="7.0.12"/>\r
 <unit id="org.hsqldb.hsqldb.source" version="2.3.1"/>\r
 <unit id="org.apache.poi.source" version="3.14.0"/>\r
+<unit id="org.apache.lucene4.sandbox.source" version="4.9.0.b0003"/>\r
 <unit id="org.apache.xmlbeans" version="2.6.0"/>\r
 <unit id="org.supercsv" version="2.4.0"/>\r
 <unit id="org.mozilla.rhino" version="1.7.7.1"/>\r
+<unit id="org.apache.pdfbox.xmpbox" version="2.0.2"/>\r
 <unit id="org.apache.commons.codec" version="1.10.0"/>\r
 <unit id="com.healthmarketscience.jackcess.source" version="2.1.3"/>\r
 <unit id="org.eclipse.collections.eclipse-collections-api.source" version="7.1.0"/>\r
 <unit id="org.bouncycastle.bcprov-jdk14.source" version="1.38.0"/>\r
 <unit id="stax.api" version="1.0.1"/>\r
 <unit id="org.apache.pdfbox.fontbox.source" version="2.0.2"/>\r
-<unit id="org.apache.lucene4.analyzers-common.source" version="4.9.0.b0003"/>\r
 <unit id="it.unimi.dsi.fastutil" version="7.0.12"/>\r
+<unit id="com.lowagie.text.source" version="2.1.7.b1"/>\r
 <unit id="net.jcip.annotations" version="1.0.0"/>\r
+<unit id="org.apache.lucene4.queryparser" version="4.9.0.b0003"/>\r
 <unit id="org.bouncycastle.bctsp-jdk14" version="1.38.0"/>\r
 <unit id="org.jfree.jchart.source" version="1.0.19"/>\r
-<unit id="org.apache.lucene4.queryparser" version="4.9.0.b0003"/>\r
+<unit id="com.lowagie.text" version="2.1.7.b1"/>\r
 <unit id="org.apache.poi.ooxml-schemas" version="3.14.0"/>\r
 <unit id="org.bouncycastle.bcprov-jdk14" version="1.38.0"/>\r
+<unit id="com.sun.jna" version="4.2.2"/>\r
 <unit id="org.apache.poi" version="3.14.0"/>\r
 <unit id="org.jdom2" version="2.0.6"/>\r
 <unit id="org.apache.commons.lang" version="2.6.0"/>\r
 <unit id="org.jfree.jchart" version="1.0.19"/>\r
+<unit id="org.apache.lucene4.sandbox" version="4.9.0.b0003"/>\r
 <unit id="org.apache.commons.codec.source" version="1.10.0"/>\r
-<unit id="com.lowagie.text.source" version="2.1.7.b1"/>\r
 <unit id="org.supercsv.source" version="2.4.0"/>\r
 <unit id="com.github.virtuald.curvesapi" version="1.3.0"/>\r
 <unit id="org.eclipse.collections.eclipse-collections" version="7.1.0"/>\r
 <unit id="org.apache.log4j" version="1.2.17"/>\r
-<unit id="com.lowagie.text" version="2.1.7.b1"/>\r
+<unit id="org.apache.lucene4.queries.source" version="4.9.0.b0003"/>\r
+<unit id="com.fasterxml.jackson.core.jackson-core.source" version="2.8.2"/>\r
+<unit id="org.apache.lucene4.core.source" version="4.9.0.b0003"/>\r
+<unit id="com.sun.jna.source" version="4.2.2"/>\r
 <unit id="org.apache.poi.ooxml" version="3.14.0"/>\r
+<unit id="com.sun.jna.platform" version="4.2.2"/>\r
+<unit id="org.apache.lucene4.analyzers-common.source" version="4.9.0.b0003"/>\r
 <unit id="org.apache.pdfbox" version="2.0.2"/>\r
 <unit id="org.bouncycastle.bcmail-jdk14" version="1.38.0"/>\r
 <unit id="org.jfree.jcommon" version="1.0.23"/>\r
+<unit id="org.apache.lucene4.analyzers-common" version="4.9.0.b0003"/>\r
 <unit id="org.apache.commons.collections" version="3.2.2"/>\r
 <unit id="org.apache.poi.ooxml.source" version="3.14.0"/>\r
+<unit id="com.fasterxml.jackson.core.jackson-core" version="2.8.2"/>\r
+<unit id="org.apache.lucene4.core" version="4.9.0.b0003"/>\r
 <unit id="gnu.trove2.source" version="2.1.0"/>\r
 <unit id="gnu.trove3" version="3.0.3"/>\r
 <unit id="org.antlr.runtime" version="3.5.2"/>\r
 <unit id="org.hsqldb.hsqldb" version="2.3.1"/>\r
 <unit id="org.apache.commons.io" version="1.4.0"/>\r
 <unit id="bouncycastle.bcmail-jdk14" version="138.0.0"/>\r
+<unit id="org.apache.lucene4.queries" version="4.9.0.b0003"/>\r
+<unit id="com.sun.jna.platform.source" version="4.2.2"/>\r
 <unit id="javax.vecmath" version="1.5.2"/>\r
 <unit id="bouncycastle.bcprov-jdk14" version="138.0.0"/>\r
 <unit id="org.ini4j.source" version="0.5.4"/>\r
-<unit id="org.apache.lucene4.core.source" version="4.9.0.b0003"/>\r
-<unit id="org.apache.lucene4.queryparser.source" version="4.9.0.b0003"/>\r
 <repository location="http://www.simantics.org/download/master/external-components"/>\r
 </location>\r
-<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
-<unit id="org.eclipse.e4.tools.spies.feature.feature.group" version="0.17.0.v20160811-1516"/>\r
-<repository location="http://www.simantics.org/update/e4/snapshots/org.eclipse.e4.tools/latest/"/>\r
-</location>\r
-<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
-<unit id="org.eclipse.nebula.incubation.feature.feature.group" version="1.0.0.201605182147"/>\r
-<unit id="org.eclipse.nebula.examples.incubation.feature.feature.group" version="1.0.0.201605182147"/>\r
-<repository location="http://www.simantics.org/update/nebula/Q22016/incubation"/>\r
-</location>\r
-<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
-<unit id="org.eclipse.nebula.widgets.nattable.core.feature.feature.group" version="1.4.0.201606011907"/>\r
-<unit id="org.eclipse.nebula.widgets.nattable.extension.e4.source.feature.feature.group" version="1.0.0.201606011907"/>\r
-<unit id="org.eclipse.nebula.widgets.nattable.extension.poi.feature.feature.group" version="1.4.0.201606011907"/>\r
-<unit id="org.eclipse.nebula.widgets.nattable.extension.glazedlists.feature.feature.group" version="1.4.0.201606011907"/>\r
-<unit id="org.eclipse.nebula.widgets.nattable.core.source.feature.feature.group" version="1.4.0.201606011907"/>\r
-<unit id="org.eclipse.nebula.widgets.nattable.extension.glazedlists.source.feature.feature.group" version="1.4.0.201606011907"/>\r
-<unit id="org.eclipse.nebula.widgets.nattable.extension.nebula.feature.feature.group" version="1.0.0.201606011907"/>\r
-<unit id="org.eclipse.nebula.widgets.nattable.extension.e4.feature.feature.group" version="1.0.0.201606011907"/>\r
-<unit id="org.eclipse.nebula.widgets.nattable.extension.nebula.source.feature.feature.group" version="1.0.0.201606011907"/>\r
-<unit id="org.eclipse.nebula.widgets.nattable.extension.poi.source.feature.feature.group" version="1.4.0.201606011907"/>\r
-<repository location="http://www.simantics.org/update/nebula/nattable/releases/1.4.0/repository/"/>\r
-</location>\r
 </locations>\r
 </target>\r
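[Editor's note: both modified target files bump their sequenceNumber attribute (127 to 128 above, 8 to 9 further below). The PDE target editor increments this counter on every save so that tooling can detect that the target platform needs to be re-resolved.]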
diff --git a/releng/org.simantics.target/simantics-sdk.target b/releng/org.simantics.target/simantics-sdk.target
deleted file mode 100644 (file)
index ce7cb62..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>\r
-<?pde version="3.6"?>\r
-\r
-<target name="Simantics SDK Trunk" sequenceNumber="9">\r
-<locations>\r
-<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
-<unit id="org.simantics.sdk.feature.group" version="1.21.0"/>\r
-<unit id="org.simantics.sdk.source.feature.group" version="1.21.0"/>\r
-<repository location="http://www.simantics.org/download/head/sdk-source"/>\r
-</location>\r
-<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
-<unit id="org.eclipse.nebula.feature.feature.group" version="1.0.0.201605182147"/>\r
-<repository location="http://www.simantics.org/update/nebula/Q22016/release/"/>\r
-</location>\r
-<location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
-<unit id="org.eclipse.nebula.incubation.feature.feature.group" version="1.0.0.201605182147"/>\r
-<repository location="http://www.simantics.org/update/nebula/Q22016/incubation"/>\r
-</location>\r
-</locations>\r
-</target>\r
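[Editor's note: with simantics-sdk.target deleted outright, the "Simantics Trunk" target updated below remains as the entry point: it moves from org.simantics.sdk.feature.group 1.21.0 at the old /download/head/sdk repository to 1.25.0 at /download/master/sdk, consistent with the sonar.projectVersion=1.25 declared in the new properties file at the end of this diff.]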
index 324e1ea3fa88717be78d8665fdc0df4af27bd226..78916402914356cf45101554df97d2fa4e3dc4cf 100644 (file)
@@ -1,9 +1,10 @@
 <?xml version="1.0" encoding="UTF-8" standalone="no"?>\r
-<?pde version="3.8"?><target name="Simantics Trunk" sequenceNumber="8">\r
+<?pde version="3.8"?>\r
+<target name="Simantics Trunk" sequenceNumber="9">\r
 <locations>\r
 <location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
-<unit id="org.simantics.sdk.feature.group" version="1.21.0"/>\r
-<repository location="http://www.simantics.org/download/head/sdk"/>\r
+<unit id="org.simantics.sdk.feature.group" version="1.25.0"/>\r
+<repository location="http://www.simantics.org/download/master/sdk"/>\r
 </location>\r
 <location includeAllPlatforms="true" includeConfigurePhase="false" includeMode="slicer" includeSource="true" type="InstallableUnit">\r
 <unit id="org.eclipse.nebula.feature.feature.group" version="1.0.0.201605182147"/>\r
diff --git a/sonar-simantics-platform-sdk.properties b/sonar-simantics-platform-sdk.properties
new file mode 100644 (file)
index 0000000..d929449
--- /dev/null
@@ -0,0 +1,23 @@
+# required metadata
+sonar.projectKey=simantics-platform-sdk
+sonar.projectName=Simantics Platform SDK
+sonar.projectVersion=1.25
+
+# path to source directories (required)
+sonar.sources=bundles
+
+# path to test source directories (optional)
+#sonar.tests=testDir1,testDir2
+
+# path to project binaries (optional), for example directory of Java bytecode
+#sonar.binaries=binDir
+
+# optional comma-separated list of paths to libraries. Only path to JAR file and path to directory of classes are supported.
+#sonar.libraries=path/to/library.jar,path/to/classes/dir
+
+# Uncomment this line to analyse a project which is not a java project.
+# The value of the property must be the key of the language.
+#sonar.language=cobol
+
+# Additional parameters
+#sonar.my.property=value
\ No newline at end of file
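[Editor's note: the new properties file registers the platform bundles for SonarQube analysis. Assuming the standalone SonarQube scanner is the intended entry point (the CI invocation is not part of this change), analysis would be launched from the repository root with something like sonar-scanner -Dproject.settings=sonar-simantics-platform-sdk.properties; because the file does not use the default name sonar-project.properties, the scanner has to be pointed at it explicitly. The server URL and credentials would be supplied separately, e.g. via sonar.host.url.]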