From: Tuukka Lehtonen Date: Wed, 9 Nov 2016 22:23:08 +0000 (+0200) Subject: Merge commit '31664b6' X-Git-Tag: v1.25.0~55 X-Git-Url: https://gerrit.simantics.org/r/gitweb?p=simantics%2Fplatform.git;a=commitdiff_plain;h=bb5a3edf299cb943999c72c69dd68fb740c8a506;hp=31664b68100388a43ce2d3c36c98ed2910c9b6a9 Merge commit '31664b6' Sync git svn branch with SVN repository r33382. refs #6803 [PRIVATE-12714] --- diff --git a/.gitignore b/.gitignore index 0d0631b56..d61db6ef2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,6 @@ /**/bin/ +/features/*/target/ +/bundles/*/target/ +/releng/**/target/ +/tests/*/target/ /**/.polyglot.build.properties diff --git a/bundles/org.apache.batik/.classpath b/bundles/org.apache.batik/.classpath index 4a8d60f3c..0f23270ef 100644 --- a/bundles/org.apache.batik/.classpath +++ b/bundles/org.apache.batik/.classpath @@ -1,5 +1,6 @@ + @@ -11,8 +12,6 @@ - - diff --git a/bundles/org.apache.batik/META-INF/MANIFEST.MF b/bundles/org.apache.batik/META-INF/MANIFEST.MF index aebf12801..22d86a06d 100644 --- a/bundles/org.apache.batik/META-INF/MANIFEST.MF +++ b/bundles/org.apache.batik/META-INF/MANIFEST.MF @@ -3,22 +3,8 @@ Bundle-ManifestVersion: 2 Bundle-Name: Batik Bundle-SymbolicName: org.apache.batik;singleton:=true Bundle-Version: 1.8.0.qualifier -Bundle-Activator: org.apache.batik.Activator -Require-Bundle: org.eclipse.ui, - org.eclipse.core.runtime Bundle-RequiredExecutionEnvironment: JavaSE-1.8 -Bundle-ActivationPolicy: lazy Export-Package: java_cup.runtime, - javax.xml, - javax.xml.datatype, - javax.xml.namespace, - javax.xml.parsers, - javax.xml.transform, - javax.xml.transform.dom, - javax.xml.transform.sax, - javax.xml.transform.stream, - javax.xml.validation, - javax.xml.xpath, org.apache.avalon.framework, org.apache.avalon.framework.activity, org.apache.avalon.framework.configuration, @@ -108,12 +94,17 @@ Export-Package: java_cup.runtime, org.apache.fop, org.apache.fop.accessibility, org.apache.fop.apps, + org.apache.fop.apps.io, + 
org.apache.fop.complexscripts.bidi, org.apache.fop.complexscripts.fonts, + org.apache.fop.complexscripts.util, + org.apache.fop.events, org.apache.fop.fo, org.apache.fop.fonts, org.apache.fop.fonts.apps, org.apache.fop.fonts.autodetect, org.apache.fop.fonts.base14, + org.apache.fop.fonts.cff, org.apache.fop.fonts.substitute, org.apache.fop.fonts.truetype, org.apache.fop.fonts.type1, @@ -121,13 +112,18 @@ Export-Package: java_cup.runtime, org.apache.fop.pdf, org.apache.fop.pdf.xref, org.apache.fop.render, + org.apache.fop.render.gradient, org.apache.fop.render.intermediate, org.apache.fop.render.pdf, org.apache.fop.render.pdf.extensions, org.apache.fop.render.ps, org.apache.fop.render.ps.extensions, org.apache.fop.render.ps.fonts, + org.apache.fop.render.ps.svg, org.apache.fop.svg, + org.apache.fop.svg.font, + org.apache.fop.svg.text, + org.apache.fop.traits, org.apache.fop.util, org.apache.html.dom, org.apache.regexp, @@ -195,7 +191,6 @@ Export-Package: java_cup.runtime, org.apache.xml.serialize, org.apache.xml.utils, org.apache.xml.utils.res, - org.apache.xmlcommons, org.apache.xmlgraphics.fonts, org.apache.xmlgraphics.image, org.apache.xmlgraphics.image.codec.png, @@ -260,21 +255,11 @@ Export-Package: java_cup.runtime, org.w3c.css.sac, org.w3c.css.sac.helpers, org.w3c.dom, - org.w3c.dom.bootstrap, - org.w3c.dom.css, org.w3c.dom.events, org.w3c.dom.html, org.w3c.dom.ls, - org.w3c.dom.ranges, org.w3c.dom.smil, - org.w3c.dom.stylesheets, - org.w3c.dom.svg, - org.w3c.dom.traversal, - org.w3c.dom.views, - org.w3c.dom.xpath, - org.xml.sax, - org.xml.sax.ext, - org.xml.sax.helpers + org.w3c.dom.svg Bundle-ClassPath: lib/batik-awt-util-1.8.jar, lib/batik-dom-1.8.jar, lib/batik-ext-1.8.jar, @@ -298,6 +283,6 @@ Bundle-ClassPath: lib/batik-awt-util-1.8.jar, lib/js.jar, lib/xalan-2.7.0.jar, lib/xerces_2_5_0.jar, - lib/xml-apis-1.3.04.jar, - lib/xml-apis-ext-1.3.04.jar, - lib/fop-transcoder-allinone-svn-trunk.jar + lib/fop-transcoder-allinone-svn-trunk.jar, + 
lib/xml-apis-ext-1.3.04.jar +Require-Bundle: javax.xml;bundle-version="1.3.4" diff --git a/bundles/org.apache.batik/build.properties b/bundles/org.apache.batik/build.properties index 91aa3c77a..d435895c2 100644 --- a/bundles/org.apache.batik/build.properties +++ b/bundles/org.apache.batik/build.properties @@ -25,6 +25,5 @@ bin.includes = META-INF/,\ lib/js.jar,\ lib/xalan-2.7.0.jar,\ lib/xerces_2_5_0.jar,\ - lib/xml-apis-1.3.04.jar,\ - lib/xml-apis-ext-1.3.04.jar,\ - lib/fop-transcoder-allinone-svn-trunk.jar + lib/fop-transcoder-allinone-svn-trunk.jar,\ + lib/xml-apis-ext-1.3.04.jar diff --git a/bundles/org.apache.batik/src/.keep b/bundles/org.apache.batik/src/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/bundles/org.apache.batik/src/org/apache/batik/Activator.java b/bundles/org.apache.batik/src/org/apache/batik/Activator.java deleted file mode 100644 index 0023b84fa..000000000 --- a/bundles/org.apache.batik/src/org/apache/batik/Activator.java +++ /dev/null @@ -1,50 +0,0 @@ -package org.apache.batik; - -import org.eclipse.ui.plugin.AbstractUIPlugin; -import org.osgi.framework.BundleContext; - -/** - * The activator class controls the plug-in life cycle - */ -public class Activator extends AbstractUIPlugin { - - // The plug-in ID - public static final String PLUGIN_ID = "org.apache.batik"; //$NON-NLS-1$ - - // The shared instance - private static Activator plugin; - - /** - * The constructor - */ - public Activator() { - } - - /* - * (non-Javadoc) - * @see org.eclipse.ui.plugin.AbstractUIPlugin#start(org.osgi.framework.BundleContext) - */ - public void start(BundleContext context) throws Exception { - super.start(context); - plugin = this; - } - - /* - * (non-Javadoc) - * @see org.eclipse.ui.plugin.AbstractUIPlugin#stop(org.osgi.framework.BundleContext) - */ - public void stop(BundleContext context) throws Exception { - plugin = null; - super.stop(context); - } - - /** - * Returns the shared instance - * - * @return the shared instance - */ - 
public static Activator getDefault() { - return plugin; - } - -} diff --git a/bundles/org.simantics.acorn/.classpath b/bundles/org.simantics.acorn/.classpath new file mode 100644 index 000000000..22f30643c --- /dev/null +++ b/bundles/org.simantics.acorn/.classpath @@ -0,0 +1,7 @@ + + + + + + + diff --git a/bundles/org.simantics.acorn/.project b/bundles/org.simantics.acorn/.project new file mode 100644 index 000000000..9726c0b94 --- /dev/null +++ b/bundles/org.simantics.acorn/.project @@ -0,0 +1,33 @@ + + + org.simantics.acorn + + + + + + org.eclipse.jdt.core.javabuilder + + + + + org.eclipse.pde.ManifestBuilder + + + + + org.eclipse.pde.SchemaBuilder + + + + + org.eclipse.pde.ds.core.builder + + + + + + org.eclipse.pde.PluginNature + org.eclipse.jdt.core.javanature + + diff --git a/bundles/org.simantics.acorn/.settings/org.eclipse.jdt.core.prefs b/bundles/org.simantics.acorn/.settings/org.eclipse.jdt.core.prefs new file mode 100644 index 000000000..0c68a61dc --- /dev/null +++ b/bundles/org.simantics.acorn/.settings/org.eclipse.jdt.core.prefs @@ -0,0 +1,7 @@ +eclipse.preferences.version=1 +org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled +org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8 +org.eclipse.jdt.core.compiler.compliance=1.8 +org.eclipse.jdt.core.compiler.problem.assertIdentifier=error +org.eclipse.jdt.core.compiler.problem.enumIdentifier=error +org.eclipse.jdt.core.compiler.source=1.8 diff --git a/bundles/org.simantics.acorn/.svn/wc.db b/bundles/org.simantics.acorn/.svn/wc.db new file mode 100644 index 000000000..9defa9058 Binary files /dev/null and b/bundles/org.simantics.acorn/.svn/wc.db differ diff --git a/bundles/org.simantics.acorn/META-INF/MANIFEST.MF b/bundles/org.simantics.acorn/META-INF/MANIFEST.MF new file mode 100644 index 000000000..9152acafd --- /dev/null +++ b/bundles/org.simantics.acorn/META-INF/MANIFEST.MF @@ -0,0 +1,18 @@ +Manifest-Version: 1.0 +Bundle-ManifestVersion: 2 +Bundle-Name: Acorn Database for Simantics 
+Bundle-SymbolicName: org.simantics.acorn +Bundle-Version: 1.1.2.qualifier +Bundle-Vendor: Semantum Oy +Require-Bundle: gnu.trove3;bundle-version="3.0.0", + gnu.trove2;bundle-version="2.0.4", + org.simantics.db.impl;bundle-version="0.8.0", + org.simantics.db.server;bundle-version="1.0.0", + org.simantics.compressions;bundle-version="1.0.0", + org.simantics.backup, + org.eclipse.core.runtime;bundle-version="3.11.1", + org.simantics.db.procore +Bundle-RequiredExecutionEnvironment: JavaSE-1.8 +Bundle-ActivationPolicy: lazy +Bundle-Activator: org.simantics.acorn.internal.Activator +Service-Component: OSGI-INF/component.xml, OSGI-INF/org.simantics.acorn.AcornDriver.xml diff --git a/bundles/org.simantics.acorn/OSGI-INF/component.xml b/bundles/org.simantics.acorn/OSGI-INF/component.xml new file mode 100644 index 000000000..5b88ac3c0 --- /dev/null +++ b/bundles/org.simantics.acorn/OSGI-INF/component.xml @@ -0,0 +1,7 @@ + + + + + + + diff --git a/bundles/org.simantics.acorn/OSGI-INF/org.simantics.acorn.AcornDriver.xml b/bundles/org.simantics.acorn/OSGI-INF/org.simantics.acorn.AcornDriver.xml new file mode 100644 index 000000000..f1a97d175 --- /dev/null +++ b/bundles/org.simantics.acorn/OSGI-INF/org.simantics.acorn.AcornDriver.xml @@ -0,0 +1,7 @@ + + + + + + + diff --git a/bundles/org.simantics.acorn/build.properties b/bundles/org.simantics.acorn/build.properties new file mode 100644 index 000000000..40374cc74 --- /dev/null +++ b/bundles/org.simantics.acorn/build.properties @@ -0,0 +1,17 @@ +############################################################################### +# Copyright (c) 2007, 2010 Association for Decentralized Information Management +# in Industry THTH ry. +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# Contributors: +# VTT Technical Research Centre of Finland - initial API and implementation +############################################################################### +output.. = bin/ +bin.includes = META-INF/,\ + .,\ + log4j.properties,\ + OSGI-INF/ +source.. = src/ diff --git a/bundles/org.simantics.acorn/log4j.properties b/bundles/org.simantics.acorn/log4j.properties new file mode 100644 index 000000000..6fecb6d25 --- /dev/null +++ b/bundles/org.simantics.acorn/log4j.properties @@ -0,0 +1,63 @@ +############################################################################### +# Copyright (c) 2007, 2010 Association for Decentralized Information Management +# in Industry THTH ry. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# Contributors: +# VTT Technical Research Centre of Finland - initial API and implementation +############################################################################### +# For the general syntax of property based configuration files see the +# documentation of org.apache.log4j.PropertyConfigurator. + +# The root category uses the appender called rolling. If no priority is +# specified, the root category assumes the default priority for root +# which is DEBUG in log4j. The root category is the only category that +# has a default priority. All other categories need not be assigned a +# priority in which case they inherit their priority from the +# hierarchy. 
+ +# This will provide console output on log4j configuration loading +#log4j.debug=true + +log4j.rootCategory=warn, stdout +#log4j.rootCategory=warn + +# BEGIN APPENDER: CONSOLE APPENDER (stdout) +# first: type of appender (fully qualified class name) +log4j.appender.stdout=org.apache.log4j.ConsoleAppender + +# second: Any configuration information needed for that appender. +# Many appenders require a layout. +log4j.appender.stdout.layout=org.apache.log4j.TTCCLayout +# log4j.appender.stdout.layout=org.apache.log4j.SimpleLayout + +# Possible information overload? +# log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +# additionally, some layouts can take additional information -- +# like the ConversionPattern for the PatternLayout. +# log4j.appender.stdout.layout.ConversionPattern=%d %-5p %-17c{2} (%30F:%L) %3x - %m%n +# END APPENDER: CONSOLE APPENDER (stdout) + +# BEGIN APPENDER: ROLLING FILE APPENDER (rolling) +#log4j.appender.rolling=com.tools.logging.PluginFileAppender +#log4j.appender.rolling=org.apache.log4j.FileAppender +log4j.appender.rolling=org.apache.log4j.RollingFileAppender +log4j.appender.rolling.File=procore.log +log4j.appender.rolling.append=true +log4j.appender.rolling.MaxFileSize=8192KB +# Keep one backup file +log4j.appender.rolling.MaxBackupIndex=1 +log4j.appender.rolling.layout=org.apache.log4j.PatternLayout +#log4j.appender.rolling.layout.ConversionPattern=%p %t %c - %m%n +log4j.appender.rolling.layout.ConversionPattern=%-6r [%15.15t] %-5p %30.30c - %m%n +# END APPENDER: ROLLING FILE APPENDER (rolling) + +# BEGIN APPENDER: PLUG-IN LOG APPENDER (plugin) +log4j.appender.plugin=com.tools.logging.PluginLogAppender +log4j.appender.plugin.layout=org.apache.log4j.PatternLayout +#log4j.appender.plugin.layout.ConversionPattern=%p %t %c - %m%n +log4j.appender.plugin.layout.ConversionPattern=%-6r [%15.15t] %-5p %30.30c - %m%n +# END APPENDER: PLUG-IN LOG APPENDER (plugin) diff --git 
a/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDatabaseManager.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDatabaseManager.java new file mode 100644 index 000000000..db2c16763 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDatabaseManager.java @@ -0,0 +1,40 @@ +package org.simantics.acorn; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; + +import org.simantics.acorn.internal.AcornDatabase; +import org.simantics.db.Database; +import org.simantics.db.server.ProCoreException; + +/** + * @author Tuukka Lehtonen + */ +public class AcornDatabaseManager { + + private static Map dbs = new HashMap(); + + public static synchronized Database getDatabase(Path folder) throws ProCoreException { + Path canonical; + try { + if (!Files.exists(folder)) + Files.createDirectories(folder); + canonical = folder.toRealPath(); + } catch (IOException e) { + throw new ProCoreException("Could not get canonical path.", e); + } + + String canonicalPath = canonical.toString(); + Database db = dbs.get(canonicalPath); + if (null != db) + return db; + + db = new AcornDatabase(canonical); + dbs.put(canonicalPath, db); + return db; + } + +} \ No newline at end of file diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDriver.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDriver.java new file mode 100644 index 000000000..536c35c74 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDriver.java @@ -0,0 +1,131 @@ +package org.simantics.acorn; + +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +import org.simantics.db.Database; +import org.simantics.db.DatabaseUserAgent; +import org.simantics.db.Driver; +import org.simantics.db.ServerI; +import org.simantics.db.ServerReference; +import 
org.simantics.db.Session; +import org.simantics.db.SessionReference; +import org.simantics.db.exception.DatabaseException; + +public class AcornDriver implements Driver { + + public static final String AcornDriverName = "acorn"; + + private Map servers = new HashMap<>(); + private Map managements = new HashMap<>(); + + @Override + public String getName() { + return AcornDriverName; + } + + @Override + public DatabaseUserAgent getDatabaseUserAgent(String address) throws DatabaseException { + return AcornDatabaseManager.getDatabase(Paths.get(address)).getUserAgent(); + } + + @Override + public void setDatabaseUserAgent(String address, DatabaseUserAgent dbUserAgent) throws DatabaseException { + AcornDatabaseManager.getDatabase(Paths.get(address)).setUserAgent(dbUserAgent); + } + + @Override + public Session getSession(String address, Properties properties) throws DatabaseException { + Path dbFolder = Paths.get(address); + Session session = AcornSessionManagerImpl.getInstance().createSession(new SessionReference() { + + @Override + public ServerReference getServerReference() { + return new ServerReference() { + + @Override + public Path getDBFolder() { + return dbFolder; + } + }; + } + + @Override + public long getSessionId() { + return 0L; + } + }, null); + if (!properties.containsKey("clientId")) + properties.put("clientId", dbFolder.toAbsolutePath().toString()); + session.registerService(Properties.class, properties); + Session s = session.peekService(Session.class); + if (null == s) + session.registerService(Session.class, session); + return session; + } + + @Override + public ServerI getServer(String address, Properties properties) throws DatabaseException { + ServerI server = servers.get(address); + if (server == null) { + server = new AcornServerI(AcornDatabaseManager.getDatabase(Paths.get(address)), address); + servers.put(address, server); + } + return server; + } + + @Override + public Management getManagement(String address, Properties properties) throws 
DatabaseException { + Management mgmt = managements.get(address); + if (mgmt == null) { + mgmt = new AcornManagement(AcornDatabaseManager.getDatabase(Paths.get(address)), properties); + managements.put(address, mgmt); + } + return mgmt; + } + + private static class AcornServerI implements ServerI { + + private Database database; + private String address; + + public AcornServerI(Database db, String address) { + this.database = db; + this.address = address; + } + + @Override + public void stop() throws DatabaseException { + database.tryToStop(); + } + + @Override + public void start() throws DatabaseException { + database.start(); + } + + @Override + public boolean isActive() throws DatabaseException { + return database.isRunning(); + } + + @Override + public String getAddress() throws DatabaseException { + return address; + } + + @Override + public String executeAndDisconnect(String command) throws DatabaseException { + return ""; + } + + @Override + public String execute(String command) throws DatabaseException { + return ""; + } + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornManagement.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornManagement.java new file mode 100644 index 000000000..c21491210 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornManagement.java @@ -0,0 +1,51 @@ +package org.simantics.acorn; + +import java.nio.file.Path; +import java.util.Properties; + +import org.simantics.db.Database; +import org.simantics.db.Driver.Management; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.server.ProCoreException; + +public class AcornManagement implements Management { + + private final Database db; + private final Properties properties; + + AcornManagement(Database db, Properties properties) throws ProCoreException { + this.db = db; + this.properties = properties; + } + + @Override + public boolean exist() throws DatabaseException { + return 
db.isFolderOk(); + } + + @Override + public void delete() throws DatabaseException { + db.deleteFiles(); + if (exist()) + throw new DatabaseException("Failed to delete database. folder=" + db.getFolder()); + } + + @Override + public void create() throws DatabaseException { + db.initFolder(properties); + if (!exist()) + throw new DatabaseException("Failed to create Acorn database. folder=" + db.getFolder()); + } + + @Override + public void purge() throws DatabaseException { + db.purgeDatabase(); + } + + @Override + public void shutdown() throws DatabaseException { + db.tryToStop(); + db.disconnect(); + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornSessionManagerImpl.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornSessionManagerImpl.java new file mode 100644 index 000000000..f67a4aa7c --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornSessionManagerImpl.java @@ -0,0 +1,132 @@ +package org.simantics.acorn; + +import java.nio.file.Path; +import java.util.concurrent.ConcurrentHashMap; + +import org.simantics.db.Database; +import org.simantics.db.Session; +import org.simantics.db.SessionErrorHandler; +import org.simantics.db.SessionManager; +import org.simantics.db.SessionReference; +import org.simantics.db.authentication.UserAuthenticationAgent; +import org.simantics.db.common.utils.Logger; +import org.simantics.db.event.SessionEvent; +import org.simantics.db.event.SessionListener; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.exception.RuntimeDatabaseException; +import org.simantics.db.service.LifecycleSupport; +import org.simantics.utils.datastructures.ListenerList; + +import fi.vtt.simantics.procore.internal.SessionImplDb; +import fi.vtt.simantics.procore.internal.SessionImplSocket; + +public class AcornSessionManagerImpl implements SessionManager { + + private static AcornSessionManagerImpl INSTANCE; + + private ConcurrentHashMap sessionMap = new 
ConcurrentHashMap<>(); + private ListenerList sessionListeners = new ListenerList<>(SessionListener.class); + private SessionErrorHandler errorHandler; + + private Database database; + + private AcornSessionManagerImpl() {} + + void finish() { + sessionMap = null; + sessionListeners = null; + } + + @Override + public void addSessionListener(SessionListener listener) { + sessionListeners.add(listener); + } + + @Override + public Session createSession(SessionReference sessionReference, UserAuthenticationAgent authAgent) + throws DatabaseException { + SessionImplDb sessionImpl = new SessionImplDb(this, authAgent); + boolean ok = false; + try { + Path dbFolder = sessionReference.getServerReference().getDBFolder(); + database = AcornDatabaseManager.getDatabase(dbFolder); + Database.Session dbSession = database.newSession(sessionImpl); + sessionImpl.connect(sessionReference, dbSession); + sessionMap.put(sessionImpl, dbSession); + fireSessionOpened(sessionImpl); + ok = true; + } catch (Throwable e) { + Logger.defaultLogError("Connection failed. 
See exception for details.", e); + try { + fireSessionClosed(sessionImpl, e); + sessionMap.remove(sessionImpl); + sessionImpl = null; + } catch (Throwable t) { + } + throw new DatabaseException(e); + } finally { + if (!ok && null != sessionImpl) + sessionImpl.getService(LifecycleSupport.class).close(); + } + return sessionImpl; + } + + @Override + public void removeSessionListener(SessionListener listener) { + sessionListeners.remove(listener); + } + + private void fireSessionOpened(SessionImplSocket session) { + SessionEvent se = new SessionEvent(session, null); + for (SessionListener listener : sessionListeners.getListeners()) { + listener.sessionOpened(se); + } + } + + private void fireSessionClosed(SessionImplSocket session, Throwable cause) { + SessionEvent se = new SessionEvent(session, cause); + for (SessionListener listener : sessionListeners.getListeners()) { + listener.sessionClosed(se); + } + } + + @Override + public void shutdown(Session s, Throwable cause) { + SessionImplSocket sis = (SessionImplSocket) s; + if (null == sis) + return; + try { + fireSessionClosed(sis, cause); + } finally { + sessionMap.remove(s); + } + } + + @Override + public SessionErrorHandler getErrorHandler() { + return errorHandler; + } + + @Override + public void setErrorHandler(SessionErrorHandler errorHandler) { + this.errorHandler = errorHandler; + } + + public synchronized static AcornSessionManagerImpl getInstance() { + if (INSTANCE == null) + INSTANCE = new AcornSessionManagerImpl(); + return INSTANCE; + } + + @Override + public Database getDatabase() { + return database; + } + + public GraphClientImpl2 getClient() { + if (sessionMap.values().size() > 1) + throw new RuntimeDatabaseException("Currently only one GraphClientImpl2 per session is supported!"); + org.simantics.db.Database.Session client = sessionMap.values().iterator().next(); + return (GraphClientImpl2) client; + } +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java 
b/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java new file mode 100644 index 000000000..51db52efc --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java @@ -0,0 +1,582 @@ +package org.simantics.acorn; + +import java.io.IOException; +import java.math.BigInteger; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.simantics.acorn.cluster.ClusterImpl; +import org.simantics.acorn.exception.AcornAccessVerificationException; +import org.simantics.acorn.exception.IllegalAcornStateException; +import org.simantics.acorn.exception.InvalidHeadStateException; +import org.simantics.acorn.internal.ClusterSupport2; +import org.simantics.acorn.lru.ChangeSetInfo; +import org.simantics.acorn.lru.ClusterInfo; +import org.simantics.acorn.lru.ClusterLRU; +import org.simantics.acorn.lru.ClusterStreamChunk; +import org.simantics.acorn.lru.FileInfo; +import org.simantics.acorn.lru.LRU; +import org.simantics.db.ClusterCreator; +import org.simantics.db.Database.Session.ClusterIds; +import org.simantics.db.Database.Session.ResourceSegment; +import org.simantics.db.ServiceLocator; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.impl.ClusterBase; +import org.simantics.db.impl.ClusterI; +import org.simantics.db.impl.ClusterSupport; +import org.simantics.db.procore.cluster.ClusterTraits; +import org.simantics.db.service.ClusterSetsSupport; +import org.simantics.db.service.ClusterUID; +import org.simantics.utils.threads.logger.ITask; +import org.simantics.utils.threads.logger.ThreadLogger; + +public class ClusterManager { + + private ArrayList currentChanges = new ArrayList(); + + public final Path dbFolder; + public Path lastSessionDirectory; + public Path workingDirectory; + + public LRU 
streamLRU; + public LRU csLRU; + public ClusterLRU clusterLRU; + public LRU fileLRU; + + public MainState mainState; + public HeadState state; + + private long lastSnapshot = System.nanoTime(); + + final public ClusterSupport2 support = new ClusterSupport2(this); + + /* + * Public interface + * + */ + + public ClusterManager(Path dbFolder) { + this.dbFolder = dbFolder; + } + + public ArrayList getChanges(long changeSetId) throws AcornAccessVerificationException, IllegalAcornStateException { + ChangeSetInfo info = csLRU.getWithoutMutex(changeSetId); + info.acquireMutex(); + try { + info.makeResident(); + return info.getCSSIds(); + } finally { + info.releaseMutex(); + } + } + + public ClusterBase getClusterByClusterKey(int clusterKey) throws DatabaseException, AcornAccessVerificationException, IllegalAcornStateException { + return clusterLRU.getClusterByClusterKey(clusterKey); + } + + public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) throws DatabaseException, AcornAccessVerificationException, IllegalAcornStateException { + return clusterLRU.getClusterByClusterUIDOrMake(clusterUID); + } + + public ClusterImpl getClusterByClusterUIDOrMakeProxy(ClusterUID clusterUID) throws DatabaseException, AcornAccessVerificationException, IllegalAcornStateException { + return clusterLRU.getClusterByClusterUIDOrMakeProxy(clusterUID); + } + + public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) throws AcornAccessVerificationException { + return clusterLRU.getClusterKeyByClusterUIDOrMake(clusterUID); + } + + public int getClusterKeyByClusterUIDOrMakeWithoutMutex(ClusterUID clusterUID) throws IllegalAcornStateException, AcornAccessVerificationException { + return clusterLRU.getClusterKeyByClusterUIDOrMakeWithoutMutex(clusterUID); + } + + public int getClusterKeyByUID(long id1, long id2) throws DatabaseException, IllegalAcornStateException { + return clusterLRU.getClusterKeyByUIDWithoutMutex(id1, id2); + } + + public T getClusterProxyByResourceKey(int 
resourceKey) throws DatabaseException, AcornAccessVerificationException, IllegalAcornStateException { + return clusterLRU.getClusterProxyByResourceKey(resourceKey); + } + + public ClusterUID getClusterUIDByResourceKey(int resourceKey) throws DatabaseException, AcornAccessVerificationException { + return clusterLRU.getClusterUIDByResourceKey(resourceKey); + } + + public ClusterUID getClusterUIDByResourceKeyWithoutMutex(int resourceKey) throws DatabaseException, IllegalAcornStateException, AcornAccessVerificationException { + return clusterLRU.getClusterUIDByResourceKeyWithoutMutex(resourceKey); + } + + /* + * Private implementation + * + */ + + private static long countFiles(Path directory) throws IOException { + try (DirectoryStream ds = Files.newDirectoryStream(directory)) { + int count = 0; + for (@SuppressWarnings("unused") Path p : ds) + ++count; + return count; + } + } + + // Add check to make sure if it safe to make snapshot (used with cancel which is not yet supported and may cause corrupted head.state writing) + private AtomicBoolean safeToMakeSnapshot = new AtomicBoolean(true); + private IllegalAcornStateException cause; + + public synchronized boolean makeSnapshot(ServiceLocator locator, boolean fullSave) throws IllegalAcornStateException { + try { + if (!safeToMakeSnapshot.get()) + throw cause; + // Maximum autosave frequency is per 60s + if(!fullSave && System.nanoTime() - lastSnapshot < 10*1000000000L) { + // System.err.println("lastSnapshot too early"); + return false; + } + + // Cluster files are always there + // Nothing has been written => no need to do anything + long amountOfFiles = countFiles(workingDirectory); + if(!fullSave && amountOfFiles < 3) { + // System.err.println("amountOfFiles < 3"); + return false; + } + + System.err.println("makeSnapshot"); + + // Schedule writing of all data to disk + refreshHeadState(); + + // Wait for all files to be written + clusterLRU.shutdown(); + fileLRU.shutdown(); + streamLRU.shutdown(); + 
csLRU.shutdown(); + + // Lets check if it is still safe to make a snapshot + if (!safeToMakeSnapshot.get()) + throw cause; + + persistHeadState(); + + if (fullSave) + mainState.save(dbFolder); + + ClusterSetsSupport cssi = locator.getService(ClusterSetsSupport.class); + cssi.save(); + + amountOfFiles = countFiles(workingDirectory); + + System.err.println(" -finished: amount of files is " + amountOfFiles); + + workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir)); + if (!Files.exists(workingDirectory)) { + Files.createDirectories(workingDirectory); + } + + cssi.updateWriteDirectory(workingDirectory); + + clusterLRU.setWriteDir(workingDirectory); + fileLRU.setWriteDir(workingDirectory); + streamLRU.setWriteDir(workingDirectory); + csLRU.setWriteDir(workingDirectory); + + clusterLRU.resume(); + fileLRU.resume(); + streamLRU.resume(); + csLRU.resume(); + + lastSnapshot = System.nanoTime(); + + return true; + } catch (IllegalAcornStateException e) { + notSafeToMakeSnapshot(e); + throw e; + } catch (IOException e) { + IllegalAcornStateException e1 = new IllegalAcornStateException(e); + notSafeToMakeSnapshot(e1); + throw e1; + } + } + + private void refreshHeadState() throws IOException, IllegalAcornStateException { + state.clusters.clear(); + state.files.clear(); + state.stream.clear(); + state.cs.clear(); + + clusterLRU.persist(state.clusters); + fileLRU.persist(state.files); + streamLRU.persist(state.stream); + csLRU.persist(state.cs); + } + + private void persistHeadState() throws IOException { + // Sync current working directory + Files.walk(workingDirectory, 1).filter(Files::isRegularFile).forEach(FileIO::uncheckedSyncPath); + state.save(workingDirectory); + mainState.headDir++; + } + + +// public void save() throws IOException { +// +// refreshHeadState(); +// +// clusterLRU.shutdown(); +// fileLRU.shutdown(); +// streamLRU.shutdown(); +// csLRU.shutdown(); +// +// persistHeadState(); +// +// mainState.save(getBaseDirectory()); + +// try { +// 
    /** Acquires the mutexes of all four LRU caches, in a fixed order. */
    private void acquireAll() throws IllegalAcornStateException {
        clusterLRU.acquireMutex();
        fileLRU.acquireMutex();
        streamLRU.acquireMutex();
        csLRU.acquireMutex();
    }

    /** Releases the LRU mutexes in reverse acquisition order. */
    private void releaseAll() {
        csLRU.releaseMutex();
        streamLRU.releaseMutex();
        fileLRU.releaseMutex();
        clusterLRU.releaseMutex();
    }

    // Set to true by MainState.load's rollback callback when corrupted head.state
    // generations had to be discarded during startup.
    private AtomicBoolean rollback = new AtomicBoolean(false);

    boolean rolledback() {
        return rollback.get();
    }

    /**
     * Restores the database state from disk: loads MainState and the most recent
     * HeadState, then repopulates the four LRU caches from the head-state string
     * lists (format: components joined with '#', cluster UID halves with '.').
     *
     * @throws IOException on unrecoverable corruption or I/O failure
     */
    public void load() throws IOException {

        // Main state; the callback flags that corrupted generations were rolled back.
        mainState = MainState.load(dbFolder, t -> rollback.set(true));

        lastSessionDirectory = dbFolder.resolve(Integer.toString(mainState.headDir - 1));

        // Head State
        try {
            state = HeadState.load(lastSessionDirectory);
        } catch (InvalidHeadStateException e) {
            // For backwards compatibility only!
            Throwable cause = e.getCause();
            // NOTE(review): 'cause instanceof Throwable' is just a null check in
            // disguise (always true for non-null) — 'cause != null' would be clearer.
            if (cause instanceof Throwable) {
                try {
                    // Fall back to the legacy javacore head-state format and copy it
                    // field-by-field into the current HeadState shape.
                    org.simantics.db.javacore.HeadState oldState = org.simantics.db.javacore.HeadState.load(lastSessionDirectory);

                    HeadState newState = new HeadState();
                    newState.clusters = oldState.clusters;
                    newState.cs = oldState.cs;
                    newState.files = oldState.files;
                    newState.stream = oldState.stream;
                    newState.headChangeSetId = oldState.headChangeSetId;
                    newState.reservedIds = oldState.reservedIds;
                    newState.transactionId = oldState.transactionId;
                    state = newState;
                } catch (InvalidHeadStateException e1) {
                    throw new IOException("Could not load HeadState due to corruption", e1);
                }
            } else {
                // This should never happen as MainState.load() checks the integrity
                // of head.state files and rolls back in cases of corruption until a
                // consistent state is found (could be case 0 - initial db state).
                // IF this does happen something is completely wrong.
                throw new IOException("Could not load HeadState due to corruption", e);
            }
        }
        try {
            workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
            Files.createDirectories(workingDirectory);

            // NOTE(review): LRU type arguments were lost in this chunk's markup;
            // diamonds rely on the field declarations — confirm against the originals.
            csLRU = new LRU<>(this, "Change Set", workingDirectory);
            streamLRU = new LRU<>(this, "Cluster Stream", workingDirectory);
            clusterLRU = new ClusterLRU(this, "Cluster", workingDirectory);
            fileLRU = new LRU<>(this, "External Value", workingDirectory);

            acquireAll();

            // Clusters: key format "<first>.<second>#<dir>#<offset>#<length>",
            // UID halves are hex.
            for (String clusterKey : state.clusters) {
                String[] parts1 = clusterKey.split("#");
                String[] parts = parts1[0].split("\\.");
                long first = new BigInteger(parts[0], 16).longValue();
                long second = new BigInteger(parts[1], 16).longValue();
                ClusterUID uuid = ClusterUID.make(first, second);
                Path readDir = dbFolder.resolve(parts1[1]);
                int offset = Integer.parseInt(parts1[2]);
                int length = Integer.parseInt(parts1[3]);
                clusterLRU.map(new ClusterInfo(this, clusterLRU, readDir, uuid, offset, length));
            }
            // Files: key format "<id>#<dir>#<offset>#<length>"
            for (String fileKey : state.files) {
                String[] parts = fileKey.split("#");
                Path readDir = dbFolder.resolve(parts[1]);
                int offset = Integer.parseInt(parts[2]);
                int length = Integer.parseInt(parts[3]);
                FileInfo info = new FileInfo(fileLRU, readDir, parts[0], offset, length);
                fileLRU.map(info);
            }
            // Update chunks
            for (String fileKey : state.stream) {
                String[] parts = fileKey.split("#");
                Path readDir = dbFolder.resolve(parts[1]);
                int offset = Integer.parseInt(parts[2]);
                int length = Integer.parseInt(parts[3]);
                ClusterStreamChunk info = new ClusterStreamChunk(this,
                        streamLRU, readDir, parts[0], offset, length);
                streamLRU.map(info);
            }
            // Change sets: first component is the decimal revision id
            for (String fileKey : state.cs) {
                String[] parts = fileKey.split("#");
                Path readDir = dbFolder.resolve(parts[1]);
                Long revisionId = Long.parseLong(parts[0]);
                int offset = Integer.parseInt(parts[2]);
                int length = Integer.parseInt(parts[3]);
                ChangeSetInfo info = new ChangeSetInfo(csLRU, readDir, revisionId, offset, length);
                csLRU.map(info);
            }

            releaseAll();
        } catch (IllegalAcornStateException | AcornAccessVerificationException e) {
            // ROLLBACK ONE DIR UNTIL WE ARE FINE!
            throw new IOException(e);
        }
    }

    /**
     * Clones the cluster identified by {@code uid} via {@code creator} after making
     * sure all pending updates for it have been applied.
     * NOTE(review): generic signature reconstructed — markup ate the type parameter.
     */
    public <T> T clone(ClusterUID uid, ClusterCreator<T> creator) throws DatabaseException, AcornAccessVerificationException, IllegalAcornStateException, IOException {

        clusterLRU.ensureUpdates(uid);

        ClusterInfo info = clusterLRU.getWithoutMutex(uid);
        return info.clone(uid, creator);
    }

    // private int loadCounter = 0;

    /** Starts a ThreadLogger task keyed by {@code msg}; paired with {@link #endLog}. */
    public static void startLog(String msg) {
        tasks.put(msg, ThreadLogger.getInstance().begin(msg));
    }

    public static void endLog(String msg) {
        ITask task = tasks.get(msg);
        if (task != null)
            task.finish();
    }

    // NOTE(review): generic parameters reconstructed. Shared mutable static map with
    // no synchronization — confirm single-threaded use or switch to ConcurrentHashMap.
    static Map<String, ITask> tasks = new HashMap<>();

    public void update(ClusterUID uid, ClusterImpl clu) throws AcornAccessVerificationException, IllegalAcornStateException {
        ClusterInfo info = clusterLRU.getWithoutMutex(uid);
        info.acquireMutex();
        try {
            info.update(clu);
        } finally {
            info.releaseMutex();
        }
    }

    // NOTE(review): hard-coded stub — always returns 1 regardless of clusterUID.
    public long getClusterIdOrCreate(ClusterUID clusterUID) {
        return 1;
    }

    public int getResourceKey(ClusterUID uid, int index) throws AcornAccessVerificationException {
        return clusterLRU.getResourceKey(uid, index);
    }

    // NOTE(review): method name has a typo ("Witout") but is part of the public
    // interface — do not rename without updating callers.
    public int getResourceKeyWitoutMutex(ClusterUID uid, int index) throws IllegalAcornStateException {
        return clusterLRU.getResourceKeyWithoutMutex(uid, index);
    }

    /**
     * Snapshots the ids of all currently-mapped clusters.
     * NOTE(review): firsts[] is filled with 0 rather than info.getKey().first —
     * looks intentional elsewhere in this codebase but confirm.
     */
    public ClusterIds getClusterIds() throws IllegalAcornStateException {
        clusterLRU.acquireMutex();

        try {
            Collection<ClusterInfo> infos = clusterLRU.values();
            final int status = infos.size();
            final long[] firsts = new long[status];
            final long[] seconds = new long[status];

            int index = 0;
            for (ClusterInfo info : infos) {
                firsts[index] = 0;
                seconds[index] = info.getKey().second;
                index++;
            }

            return new ClusterIds() {

                @Override
                public int getStatus() {
                    return status;
                }

                @Override
                public long[] getFirst() {
                    return firsts;
                }

                @Override
                public long[] getSecond() {
                    return seconds;
                }

            };

        } catch (Throwable t) {
            throw new IllegalAcornStateException(t);
        } finally {
            clusterLRU.releaseMutex();
        }
    }
    /** Records a cluster-change-set id into the change set being built. */
    public void addIntoCurrentChangeSet(String ccs) throws IllegalAcornStateException {
        csLRU.acquireMutex();

        try {
            currentChanges.add(ccs);
        } catch (Throwable t) {
            throw new IllegalAcornStateException(t);
        } finally {
            csLRU.releaseMutex();
        }
    }

    /**
     * Seals the accumulated change set under {@code changeSetId}; the ChangeSetInfo
     * constructor registers itself with csLRU as a side effect.
     */
    public void commitChangeSet(long changeSetId, byte[] data) throws IllegalAcornStateException {
        csLRU.acquireMutex();
        try {
            // NOTE(review): element types reconstructed — markup ate the generics.
            ArrayList<String> csids = new ArrayList<>(currentChanges);
            currentChanges = new ArrayList<>();
            new ChangeSetInfo(csLRU, changeSetId, data, csids);
        } catch (Throwable t) {
            throw new IllegalAcornStateException(t);
        } finally {
            csLRU.releaseMutex();
        }
    }

    /** Returns the metadata bytes of a change set, or null if unknown. */
    public byte[] getMetadata(long changeSetId) throws AcornAccessVerificationException, IllegalAcornStateException {

        ChangeSetInfo info = csLRU.getWithoutMutex(changeSetId);
        if (info == null) return null;
        info.acquireMutex();
        try {
            return info.getMetadataBytes();
        } catch (IllegalAcornStateException | AcornAccessVerificationException e) {
            throw e;
        } catch (Throwable t) {
            throw new IllegalAcornStateException(t);
        } finally {
            info.releaseMutex();
        }
    }

    /** Returns the external-value file for (clusterUID, resourceIndex), or null. */
    public byte[] getResourceFile(final byte[] clusterUID, final int resourceIndex) throws AcornAccessVerificationException, IllegalAcornStateException {

        ClusterUID uid = ClusterUID.make(clusterUID, 0);
        // Key format mirrors modiFileEx below: "<uid>_<resourceIndex>".
        String key = uid.toString() + "_" + resourceIndex;
        FileInfo info = fileLRU.getWithoutMutex(key);
        if (info == null) return null;
        info.acquireMutex();
        try {
            return info.getResourceFile();
        } catch (IllegalAcornStateException | AcornAccessVerificationException e) {
            throw e;
        } catch (Throwable t) {
            throw new IllegalAcornStateException(t);
        } finally {
            info.releaseMutex();
        }
    }

    /** Returns a segment of an external value, or null if the value is unknown. */
    public ResourceSegment getResourceSegment(final byte[] clusterUID, final int resourceIndex, final long segmentOffset, short segmentSize) throws AcornAccessVerificationException, IllegalAcornStateException {
        ClusterUID uid = ClusterUID.make(clusterUID, 0);

        String key = uid.toString() + "_" + resourceIndex;
        FileInfo info = fileLRU.getWithoutMutex(key);
        if (info == null) return null;
        info.acquireMutex();
        try {
            return info.getResourceSegment(clusterUID, resourceIndex, segmentOffset, segmentSize);
        } catch (Throwable t) {
            throw new IllegalAcornStateException(t);
        } finally {
            info.releaseMutex();
        }
    }

    /**
     * Writes {@code size} bytes from {@code bytes[pos..]} into the external value of
     * a resource at {@code offset}, creating the FileInfo on first touch.
     * NOTE(review): a DatabaseException from getResourceIndexFromResourceKey is
     * swallowed with printStackTrace — the write silently does not happen; confirm
     * this best-effort behavior is intended.
     */
    public void modiFileEx(ClusterUID uid, int resourceKey, long offset, long size, byte[] bytes, long pos, ClusterSupport support) throws IllegalAcornStateException {
        try {
            String key = uid.toString() + "_" + ClusterTraits.getResourceIndexFromResourceKey(resourceKey);

            FileInfo info = null;
            fileLRU.acquireMutex();
            try {
                info = fileLRU.get(key);
                if (info == null) {
                    info = new FileInfo(fileLRU, key, (int) (offset + size));
                }
            } catch (Throwable t) {
                throw new IllegalAcornStateException(t);
            } finally {
                fileLRU.releaseMutex();
            }

            info.acquireMutex();
            try {
                info.updateData(bytes, offset, pos, size);
            } catch (Throwable t) {
                throw new IllegalAcornStateException(t);
            } finally {
                info.releaseMutex();
            }
        } catch (DatabaseException e) {
            e.printStackTrace();
        }
    }

    /** Shuts down all four LRU caches (flushes pending writes). */
    public void shutdown() {
        clusterLRU.shutdown();
        fileLRU.shutdown();
        streamLRU.shutdown();
        csLRU.shutdown();
    }

    /** Trips the snapshot-safety guard; makeSnapshot will rethrow {@code t}. */
    public void notSafeToMakeSnapshot(IllegalAcornStateException t) {
        this.safeToMakeSnapshot.compareAndSet(true, false);
        this.cause = t;
    }

}
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.ObjectOutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

/**
 * Minimal {@link Externalizable} demo/scratch class.
 *
 * Fixes over the previous revision:
 * - Added the public no-arg constructor that the Externalizable contract requires;
 *   without it, deserialization fails with InvalidClassException.
 * - Implemented {@link #readExternal}, which was empty, so deserialized instances
 *   silently lost both fields.
 */
public class ExternalizableExample implements Externalizable {

    public int first;
    private long second;

    /** Required by the Externalizable contract: deserialization instantiates via this. */
    public ExternalizableExample() {
    }

    public ExternalizableExample(int first, long second) {
        this.first = first;
        this.second = second;
    }

    @Override
    public void writeExternal(ObjectOutput out) throws IOException {
        out.writeInt(first);
        out.writeLong(second);
    }

    @Override
    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
        // Must read fields in exactly the order writeExternal wrote them.
        this.first = in.readInt();
        this.second = in.readLong();
    }

    /** Ad-hoc manual test: serializes one instance to a hard-coded desktop path. */
    public static void main(String[] args) {
        Externalizable test = new ExternalizableExample(123, 3456);

        try (ObjectOutputStream stream = new ObjectOutputStream(Files.newOutputStream(Paths.get("C:/Users/Jani Simomaa/Desktop/test"), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING))) {
            stream.writeObject(test);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
/**
 * Append-oriented file helper used by the Acorn LRU caches. One FileIO instance is
 * cached per Path; saveBytes tracks a logical write position so callers get back
 * the offset their payload landed at.
 */
public class FileIO {

    // NOTE(review): generic parameters in this class were lost in the markup;
    // reconstructed from usage — confirm against the original source.
    private static final FileAttribute<?>[] NO_ATTRIBUTES = new FileAttribute[0];

    private static final Set<OpenOption> CREATE_OPTIONS = new HashSet<>(2);
    private static final Set<OpenOption> APPEND_OPTIONS = new HashSet<>(1);

    static {
        CREATE_OPTIONS.add(StandardOpenOption.WRITE);
        CREATE_OPTIONS.add(StandardOpenOption.CREATE);

        APPEND_OPTIONS.add(StandardOpenOption.APPEND);
    }

    private Path path;
    // Logical end-of-file as seen through saveBytes; 0 means "start fresh".
    private int writePosition = 0;

    private FileIO(Path path) {
        this.path = path;
    }

    // NOTE(review): this cache only ever grows — entries are never evicted, so
    // long-running processes accumulate one FileIO per distinct path.
    private static Map<Path, FileIO> map = new HashMap<>();

    /** Returns the canonical FileIO for {@code path}, creating it on first use. */
    public static FileIO get(Path path) {
        synchronized (map) {
            FileIO existing = map.get(path);
            if (existing == null) {
                existing = new FileIO(path);
                map.put(path, existing);
            }
            return existing;
        }
    }

    // private static final boolean TRACE_SWAP = false;
    private static final boolean TRACE_PERF = false;

    /**
     * Writes {@code length} bytes of {@code bytes} to the file and returns the
     * offset at which they were written. {@code overwrite} resets the logical
     * position to 0, which also switches the open mode from APPEND to
     * WRITE+CREATE for that call.
     */
    public synchronized int saveBytes(byte[] bytes, int length, boolean overwrite) throws IOException {
        if (overwrite) writePosition = 0;
        int result = writePosition;
        long start = System.nanoTime();
        Set<OpenOption> options = writePosition == 0 ? CREATE_OPTIONS : APPEND_OPTIONS;

        ByteBuffer bb = ByteBuffer.wrap(bytes, 0, length);
        try (FileChannel fc = FileChannel.open(path, options, NO_ATTRIBUTES)) {
            fc.write(bb);

            writePosition += length;
            if (TRACE_PERF) {
                long duration = System.nanoTime() - start;
                double ds = 1e-9 * duration;
                System.err.println("Wrote " + bytes.length + " bytes @ " + 1e-6 * bytes.length / ds + "MB/s");
            }
            return result;
        } catch (Throwable t) {
            throw new IOException("An error occured file saving bytes for file " + path.toAbsolutePath().toString(), t);
        }
    }

    /**
     * Reads exactly {@code length} bytes starting at {@code offset}.
     * NOTE(review): if the channel hits EOF, read() returns -1 and this loop never
     * terminates (read never reaches length) — consider failing on a negative
     * return. The "faa" println below is leftover debug output.
     */
    public synchronized byte[] readBytes(int offset, int length) throws IOException {
        long start = System.nanoTime();
        try (SeekableByteChannel channel = Files.newByteChannel(path)) {
            channel.position(offset);
            ByteBuffer buf = ByteBuffer.allocate(length);
            int read = 0;
            while (read < length) {
                read += channel.read(buf);
            }
            byte[] result = buf.array();
            if (result.length != length)
                System.err.println("faa");
            if (TRACE_PERF) {
                long duration = System.nanoTime() - start;
                double ds = 1e-9 * duration;
                System.err.println("Read " + result.length + " bytes @ " + 1e-6 * result.length / ds + "MB/s");
            }
            return result;
        }
    }

    /** Forces file contents (and metadata) of {@code f} to durable storage. */
    public static void syncPath(Path f) throws IOException {
        // Does not seem to need 's' according to unit test in Windows
        try (RandomAccessFile raf = new RandomAccessFile(f.toFile(), "rw")) {
            raf.getFD().sync();
        }
    }

    /** syncPath wrapped for use as a method reference in streams (unchecked). */
    static void uncheckedSyncPath(Path f) {
        try {
            syncPath(f);
        } catch (IOException e) {
            throw new RuntimeIOException(e);
        }
    }

    /** Ad-hoc write-throughput benchmark against a hard-coded path. */
    public static void main(String[] args) throws Exception {

        byte[] buf = new byte[1024 * 1024];

        long s = System.nanoTime();

        Path test = Paths.get("e:/work/test.dat");
        OutputStream fs = Files.newOutputStream(test);
        OutputStream os = new BufferedOutputStream(fs, 128 * 1024);

        for (int i = 0; i < 40; i++) {
            os.write(buf);
        }

        os.flush();
        // fs.getFD().sync();
        os.close();

        syncPath(test);

        long duration = System.nanoTime() - s;
        System.err.println("Took " + 1e-6 * duration + "ms.");
    }

}
import org.simantics.db.Database;
import org.simantics.db.ServiceLocator;
import org.simantics.db.common.utils.Logger;
import org.simantics.db.exception.DatabaseException;
import org.simantics.db.exception.SDBException;
import org.simantics.db.server.ProCoreException;
import org.simantics.db.service.ClusterSetsSupport;
import org.simantics.db.service.ClusterUID;
import org.simantics.db.service.LifecycleSupport;
import org.simantics.utils.datastructures.Pair;
import org.simantics.utils.logging.TimeLogger;

import gnu.trove.map.hash.TLongObjectHashMap;

/**
 * Acorn implementation of {@link Database.Session}: owns the ClusterManager, the
 * MainProgram worker thread, a single-threaded snapshot saver, and a small
 * read/write transaction state machine (TransactionManager below).
 */
public class GraphClientImpl2 implements Database.Session {

    public static final boolean DEBUG = false;

    public final ClusterManager clusters;

    private TransactionManager transactionManager = new TransactionManager();
    // One dedicated thread runs MainProgram; a separate daemon thread takes snapshots.
    private ExecutorService executor = Executors.newSingleThreadExecutor(new ClientThreadFactory("Core Main Program", false));
    private ExecutorService saver = Executors.newSingleThreadExecutor(new ClientThreadFactory("Core Snapshot Saver", true));

    private Path dbFolder;
    private final Database database;
    private ServiceLocator locator;
    private MainProgram mainProgram;

    /** Names threads and sets their daemon flag. */
    static class ClientThreadFactory implements ThreadFactory {

        final String name;
        final boolean daemon;

        public ClientThreadFactory(String name, boolean daemon) {
            this.name = name;
            this.daemon = daemon;
        }

        @Override
        public Thread newThread(Runnable r) {
            Thread thread = new Thread(r, name);
            thread.setDaemon(daemon);
            return thread;
        }
    }

    public GraphClientImpl2(Database database, Path dbFolder, ServiceLocator locator) throws IOException {
        this.database = database;
        this.dbFolder = dbFolder;
        this.locator = locator;
        this.clusters = new ClusterManager(dbFolder);
        load();
        ClusterSetsSupport cssi = locator.getService(ClusterSetsSupport.class);
        cssi.setReadDirectory(clusters.lastSessionDirectory);
        mainProgram = new MainProgram(this, clusters);
        executor.execute(mainProgram);
    }

    public Path getDbFolder() {
        return dbFolder;
    }

    /**
     * Attempts an asynchronous autosave snapshot on the saver thread. Takes a write
     * transaction first (so no writer is active), then snapshots only if
     * MainProgram's operation queue is empty; otherwise defers. On unexpected
     * failure the whole session is closed via LifecycleSupport.
     */
    public void tryMakeSnapshot() throws IOException {

        if (isClosing || unexpectedClose)
            return;

        saver.execute(new Runnable() {

            @Override
            public void run() {
                Transaction tr = null;
                try {
                    // First take a write transaction
                    tr = askWriteTransaction(-1);
                    // Then make sure that MainProgram is idling
                    mainProgram.mutex.acquire();
                    try {
                        synchronized (mainProgram) {
                            if (mainProgram.operations.isEmpty()) {
                                makeSnapshot(false);
                            } else {
                                // MainProgram is becoming busy again - delay snapshotting
                                return;
                            }
                        }
                    } finally {
                        mainProgram.mutex.release();
                    }
                } catch (IllegalAcornStateException | ProCoreException e) {
                    Logger.defaultLogError(e);
                    unexpectedClose = true;
                } catch (InterruptedException e) {
                    Logger.defaultLogError(e);
                } finally {
                    try {
                        // Always release the write transaction, even on the early return.
                        if (tr != null)
                            endTransaction(tr.getTransactionId());
                        if (unexpectedClose) {
                            LifecycleSupport support = getServiceLocator().getService(LifecycleSupport.class);
                            try {
                                support.close();
                            } catch (DatabaseException e1) {
                                Logger.defaultLogError(e1);
                            }
                        }
                    } catch (ProCoreException e) {
                        Logger.defaultLogError(e);
                    }
                }
            }
        });
    }

    public void makeSnapshot(boolean fullSave) throws IllegalAcornStateException {
        clusters.makeSnapshot(locator, fullSave);
    }

    // NOTE(review): generic signature reconstructed — markup ate the type parameter.
    public <T> T clone(ClusterUID uid, ClusterCreator<T> creator) throws DatabaseException {
        try {
            return clusters.clone(uid, creator);
        } catch (AcornAccessVerificationException | IllegalAcornStateException | IOException e) {
            unexpectedClose = true;
            throw new DatabaseException(e);
        }
    }

    public void load() throws IOException {
        clusters.load();
    }

    @Override
    public Database getDatabase() {
        return database;
    }

    private boolean closed = false;
    private boolean isClosing = false;
    // Set when an internal failure makes further writes unsafe; suppresses the
    // final full-save snapshot in close().
    private boolean unexpectedClose = false;

    /**
     * Shuts the session down: full snapshot (unless unsafe), then stops MainProgram,
     * the cluster manager and both executors, waiting briefly for termination.
     */
    @Override
    public void close() throws ProCoreException {
        System.err.println("Closing " + this + " and mainProgram " + mainProgram);
        if (!closed && !isClosing) {
            isClosing = true;
            try {
                if (!unexpectedClose)
                    makeSnapshot(true);

                mainProgram.close();
                clusters.shutdown();
                executor.shutdown();
                saver.shutdown();
                boolean executorTerminated = executor.awaitTermination(500, TimeUnit.MILLISECONDS);
                boolean saverTerminated = saver.awaitTermination(500, TimeUnit.MILLISECONDS);

                System.err.println("executorTerminated=" + executorTerminated + ", saverTerminated=" + saverTerminated);

                mainProgram = null;
                executor = null;
                saver = null;

            } catch (IllegalAcornStateException | InterruptedException e) {
                throw new ProCoreException(e);
            }
            closed = true;
        }
        // impl.close();
    }

    @Override
    public void open() throws ProCoreException {
        throw new UnsupportedOperationException();
    }

    @Override
    public boolean isClosed() throws ProCoreException {
        return closed;
    }

    /**
     * Accepts a commit: advances the head change-set id, persists the change set
     * (with metadata) under changeSetId + 1, records the transaction id and notifies
     * MainProgram.
     */
    @Override
    public void acceptCommit(long transactionId, long changeSetId, byte[] metadata) throws ProCoreException {
        clusters.state.headChangeSetId++;
        long committedChangeSetId = changeSetId + 1;
        try {
            clusters.commitChangeSet(committedChangeSetId, metadata);

            clusters.state.transactionId = transactionId;

            mainProgram.committed();

            TimeLogger.log("Accepted commit");
        } catch (IllegalAcornStateException e) {
            throw new ProCoreException(e);
        }
    }

    /**
     * Deliberately unsupported: cancel is not implemented safely, so this trips the
     * snapshot-safety guard (preventing a possibly-corrupt head.state from being
     * written) and then throws.
     */
    @Override
    public long cancelCommit(long transactionId, long changeSetId, byte[] metadata, OnChangeSetUpdate onChangeSetUpdate) throws ProCoreException {
        UnsupportedOperationException e = new UnsupportedOperationException("org.simantics.acorn.GraphClientImpl2.cancelCommit() is not supported operation! Closing down to prevent further havoc");
        clusters.notSafeToMakeSnapshot(new IllegalAcornStateException(e));
        throw e;
        // (previous experimental undo-based implementation removed; see VCS history)
    }

    @Override
    public Transaction askReadTransaction() throws ProCoreException {
        return transactionManager.askReadTransaction();
    }

    enum TransactionState {
        IDLE, WRITE, READ
    }

    /** A queued request for a transaction; its semaphore is released when granted. */
    class TransactionRequest {
        public TransactionState state;
        public Semaphore semaphore;

        public TransactionRequest(TransactionState state, Semaphore semaphore) {
            this.state = state;
            this.semaphore = semaphore;
        }
    }

    /**
     * Single-writer / multi-reader admission control. Requests queue up FIFO;
     * processRequests() grants reads concurrently while in READ state, anything
     * while IDLE, and nothing while a WRITE is active.
     */
    class TransactionManager {

        private TransactionState currentTransactionState = TransactionState.IDLE;

        // Number of currently-granted read transactions.
        private int reads = 0;

        // NOTE(review): generic parameters reconstructed from usage.
        LinkedList<TransactionRequest> requests = new LinkedList<>();

        TLongObjectHashMap<TransactionRequest> requestMap = new TLongObjectHashMap<>();

        /** Materializes a granted request as a Transaction with a fresh id. */
        private synchronized Transaction makeTransaction(TransactionRequest req) {

            final int csId = clusters.state.headChangeSetId;
            final long trId = clusters.state.transactionId + 1;
            requestMap.put(trId, req);
            return new Transaction() {

                @Override
                public long getTransactionId() {
                    return trId;
                }

                @Override
                public long getHeadChangeSetId() {
                    return csId;
                }
            };
        }

        /*
         * This method cannot be synchronized since it waits and must support multiple entries
         * by query thread(s) and internal transactions such as snapshot saver
         */
        public Transaction askReadTransaction() throws ProCoreException {

            Semaphore semaphore = new Semaphore(0);

            TransactionRequest req = queue(TransactionState.READ, semaphore);

            try {
                // Blocks until dispatch() grants this request.
                semaphore.acquire();
            } catch (InterruptedException e) {
                throw new ProCoreException(e);
            }

            return makeTransaction(req);
        }

        /** Grants the head-of-queue request and wakes its waiter. */
        private synchronized void dispatch() {
            TransactionRequest r = requests.removeFirst();
            if (r.state == TransactionState.READ) reads++;
            r.semaphore.release();
        }

        /** Drains the queue as far as the current state allows (see class comment). */
        private synchronized void processRequests() {

            while (true) {

                if (requests.isEmpty()) return;
                TransactionRequest req = requests.peek();

                if (currentTransactionState == TransactionState.IDLE) {

                    // Accept anything while IDLE
                    currentTransactionState = req.state;
                    dispatch();

                } else if (currentTransactionState == TransactionState.READ) {

                    if (req.state == currentTransactionState) {

                        // Allow other reads
                        dispatch();

                    } else {

                        // Wait
                        return;

                    }

                } else if (currentTransactionState == TransactionState.WRITE) {

                    // Wait
                    return;

                }

            }

        }

        private synchronized TransactionRequest queue(TransactionState state, Semaphore semaphore) {
            TransactionRequest req = new TransactionRequest(state, semaphore);
            requests.addLast(req);
            processRequests();
            return req;
        }

        /*
         * This method cannot be synchronized since it waits and must support multiple entries
         * by query thread(s) and internal transactions such as snapshot saver
         */
        public Transaction askWriteTransaction() throws IllegalAcornStateException {

            Semaphore semaphore = new Semaphore(0);
            TransactionRequest req = queue(TransactionState.WRITE, semaphore);

            try {
                semaphore.acquire();
            } catch (InterruptedException e) {
                throw new IllegalAcornStateException(e);
            }
            // Tell MainProgram which change set the new writer will produce.
            mainProgram.startTransaction(clusters.state.headChangeSetId + 1);
            return makeTransaction(req);
        }

        /** Ends a transaction; returns to IDLE (and re-runs the queue) when the last holder leaves. */
        public synchronized long endTransaction(long transactionId) throws ProCoreException {

            TransactionRequest req = requestMap.remove(transactionId);
            if (req.state == TransactionState.WRITE) {
                currentTransactionState = TransactionState.IDLE;
                processRequests();
            } else {
                reads--;
                if (reads == 0) {
                    currentTransactionState = TransactionState.IDLE;
                    processRequests();
                }
            }
            return clusters.state.transactionId;
        }

    }

    @Override
    public Transaction askWriteTransaction(final long transactionId) throws ProCoreException {
        try {
            if (isClosing || unexpectedClose || closed) {
                throw new ProCoreException("GraphClientImpl2 is already closing so no more write transactions allowed!");
            }
            return transactionManager.askWriteTransaction();
        } catch (IllegalAcornStateException e) {
            throw new ProCoreException(e);
        }
    }

    @Override
    public long endTransaction(long transactionId) throws ProCoreException {
        return transactionManager.endTransaction(transactionId);
    }

    @Override
    public String execute(String command) throws ProCoreException {
        // This is called only by WriteGraphImpl.commitAccessorChanges
        // We can ignore this in Acorn
        return "";
    }

    @Override
    public byte[] getChangeSetMetadata(long changeSetId) throws ProCoreException {
        try {
            return clusters.getMetadata(changeSetId);
        } catch (AcornAccessVerificationException | IllegalAcornStateException e) {
            throw new ProCoreException(e);
        }
    }

    // NOTE(review): prints a stack trace and returns null — looks like an
    // intentionally unimplemented probe; confirm callers tolerate null.
    @Override
    public ChangeSetData getChangeSetData(long minChangeSetId,
            long maxChangeSetId, OnChangeSetUpdate onChangeSetupate)
            throws ProCoreException {

        new Exception("GetChangeSetDataFunction " + minChangeSetId + " " + maxChangeSetId).printStackTrace();
        return null;

    }

    @Override
    public ChangeSetIds getChangeSetIds() throws ProCoreException {
        throw new UnsupportedOperationException();
    }

    @Override
    public Cluster getCluster(byte[] clusterId) throws ProCoreException {
        throw new UnsupportedOperationException();
    }

    @Override
    public ClusterChanges getClusterChanges(long changeSetId, byte[] clusterId)
            throws ProCoreException {
        throw new UnsupportedOperationException();
    }

    @Override
    public ClusterIds getClusterIds() throws ProCoreException {
        try {
            return clusters.getClusterIds();
        } catch (IllegalAcornStateException e) {
            throw new ProCoreException(e);
        }
    }
    /** Static placeholder identity for this embedded (non-networked) session. */
    @Override
    public Information getInformation() {
        return new Information() {

            @Override
            public String getServerId() {
                return "server";
            }

            @Override
            public String getProtocolId() {
                return "";
            }

            @Override
            public String getDatabaseId() {
                return "database";
            }

            @Override
            public long getFirstChangeSetId() {
                return 0;
            }

        };
    }

    /** Snapshot of the current head change set plus all known cluster ids. */
    @Override
    public Refresh getRefresh(long changeSetId) throws ProCoreException {

        final ClusterIds ids = getClusterIds();

        return new Refresh() {

            @Override
            public long getHeadChangeSetId() {
                return clusters.state.headChangeSetId;
            }

            @Override
            public long[] getFirst() {
                return ids.getFirst();
            }

            @Override
            public long[] getSecond() {
                return ids.getSecond();
            }

        };

    }

    public byte[] getResourceFile(final byte[] clusterUID, final int resourceIndex) throws ProCoreException, AcornAccessVerificationException, IllegalAcornStateException {
        return clusters.getResourceFile(clusterUID, resourceIndex);
    }

    @Override
    public ResourceSegment getResourceSegment(final byte[] clusterUID, final int resourceIndex, final long segmentOffset, short segmentSize) throws ProCoreException {
        try {
            return clusters.getResourceSegment(clusterUID, resourceIndex, segmentOffset, segmentSize);
        } catch (AcornAccessVerificationException | IllegalAcornStateException e) {
            throw new ProCoreException(e);
        }
    }

    // NOTE(review): ignores 'count' and post-increments by one — the returned value
    // is the pre-increment id and only a single id is ever reserved per call.
    // Confirm this matches what callers expect from reserveIds(count).
    @Override
    public long reserveIds(int count) throws ProCoreException {
        return clusters.state.reservedIds++;
    }

    /**
     * Queues a serialized cluster-update operation: registers it with the owning
     * cluster's LRU entry and hands it to MainProgram for asynchronous application.
     */
    @Override
    public void updateCluster(byte[] operations) throws ProCoreException {
        ClusterInfo info = null;
        try {
            ClusterUpdateOperation operation = new ClusterUpdateOperation(clusters, operations);
            info = clusters.clusterLRU.getOrCreate(operation.uid, true);
            if (info == null)
                throw new IllegalAcornStateException("info == null for operation " + operation);
            info.acquireMutex();
            info.scheduleUpdate();
mainProgram.schedule(operation); + } catch (IllegalAcornStateException | AcornAccessVerificationException e) { + throw new ProCoreException(e); + } finally { + if (info != null) + info.releaseMutex(); + } + } + + private UndoClusterUpdateProcessor getUndoCSS(String ccsId) throws DatabaseException, AcornAccessVerificationException, IllegalAcornStateException { + + String[] ss = ccsId.split("\\."); + String chunkKey = ss[0]; + int chunkOffset = Integer.parseInt(ss[1]); + ClusterStreamChunk chunk = clusters.streamLRU.getWithoutMutex(chunkKey); + if(chunk == null) throw new IllegalAcornStateException("Cluster Stream Chunk " + chunkKey + " was not found."); + chunk.acquireMutex(); + try { + return chunk.getUndoProcessor(clusters, chunkOffset, ccsId); + } catch (DatabaseException e) { + throw e; + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + chunk.releaseMutex(); + } + } + + private void performUndo(String ccsId, ArrayList> clusterChanges, UndoClusterSupport support) throws ProCoreException, DatabaseException, IllegalAcornStateException, AcornAccessVerificationException { + UndoClusterUpdateProcessor proc = getUndoCSS(ccsId); + + int clusterKey = clusters.getClusterKeyByClusterUIDOrMakeWithoutMutex(proc.getClusterUID()); + + clusters.clusterLRU.acquireMutex(); + try { + + ClusterChange cs = new ClusterChange(clusterChanges, proc.getClusterUID()); + for(int i=0;i> clusterChanges = new ArrayList>(); + + UndoClusterSupport support = new UndoClusterSupport(clusters); + + final int changeSetId = clusters.state.headChangeSetId; + + if(ClusterUpdateProcessorBase.DEBUG) + System.err.println(" === BEGIN UNDO ==="); + + for(int i=0;i ccss = clusters.getChanges(id); + + for(int j=0;j pair = clusterChanges.get(i); + + final ClusterUID cuid = pair.first; + final byte[] data = pair.second; + + onChangeSetUpdate.onChangeSetUpdate(new ChangeSetUpdate() { + + @Override + public long getChangeSetId() { + return changeSetId; + } + + @Override + public int 
getChangeSetIndex() { + return 0; + } + + @Override + public int getNumberOfClusterChangeSets() { + return clusterChanges.size(); + } + + @Override + public int getIndexOfClusterChangeSet() { + return changeSetIndex; + } + + @Override + public byte[] getClusterId() { + return cuid.asBytes(); + } + + @Override + public boolean getNewCluster() { + return false; + } + + @Override + public byte[] getData() { + return data; + } + + }); + } + } catch (AcornAccessVerificationException | IllegalAcornStateException e1) { + throw new ProCoreException(e1); + } + return false; + } + + public ServiceLocator getServiceLocator() { + return locator; + } + + @Override + public boolean refreshEnabled() { + return false; + } + + @Override + public boolean rolledback() { + return clusters.rolledback(); + } + + + + + + + + + + + + //////////////////////// + + + + + + + + + + + + +} + diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/HeadState.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/HeadState.java new file mode 100644 index 000000000..dd8703c1f --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/HeadState.java @@ -0,0 +1,107 @@ +package org.simantics.acorn; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; + +import org.simantics.acorn.exception.InvalidHeadStateException; +import org.simantics.databoard.Bindings; +import org.simantics.databoard.binding.mutable.MutableVariant; +import org.simantics.databoard.serialization.Serializer; +import org.simantics.databoard.util.binary.BinaryMemory; + +public class HeadState { + + public static final String HEAD_STATE = "head.state"; + public static final String SHA_1 = "SHA-1"; + + public int headChangeSetId = 0; + public long transactionId = 1; + 
public long reservedIds = 3; + + public ArrayList clusters = new ArrayList<>(); + public ArrayList files = new ArrayList<>(); + public ArrayList stream = new ArrayList<>(); + public ArrayList cs = new ArrayList<>(); +// public ArrayList ccs = new ArrayList(); + + public static HeadState load(Path directory) throws InvalidHeadStateException { + Path f = directory.resolve(HEAD_STATE); + + try { + byte[] bytes = Files.readAllBytes(f); + MessageDigest sha1 = MessageDigest.getInstance(SHA_1); + int digestLength = sha1.getDigestLength(); + + sha1.update(bytes, digestLength, bytes.length - digestLength); + byte[] newChecksum = sha1.digest(); + if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) { + throw new InvalidHeadStateException( + "Checksum " + Arrays.toString(newChecksum) + " does not match excpected " + + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + f.toAbsolutePath()); + } + try (ByteArrayInputStream bais = new ByteArrayInputStream(bytes, digestLength, bytes.length - digestLength)) { + HeadState object = (HeadState) org.simantics.databoard.Files.readFile(bais, Bindings.getBindingUnchecked(HeadState.class)); + return object; + } + } catch (IOException i) { + return new HeadState(); +// throw new InvalidHeadStateException(i); + } catch (NoSuchAlgorithmException e) { + throw new Error("SHA-1 Algorithm not found", e); + } catch (Throwable t) { + throw new InvalidHeadStateException(t); + } + } + + public void save(Path directory) throws IOException { + Path f = directory.resolve(HEAD_STATE); + try { + BinaryMemory rf = new BinaryMemory(4096); + try { + MutableVariant v = new MutableVariant(Bindings.getBindingUnchecked(HeadState.class), this); + Serializer s = Bindings.getSerializerUnchecked( Bindings.VARIANT ); + s.serialize(rf, v); + } finally { + rf.close(); + } + + byte[] bytes = rf.toByteBuffer().array(); + + MessageDigest sha1 = MessageDigest.getInstance(SHA_1); + sha1.update(bytes); + byte[] checksum = 
sha1.digest(); + + try (OutputStream out = Files.newOutputStream(f)) { + out.write(checksum); + out.write(bytes); + } + FileIO.syncPath(f); + } catch (NoSuchAlgorithmException e) { + throw new Error("SHA-1 digest not found, should not happen", e); + } + } + + public static void validateHeadStateIntegrity(Path headState) throws InvalidHeadStateException, IOException { + try { + byte[] bytes = Files.readAllBytes(headState); + MessageDigest sha1 = MessageDigest.getInstance(SHA_1); + int digestLength = sha1.getDigestLength(); + sha1.update(bytes, digestLength, bytes.length - digestLength); + byte[] newChecksum = sha1.digest(); + if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) { + throw new InvalidHeadStateException( + "Checksum " + Arrays.toString(newChecksum) + " does not match excpected " + + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + headState.toAbsolutePath()); + } + } catch (NoSuchAlgorithmException e) { + throw new Error("SHA-1 digest not found, should not happen", e); + } + } +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/MainProgram.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/MainProgram.java new file mode 100644 index 000000000..78ff9e899 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/MainProgram.java @@ -0,0 +1,346 @@ +package org.simantics.acorn; + +import java.io.Closeable; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Semaphore; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; + +import org.simantics.acorn.exception.AcornAccessVerificationException; +import org.simantics.acorn.exception.IllegalAcornStateException; +import 
org.simantics.acorn.lru.ClusterStreamChunk; +import org.simantics.acorn.lru.ClusterUpdateOperation; +import org.simantics.db.service.ClusterUID; +import org.simantics.utils.logging.TimeLogger; + +public class MainProgram implements Runnable, Closeable { + + private static final int CLUSTER_THREADS = 4; + private static final int CHUNK_CACHE_SIZE = 100; + + private final GraphClientImpl2 client; + private final ClusterManager clusters; + private final ExecutorService[] clusterUpdateThreads; + private final List[] updateSchedules; + + private int residentOperationBytes = 0; + private long currentChangeSetId = -1; + private int nextChunkId = 0; + private boolean alive = true; + private Semaphore deathBarrier = new Semaphore(0); + + final Semaphore mutex = new Semaphore(1); + final LinkedList operations = new LinkedList<>(); + + static class ClusterThreadFactory implements ThreadFactory { + + final String name; + final boolean daemon; + + public ClusterThreadFactory(String name, boolean daemon) { + this.name = name; + this.daemon = daemon; + } + + @Override + public Thread newThread(Runnable r) { + Thread thread = new Thread(r, name); + thread.setDaemon(daemon); + return thread; + } + } + + public MainProgram(GraphClientImpl2 client, ClusterManager clusters) { + + this.client = client; + this.clusters = clusters; + this.clusterUpdateThreads = new ExecutorService[CLUSTER_THREADS]; + this.updateSchedules = new ArrayList[CLUSTER_THREADS]; + for(int i=0;i(); + } + } + + public void startTransaction(long id) { + currentChangeSetId = id; + nextChunkId = 0; + } + + private static Comparator clusterComparator = new Comparator() { + + @Override + public int compare(ClusterUID o1, ClusterUID o2) { + return Long.compare(o1.second, o2.second); + } + }; + + @Override + public void run() { + try { + + mutex.acquire(); + main: + while(alive) { + + TreeMap> updates = new TreeMap>(clusterComparator); + + synchronized(MainProgram.this) { + + while(!operations.isEmpty() && updates.size() 
< 100) { + + ClusterStreamChunk chunk = operations.pollFirst(); + + for(int i=chunk.nextToProcess;i ops = updates.get(uid); + if(ops == null) { + ops = new ArrayList(); + updates.put(uid, ops); + } + ops.add(o); + } + + chunk.nextToProcess = chunk.operations.size(); + + if(!chunk.isCommitted()) { + assert(operations.isEmpty()); + operations.add(chunk); + break; + } + + } + + if(updates.isEmpty()) { + try { + long start = System.nanoTime(); + mutex.release(); + MainProgram.this.wait(5000); + mutex.acquire(); + if (!alive) + break main; + long duration = System.nanoTime()-start; + if(duration > 4000000000L) { + + // Was this a time-out or a new stream request? + if(operations.isEmpty()) { + + /* + * We are idling here. + * Flush all caches gradually + */ + + // Write pending cs to disk + boolean written = clusters.csLRU.swapForced(); + while(written) { + if(!updates.isEmpty()) break; + written = clusters.csLRU.swapForced(); + } + // Write pending chunks to disk + written = clusters.streamLRU.swapForced(); + while(written) { + if(!updates.isEmpty()) break; + written = clusters.streamLRU.swapForced(); + } + // Write pending files to disk + written = clusters.fileLRU.swapForced(); + while(written) { + if(!updates.isEmpty()) break; + written = clusters.fileLRU.swapForced(); + } + // Write pending clusters to disk + written = clusters.clusterLRU.swapForced(); + while(written) { + if(!updates.isEmpty()) break; + written = clusters.clusterLRU.swapForced(); + } + + client.tryMakeSnapshot(); + } + } + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + } + +// long sss = System.nanoTime(); + + for(int i=0;i> entry : updates.entrySet()) { + ClusterUID key = entry.getKey(); + int hash = key.hashCode() & (clusterUpdateThreads.length-1); + updateSchedules[hash].addAll(entry.getValue()); + } + + // final AtomicLong elapsed = new AtomicLong(0); + int acquireAmount = 0; + for(int i=0;i ops = updateSchedules[i]; + if (!ops.isEmpty()) { + acquireAmount++; + 
clusterUpdateThreads[i].submit(new Callable() { + + @Override + public Object call() throws Exception { + //long st = System.nanoTime(); + try { + for(ClusterUpdateOperation op : ops) { + op.run(); + } + } finally { + s.release(); + } + return null; + + // long duration = System.nanoTime()-st; + // elapsed.addAndGet(duration); + // double dur = 1e-9*duration; + // if(dur > 0.05) + // System.err.println("duration=" + dur + "s. " + ops.size()); + } + }); + } + } + + s.acquire(acquireAmount); + + /* + * Here we are actively processing updates from client. + * Maintain necessary caching here. + */ + + clusters.streamLRU.acquireMutex(); + try { + swapChunks(); + } catch (AcornAccessVerificationException | IllegalAcornStateException e) { + e.printStackTrace(); + } finally { + clusters.streamLRU.releaseMutex(); + } + clusters.csLRU.acquireMutex(); + try { + swapCS(); + } catch (Throwable t) { + throw new IllegalAcornStateException(t); + } finally { + clusters.csLRU.releaseMutex(); + } + + TimeLogger.log("Performed updates"); + + } + + } catch (Throwable t) { + t.printStackTrace(); + } finally { + deathBarrier.release(); + } + } + + /* + * Mutex for streamLRU is assumed here + * + */ + private void swapChunks() throws AcornAccessVerificationException, IllegalAcornStateException { + + // Cache chunks during update operations + boolean written = clusters.streamLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE); + while(written) { + written = clusters.streamLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE); + } + } + + private void swapCS() throws AcornAccessVerificationException, IllegalAcornStateException { + + // Cache chunks during update operations + boolean written = clusters.csLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE); + while(written) { + written = clusters.csLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE); + } + } + + public synchronized void committed() { + + ClusterStreamChunk last = operations.isEmpty() ? 
null : operations.getLast(); + if (!alive) { + System.err.println("Trying to commit operation after MainProgram is closed! Operation is " + last); +// return; + } + if(last != null) last.commit(); + + } + + public synchronized void schedule(ClusterUpdateOperation operation) throws IllegalAcornStateException { + if (!alive) { + System.err.println("Trying to schedule operation after MainProgram is closed! Operation is " + operation); +// return; + } + clusters.streamLRU.acquireMutex(); + + try { + + ClusterStreamChunk last = operations.isEmpty() ? null : operations.getLast(); + if(last == null || last.isCommitted()) { + String id = "" + currentChangeSetId + "-" + nextChunkId++; + last = new ClusterStreamChunk(clusters, clusters.streamLRU, id); + operations.add(last); + } + + String chunkId = last.getKey(); + int chunkOffset = last.operations.size(); + operation.scheduled(chunkId + "." + chunkOffset); + + last.addOperation(operation); + + swapChunks(); + + notifyAll(); + } catch (IllegalAcornStateException e) { + throw e; + } catch (Throwable t) { + throw new IllegalAcornStateException(t); + } finally { + clusters.streamLRU.releaseMutex(); + } + } + + @Override + public void close() { + alive = false; + synchronized (this) { + notifyAll(); + } + try { + deathBarrier.acquire(); + } catch (InterruptedException e) { + } + + for (ExecutorService executor : clusterUpdateThreads) + executor.shutdown(); + + for (int i = 0; i < clusterUpdateThreads.length; i++) { + try { + ExecutorService executor = clusterUpdateThreads[i]; + executor.awaitTermination(500, TimeUnit.MILLISECONDS); + clusterUpdateThreads[i] = null; + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/MainState.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/MainState.java new file mode 100644 index 000000000..7d1580421 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/MainState.java @@ -0,0 
+1,181 @@ +package org.simantics.acorn; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.io.Serializable; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.function.Consumer; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import org.simantics.acorn.exception.InvalidHeadStateException; +import org.simantics.databoard.Bindings; +import org.simantics.databoard.binding.mutable.MutableVariant; +import org.simantics.databoard.file.RuntimeIOException; +import org.simantics.databoard.serialization.Serializer; +import org.simantics.databoard.util.binary.BinaryMemory; +import org.simantics.utils.FileUtils; + +public class MainState implements Serializable { + + private static final long serialVersionUID = 6237383147637270225L; + + public static final String MAIN_STATE = "main.state"; + + public int headDir = 0; + + public MainState() { + } + + private MainState(int headDir) { + this.headDir = headDir; + } + + public static MainState load(Path directory, Consumer callback) throws IOException { + Files.createDirectories(directory); + Path mainState = directory.resolve(MAIN_STATE); + try { + byte[] bytes = Files.readAllBytes(mainState); + MainState state = null; + try (ByteArrayInputStream bais = new ByteArrayInputStream(bytes)) { + state = (MainState) org.simantics.databoard.Files.readFile(bais, Bindings.getBindingUnchecked(MainState.class)); + } + + while (true) { + Path latest = directory.resolve(Integer.toString(state.headDir - 1)); + try { + Path headState = latest.resolve(HeadState.HEAD_STATE); + HeadState.validateHeadStateIntegrity(headState); + break; + } catch (InvalidHeadStateException e) { + e.printStackTrace(); + state.headDir--; + callback.accept(e); + } finally { + cleanBaseDirectory(directory, latest, callback); + } + } + return state; + } catch(Exception i) { + callback.accept(i); + int largest = -1; + Path latest = 
findNewHeadStateDir(directory, callback); + if (latest != null) + largest = safeParseInt(-1, latest.getFileName().toString()); + // +1 because we want to return the next head version to use, + // not the latest existing version. + largest++; + MainState state = new MainState( largest ); + cleanBaseDirectory(directory, latest, callback); + return state; + } finally { + if (Files.exists(mainState)) { + Files.delete(mainState); + } + } + } + + public void save(Path directory) throws IOException { + Path f = directory.resolve(MAIN_STATE); + BinaryMemory rf = new BinaryMemory(4096); + try { + MutableVariant v = new MutableVariant(Bindings.getBindingUnchecked(MainState.class), this); + Serializer s = Bindings.getSerializerUnchecked( Bindings.VARIANT ); + s.serialize(rf, v); + } finally { + rf.close(); + } + byte[] bytes = rf.toByteBuffer().array(); + try (OutputStream out = Files.newOutputStream(f)) { + out.write(bytes); + } + FileIO.syncPath(f); + } + + private static boolean isInteger(Path p) { + try { + Integer.parseInt(p.getFileName().toString()); + return true; + } catch (NumberFormatException e) { + return false; + } + } + + /** + * + * @param directory + * @param callback + * @return + * @throws IOException + */ + private static Path findNewHeadStateDir(Path directory, Consumer callback) throws IOException { + try (Stream s = Files.walk(directory, 1)) { + List reverseSortedPaths = s + .filter(p -> !p.equals(directory) && isInteger(p) && Files.isDirectory(p)) + .sorted((p1, p2) -> { + int p1Name = Integer.parseInt(p1.getFileName().toString()); + int p2Name = Integer.parseInt(p2.getFileName().toString()); + return Integer.compare(p2Name, p1Name); + }).collect(Collectors.toList()); + + Path latest = null; + for (Path last : reverseSortedPaths) { + Path headState = last.resolve(HeadState.HEAD_STATE); + try { + HeadState.validateHeadStateIntegrity(headState); + latest = last; + break; + } catch (IOException | InvalidHeadStateException e) { + // Cleanup is done in 
{@link cleanBaseDirectory} method + callback.accept(e); + } + } + return latest; + } + } + + private static int safeParseInt(int defaultValue, String s) { + try { + return Integer.parseInt(s); + } catch (NumberFormatException e) { + return defaultValue; + } + } + + private static void cleanBaseDirectory(Path directory, Path latest, Consumer callback) throws IOException { + try (Stream s = Files.walk(directory, 1)) { + List reverseSortedPaths = s + .filter(p -> !p.equals(directory) && isInteger(p) && Files.isDirectory(p)) + .sorted((p1, p2) -> { + int p1Name = Integer.parseInt(p1.getFileName().toString()); + int p2Name = Integer.parseInt(p2.getFileName().toString()); + return Integer.compare(p2Name, p1Name); + }).collect(Collectors.toList()); + + for (Path p : reverseSortedPaths) { + if (!p.equals(latest)) { + if (Files.exists(p.resolve(HeadState.HEAD_STATE))) { + // this indicates that there is a possibility that index and vg's are out of sync + // if we are able to find folders with higher number than the current head.state + callback.accept(null); + } + uncheckedDeleteAll(p); + } else { + break; + } + } + } + } + + private static void uncheckedDeleteAll(Path path) { + try { + FileUtils.deleteAll(path.toFile()); + } catch (IOException e) { + throw new RuntimeIOException(e); + } + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/Persistable.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/Persistable.java new file mode 100644 index 000000000..86dfdd435 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/Persistable.java @@ -0,0 +1,15 @@ +package org.simantics.acorn; + +import java.io.IOException; +import java.nio.file.Path; + +import org.simantics.acorn.exception.AcornAccessVerificationException; +import org.simantics.acorn.exception.IllegalAcornStateException; +import org.simantics.db.exception.SDBException; + +public interface Persistable { + + void toFile(Path path) throws IOException ; + void fromFile(byte[] 
data) throws IllegalAcornStateException, AcornAccessVerificationException; + +} \ No newline at end of file diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/UndoClusterSupport.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/UndoClusterSupport.java new file mode 100644 index 000000000..1e7352c3e --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/UndoClusterSupport.java @@ -0,0 +1,170 @@ +package org.simantics.acorn; + +import java.io.InputStream; + +import org.simantics.db.Session; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.impl.ClusterBase; +import org.simantics.db.impl.ClusterSupport; +import org.simantics.db.impl.IClusterTable; +import org.simantics.db.service.ClusterUID; + +public class UndoClusterSupport implements ClusterSupport { + + final ClusterManager impl; + + public UndoClusterSupport(ClusterManager impl) { + this.impl = impl; + } + + @Override + public int createClusterKeyByClusterUID(ClusterUID clusterUID, + long clusterId) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase getClusterByClusterId(long clusterId) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase getClusterByClusterKey(int clusterKey) { + try { + return impl.getClusterByClusterKey(clusterKey); + } catch (DatabaseException e) { + e.printStackTrace(); + return null; + } + } + + @Override + public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase getClusterByResourceKey(int resourceKey) { + throw new UnsupportedOperationException(); + } + + @Override + public long getClusterIdOrCreate(ClusterUID clusterUID) { + throw new UnsupportedOperationException(); + } + + @Override + public void addStatement(Object 
cluster) { + } + + @Override + public void cancelStatement(Object cluster) { + throw new UnsupportedOperationException(); + } + + @Override + public void removeStatement(Object cluster) { + } + + @Override + public void cancelValue(Object cluster) { + throw new UnsupportedOperationException(); + } + + @Override + public void removeValue(Object cluster) { + throw new UnsupportedOperationException(); + } + + @Override + public void setValue(Object cluster, long clusterId, byte[] bytes, + int length) { + } + + @Override + public void modiValue(Object cluster, long clusterId, long voffset, + int length, byte[] bytes, int offset) { + throw new UnsupportedOperationException(); + } + + @Override + public void setImmutable(Object cluster, boolean immutable) { + throw new UnsupportedOperationException(); + } + + @Override + public void setDeleted(Object cluster, boolean deleted) { + throw new UnsupportedOperationException(); + } + + @Override + public void createResource(Object cluster, short resourceIndex, + long clusterId) { + throw new UnsupportedOperationException(); + } + + @Override + public void addStatementIndex(Object cluster, int resourceKey, + ClusterUID clusterUID, byte op) { + } + + @Override + public void setStreamOff(boolean setOff) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean getStreamOff() { + throw new UnsupportedOperationException(); + } + + @Override + public InputStream getValueStreamEx(int resourceIndex, long clusterId) + throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public byte[] getValueEx(int resourceIndex, long clusterId) + throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public byte[] getValueEx(int resourceIndex, long clusterId, long voffset, + int length) throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public long getValueSizeEx(int resourceIndex, long clusterId) + throws 
DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public int wait4RequestsLess(int limit) throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public Session getSession() { + throw new UnsupportedOperationException(); + } + + @Override + public IClusterTable getClusterTable() { + throw new UnsupportedOperationException(); + } + + @Override + public int getClusterKeyByClusterUIDOrMake(long id1, long id2) { + throw new UnsupportedOperationException(); + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/backup/AcornBackupProvider.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/backup/AcornBackupProvider.java new file mode 100644 index 000000000..3977ad73d --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/backup/AcornBackupProvider.java @@ -0,0 +1,321 @@ +package org.simantics.acorn.backup; + +import java.io.BufferedReader; +import java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.LinkOption; +import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.StandardCopyOption; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.Arrays; +import java.util.concurrent.Future; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import org.simantics.acorn.AcornSessionManagerImpl; +import org.simantics.acorn.GraphClientImpl2; +import org.simantics.acorn.exception.IllegalAcornStateException; +import org.simantics.backup.BackupException; +import org.simantics.backup.IBackupProvider; +import org.simantics.db.server.ProCoreException; +import org.simantics.utils.FileUtils; + +/** + * @author Jani + * + * TODO: get rid of {@link GraphClientImpl2#getInstance()} invocations somehow in a cleaner way + */ +public class AcornBackupProvider 
implements IBackupProvider { + + private static final String IDENTIFIER = "AcornBackupProvider"; + private long trId = -1; + private final Semaphore lock = new Semaphore(1); + private final GraphClientImpl2 client; + + public AcornBackupProvider() { + this.client = AcornSessionManagerImpl.getInstance().getClient(); + } + + private static Path getAcornMetadataFile(Path dbFolder) { + return dbFolder.getParent().resolve(IDENTIFIER); + } + + @Override + public void lock() throws BackupException { + try { + if (trId != -1) + throw new IllegalStateException(this + " backup provider is already locked"); + trId = client.askWriteTransaction(-1).getTransactionId(); + } catch (ProCoreException e) { + e.printStackTrace(); + } + } + + @Override + public Future backup(Path targetPath, int revision) throws BackupException { + boolean releaseLock = true; + try { + lock.acquire(); + + client.makeSnapshot(true); + + Path dbDir = client.getDbFolder(); + int newestFolder = client.clusters.mainState.headDir - 1; + int latestFolder = -2; + Path AcornMetadataFile = getAcornMetadataFile(dbDir); + if (Files.exists(AcornMetadataFile)) { + try (BufferedReader br = Files.newBufferedReader(AcornMetadataFile)) { + latestFolder = Integer.parseInt( br.readLine() ); + } + } + + AcornBackupRunnable r = new AcornBackupRunnable( + lock, targetPath, revision, dbDir, latestFolder, newestFolder); + new Thread(r, "Acorn backup thread").start(); + + releaseLock = false; + return r; + } catch (InterruptedException e) { + releaseLock = false; + throw new BackupException("Failed to lock Acorn for backup.", e); + } catch (NumberFormatException e) { + throw new BackupException("Failed to read Acorn head state file.", e); + } catch (IllegalAcornStateException | IOException e) { + throw new BackupException("I/O problem during Acorn backup.", e); + } finally { + if (releaseLock) + lock.release(); + } + } + + @Override + public void unlock() throws BackupException { + try { + if (trId == -1) + throw new 
BackupException(this + " backup provider is not locked"); + client.endTransaction(trId); + trId = -1; + } catch (ProCoreException e) { + throw new BackupException(e); + } + } + + @Override + public void restore(Path fromPath, int revision) { + try { + // 1. Resolve initial backup restore target. + // This can be DB directory directly or a temporary directory that + // will replace the DB directory. + Path dbRoot = client.getDbFolder(); + Path restorePath = dbRoot; + if (!Files.exists(dbRoot, LinkOption.NOFOLLOW_LINKS)) { + Files.createDirectories(dbRoot); + } else { + Path dbRootParent = dbRoot.getParent(); + restorePath = dbRootParent == null ? Files.createTempDirectory("restore") + : Files.createTempDirectory(dbRootParent, "restore"); + } + + // 2. Restore the backup. + Files.walkFileTree(fromPath, new RestoreCopyVisitor(restorePath, revision)); + + // 3. Override existing DB root with restored temporary copy if necessary. + if (dbRoot != restorePath) { + FileUtils.deleteAll(dbRoot.toFile()); + Files.move(restorePath, dbRoot); + } + } catch (IOException e) { + e.printStackTrace(); + } + } + + private class RestoreCopyVisitor extends SimpleFileVisitor { + + private final Path toPath; + private final int revision; + private Path currentSubFolder; + + public RestoreCopyVisitor(Path toPath, int revision) { + this.toPath = toPath; + this.revision = revision; + } + + @Override + public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException { + Path dirName = dir.getFileName(); + if (dirName.toString().equals(IDENTIFIER)) { + currentSubFolder = dir; + return FileVisitResult.CONTINUE; + } else if (dir.getParent().getFileName().toString().equals(IDENTIFIER)) { + Path targetPath = toPath.resolve(dirName); + if (!Files.exists(targetPath)) { + Files.createDirectory(targetPath); + } + return FileVisitResult.CONTINUE; + } else if (dirName.toString().length() == 1 && Character.isDigit(dirName.toString().charAt(0))) { + int dirNameInt = 
Integer.parseInt(dirName.toString()); + if (dirNameInt <= revision) { + return FileVisitResult.CONTINUE; + } else { + return FileVisitResult.SKIP_SUBTREE; + } + } else { + return FileVisitResult.CONTINUE; + } + } + + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (file.getFileName().toString().endsWith(".tar.gz")) + return FileVisitResult.CONTINUE; + System.out.println("Restore " + file + " to " + toPath.resolve(currentSubFolder.relativize(file))); + Files.copy(file, toPath.resolve(currentSubFolder.relativize(file)), StandardCopyOption.REPLACE_EXISTING); + return FileVisitResult.CONTINUE; + } + } + + private static class AcornBackupRunnable implements Runnable, Future { + + private final Semaphore lock; + private final Path targetPath; + private final int revision; + private final Path baseDir; + private final int latestFolder; + private final int newestFolder; + + private boolean done = false; + private final Semaphore completion = new Semaphore(0); + private BackupException exception = null; + + public AcornBackupRunnable(Semaphore lock, Path targetPath, int revision, + Path baseDir, int latestFolder, int newestFolder) { + this.lock = lock; + this.targetPath = targetPath; + this.revision = revision; + this.baseDir = baseDir; + this.latestFolder = latestFolder; + this.newestFolder = newestFolder; + } + + @Override + public void run() { + try { + doBackup(); + writeHeadstateFile(); + } catch (IOException e) { + exception = new BackupException("Acorn backup failed", e); + rollback(); + } finally { + done = true; + lock.release(); + completion.release(); + } + } + + private void doBackup() throws IOException { + Path target = targetPath.resolve(String.valueOf(revision)).resolve(IDENTIFIER); + if (!Files.exists(target)) + Files.createDirectories(target); + Files.walkFileTree(baseDir, + new BackupCopyVisitor(baseDir, target)); + } + + private void writeHeadstateFile() throws IOException { + Path 
AcornMetadataFile = getAcornMetadataFile(baseDir); + if (!Files.exists(AcornMetadataFile)) { + Files.createFile(AcornMetadataFile); + } + Files.write(AcornMetadataFile, + Arrays.asList(Integer.toString(newestFolder)), + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING, + StandardOpenOption.CREATE); + } + + private void rollback() { + // TODO + } + + private class BackupCopyVisitor extends SimpleFileVisitor { + + private Path fromPath; + private Path toPath; + + public BackupCopyVisitor(Path fromPath, Path toPath) { + this.fromPath = fromPath; + this.toPath = toPath; + } + + @Override + public FileVisitResult preVisitDirectory(Path dir, + BasicFileAttributes attrs) throws IOException { + Path dirName = dir.getFileName(); + if (dirName.equals(fromPath)) { + Path targetPath = toPath.resolve(fromPath.relativize(dir)); + if (!Files.exists(targetPath)) { + Files.createDirectory(targetPath); + } + return FileVisitResult.CONTINUE; + } else { + int dirNameInt = Integer.parseInt(dirName.toString()); + if (latestFolder < dirNameInt && dirNameInt <= newestFolder) { + Path targetPath = toPath.resolve(fromPath + .relativize(dir)); + if (!Files.exists(targetPath)) { + Files.createDirectory(targetPath); + } + return FileVisitResult.CONTINUE; + } + return FileVisitResult.SKIP_SUBTREE; + } + } + + @Override + public FileVisitResult visitFile(Path file, + BasicFileAttributes attrs) throws IOException { + System.out.println("Backup " + file + " to " + + toPath.resolve(fromPath.relativize(file))); + Files.copy(file, toPath.resolve(fromPath.relativize(file)), + StandardCopyOption.REPLACE_EXISTING); + return FileVisitResult.CONTINUE; + } + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return false; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean isDone() { + return done; + } + + @Override + public BackupException get() throws InterruptedException { + completion.acquire(); + 
completion.release(); + return exception; + } + + @Override + public BackupException get(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException { + if (completion.tryAcquire(timeout, unit)) + completion.release(); + else + throw new TimeoutException("Acorn backup completion waiting timed out."); + return exception; + } + + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterBig.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterBig.java new file mode 100644 index 000000000..51241728d --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterBig.java @@ -0,0 +1,1109 @@ +/******************************************************************************* + * Copyright (c) 2007, 2010 Association for Decentralized Information Management + * in Industry THTH ry. + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * VTT Technical Research Centre of Finland - initial API and implementation + *******************************************************************************/ +package org.simantics.acorn.cluster; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; + +import org.simantics.acorn.exception.AcornAccessVerificationException; +import org.simantics.acorn.exception.IllegalAcornStateException; +import org.simantics.acorn.internal.ClusterChange; +import org.simantics.acorn.internal.ClusterStream; +import org.simantics.acorn.internal.ClusterSupport2; +import org.simantics.acorn.internal.DebugPolicy; +import org.simantics.db.Resource; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.exception.ExternalValueException; +import 
org.simantics.db.exception.ValidationException; +import org.simantics.db.impl.ClusterI; +import org.simantics.db.impl.ClusterI.PredicateProcedure; +import org.simantics.db.impl.ClusterSupport; +import org.simantics.db.impl.ClusterTraitsBase; +import org.simantics.db.impl.ForEachObjectContextProcedure; +import org.simantics.db.impl.ForEachObjectProcedure; +import org.simantics.db.impl.ForPossibleRelatedValueContextProcedure; +import org.simantics.db.impl.ForPossibleRelatedValueProcedure; +import org.simantics.db.impl.IClusterTable; +import org.simantics.db.impl.Table; +import org.simantics.db.impl.TableHeader; +import org.simantics.db.impl.graph.ReadGraphImpl; +import org.simantics.db.impl.query.QueryProcessor; +import org.simantics.db.procedure.AsyncContextMultiProcedure; +import org.simantics.db.procedure.AsyncMultiProcedure; +import org.simantics.db.procore.cluster.ClusterMap; +import org.simantics.db.procore.cluster.ClusterPrintDebugInfo; +import org.simantics.db.procore.cluster.ClusterTraits; +import org.simantics.db.procore.cluster.CompleteTable; +import org.simantics.db.procore.cluster.FlatTable; +import org.simantics.db.procore.cluster.ForeignTable; +import org.simantics.db.procore.cluster.ObjectTable; +import org.simantics.db.procore.cluster.PredicateTable; +import org.simantics.db.procore.cluster.ResourceTable; +import org.simantics.db.procore.cluster.ValueTable; +import org.simantics.db.service.ClusterUID; +import org.simantics.utils.datastructures.Callback; + +final public class ClusterBig extends ClusterImpl { + private static final int TABLE_HEADER_SIZE = TableHeader.HEADER_SIZE + TableHeader.EXTRA_SIZE; + private static final int RESOURCE_TABLE_OFFSET = 0; + private static final int PREDICATE_TABLE_OFFSET = RESOURCE_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int OBJECT_TABLE_OFFSET = PREDICATE_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int VALUE_TABLE_OFFSET = OBJECT_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final 
int FLAT_TABLE_OFFSET = VALUE_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int COMPLETE_TABLE_OFFSET = FLAT_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int FOREIGN_TABLE_OFFSET = COMPLETE_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int INT_HEADER_SIZE = FOREIGN_TABLE_OFFSET + TABLE_HEADER_SIZE; + private final int clusterBits; + final private ResourceTable resourceTable; + //final private ResourceTable movedResourceTable; + final private PredicateTable predicateTable; + final private ObjectTable objectTable; + final private ValueTable valueTable; + final private FlatTable flatTable; + final private ForeignTable foreignTable; + final private CompleteTable completeTable; + final private ClusterMap clusterMap; + final private int[] headerTable; + final private ClusterSupport2 clusterSupport; + + public ClusterBig(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport2 support) { + super(clusterTable, clusterUID, clusterKey, support); + if(DebugPolicy.REPORT_CLUSTER_EVENTS) + new Exception(getClusterUID().toString()).printStackTrace(); + this.headerTable = new int[INT_HEADER_SIZE]; + this.resourceTable = new ResourceTable(this, headerTable, RESOURCE_TABLE_OFFSET); + this.foreignTable = new ForeignTable(this, headerTable, FOREIGN_TABLE_OFFSET); + this.predicateTable = new PredicateTable(this, headerTable, PREDICATE_TABLE_OFFSET); + this.objectTable = new ObjectTable(this, headerTable, OBJECT_TABLE_OFFSET); + this.valueTable = new ValueTable(this, headerTable, VALUE_TABLE_OFFSET); + this.completeTable = new CompleteTable(this, headerTable, COMPLETE_TABLE_OFFSET); + this.flatTable = null; + this.clusterMap = new ClusterMap(foreignTable, flatTable); + this.clusterSupport = support; + this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey); + this.importance = 0; +// clusterTable.setDirtySizeInBytes(true); + } + protected ClusterBig(IClusterTable clusterTable, long[] longs, int[] ints, byte[] bytes, 
ClusterSupport2 support, int clusterKey) + throws DatabaseException { + super(clusterTable, checkValidity(0, longs, ints, bytes), clusterKey, support); + if(DebugPolicy.REPORT_CLUSTER_EVENTS) + new Exception(getClusterUID().toString()).printStackTrace(); + if (ints.length < INT_HEADER_SIZE) + throw new IllegalArgumentException("Too small integer table for cluster."); + this.headerTable = ints; + this.resourceTable = new ResourceTable(this, ints, RESOURCE_TABLE_OFFSET, longs); + this.foreignTable = new ForeignTable(this, headerTable, FOREIGN_TABLE_OFFSET, longs); + this.predicateTable = new PredicateTable(this, ints, PREDICATE_TABLE_OFFSET, ints); + this.objectTable = new ObjectTable(this, ints, OBJECT_TABLE_OFFSET, ints); + this.valueTable = new ValueTable(this, ints, VALUE_TABLE_OFFSET, bytes); + this.flatTable = null; + this.completeTable = new CompleteTable(this, headerTable, COMPLETE_TABLE_OFFSET, ints); + this.clusterMap = new ClusterMap(foreignTable, flatTable); + this.clusterSupport = support; + this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey); + } + void analyse() { + System.out.println("Cluster " + clusterId); + System.out.println("-size:" + getUsedSpace()); + System.out.println(" -rt:" + (resourceTable.getTableCapacity() * 8 + 8)); + System.out.println(" -ft:" + foreignTable.getTableCapacity() * 8); + System.out.println(" -pt:" + predicateTable.getTableCapacity() * 4); + System.out.println(" -ot:" + objectTable.getTableCapacity() * 4); + System.out.println(" -ct:" + completeTable.getTableCapacity() * 4); + System.out.println(" -vt:" + valueTable.getTableCapacity()); + + System.out.println("-resourceTable:"); + System.out.println(" -resourceCount=" + resourceTable.getResourceCount()); + System.out.println(" -size=" + resourceTable.getTableSize()); + System.out.println(" -capacity=" + resourceTable.getTableCapacity()); + System.out.println(" -count=" + resourceTable.getTableCount()); + System.out.println(" -size=" + 
resourceTable.getTableSize()); + //resourceTable.analyse(); + } + public void checkDirectReference(int dr) + throws DatabaseException { + if (!ClusterTraits.statementIndexIsDirect(dr)) + throw new ValidationException("Reference is not direct. Reference=" + dr); + if (ClusterTraits.isFlat(dr)) + throw new ValidationException("Reference is flat. Reference=" + dr); + if (ClusterTraits.isLocal(dr)) { + if (dr < 1 || dr > resourceTable.getUsedSize()) + throw new ValidationException("Illegal local reference. Reference=" + dr); + } else { + int fi = ClusterTraits.getForeignIndexFromReference(dr); + int ri = ClusterTraits.getResourceIndexFromForeignReference(dr); + if (fi < 1 || fi > foreignTable.getUsedSize()) + throw new ValidationException("Illegal foreign reference. Reference=" + dr + " foreign index=" + fi); + if (ri < 1 || ri > ClusterTraits.getMaxNumberOfResources()) + throw new ValidationException("Illegal foreign reference. Reference=" + dr + " resource index=" + ri); + } + } + public void checkPredicateIndex(int pi) + throws DatabaseException { + predicateTable.checkPredicateSetIndex(this, pi); + } + public void checkObjectSetReference(int or) + throws DatabaseException { + if (ClusterTraits.statementIndexIsDirect(or)) + throw new ValidationException("Illegal object set reference. 
Reference=" + or); + int oi = ClusterTraits.statementIndexGet(or); + this.objectTable.checkObjectSetIndex(this, oi); + } + + public void checkValueInit() + throws DatabaseException { + valueTable.checkValueInit(); + } + public void checkValue(int capacity, int index) + throws DatabaseException { + valueTable.checkValue(capacity, index); + } + public void checkValueFini() + throws DatabaseException { + valueTable.checkValueFini(); + } + public void checkForeingIndex(int fi) + throws DatabaseException { + if (fi<1 || fi > foreignTable.getUsedSize()) + throw new ValidationException("Illegal foreign index=" + fi); + } + public void checkCompleteSetReference(int cr) + throws DatabaseException { + if (!ClusterTraits.completeReferenceIsMultiple(cr)) + throw new ValidationException("Illegal complete set reference. Reference=" + cr); + int ci = cr; + this.completeTable.checkCompleteSetIndex(this, ci); + } + public void check() + throws DatabaseException { + this.completeTable.check(this); + this.objectTable.check(this); + // Must be after object table check. 
+ this.predicateTable.check(this); + this.resourceTable.check(this); + } + @Override + public CompleteTypeEnum getCompleteType(int resourceKey, ClusterSupport support) + throws DatabaseException { + final int resourceRef = getLocalReference(resourceKey); + int completeRef = resourceTable.getCompleteObjectRef(resourceRef); + CompleteTypeEnum ct = ClusterTraits.completeReferenceGetType(completeRef); + if (DEBUG) + System.out.println("Cluster.getCompleteType rk=" + resourceKey + " ct=" + ct); + int i = ct.getValue(); + switch (i) { + case 0: return CompleteTypeEnum.NotComplete; + case 1: return CompleteTypeEnum.InstanceOf; + case 2: return CompleteTypeEnum.Inherits; + case 3: return CompleteTypeEnum.SubrelationOf; + default: throw new DatabaseException("Illegal complete type enumeration."); + } + } + + @Override + public int getCompleteObjectKey(int resourceKey, ClusterSupport support) + throws DatabaseException { + final int resourceRef = getLocalReference(resourceKey); + int completeRef = resourceTable.getCompleteObjectRef(resourceRef); + int clusterIndex; + int resourceIndex = ClusterTraits.completeReferenceGetResourceIndex(completeRef); + + ClusterI.CompleteTypeEnum completeType = ClusterTraits.completeReferenceGetType(completeRef); + if (completeType == ClusterI.CompleteTypeEnum.NotComplete) + throw new DatabaseException("Resource has multiple complete objects. 
Resource key=" + resourceKey + "."); + + if (ClusterTraits.completeReferenceIsLocal(completeRef)) { + clusterIndex = clusterKey; + } else { + int foreignIndex = ClusterTraits.completeReferenceGetForeignIndex(completeRef); +// System.err.println("completeRef=" + completeRef + " foreignIndex=" + foreignIndex ); + ClusterUID clusterUID = foreignTable.getResourceUID(foreignIndex).asCID(); + ClusterI c = support.getClusterByClusterUIDOrMake(clusterUID); + clusterIndex = c.getClusterKey(); + } + int key = ClusterTraits.createResourceKey(clusterIndex, resourceIndex); + if (DEBUG) + System.out.println("Cluster.complete object rk=" + resourceKey + " ck=" + key); + return key; + } + + @Override + public boolean isComplete(int resourceKey, ClusterSupport support) + throws DatabaseException { + final int resourceRef = getLocalReference(resourceKey); + int completeRef = resourceTable.getCompleteObjectRef(resourceRef); + ClusterI.CompleteTypeEnum completeType = ClusterTraits.completeReferenceGetType(completeRef); + boolean complete = completeType != ClusterI.CompleteTypeEnum.NotComplete; + if (DEBUG) + System.out.println("Cluster.key=" + resourceKey + " isComplete=" + complete); + return complete; + } + + public int getSingleObject(int resourceKey, int predicateKey, int objectIndex, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("Cluster.getSingleObject: rk=" + resourceKey + " pk=" + predicateKey); + if (0 == objectIndex) { + final int resourceIndex = getLocalReference(resourceKey); + final int pRef = getInternalReferenceOrZero(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + } + return objectTable.getSingleObject(objectIndex, support, this); + } + + public void forObjects(int resourceKey, int predicateKey, int objectIndex, QueryProcessor 
processor, ReadGraphImpl graph, AsyncMultiProcedure procedure, + ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("Cluster.forObjects1: rk=" + resourceKey + " pk=" + predicateKey); + if (0 == objectIndex) { + final int resourceIndex = getLocalReference(resourceKey); + final int pRef = getInternalReferenceOrZero(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); + return; + } + objectTable.foreachObject(graph, objectIndex, procedure, this); + } + public void forObjects(int resourceKey, int predicateKey, int objectIndex, QueryProcessor processor, ReadGraphImpl graph, C context, AsyncContextMultiProcedure procedure, + ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("Cluster.forObjects1: rk=" + resourceKey + " pk=" + predicateKey); + if (0 == objectIndex) { + final int resourceIndex = getLocalReference(resourceKey); + final int pRef = getInternalReferenceOrZero(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this); + return; + } + objectTable.foreachObject(graph, objectIndex, context, procedure, this); + } + @Override + public boolean forObjects(int resourceKey, int predicateKey, int objectIndex, ObjectProcedure procedure, + Context context, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("Cluster.forObjects2: rk=" + resourceKey + " pk=" + predicateKey); + if (0 == objectIndex) { + final int resourceIndex = getLocalReference(resourceKey); + final int pRef = getInternalReferenceOrZero(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = 
ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable); + } + return objectTable.foreachObject(objectIndex, procedure, context, support, this); + } + + @Override + public int getSingleObject(int resourceKey, int predicateKey, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("Cluster.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey); + final int resourceIndex = getLocalReference(resourceKey); + final int pRef = getInternalReferenceOrZero(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); + if (0 == predicateIndex) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef); + return getSingleObject(resourceKey, predicateKey, objectIndex, support); + } + + @Override + public int getSingleObject(int resourceKey, ForPossibleRelatedValueProcedure procedure, ClusterSupport support) throws DatabaseException { + final int predicateKey = procedure.predicateKey; + if (DEBUG) + System.out.println("Cluster.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey); + final int resourceIndex = getLocalReference(resourceKey); + final int pRef = getInternalReferenceOrZero(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, 
completeTable, this); + final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); + if (0 == predicateIndex) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef); + return getSingleObject(resourceKey, predicateKey, objectIndex, support); + } + + @Override + public int getSingleObject(int resourceKey, ForPossibleRelatedValueContextProcedure procedure, ClusterSupport support) throws DatabaseException { + final int predicateKey = procedure.predicateKey; + if (DEBUG) + System.out.println("Cluster.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey); + final int resourceIndex = getLocalReference(resourceKey); + final int pRef = getInternalReferenceOrZero(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); + if (0 == predicateIndex) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef); + return getSingleObject(resourceKey, predicateKey, objectIndex, support); + } + + @Override + public void forObjects(ReadGraphImpl graph, int resourceKey, + int predicateKey, AsyncMultiProcedure procedure) + throws DatabaseException { + + throw new UnsupportedOperationException(); + +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// +// if (DEBUG) +// System.out.println("Cluster.forObjects3: rk=" + resourceKey + " pk=" + predicateKey); +// final int resourceIndex = getLocalReference(resourceKey); +// final int pRef 
= getInternalReferenceOrZero(predicateKey, support); +// final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); +// if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) { +// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); +// if (0 == predicateIndex) { +// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef); +// forObjects(resourceKey, predicateKey, objectIndex, graph.processor, graph, procedure, support); + + } + + @Override + public void forObjects(ReadGraphImpl graph, int resourceKey, ForEachObjectProcedure procedure) throws DatabaseException { + + throw new UnsupportedOperationException(); + +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// final int predicateKey = procedure.predicateKey; +// if (DEBUG) +// System.out.println("Cluster.forObjects3: rk=" + resourceKey + " pk=" + predicateKey); +// final int resourceIndex = getLocalReference(resourceKey); +// final int pRef = getInternalReferenceOrZero(predicateKey, support); +// final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); +// if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) { +// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); +// if (0 == predicateIndex) { +// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// int objectIndex = 
predicateTable.getObjectIndex(predicateIndex, pRef); +// forObjects(resourceKey, predicateKey, objectIndex, graph.processor, graph, procedure, support); + + } + @Override + public void forObjects(ReadGraphImpl graph, int resourceKey, C context, + ForEachObjectContextProcedure procedure) throws DatabaseException { + + throw new UnsupportedOperationException(); + +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// +// final int predicateKey = procedure.predicateKey; +// +// if (DEBUG) +// System.out.println("Cluster.forObjects3: rk=" + resourceKey + " pk=" + predicateKey); +// final int resourceIndex = getLocalReference(resourceKey); +// final int pRef = getInternalReferenceOrZero(predicateKey, support); +// final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); +// if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) { +// resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); +// if (0 == predicateIndex) { +// resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef); +// forObjects(resourceKey, predicateKey, objectIndex, graph.processor, graph, context, procedure, support); + + } + + @Override + public boolean forObjects(int resourceKey, int predicateKey, + ObjectProcedure procedure, Context context, ClusterSupport support) + throws DatabaseException { + if (DEBUG) + System.out.println("Cluster.forObjects4: rk=" + resourceKey + " pk=" + predicateKey); + final int resourceIndex = getLocalReference(resourceKey); + final int pRef = getInternalReferenceOrZero(predicateKey, support); + final 
ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) + return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable); + final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); + if (0 == predicateIndex) + return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable); + int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef); + return forObjects(resourceKey, predicateKey, objectIndex, procedure, context, support); + } + @Override + public boolean forPredicates(int resourceKey, + PredicateProcedure procedure, Context context, ClusterSupport support) + throws DatabaseException { + if (DEBUG) + System.out.println("Cluster.forPredicates: rk=" + resourceKey); + final int resourceIndex = getLocalReference(resourceKey); + final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); + if (0 == predicateIndex) + return resourceTable.foreachPredicate(resourceIndex, + procedure, context, support, this, completeTable); + else { + boolean broken = resourceTable.foreachPredicate(resourceIndex, + procedure, context, support, this, completeTable); + if (broken) + return true; + } + return predicateTable.foreachPredicate(predicateIndex, procedure, context, support, this); + } + @Override + public ClusterI addRelation(int sResourceKey, ClusterUID puid, int pResourceKey, ClusterUID ouid, int oResourceKey, ClusterSupport support) + throws DatabaseException { + if (DEBUG) + System.out.println("add rk=" + sResourceKey + " pk=" + pResourceKey + " ok=" + oResourceKey); + int sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION); + int pri = getReferenceOrCreateIfForeign(pResourceKey, puid, support, ClusterStream.NULL_OPERATION); + int ori = getReferenceOrCreateIfForeign(oResourceKey, ouid, 
support, ClusterStream.NULL_OPERATION); + ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey); + boolean ret = addRelationInternal(sri, pri, ori, completeType); +// check(); + if (ret) { + support.addStatement(this); + return this; + } else { + support.cancelStatement(this); + return null; + } + } + @Override + public ClusterI addRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support) + throws DatabaseException { + if (DEBUG) + System.out.println("add rk=" + sResourceKey + " pk=" + pResourceKey + " ok=" + oResourceKey); + int sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION); + int pri = getReferenceOrCreateIfForeign(pResourceKey, support, ClusterStream.NULL_OPERATION); + int ori = getReferenceOrCreateIfForeign(oResourceKey, support, ClusterStream.NULL_OPERATION); + ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey); + boolean ret = addRelationInternal(sri, pri, ori, completeType); +// check(); + if (ret) { + support.addStatement(this); + return this; + } else { + support.cancelStatement(this); + return null; + } + } + @Override + public boolean removeRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support) + throws DatabaseException { +// check(); + int sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.REMOVE_OPERATION); + int pri = getInternalReferenceOrZeroAnd(pResourceKey, support, ClusterStream.NULL_OPERATION); + int ori = getInternalReferenceOrZeroAnd(oResourceKey, support, ClusterStream.NULL_OPERATION); + boolean ret = false; + if (0 != pri && 0 != ori) { + ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey); + ret = removeRelationInternal(sri, pri, ori, completeType, support); + } + if (ret) + support.removeStatement(this); + else + support.cancelStatement(this); +// check(); + return ret; + } + 
@Override + public void denyRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support) + throws DatabaseException { + int sri = checkResourceKeyIsOursAndGetResourceIndexIf(sResourceKey, support); + ResourceIndexAndId p = checkResourceKeyAndGetResourceIndexIf(pResourceKey, support); + ResourceIndexAndId o = checkResourceKeyAndGetResourceIndexIf(oResourceKey, support); + if (0 == sri || 0 == p.index || 0 == o.index) + return; +// check(); + ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey); + boolean ret = removeRelationInternal(sri, p.reference, o.reference, completeType, support); + if (ret) { + support.addStatementIndex(this, sResourceKey, getClusterUID(), ClusterChange.REMOVE_OPERATION); + support.addStatementIndex(this, pResourceKey, p.clusterUID, ClusterStream.NULL_OPERATION); + support.addStatementIndex(this, oResourceKey, o.clusterUID, ClusterStream.NULL_OPERATION); + support.removeStatement(this); + } +// check(); + return; + } + @Override + public InputStream getValueStream(int rResourceId, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("ClusterBig.getValue " + rResourceId); + int resourceIndex = getLocalReference(rResourceId); + try { + byte[] buffer = resourceTable.getValue(valueTable, resourceIndex); + if(buffer == null) return null; + return new ByteArrayInputStream(buffer); + } catch (ExternalValueException e) { + return support.getValueStreamEx(resourceIndex, clusterUID.second); + } + } + @Override + public byte[] getValue(int rResourceId, ClusterSupport support) + throws DatabaseException { + if (DEBUG) + System.out.println("ClusterBig.getValue " + rResourceId); + int resourceIndex = getLocalReference(rResourceId); + try { + return resourceTable.getValue(valueTable, resourceIndex); + } catch (ExternalValueException e) { + try { + return clusterSupport.impl.getResourceFile(clusterUID.asBytes(), resourceIndex); + } catch 
(AcornAccessVerificationException | IllegalAcornStateException e1) { + throw new DatabaseException(e1); + } + } + } + @Override + public boolean hasValue(int rResourceId, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReference(rResourceId); + return resourceTable.hasValue(resourceIndex); + } + @Override + public boolean removeValue(int rResourceId, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterChange.DELETE_OPERATION); + support.removeValue(this); + return resourceTable.removeValue(valueTable, resourceIndex); + } + + @Override + public ClusterI setValue(int rResourceId, byte[] value, int length, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.SET_OPERATION); + support.setValue(this, getClusterId(), value, length); + resourceTable.setValue(valueTable, resourceIndex, value, length); + return this; + } + @Override + public ClusterI modiValueEx(int rResourceId, long voffset, int length, byte[] value, int offset, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.MODI_OPERATION); + support.modiValue(this, getClusterId(), voffset, length, value, offset); + resourceTable.setValueEx(valueTable, resourceIndex); + return this; + } + @Override + public byte[] readValueEx(int rResourceId, long voffset, int length, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReference(rResourceId); + boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex); + if (!isExternal) + throw new DatabaseException("ClusterI.readValue supported only for external value. 
Resource key=" + rResourceId); + return support.getValueEx(resourceIndex, getClusterId(), voffset, length); + } + @Override + public long getValueSizeEx(int resourceKey, ClusterSupport support) + throws DatabaseException, ExternalValueException { + int resourceIndex = getLocalReference(resourceKey); + boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex); + if (!isExternal) + throw new ExternalValueException("ClusterI.getSize supported only for external value. Resource key=" + resourceKey); + return support.getValueSizeEx(resourceIndex, getClusterId()); + } + public boolean isValueEx(int resourceKey) + throws DatabaseException { + int resourceIndex = getLocalReference(resourceKey); + return resourceTable.isValueEx(valueTable, resourceIndex); + } + @Override + public void setValueEx(int resourceKey) + throws DatabaseException { + int resourceIndex = getLocalReference(resourceKey); + resourceTable.setValueEx(valueTable, resourceIndex); + } + @Override + public int createResource(ClusterSupport support) + throws DatabaseException { + short resourceIndex = resourceTable.createResource(); + + if(DebugPolicy.REPORT_RESOURCE_ID_ALLOCATION) + System.out.println("[RID_ALLOCATION]: ClusterBig[" + clusterId + "] allocates " + resourceIndex); + + support.createResource(this, resourceIndex, clusterId); + return ClusterTraits.createResourceKey(clusterKey, resourceIndex); + } + @Override + public boolean hasResource(int resourceKey, ClusterSupport support) { + int clusterKey = ClusterTraitsBase.getClusterKeyFromResourceKeyNoThrow(resourceKey); + if (this.clusterKey != clusterKey) // foreign resource + return false; + int resourceIndex; + try { + resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + } catch (DatabaseException e) { + return false; + } + if (resourceIndex > 0 & resourceIndex <= resourceTable.getTableCount()) + return true; + else + return false; + } + @Override + public int getNumberOfResources(ClusterSupport support) { + 
return resourceTable.getUsedSize(); + } + @Override + public long getUsedSpace() { + long rt = resourceTable.getTableCapacity() * 8 + 8; // (8 = cluster id) + long ft = foreignTable.getTableCapacity() * 8; + long pt = predicateTable.getTableCapacity() * 4; + long ot = objectTable.getTableCapacity() * 4; + long ct = completeTable.getTableCapacity() * 4; + long vt = valueTable.getTableCapacity() * 1; + long cm = clusterMap.getUsedSpace(); + + return rt + ft + pt + ot + ct + vt + cm; +// System.out.println("resource table " + rt); +// System.out.println("foreign table (non flat cluster table) " + ft); +// System.out.println("predicate table " + pt); +// long pt2 = getRealSizeOfPredicateTable() * 4; +// System.out.println("predicate table real size " + pt2); +// System.out.println("object table " + ot); +// long ot2 = getRealSizeOfObjectTable() * 4; +// System.out.println("object table real size " + ot2); +// System.out.println("value table " + vt); + } + int getRealSizeOfPredicateTable() throws DatabaseException { + SizeOfPredicateTable proc = new SizeOfPredicateTable(resourceTable, predicateTable); + resourceTable.foreachResource(proc, 0, null, null); + return proc.getSize(); + } + int getRealSizeOfObjectTable() throws DatabaseException { + SizeOfObjectTable proc = new SizeOfObjectTable(resourceTable, predicateTable, objectTable); + resourceTable.foreachResource(proc, 0, null, null); + return proc.getSize(); + } + @Override + public boolean isEmpty() { + return resourceTable.getTableCount() == 0; + } + @Override + public void printDebugInfo(String message, ClusterSupport support) + throws DatabaseException { + predicateTable.printDebugInfo(); + objectTable.printDebugInfo(); + ClusterPrintDebugInfo proc = new ClusterPrintDebugInfo(this + , resourceTable, predicateTable, support, objectTable); + resourceTable.foreachResource(proc, 0, null, null); + } + private int getInternalReferenceOrZero(int resourceKey, ClusterSupport support) + throws DatabaseException { + int 
clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { // foreign resource + ClusterI foreignCluster = support.getClusterByClusterKey(clusterKey); + ClusterUID clusterUID = foreignCluster.getClusterUID(); + int foreignResourceIndex = clusterMap.getForeignReferenceOrZero(resourceIndex, clusterUID); + return foreignResourceIndex; + } + return resourceIndex; + } + private int getInternalReferenceOrZeroAnd(int resourceKey, ClusterSupport support, byte op) + throws DatabaseException { + int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { // foreign resource + ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey); + int foreignResourceIndex = clusterMap.getForeignReferenceOrZero(resourceIndex, clusterUID); + support.addStatementIndex(this, resourceKey, clusterUID, op); + return foreignResourceIndex; + } + support.addStatementIndex(this, resourceKey, getClusterUID(), op); + return resourceIndex; + } + private short getLocalReference(int resourceKey) throws DatabaseException { + return ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey); + } + private int getLocalReferenceAnd(int resourceKey, ClusterSupport support, byte op) + throws DatabaseException { + int resourceIndex = getLocalReference(resourceKey); + support.addStatementIndex(this, resourceKey, getClusterUID(), op); + return resourceIndex; + } + private int checkResourceKeyIsOursAndGetResourceIndexIf(int resourceKey, ClusterSupport support) + throws DatabaseException { + int clusterShortId = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + if (this.clusterKey != clusterShortId) + return 0; + int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + return 
resourceIndex; + } + private int getReferenceOrCreateIfForeign(int resourceKey, ClusterUID clusterUID, ClusterSupport support, byte op) + throws DatabaseException { + int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { + support.addStatementIndex(this, resourceKey, clusterUID, op); + return clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID); + } + support.addStatementIndex(this, resourceKey, getClusterUID(), op); + return resourceIndex; + } + private int getReferenceOrCreateIfForeign(int resourceKey, ClusterSupport support, byte op) + throws DatabaseException { + int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { + ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey); + support.addStatementIndex(this, resourceKey, clusterUID, op); + return clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID); + } + support.addStatementIndex(this, resourceKey, getClusterUID(), op); + return resourceIndex; + } + private class ResourceIndexAndId { + ResourceIndexAndId(int reference, int index, ClusterUID clusterUID) { + this.reference = reference; + this.index = index; + this.clusterUID = clusterUID; + } + public final int reference; + public final int index; + public final ClusterUID clusterUID; + } + private ResourceIndexAndId checkResourceKeyAndGetResourceIndexIf(int resourceKey, ClusterSupport support) + throws DatabaseException { + int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { // foreign resource + ClusterI foreignCluster = support.getClusterByClusterKey(clusterKey); + 
ClusterUID clusterUID = foreignCluster.getClusterUID(); + int ref = clusterMap.getForeignReferenceOrCreateByResourceIndex(resourceIndex, clusterUID); + return new ResourceIndexAndId(ref, resourceIndex, clusterUID); + } + return new ResourceIndexAndId(resourceIndex, resourceIndex, getClusterUID()); + } + + @Override + final public int execute(int resourceIndex) throws DatabaseException { + int key; + if(resourceIndex > 0) { + key = clusterBits | resourceIndex; + } else { + ClusterUID clusterUID = clusterMap.getResourceUID(resourceIndex).asCID(); + ClusterI cluster = clusterSupport.getClusterByClusterUIDOrMake(clusterUID); + int foreingResourceIndex = clusterMap.getForeignResourceIndex(resourceIndex); + key = ClusterTraits.createResourceKey(cluster.getClusterKey(), foreingResourceIndex); + } + if (DEBUG) + System.out.println("Cluster.execute key=" + key); + return key; + } + + private boolean addRelationInternal(int sReference, int pReference, int oReference, ClusterI.CompleteTypeEnum completeType) + throws DatabaseException { + int predicateIndex = resourceTable.addStatement(sReference, pReference, + oReference, predicateTable, objectTable, completeType, completeTable); + if (0 == predicateIndex) + return true; // added to resourceTable + else if (0 > predicateIndex) + return false; // old complete statemenent + int newPredicateIndex = predicateTable.addPredicate(predicateIndex, + pReference, oReference, objectTable); + if (0 == newPredicateIndex) + return false; + if (predicateIndex != newPredicateIndex) + resourceTable.setPredicateIndex(sReference, newPredicateIndex); + return true; + } + private boolean removeRelationInternal(int sResourceIndex, int pResourceIndex, + int oResourceIndex, ClusterI.CompleteTypeEnum completeType, ClusterSupport support) + throws DatabaseException { + int predicateIndex = resourceTable.getPredicateIndex(sResourceIndex); + if (0 == predicateIndex || ClusterI.CompleteTypeEnum.NotComplete != completeType) + return 
resourceTable.removeStatementFromCache(sResourceIndex, + pResourceIndex, oResourceIndex, completeType, completeTable); + PredicateTable.Status ret = predicateTable.removePredicate(predicateIndex, pResourceIndex, oResourceIndex, objectTable); + switch (ret) { + case NothingRemoved: + return false; + case PredicateRemoved: { + if (0 == predicateTable.getPredicateSetSize(predicateIndex)) + resourceTable.setPredicateIndex(sResourceIndex, 0); + // intentionally dropping to next case + } default: + break; + } + resourceTable.removeStatement(sResourceIndex, + pResourceIndex, oResourceIndex, + completeType, completeTable, + predicateTable, objectTable, this, support); + return true; + } + @Override + public void load() { + throw new Error("Not supported."); + } + + @Override + public void load(Callback r) { + throw new Error("Not supported."); + } + + public int makeResourceKey(int resourceIndex) throws DatabaseException { + int key = 0; + if (resourceIndex > 0) // local resource + key = ClusterTraits.createResourceKey(clusterKey, resourceIndex); + else { + ClusterUID clusterUID = clusterMap.getResourceUID(resourceIndex).asCID(); + int clusterKey = clusterSupport.getClusterKeyByClusterUIDOrMake(clusterUID); + int foreingResourceIndex = clusterMap.getForeignResourceIndex(resourceIndex); + key = ClusterTraits.createResourceKey(clusterKey, foreingResourceIndex); + } + if (0 == key) + throw new DatabaseException("Failed to make resource key from " + resourceIndex); + return key; + } + @Override + public ClusterBig toBig(ClusterSupport support) throws DatabaseException { + throw new Error("Not implemented"); + } + @Override + public void load(ClusterSupport session, Runnable callback) { + throw new Error("Not implemented"); + } + @Override + public ClusterI getClusterByResourceKey(int resourceKey, + ClusterSupport support) { + throw new Error("Not implemented"); + } + @Override + public void increaseReferenceCount(int amount) { + throw new Error("Not implemented"); + } + 
@Override + + public void decreaseReferenceCount(int amount) { + throw new Error("Not implemented"); + } + @Override + public int getReferenceCount() { + throw new Error("Not implemented"); + } + @Override + public void releaseMemory() { + } + @Override + public void compact() { + clusterMap.compact(); + } + public boolean contains(int resourceKey) { + return ClusterTraitsBase.isCluster(clusterBits, resourceKey); + } + @Override + public ClusterTypeEnum getType() { + return ClusterTypeEnum.BIG; + } + @Override + public boolean getImmutable() { + int status = resourceTable.getClusterStatus(); + return (status & ClusterStatus.ImmutableMaskSet) == 1; + } + @Override + public void setImmutable(boolean immutable, ClusterSupport support) { + int status = resourceTable.getClusterStatus(); + if (immutable) + status |= ClusterStatus.ImmutableMaskSet; + else + status &= ClusterStatus.ImmutableMaskClear; + resourceTable.setClusterStatus(status); + support.setImmutable(this, immutable); + } + + @Override + public ClusterTables store() throws IOException { + + ClusterTables result = new ClusterTables(); + + int[] currentHeader = Arrays.copyOf(headerTable, INT_HEADER_SIZE); + + int byteSize = valueTable.getTableSize(); + byte[] byteBytes = new byte[byteSize]; + valueTable.store(byteBytes, 0); + + //FileUtils.writeFile(bytes, valueTable.table); + + result.bytes = byteBytes; + + int longSize = LONG_HEADER_SIZE + resourceTable.getTableSize() + foreignTable.getTableSize(); + long[] longBytes = new long[longSize]; + + longBytes[0] = 0; + longBytes[1] = LONG_HEADER_VERSION; + longBytes[2] = 0; + longBytes[3] = clusterUID.second; + +// Bytes.writeLE8(longBytes, 0, 0); +// Bytes.writeLE8(longBytes, 8, LONG_HEADER_VERSION); +// Bytes.writeLE8(longBytes, 16, 0); +// Bytes.writeLE8(longBytes, 24, clusterUID.second); + + int longPos = resourceTable.store(longBytes, LONG_HEADER_SIZE); + foreignTable.store(longBytes, longPos); + + result.longs = longBytes; + +// FileUtils.writeFile(longs, 
longBytes); + + int intSize = INT_HEADER_SIZE + predicateTable.getTableSize() + objectTable.getTableSize() + completeTable.getTableSize(); + int[] intBytes = new int[intSize]; + int intPos = INT_HEADER_SIZE; + intPos = predicateTable.store(intBytes, intPos); + intPos = objectTable.store(intBytes, intPos); + intPos = completeTable.store(intBytes, intPos); + // write header + for(int i=0;i getPredicateTable() { + return predicateTable; + } + @Override + public Table getForeignTable() { + return foreignTable; + } + @Override + public Table getCompleteTable() { + return completeTable; + } + @Override + public Table getValueTable() { + return valueTable; + } + @Override + public Table getObjectTable() { + return objectTable; + } +} + +class SizeOfPredicateTable implements ClusterI.ObjectProcedure { + private final ResourceTable mrResourceTable; + private final PredicateTable mrPredicateTable; + private int size = 0; + SizeOfPredicateTable(ResourceTable resourceTable + , PredicateTable predicateTable) { + mrResourceTable = resourceTable; + mrPredicateTable = predicateTable; + } + @Override + public boolean execute(Integer i, int resourceRef) { + int predicateIndex = mrResourceTable.getPredicateIndex(resourceRef); + if (0 == predicateIndex) + return false; // continue loop + size += mrPredicateTable.getPredicateSetSize(predicateIndex); + return false; // continue loop + } + + public int getSize() { + return size; + } + +} + +class SizeOfObjectTable implements ClusterI.ObjectProcedure { + private final ResourceTable mrResourceTable; + private final PredicateTable mrPredicateTable; + private final ObjectTable mrObjectTable; + private int size = 0; + SizeOfObjectTable(ResourceTable resourceTable + , PredicateTable predicateTable, ObjectTable objectTable) { + mrResourceTable = resourceTable; + mrPredicateTable = predicateTable; + mrObjectTable = objectTable; + } + + @Override + public boolean execute(Integer i, int resourceRef) { + int predicateIndex = 
mrResourceTable.getPredicateIndex(resourceRef); + if (0 == predicateIndex) + return false; // continue loop + ClusterI.PredicateProcedure procedure = new PredicateProcedure() { + @Override + public boolean execute(Object context, int pRef, int oIndex) { + if (ClusterTraits.statementIndexIsDirect(oIndex)) + return false; // no table space reserved, continue looping + int objectIndex; + try { + objectIndex = ClusterTraits.statementIndexGet(oIndex); + size += mrObjectTable.getObjectSetSize(objectIndex); + } catch (DatabaseException e) { + e.printStackTrace(); + } + return false; // continue looping + } + }; + try { + mrPredicateTable.foreachPredicate(predicateIndex, procedure, null, null, null); + } catch (DatabaseException e) { + e.printStackTrace(); + } + return false; // continue loop + } + + public int getSize() { + return size; + } + +} \ No newline at end of file diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterImpl.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterImpl.java new file mode 100644 index 000000000..353d9382a --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterImpl.java @@ -0,0 +1,226 @@ +/******************************************************************************* + * Copyright (c) 2007, 2010 Association for Decentralized Information Management + * in Industry THTH ry. + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * VTT Technical Research Centre of Finland - initial API and implementation + *******************************************************************************/ +package org.simantics.acorn.cluster; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.UUID; + +import org.simantics.acorn.internal.Change; +import org.simantics.acorn.internal.ClusterChange; +import org.simantics.acorn.internal.ClusterSupport2; +import org.simantics.db.common.utils.Logger; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.exception.InvalidClusterException; +import org.simantics.db.impl.ClusterBase; +import org.simantics.db.impl.ClusterI; +import org.simantics.db.impl.ClusterSupport; +import org.simantics.db.impl.IClusterTable; +import org.simantics.db.impl.Modifier; +import org.simantics.db.service.ClusterCollectorPolicy.CollectorCluster; +import org.simantics.db.service.ClusterUID; +import org.simantics.db.service.ClusteringSupport.Id; +import org.simantics.utils.strings.AlphanumComparator; + +public abstract class ClusterImpl extends ClusterBase implements Modifier, CollectorCluster { + protected static final int LONG_HEADER_SIZE = 7; + protected static final long LONG_HEADER_VERSION = 1; + protected static ClusterUID checkValidity(long type, long[] longs, int[] ints, byte[] bytes) + throws InvalidClusterException { + if (longs.length < LONG_HEADER_SIZE) + throw new InvalidClusterException("Header size mismatch. Expected=" + ClusterImpl.LONG_HEADER_SIZE + ", got=" + longs.length); + if (longs[0] != type) + throw new InvalidClusterException("Type mismatch. 
Expected=" + type + ", got=" + longs[0] + " " + ClusterUID.make(longs[2], longs[3])); + if (longs[1] != ClusterImpl.LONG_HEADER_VERSION) + throw new InvalidClusterException("Header size mismatch. Expected=" + ClusterImpl.LONG_HEADER_VERSION + ", got=" + longs[1]); + return ClusterUID.make(longs[2], longs[3]); + } + protected static Id getUniqueId(long[] longs) { + return new IdImpl(new UUID(longs[3], longs[4])); + } + static final boolean DEBUG = false; + final public IClusterTable clusterTable; + // This can be null iff the cluster has been converted to big + public Change change = new Change(); + public ClusterChange cc; + public byte[] foreignLookup; + + private boolean dirtySizeInBytes = true; + private long sizeInBytes = 0; + + protected ClusterImpl() { + clusterTable = null; + } + + public ClusterImpl(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport support) { + super(support, clusterUID, clusterKey); +// SessionImplSocket session = (SessionImplSocket)support.getSession(); +// if(session != null) + this.clusterTable = clusterTable; +// else + } + + public static ClusterImpl dummy() { + return new ClusterSmall(); + } + + public static ClusterImpl make(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport2 support) { + return new ClusterSmall(clusterUID, clusterKey, support, clusterTable); + } + public static ClusterSmall proxy(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, long clusterId, ClusterSupport2 support) { + if (DEBUG) + new Exception("Cluster proxy for " + clusterUID).printStackTrace(); + return new ClusterSmall(null, clusterUID, clusterKey, support); + } + public static ClusterImpl make(IClusterTable clusterTable, long[] longs, int[] ints, byte[] bytes, ClusterSupport2 support, int clusterKey) + throws DatabaseException { + if (longs[0] == 0) + return new ClusterBig(clusterTable, longs, ints, bytes, support, clusterKey); + else + return new ClusterSmall(clusterTable, 
longs, ints, bytes, support, clusterKey); + } + +// public boolean virtual = false; + + @Override + public boolean hasVirtual() { + return false; +// return clusterTable.hasVirtual(clusterKey); + } + + @Override + public void markVirtual() { +// clusterTable.markVirtual(clusterKey); +// virtual = true; + } + + @Override + public boolean isWriteOnly() { + return false; + } + @Override + public boolean isLoaded() { + return true; + } + + @Override + public void resized() { + dirtySizeInBytes = true; +// if(clusterTable != null) +// clusterTable.setDirtySizeInBytes(true); + } + + public long getCachedSize() { + if(dirtySizeInBytes) { + try { + sizeInBytes = getUsedSpace(); + //System.err.println("recomputed size of cluster " + getClusterId() + " => " + sizeInBytes); + } catch (DatabaseException e) { + Logger.defaultLogError(e); + } + dirtySizeInBytes = false; + } + return sizeInBytes; + } + + protected void calculateModifiedId() { +// setModifiedId(new IdImpl(UUID.randomUUID())); + } + + public static class ClusterTables { + public byte[] bytes; + public int[] ints; + public long[] longs; + } + + public byte[] storeBytes() throws IOException { + throw new UnsupportedOperationException(); + } + + public ClusterTables store() throws IOException { + throw new UnsupportedOperationException(); + } + + abstract protected int getResourceTableCount(); + + public String dump(final ClusterSupport support) { + + StringBuilder sb = new StringBuilder(); + for(int i=1;i stms = new ArrayList(); + try { + + byte[] value = getValue(i, support); + if(value != null) + sb.append(" bytes: " + Arrays.toString(value) + "\n"); + + forPredicates(i, new PredicateProcedure() { + + @Override + public boolean execute(Integer c, final int predicateKey, int objectIndex) { + + try { + + forObjects(resourceKey, predicateKey, objectIndex, new ObjectProcedure() { + + @Override + public boolean execute(Integer context, int objectKey) throws DatabaseException { + + ClusterUID puid = 
support.getClusterByResourceKey(predicateKey).getClusterUID(); + ClusterUID ouid = support.getClusterByResourceKey(objectKey).getClusterUID(); + + stms.add(" " + puid + " " + (predicateKey&0xFFF) + " " + ouid + " " + (objectKey&0xFFF)); + + return false; + + } + + }, 0, support); + } catch (DatabaseException e) { + e.printStackTrace(); + } + + return false; + + } + + },0,support); + + Collections.sort(stms, AlphanumComparator.COMPARATOR); + + for(String s : stms) { + sb.append(s); + sb.append("\n"); + } + + } catch (DatabaseException e) { + e.printStackTrace(); + } + } + + return sb.toString(); + + } + + abstract public boolean isValueEx(int resourceIndex) throws DatabaseException; + + abstract public ClusterI addRelation(int resourceKey, ClusterUID puid, int predicateKey, ClusterUID ouid, int objectKey, ClusterSupport support) throws DatabaseException; + + @Override + public IClusterTable getClusterTable() { + return clusterTable; + } +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterSmall.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterSmall.java new file mode 100644 index 000000000..b84d4d51f --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterSmall.java @@ -0,0 +1,1309 @@ +/******************************************************************************* + * Copyright (c) 2007, 2010 Association for Decentralized Information Management + * in Industry THTH ry. + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * VTT Technical Research Centre of Finland - initial API and implementation + *******************************************************************************/ +package org.simantics.acorn.cluster; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; + +import org.simantics.acorn.exception.IllegalAcornStateException; +import org.simantics.acorn.internal.ClusterChange; +import org.simantics.acorn.internal.ClusterStream; +import org.simantics.acorn.internal.ClusterSupport2; +import org.simantics.acorn.internal.DebugPolicy; +import org.simantics.db.Resource; +import org.simantics.db.common.utils.Logger; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.exception.ExternalValueException; +import org.simantics.db.exception.ValidationException; +import org.simantics.db.impl.ClusterI; +import org.simantics.db.impl.ClusterSupport; +import org.simantics.db.impl.ClusterTraitsBase; +import org.simantics.db.impl.ForEachObjectContextProcedure; +import org.simantics.db.impl.ForEachObjectProcedure; +import org.simantics.db.impl.ForPossibleRelatedValueContextProcedure; +import org.simantics.db.impl.ForPossibleRelatedValueProcedure; +import org.simantics.db.impl.IClusterTable; +import org.simantics.db.impl.Table; +import org.simantics.db.impl.TableHeader; +import org.simantics.db.impl.graph.ReadGraphImpl; +import org.simantics.db.procedure.AsyncContextMultiProcedure; +import org.simantics.db.procedure.AsyncMultiProcedure; +import org.simantics.db.procore.cluster.ClusterMapSmall; +import org.simantics.db.procore.cluster.ClusterTraits; +import org.simantics.db.procore.cluster.ClusterTraitsSmall; +import 
org.simantics.db.procore.cluster.CompleteTableSmall; +import org.simantics.db.procore.cluster.ForeignTableSmall; +import org.simantics.db.procore.cluster.ObjectTable; +import org.simantics.db.procore.cluster.OutOfSpaceException; +import org.simantics.db.procore.cluster.PredicateTable; +import org.simantics.db.procore.cluster.ResourceTableSmall; +import org.simantics.db.procore.cluster.ValueTableSmall; +import org.simantics.db.service.Bytes; +import org.simantics.db.service.ClusterUID; +import org.simantics.db.service.ResourceUID; +import org.simantics.utils.datastructures.Callback; + +import gnu.trove.map.hash.TIntShortHashMap; +import gnu.trove.procedure.TIntProcedure; +import gnu.trove.set.hash.TIntHashSet; + +final public class ClusterSmall extends ClusterImpl { + private static final int TABLE_HEADER_SIZE = TableHeader.HEADER_SIZE + TableHeader.EXTRA_SIZE; + private static final int RESOURCE_TABLE_OFFSET = 0; + private static final int PREDICATE_TABLE_OFFSET = RESOURCE_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int OBJECT_TABLE_OFFSET = PREDICATE_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int VALUE_TABLE_OFFSET = OBJECT_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int FLAT_TABLE_OFFSET = VALUE_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int COMPLETE_TABLE_OFFSET = FLAT_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int FOREIGN_TABLE_OFFSET = COMPLETE_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int INT_HEADER_SIZE = FOREIGN_TABLE_OFFSET + TABLE_HEADER_SIZE; + private final int clusterBits; + private final ResourceTableSmall resourceTable; + private final PredicateTable predicateTable; + private final ObjectTable objectTable; + private final ValueTableSmall valueTable; + private final ForeignTableSmall foreignTable; + private final CompleteTableSmall completeTable; + private final ClusterMapSmall clusterMap; + private final int[] headerTable; + public final ClusterSupport2 clusterSupport; 
+ private boolean proxy; + private boolean deleted = false; + + protected ClusterSmall() { + this.proxy = true; + this.headerTable = null; + this.resourceTable = null; + this.foreignTable = null; + this.predicateTable = null; + this.objectTable = null; + this.valueTable = null; + this.completeTable = null; + this.clusterMap = null; + this.clusterSupport = null; + this.clusterBits = 0; + this.importance = 0; + } + + public ClusterSmall(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport2 support) { + super(clusterTable, clusterUID, clusterKey, support); + if(DebugPolicy.REPORT_CLUSTER_EVENTS) + new Exception(clusterUID.toString()).printStackTrace(); + this.proxy = true; + this.headerTable = null; + this.resourceTable = null; + this.foreignTable = null; + this.predicateTable = null; + this.objectTable = null; + this.valueTable = null; + this.completeTable = null; + this.clusterMap = null; + this.clusterSupport = support; + this.clusterBits = 0; + this.importance = 0; +// new Exception("ClusterSmall " + clusterKey).printStackTrace(); + } + ClusterSmall(ClusterUID clusterUID, int clusterKey, ClusterSupport2 support, IClusterTable clusterTable) { + super(clusterTable, clusterUID, clusterKey, support); + if(DebugPolicy.REPORT_CLUSTER_EVENTS) + new Exception(clusterUID.toString()).printStackTrace(); + this.proxy = false; + this.clusterSupport = support; + this.headerTable = new int[INT_HEADER_SIZE]; + this.resourceTable = new ResourceTableSmall(this, headerTable, RESOURCE_TABLE_OFFSET); + this.foreignTable = new ForeignTableSmall(this, headerTable, FOREIGN_TABLE_OFFSET); + this.predicateTable = new PredicateTable(this, headerTable, PREDICATE_TABLE_OFFSET); + this.objectTable = new ObjectTable(this, headerTable, OBJECT_TABLE_OFFSET); + this.valueTable = new ValueTableSmall(this, headerTable, VALUE_TABLE_OFFSET); + this.completeTable = new CompleteTableSmall(this, headerTable, COMPLETE_TABLE_OFFSET); + this.clusterMap = new 
ClusterMapSmall(this, foreignTable); + this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey); +// if(clusterTable != null) +// this.importance = -clusterTable.timeCounter(); +// else + this.importance = 0; +// new Exception("ClusterSmall " + clusterKey).printStackTrace(); + } + protected ClusterSmall(IClusterTable clusterTable, long[] longs, int[] ints, byte[] bytes, ClusterSupport2 support, int clusterKey) + throws DatabaseException { + super(clusterTable, checkValidity(-1, longs, ints, bytes), clusterKey, support); + this.proxy = false; + this.clusterSupport = support; + if (ints.length < INT_HEADER_SIZE) + throw new IllegalArgumentException("Too small integer table for cluster."); + this.headerTable = ints; + if(DebugPolicy.REPORT_CLUSTER_EVENTS) new Exception(Long.toString(clusterId)).printStackTrace(); + this.resourceTable = new ResourceTableSmall(this, ints, RESOURCE_TABLE_OFFSET, longs); + this.foreignTable = new ForeignTableSmall(this, headerTable, FOREIGN_TABLE_OFFSET, longs); + this.predicateTable = new PredicateTable(this, ints, PREDICATE_TABLE_OFFSET, ints); + this.objectTable = new ObjectTable(this, ints, OBJECT_TABLE_OFFSET, ints); + this.valueTable = new ValueTableSmall(this, ints, VALUE_TABLE_OFFSET, bytes); + this.completeTable = new CompleteTableSmall(this, headerTable, COMPLETE_TABLE_OFFSET, ints); + this.clusterMap = new ClusterMapSmall(this, foreignTable); + this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey); +// if(clusterTable != null) { +// this.importance = clusterTable.timeCounter(); +// clusterTable.markImmutable(this, getImmutable()); +// } +// new Exception("ClusterSmall " + clusterKey).printStackTrace(); + } + void analyse() { + System.out.println("Cluster " + clusterId); + System.out.println("-size:" + getUsedSpace()); + System.out.println(" -rt:" + (resourceTable.getTableCapacity() * 8 + 8)); + System.out.println(" -ft:" + foreignTable.getTableCapacity() * 8); + System.out.println(" -pt:" + 
predicateTable.getTableCapacity() * 4); + System.out.println(" -ot:" + objectTable.getTableCapacity() * 4); + System.out.println(" -ct:" + completeTable.getTableCapacity() * 4); + System.out.println(" -vt:" + valueTable.getTableCapacity()); + + System.out.println("-resourceTable:"); + System.out.println(" -resourceCount=" + resourceTable.getResourceCount()); + System.out.println(" -size=" + resourceTable.getTableSize()); + System.out.println(" -capacity=" + resourceTable.getTableCapacity()); + System.out.println(" -count=" + resourceTable.getTableCount()); + System.out.println(" -size=" + resourceTable.getTableSize()); + //resourceTable.analyse(); + } + public void checkDirectReference(int dr) + throws DatabaseException { + if (!ClusterTraits.statementIndexIsDirect(dr)) + throw new ValidationException("Reference is not direct. Reference=" + dr); + if (ClusterTraits.isFlat(dr)) + throw new ValidationException("Reference is flat. Reference=" + dr); + if (ClusterTraits.isLocal(dr)) { + if (dr < 1 || dr > resourceTable.getUsedSize()) + throw new ValidationException("Illegal local reference. Reference=" + dr); + } else { + int fi = ClusterTraits.getForeignIndexFromReference(dr); + int ri = ClusterTraits.getResourceIndexFromForeignReference(dr); + if (fi < 1 || fi > foreignTable.getUsedSize()) + throw new ValidationException("Illegal foreign reference. Reference=" + dr + " foreign index=" + fi); + if (ri < 1 || ri > ClusterTraits.getMaxNumberOfResources()) + throw new ValidationException("Illegal foreign reference. Reference=" + dr + " resource index=" + ri); + } + } + public void checkPredicateIndex(int pi) + throws DatabaseException { + // predicateTable.checkPredicateSetIndex(this, pi); + } + public void checkObjectSetReference(int or) + throws DatabaseException { + if (ClusterTraits.statementIndexIsDirect(or)) + throw new ValidationException("Illegal object set reference. 
Reference=" + or); + int oi = ClusterTraits.statementIndexGet(or); + this.objectTable.checkObjectSetIndex(this, oi); + } + + public void checkValueInit() + throws DatabaseException { + valueTable.checkValueInit(); + } + public void checkValue(int capacity, int index) + throws DatabaseException { + valueTable.checkValue(capacity, index); + } + public void checkValueFini() + throws DatabaseException { + valueTable.checkValueFini(); + } + public void checkForeingIndex(int fi) + throws DatabaseException { + if (fi<1 || fi > foreignTable.getUsedSize()) + throw new ValidationException("Illegal foreign index=" + fi); + } + public void checkCompleteSetReference(int cr) + throws DatabaseException { + if (!ClusterTraits.completeReferenceIsMultiple(cr)) + throw new ValidationException("Illegal complete set reference. Reference=" + cr); + int ci = cr; + this.completeTable.checkCompleteSetIndex(this, ci); + } + public void check() + throws DatabaseException { +// this.completeTable.check(this); +// this.objectTable.check(this); +// // Must be after object table check. +// this.predicateTable.check(this); +// this.resourceTable.check(this); + } + @Override + public CompleteTypeEnum getCompleteType(int resourceKey, ClusterSupport support) + throws DatabaseException { + final int resourceRef = getLocalReference(resourceKey); + CompleteTypeEnum ct = resourceTable.getCompleteType(resourceRef); + if (DEBUG) + System.out.println("ClusterSmall.getCompleteType rk=" + resourceKey + " ct=" + ct); + return ct; + } + + @Override + public int getCompleteObjectKey(int resourceKey, ClusterSupport support) + throws DatabaseException { + final int resourceIndexOld = getLocalReference(resourceKey); + short completeRef = resourceTable.getCompleteObjectRef(resourceIndexOld); + int clusterIndex; + int resourceIndex; + if (0 == completeRef) + throw new DatabaseException("Resource's complete object refernce is null. 
Resource key=" + resourceKey + "."); + ClusterI.CompleteTypeEnum completeType = resourceTable.getCompleteType(resourceIndexOld); + if (completeType == ClusterI.CompleteTypeEnum.NotComplete) + throw new DatabaseException("Resource has multiple complete objects. Resource key=" + resourceKey + "."); + if (ClusterTraitsSmall.resourceRefIsLocal(completeRef)) { + clusterIndex = clusterKey; + resourceIndex = completeRef; + } else { // Resource has one complete statement. + ResourceUID resourceUID = clusterMap.getForeignResourceUID(completeRef); + ClusterUID uid = resourceUID.asCID(); + clusterIndex = clusterSupport.getClusterKeyByUID(0, uid.second); + //ClusterI c = clusterTable.getClusterByClusterUIDOrMakeProxy(uid); + //clusterIndex = c.getClusterKey(); + //assert(clusterIndex == clusterTable.getClusterByClusterUIDOrMakeProxy(uid).getClusterKey()); + resourceIndex = resourceUID.getIndex(); + } + int key = ClusterTraits.createResourceKey(clusterIndex, resourceIndex); + if (DEBUG) + System.out.println("ClusterSmall.complete object rk=" + resourceKey + " ck=" + key); + return key; + } + + @Override + public boolean isComplete(int resourceKey, ClusterSupport support) + throws DatabaseException { + final int resourceRef = getLocalReference(resourceKey); + final ClusterI.CompleteTypeEnum completeType = resourceTable.getCompleteType(resourceRef); + boolean complete = completeType != ClusterI.CompleteTypeEnum.NotComplete; + if (DEBUG) + System.out.println("ClusterSmall.key=" + resourceKey + " isComplete=" + complete); + return complete; + } + public int getSingleObject(int resourceKey, int predicateKey, int objectIndex, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.getSingleObject: rk=" + resourceKey + " pk=" + predicateKey); + if (0 == objectIndex) { + final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey); + final short pRef = getInternalReferenceOrZero2(predicateKey, support); + 
final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + } + return objectTable.getSingleObject(objectIndex, support, this); + } + + public void forObjects(ReadGraphImpl graph, int resourceKey, int predicateKey, int objectIndex, AsyncMultiProcedure procedure, + ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.forObjects1: rk=" + resourceKey + " pk=" + predicateKey); + if (0 == objectIndex) { + final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); + final int pRef = getInternalReferenceOrZero2(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); + return; + } + objectTable.foreachObject(graph, objectIndex, procedure, this); + } + + public void forObjects(ReadGraphImpl graph, int resourceKey, int predicateKey, int objectIndex, C context, AsyncContextMultiProcedure procedure, + ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.forObjects1: rk=" + resourceKey + " pk=" + predicateKey); + if (0 == objectIndex) { + final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); + final int pRef = getInternalReferenceOrZero2(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this); + return; + } + objectTable.foreachObject(graph, objectIndex, context, procedure, this); + } + + @Override + public boolean forObjects(int resourceKey, int predicateKey, int 
objectIndex, ObjectProcedure procedure, + Context context, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.forObjects2: rk=" + resourceKey + " pk=" + predicateKey); + if (0 == objectIndex) { + final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); + final short pRef = getInternalReferenceOrZero2(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable); + } + return objectTable.foreachObject(objectIndex, procedure, context, support, this); + } + + @Override + public int getSingleObject(int resourceKey, int predicateKey, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey); + final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); + final short pRef = getInternalReferenceOrZero2(predicateKey, support); + final int completeType = ClusterTraitsBase.getCompleteTypeIntFromResourceKey(predicateKey); + final ClusterI.CompleteTypeEnum pCompleteType = CompleteTypeEnum.make(completeType); + if (completeType > 0) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF; + if (0 == predicateIndex) // All relevant data is in resource table. 
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF); + return getSingleObject(resourceKey, predicateKey, objectIndex, support); + } + + @Override + public int getSingleObject(int resourceKey, ForPossibleRelatedValueProcedure procedure, ClusterSupport support) throws DatabaseException { + final short resourceIndex = (short)ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); + final int predicateKey = procedure.predicateKey; + int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey); + short pRef = 0; + if(procedure.clusterKey[0] == clusterKey) { + pRef = (short)procedure.predicateReference[0]; + } else { + pRef = getInternalReferenceOrZero2(predicateKey, support); + procedure.clusterKey[0] = clusterKey; + procedure.predicateReference[0] = pRef; + } + + final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType; + if (CompleteTypeEnum.NotComplete != pCompleteType) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF; + if (0 == predicateIndex) // All relevant data is in resource table. 
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF); + return getSingleObject(resourceKey, predicateKey, objectIndex, support); + } + + @Override + public int getSingleObject(int resourceKey, ForPossibleRelatedValueContextProcedure procedure, ClusterSupport support) throws DatabaseException { + final short resourceIndex = (short)ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); + final int predicateKey = procedure.predicateKey; + int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey); + short pRef = 0; + if(procedure.clusterKey[0] == clusterKey) { + pRef = (short)procedure.predicateReference[0]; + } else { + pRef = getInternalReferenceOrZero2(predicateKey, support); + procedure.clusterKey[0] = clusterKey; + procedure.predicateReference[0] = pRef; + } + final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType; + if (CompleteTypeEnum.NotComplete != pCompleteType) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF; + if (0 == predicateIndex) // All relevant data is in resource table. 
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF); + return getSingleObject(resourceKey, predicateKey, objectIndex, support); + } + + @Override + public void forObjects(ReadGraphImpl graph, int resourceKey, + int predicateKey, AsyncMultiProcedure procedure) throws DatabaseException { + + throw new UnsupportedOperationException(); + +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// if (DEBUG) +// System.out.println("ClusterSmall.forObjects3: rk=" + resourceKey + " pk=" + predicateKey); +// final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); +// final int pRef = getInternalReferenceOrZero2(predicateKey, support); +// final int completeType = ClusterTraitsBase.getCompleteTypeIntFromResourceKey(predicateKey); +// final ClusterI.CompleteTypeEnum pCompleteType = CompleteTypeEnum.make(completeType); +// if (completeType > 0) { +// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF; +// if (0 == predicateIndex) { +// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF); +// forObjects(graph, resourceKey, predicateKey, objectIndex, procedure, support); + } + + public void forObjects(ReadGraphImpl graph, int resourceKey, ForEachObjectProcedure procedure) throws DatabaseException { + + throw new UnsupportedOperationException(); + +// final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); +// final int predicateKey = procedure.predicateKey; 
+// int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey); +// int pRef = 0; +// if(procedure.clusterKey[0] == clusterKey) { +// pRef = procedure.predicateReference[0]; +// } else { +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// pRef = getInternalReferenceOrZero2(predicateKey, support); +// procedure.clusterKey[0] = clusterKey; +// procedure.predicateReference[0] = pRef; +// } +// final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType; +// if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) { +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF; +// if (0 == predicateIndex) { +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// int hashBase = predicateIndex + predicateTable.offset; +// if (predicateTable.table[hashBase-1] < 0) { +// int objectIndex = TableIntArraySet2.get(predicateTable.table, hashBase, pRef & 0xFFFF); +// //int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF); +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// forObjects(graph, resourceKey, predicateKey, objectIndex, procedure, support); +// } else { +// procedure.finished(graph); +//// graph.dec(); +// } + } + + public void forObjects(ReadGraphImpl graph, int resourceKey, C context, ForEachObjectContextProcedure procedure) throws 
DatabaseException { + + throw new UnsupportedOperationException(); + +// final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); +// final int predicateKey = procedure.predicateKey; +// int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey); +// int pRef = 0; +// if(procedure.clusterKey[0] == clusterKey) { +// pRef = procedure.predicateReference[0]; +// } else { +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// pRef = getInternalReferenceOrZero2(predicateKey, support); +// procedure.clusterKey[0] = clusterKey; +// procedure.predicateReference[0] = pRef; +// } +// +// final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType; +// if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) { +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF; +// if (0 == predicateIndex) { +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// int hashBase = predicateIndex + predicateTable.offset; +// if(predicateTable.table[hashBase-1] < 0) { +// int objectIndex = TableIntArraySet2.get(predicateTable.table, hashBase, pRef & 0xFFFF); +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// forObjects(graph, resourceKey, predicateKey, objectIndex, context, procedure, support); +// } else { +// int objectIndex = 
TableIntSet2.get(predicateTable.table, hashBase, pRef & 0xFFFF); +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// forObjects(graph, resourceKey, predicateKey, objectIndex, context, procedure, support); +// } + } + @Override + public boolean forObjects(int resourceKey, int predicateKey, + ObjectProcedure procedure, Context context, ClusterSupport support) + throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.forObjects4: rk=" + resourceKey + " pk=" + predicateKey); + final short resourceIndex = (short)ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); + final short pRef = getInternalReferenceOrZero2(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + // PredicateType is complete i.e. all relevant data is in resource table. + if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) { + if (DEBUG) + System.out.println("ClusterSmall.forObjects: complete type was " + pCompleteType + " cluster=" + getClusterUID()); + return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable); + } + final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); + if (0 == predicateIndex) { // All relevant data is in resource table. 
+ if (DEBUG) + System.out.println("ClusterSmall.forObjects: no predicate table " + pCompleteType); + return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable); + } + int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF); + return forObjects(resourceKey, predicateKey, objectIndex, procedure, context, support); + } + @Override + public boolean forPredicates(int resourceKey, + PredicateProcedure procedure, Context context, ClusterSupport support) + throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.forPredicates: rk=" + resourceKey ); + final int resourceIndex = getLocalReference(resourceKey); + final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); + if (0 == predicateIndex) + return resourceTable.foreachPredicate(resourceIndex, + procedure, context, support, this, completeTable); + else { + boolean broken = resourceTable.foreachPredicate(resourceIndex, + procedure, context, support, this, completeTable); + if (broken) + return true; + } + return predicateTable.foreachPredicate(predicateIndex, + procedure, context, support, this); + } + + @Override + public ClusterI addRelation(int sResourceKey, ClusterUID puid, int pResourceKey, ClusterUID ouid, int oResourceKey, ClusterSupport support) throws DatabaseException { + + if(proxy) { + throw new UnsupportedOperationException(); +// ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey); +// return cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support); + } + + // check(); + boolean ret; + try { + short sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION); + short pri = getReferenceOrCreateIfForeign(pResourceKey, puid, support, ClusterStream.NULL_OPERATION); + short ori = getReferenceOrCreateIfForeign(oResourceKey, ouid, support, ClusterStream.NULL_OPERATION); + ClusterI.CompleteTypeEnum completeType = 
ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey); + ret = addRelationInternal(sri, pri, ori, completeType); + calculateModifiedId(); + } catch (OutOfSpaceException e) { + boolean streamOff = support.getStreamOff(); + if (!streamOff) { + support.cancelStatement(this); + support.setStreamOff(true); + } + ClusterI cluster = toBig(clusterSupport); + if (!streamOff) + support.setStreamOff(false); + ClusterI cluster2 = cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support); + if (cluster != cluster2) + throw new DatabaseException("Internal error. Contact application support."); + return cluster; + } +// check(); + if (ret) { + support.addStatement(this); + return this; + } else { + support.cancelStatement(this); + return null; + } + + } + + @Override + public ClusterI addRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support) throws DatabaseException { + + if (DEBUG) + System.out.println("add rk=" + sResourceKey + " pk=" + pResourceKey + " ok=" + oResourceKey); + + if(proxy) { + throw new UnsupportedOperationException(); +// ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey); +// return cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support); + } + + // check(); + boolean ret; + try { + short sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION); + short pri = getReferenceOrCreateIfForeign(pResourceKey, support, ClusterStream.NULL_OPERATION); + short ori = getReferenceOrCreateIfForeign(oResourceKey, support, ClusterStream.NULL_OPERATION); + ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey); + ret = addRelationInternal(sri, pri, ori, completeType); + calculateModifiedId(); + } catch (OutOfSpaceException e) { + boolean streamOff = support.getStreamOff(); + if (!streamOff) { + support.cancelStatement(this); + support.setStreamOff(true); + } + ClusterI cluster = toBig(clusterSupport); + if (!streamOff) + 
support.setStreamOff(false); + ClusterI cluster2 = cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support); + if (cluster != cluster2) + throw new DatabaseException("Internal error. Contact application support."); + return cluster; + } +// check(); + if (ret) { + support.addStatement(this); + return this; + } else { + support.cancelStatement(this); + return null; + } + } + @Override + public boolean removeRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support) + throws DatabaseException { + // check(); + short sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.REMOVE_OPERATION); + short pri = getInternalReferenceOrZeroAnd(pResourceKey, support, ClusterStream.NULL_OPERATION); + short ori = getInternalReferenceOrZeroAnd(oResourceKey, support, ClusterStream.NULL_OPERATION); + boolean ret = false; + if (0 != pri && 0 != ori) { + ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey); + ret = removeRelationInternal(sri, pri, ori, completeType, support); + calculateModifiedId(); + } + if (ret) + support.removeStatement(this); + else + support.cancelStatement(this); + // check(); + return ret; + } + @Override + public void denyRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support) + throws DatabaseException { + short s = checkResourceKeyIsOursAndGetResourceIndexIf(sResourceKey, support); + ResourceReferenceAndCluster p = checkResourceKeyAndGetResourceIndexIf(pResourceKey, support); + ResourceReferenceAndCluster o = checkResourceKeyAndGetResourceIndexIf(oResourceKey, support); + if (0 == s || 0 == p.reference || 0 == o.reference) + return; + // check(); + ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey); + boolean ret = removeRelationInternal(s, p.reference, o.reference, completeType, support); + if (ret) { + support.addStatementIndex(this, sResourceKey, getClusterUID(), 
ClusterChange.REMOVE_OPERATION); + support.addStatementIndex(this, pResourceKey, p.clusterUID, ClusterStream.NULL_OPERATION); + support.addStatementIndex(this, oResourceKey, o.clusterUID, ClusterStream.NULL_OPERATION); + support.removeStatement(this); + } + calculateModifiedId(); + // check(); + return; + } + @Override + public InputStream getValueStream(int resourceKey, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.getValue " + resourceKey); + int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey); + try { + byte[] buffer = resourceTable.getValue(valueTable, resourceIndex); + if(buffer == null) return null; + return new ByteArrayInputStream(buffer); + } catch (ExternalValueException e) { + return support.getValueStreamEx(resourceIndex, clusterUID.second); + } + } + @Override + public byte[] getValue(int resourceKey, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.getValue " + resourceKey); + int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey); + try { + return resourceTable.getValue(valueTable, resourceIndex); + } catch (ExternalValueException e) { + return clusterSupport.impl.getResourceFile(clusterUID.asBytes(), resourceIndex); + //return support.getValueEx(resourceIndex, clusterUID.second); + } + } + @Override + public boolean hasValue(int resourceKey, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReference(resourceKey); + return resourceTable.hasValue(resourceIndex); + } + @Override + public boolean removeValue(int resourceKey, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReferenceAnd(resourceKey, support, ClusterChange.DELETE_OPERATION); + support.removeValue(this); + calculateModifiedId(); + return resourceTable.removeValue(valueTable, resourceIndex); + } + @Override + public ClusterI setValue(int rResourceId, 
byte[] value, int length, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.SET_OPERATION); + support.setValue(this, getClusterId(), value, length); + try { + resourceTable.setValue(valueTable, resourceIndex, value, length); + calculateModifiedId(); + return this; + } catch (OutOfSpaceException e) { + boolean streamOff = support.getStreamOff(); + if (!streamOff) + support.setStreamOff(true); + ClusterI cluster = toBig(support); + cluster.setValue(rResourceId, value, length, support); + if (!streamOff) + support.setStreamOff(false); + return cluster; + } + } + @Override + public ClusterI modiValueEx(int rResourceId, long voffset, int length, byte[] value, int offset, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.MODI_OPERATION); + support.modiValue(this, getClusterId(), voffset, length, value, offset); + resourceTable.setValueEx(valueTable, resourceIndex); + calculateModifiedId(); + return this; + } + @Override + public byte[] readValueEx(int rResourceId, long voffset, int length, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReference(rResourceId); + boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex); + if (!isExternal) + throw new DatabaseException("ClusterI.readValue supported only for external value. 
Resource key=" + rResourceId); + return support.getValueEx(resourceIndex, getClusterId(), voffset, length); + } + @Override + public boolean isValueEx(int resourceKey) throws DatabaseException { + int resourceIndex = getLocalReference(resourceKey); + return resourceTable.isValueEx(valueTable, resourceIndex); + } + @Override + public long getValueSizeEx(int rResourceId, ClusterSupport support) + throws DatabaseException, ExternalValueException { + int resourceIndex = getLocalReference(rResourceId); + boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex); + if (!isExternal) + throw new ExternalValueException("ClusterI.getValueSizeEx supported only for external value. Resource key=" + rResourceId); + return support.getValueSizeEx(resourceIndex, getClusterId()); + } + @Override + public void setValueEx(int rResourceId) + throws DatabaseException { + int resourceIndex = getLocalReference(rResourceId); + resourceTable.setValueEx(valueTable, resourceIndex); + } + @Override + public int createResource(ClusterSupport support) + throws DatabaseException { + + if(proxy) { + throw new UnsupportedOperationException(); +// ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey); +// return cluster.createResource(support); + } + + short resourceIndex = resourceTable.createResource(); + calculateModifiedId(); + if(DebugPolicy.REPORT_RESOURCE_ID_ALLOCATION) + System.out.println("[RID_ALLOCATION]: ClusterSmall[" + clusterId + "] allocates " + resourceIndex); + support.createResource(this, resourceIndex, getClusterId()); + return ClusterTraits.createResourceKey(clusterKey, resourceIndex); + } + @Override + public boolean hasResource(int resourceKey, ClusterSupport support) { + int clusterKey = ClusterTraitsBase.getClusterKeyFromResourceKeyNoThrow(resourceKey); + if (this.clusterKey != clusterKey) // foreign resource + return false; + int resourceIndex; + try { + resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + } catch 
(DatabaseException e) { + return false; + } + if (resourceIndex > 0 & resourceIndex <= resourceTable.getTableCount()) + return true; + else + return false; + } + @Override + public int getNumberOfResources(ClusterSupport support) + throws DatabaseException { + + if(proxy) { + throw new UnsupportedOperationException(); +// ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey); +// return cluster.getNumberOfResources(support); + } + + return resourceTable.getUsedSize(); + } + + public int getNumberOfResources() throws IllegalAcornStateException { + if(proxy) + throw new IllegalAcornStateException("proxy == true for " + clusterId); + + return resourceTable.getUsedSize(); + } + + @Override + public long getUsedSpace() { + if(isEmpty()) return 0; + long rt = resourceTable.getTableCapacity() * 8 + 8; // (8 = cluster id) + long ft = foreignTable.getTableCapacity() * 8; + long pt = predicateTable.getTableCapacity() * 4; + long ot = objectTable.getTableCapacity() * 4; + long ct = completeTable.getTableCapacity() * 4; + long vt = valueTable.getTableCapacity() * 1; + long cm = clusterMap.getUsedSpace(); + return rt + ft + pt + ot + ct + vt + cm; + } + @Override + public boolean isEmpty() { + if(resourceTable == null) return true; + return resourceTable.getTableCount() == 0; + } + @Override + public void printDebugInfo(String message, ClusterSupport support) + throws DatabaseException { + throw new DatabaseException("Not implemented!"); + } + private short getInternalReferenceOrZero2(int resourceKey, ClusterSupport support) throws DatabaseException { + int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey); + if (!ClusterTraitsBase.isCluster(clusterBits, resourceKey)) { + return clusterMap.getForeignReferenceOrZero(resourceKey); + } else { + return (short)resourceIndex; + } + } + private short getInternalReferenceOrZeroAnd(int resourceKey, ClusterSupport support, byte op) + throws DatabaseException { + int clusterKey = 
ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { // foreign resource + ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey); + short foreignRef = clusterMap.getForeignReferenceOrZero(resourceKey); + support.addStatementIndex(this, resourceKey, clusterUID, op); + return foreignRef; + } + support.addStatementIndex(this, resourceKey, getClusterUID(), op); + return (short)resourceIndex; + } + private final short getLocalReference(int resourceKey) throws DatabaseException { + return ClusterTraits.getResourceIndexFromResourceKeyNoThrow(resourceKey); + } + private final short getLocalReferenceAnd(int resourceKey, ClusterSupport support, byte op) + throws DatabaseException { + short resourceIndex = getLocalReference(resourceKey); + support.addStatementIndex(this, resourceKey, getClusterUID(), op); + return resourceIndex; + } + private short checkResourceKeyIsOursAndGetResourceIndexIf(int resourceKey, ClusterSupport support) + throws DatabaseException { + int clusterShortId = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + if (this.clusterKey != clusterShortId) + return 0; + int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + return (short)resourceIndex; + } + private short getReferenceOrCreateIfForeign(int resourceKey, ClusterUID clusterUID, ClusterSupport support, byte op) + throws DatabaseException { + int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + short resourceIndex = (short)ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { + support.addStatementIndex(this, resourceKey, clusterUID, op); + short ref = clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID); + return ref; + } + support.addStatementIndex(this, resourceKey, getClusterUID(), op); + return resourceIndex; + } + private 
short getReferenceOrCreateIfForeign(int resourceKey, ClusterSupport support, byte op) + throws DatabaseException { + int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + short resourceIndex = (short)ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { + ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey); + support.addStatementIndex(this, resourceKey, clusterUID, op); + short ref = clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID); + return ref; + } + support.addStatementIndex(this, resourceKey, getClusterUID(), op); + return resourceIndex; + } + private class ResourceReferenceAndCluster { + ResourceReferenceAndCluster(short reference, ClusterUID clusterUID) { + this.reference = reference; + this.clusterUID = clusterUID; + } + public final short reference; + public final ClusterUID clusterUID; + } + private ResourceReferenceAndCluster checkResourceKeyAndGetResourceIndexIf(int resourceKey, ClusterSupport support) + throws DatabaseException { + int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + short resourceIndex = (short)ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { // foreign resource + ClusterI foreignCluster = support.getClusterByClusterKey(clusterKey); + ClusterUID clusterUID = foreignCluster.getClusterUID(); + short ref = clusterMap.getForeignReferenceOrZero(resourceKey); + return new ResourceReferenceAndCluster(ref, clusterUID); + } + return new ResourceReferenceAndCluster(resourceIndex, getClusterUID()); + } + + static long fTime = 0; + + @Override + final public int execute(int resourceReference) throws DatabaseException { + short resourceRef = (short)resourceReference; + int key; + if (ClusterTraitsSmall.resourceRefIsLocal(resourceRef)) { + key = clusterBits | resourceRef; + } else { + short foreignIndex = 
ClusterTraitsSmall.resourceRefGetForeignIndex((short)resourceRef); + //long start = System.nanoTime(); + ResourceUID resourceUID = foreignTable.getResourceUID(foreignIndex); + int clusterKey = clusterSupport.getClusterKeyByClusterUIDOrMake(resourceUID.asCID()); +// ClusterBase cluster = clusterSupport.getClusterByClusterUIDOrMake(resourceUID.asCID()); + key = ClusterTraitsBase.createResourceKey(clusterKey, resourceUID.getIndex()); + //fTime += System.nanoTime() - start; + //System.err.println("fTime: " + 1e-9*fTime); + } + if (DEBUG) + System.out.println("ClusterSmall.execute key=" + key); + return key; + } + + private boolean addRelationInternal(short sReference, short pReference, short oReference, ClusterI.CompleteTypeEnum completeType) + throws DatabaseException { + int predicateIndex = resourceTable.addStatement(sReference, pReference, oReference, predicateTable, objectTable, completeType, completeTable); + if (0 == predicateIndex) + return true; // added to resourceTable + else if (0 > predicateIndex) + return false; // old complete statemenent + int newPredicateIndex = predicateTable.addPredicate(predicateIndex, 0xFFFF & pReference, 0xFFFF & oReference, objectTable); + if (0 == newPredicateIndex) + return false; + if (predicateIndex != newPredicateIndex) + resourceTable.setPredicateIndex(sReference, newPredicateIndex); + return true; + } + private boolean removeRelationInternal(int sResourceIndex, short pResourceIndex, + short oResourceIndex, ClusterI.CompleteTypeEnum completeType, ClusterSupport support) + throws DatabaseException { + int predicateIndex = resourceTable.getPredicateIndex(sResourceIndex); + if (0 == predicateIndex || ClusterI.CompleteTypeEnum.NotComplete != completeType) + return resourceTable.removeStatementFromCache(sResourceIndex, + pResourceIndex, oResourceIndex, completeType, completeTable); + PredicateTable.Status ret = predicateTable.removePredicate(predicateIndex, 0xFFFF & pResourceIndex, 0xFFFF & oResourceIndex, objectTable); + switch 
(ret) { + case NothingRemoved: + return false; + case PredicateRemoved: { + if (0 == predicateTable.getPredicateSetSize(predicateIndex)) + resourceTable.setPredicateIndex(sResourceIndex, 0); + // intentionally dropping to next case + } default: + break; + } + resourceTable.removeStatement(sResourceIndex, + pResourceIndex, oResourceIndex, + completeType, completeTable, + predicateTable, objectTable, support); + return true; + } + @Override + public void load() { + throw new Error("Not supported."); + } + + @Override + public void load(Callback r) { + throw new Error("Not supported."); + } + + public boolean contains(int resourceKey) { + return ClusterTraitsBase.isCluster(clusterBits, resourceKey); + } + @Override + public void load(final ClusterSupport support, final Runnable callback) { + + throw new UnsupportedOperationException(); + +// try { +// clusterTable.load2(clusterId, clusterKey); +// callback.run(); +// } catch (DatabaseException e) { +// e.printStackTrace(); +// } + + } + @Override + public ClusterI getClusterByResourceKey(int resourceKey, + ClusterSupport support) { + throw new Error(); + } + @Override + public void increaseReferenceCount(int amount) { + throw new Error(); + } + @Override + public void decreaseReferenceCount(int amount) { + throw new Error(); + } + @Override + public int getReferenceCount() { + throw new Error(); + } + @Override + public void releaseMemory() { + } + @Override + public void compact() { + clusterMap.compact(); + } + @Override + public boolean isLoaded() { + return !proxy; + } + +// public ClusterImpl tryLoad(SessionImplSocket sessionImpl) { +// +// throw new UnsupportedOperationException(); +// assert(Constants.ReservedClusterId != clusterId); +// +// return clusterTable.tryLoad(clusterId, clusterKey); +// +// } + + + @Override + public ClusterBig toBig(ClusterSupport support) + throws DatabaseException { + if (DEBUG) { + System.out.println("DEBUG: toBig cluster=" + clusterId); + new Exception().printStackTrace(); + } + 
ClusterBig big = new ClusterBig(clusterSupport, getClusterUID(), clusterKey, (ClusterSupport2)support); + big.cc = this.cc; +// if(big.cc != null) +// big.cc.clusterImpl = this; + resourceTable.toBig(big, support, this); + big.foreignLookup = this.foreignLookup; + big.change = this.change; + this.cc = null; + this.foreignLookup = null; + this.change = null; + return big; + } + + @Override + public ClusterTypeEnum getType() { + return ClusterTypeEnum.SMALL; + } + @Override + public boolean getImmutable() { + int status = resourceTable.getClusterStatus(); + return (status & ClusterStatus.ImmutableMaskSet) == 1; + } + @Override + public void setImmutable(boolean immutable, ClusterSupport support) { + if(resourceTable != null) { + int status = resourceTable.getClusterStatus(); + if (immutable) + status |= ClusterStatus.ImmutableMaskSet; + else + status &= ClusterStatus.ImmutableMaskClear; + resourceTable.setClusterStatus(status); + } + support.setImmutable(this, immutable); + } + + @Override + public String toString() { + try { + final TIntHashSet set = new TIntHashSet(); + TIntShortHashMap map = foreignTable.getResourceHashMap(); + map.forEachKey(new TIntProcedure() { + @Override + public boolean execute(int value) { + set.add(value & 0xfffff000); + return true; + } + }); + return "ClusterSmall[" + getClusterUID() + " - " + getClusterId() + " - " + getNumberOfResources() + " - " + foreignTable.getResourceHashMap().size() + " - " + set.size() + "]"; + } catch (DatabaseException e) { + try { + return "ClusterSmall[" + getNumberOfResources() + "]"; + } catch (IllegalAcornStateException e1) { + Logger.defaultLogError(e1); + e1.printStackTrace(); + return "An exception occured!!"; + } + } + } + + // Memory map + // bytes (b) | headers(i) | predicateTable (i) | objectTable (i) | completeTable (i) | resourceTable (l) | foreignTable (l) + + @Override + public byte[] storeBytes() throws IOException { + + int byteSize = valueTable.getTableSize(); + int longSize = 
LONG_HEADER_SIZE + resourceTable.getTableSize() + foreignTable.getTableSize(); + int intSize = INT_HEADER_SIZE + predicateTable.getTableSize() + objectTable.getTableSize() + completeTable.getTableSize(); + + byte[] raw = new byte[12 + byteSize + 8*longSize + 4*intSize]; + + int[] currentHeader = Arrays.copyOf(headerTable, INT_HEADER_SIZE); + + Bytes.writeLE(raw, 0, byteSize); + Bytes.writeLE(raw, 4, intSize); + Bytes.writeLE(raw, 8, longSize); + + int rawPos = valueTable.storeBytes(raw, 0, 12); + + int intBase = rawPos; + + rawPos += 4*INT_HEADER_SIZE; + rawPos = predicateTable.storeBytes(raw, (rawPos-intBase)>>2, rawPos); + rawPos = objectTable.storeBytes(raw, (rawPos-intBase)>>2, rawPos); + rawPos = completeTable.storeBytes(raw, (rawPos-intBase)>>2, rawPos); + + int longBase = rawPos; + + rawPos += 8*LONG_HEADER_SIZE; + rawPos = resourceTable.storeBytes(raw, (rawPos-longBase)>>3, rawPos); + rawPos = foreignTable.storeBytes(raw, (rawPos-longBase)>>3, rawPos); + + Bytes.writeLE8(raw, longBase, -1); + Bytes.writeLE8(raw, longBase+8, LONG_HEADER_VERSION); + Bytes.writeLE8(raw, longBase+16, 0); + Bytes.writeLE8(raw, longBase+24, clusterUID.second); + + // write header + for(int i=0;i getPredicateTable() { + return predicateTable; + } + + @Override + public Table getForeignTable() { + return foreignTable; + } + + @Override + public int makeResourceKey(int pRef) throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public Table getCompleteTable() { + return completeTable; + } + + @Override + public Table getValueTable() { + return valueTable; + } + + @Override + public Table getObjectTable() { + return objectTable; + } + +} + +class ClusterStatus { + public static final int ImmutableMaskClear = 0xFFFFFFFE; + public static final int ImmutableMaskSet = 0x00000001; + public static final int DeletedMaskClear = 0xFFFFFFFD; + public static final int DeletedMaskSet = 0x00000002; +} diff --git 
a/bundles/org.simantics.acorn/src/org/simantics/acorn/exception/AcornAccessVerificationException.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/exception/AcornAccessVerificationException.java new file mode 100644 index 000000000..6269a60b3 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/exception/AcornAccessVerificationException.java @@ -0,0 +1,20 @@ +package org.simantics.acorn.exception; + +import org.simantics.db.exception.SDBException; + +public class AcornAccessVerificationException extends SDBException { + + private static final long serialVersionUID = 6601855907356895356L; + + public AcornAccessVerificationException(String message, Throwable cause) { + super(message, cause); + } + + public AcornAccessVerificationException(String message) { + super(message); + } + + public AcornAccessVerificationException(Throwable cause) { + super(cause); + } +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/exception/IllegalAcornStateException.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/exception/IllegalAcornStateException.java new file mode 100644 index 000000000..8228d59c1 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/exception/IllegalAcornStateException.java @@ -0,0 +1,21 @@ +package org.simantics.acorn.exception; + +import org.simantics.db.exception.SDBException; + +public class IllegalAcornStateException extends SDBException { + + private static final long serialVersionUID = -8255505454138490120L; + + public IllegalAcornStateException(String message, Throwable cause) { + super(message, cause); + } + + public IllegalAcornStateException(String message) { + super(message); + } + + public IllegalAcornStateException(Throwable cause) { + super(cause); + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/exception/InvalidHeadStateException.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/exception/InvalidHeadStateException.java new file mode 100644 
package org.simantics.acorn.exception;

/**
 * Exception raised when the persisted head state of the Acorn database is
 * invalid. (Exact semantics inferred from the class name — confirm against
 * call sites.)
 */
public class InvalidHeadStateException extends Exception {

    private static final long serialVersionUID = -7291859180968235955L;

    public InvalidHeadStateException() {
        super();
    }

    public InvalidHeadStateException(String message, Throwable cause, boolean enableSuppression,
            boolean writableStackTrace) {
        super(message, cause, enableSuppression, writableStackTrace);
    }

    public InvalidHeadStateException(String message, Throwable cause) {
        super(message, cause);
    }

    public InvalidHeadStateException(String message) {
        super(message);
    }

    public InvalidHeadStateException(Throwable cause) {
        super(cause);
    }
}

package org.simantics.acorn.internal;

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileLock;
import java.nio.file.DirectoryStream;
import java.nio.file.FileVisitOption;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.EnumSet;
import java.util.Properties;

import org.simantics.acorn.GraphClientImpl2;
import org.simantics.db.Database;
import org.simantics.db.DatabaseUserAgent;
import org.simantics.db.ServiceLocator;
import org.simantics.db.common.utils.Logger;
import org.simantics.db.server.ProCoreException;

/**
 * File-system backed {@link Database} implementation for the Acorn engine.
 * Mutual exclusion between processes is implemented with an OS-level
 * {@link FileLock} on a {@code lock} file inside the database folder.
 *
 * <p>NOTE(review): this class is not thread-safe; {@code start}/{@code tryToStop}
 * are assumed to be called from a single controlling thread — confirm.
 *
 * @author Tuukka Lehtonen
 */
public class AcornDatabase implements Database {

    /** Root folder holding all database files, including the {@code lock} file. */
    private final Path folder;

    private DatabaseUserAgent userAgent;

    /** Open handle to the lock file while the database is running. */
    private RandomAccessFile raLockFile;

    /** OS-level lock held while the database is running; null when stopped. */
    private FileLock lock;

    private boolean isRunning;

    public AcornDatabase(Path folder) {
        this.folder = folder;
    }

    @Override
    public DatabaseUserAgent getUserAgent() {
        return userAgent;
    }

    @Override
    public void setUserAgent(DatabaseUserAgent dbUserAgent) {
        userAgent = dbUserAgent;
    }

    @Override
    public Status getStatus() {
        return Status.Local;
    }

    @Override
    public File getFolder() {
        return folder.toFile();
    }

    @Override
    public boolean isFolderOk() {
        return isFolderOk(folder.toFile());
    }

    @Override
    public boolean isFolderOk(File aFolder) {
        // Any existing directory is acceptable; no content validation is done here.
        if (!aFolder.isDirectory())
            return false;
        return true;
    }

    @Override
    public boolean isFolderEmpty() {
        return isFolderEmpty(folder.toFile());
    }

    @Override
    public boolean isFolderEmpty(File aFolder) {
        Path path = aFolder.toPath();
        if (!Files.isDirectory(path))
            return false;
        try (DirectoryStream<Path> folderStream = Files.newDirectoryStream(path)) {
            return !folderStream.iterator().hasNext();
        } catch (IOException e) {
            Logger.defaultLogError("Failed to open folder stream. folder=" + path, e);
            return false;
        }
    }

    @Override
    public void initFolder(Properties properties) throws ProCoreException {
        try {
            Files.createDirectories(folder);
        } catch (IOException e) {
            throw new ProCoreException(e);
        }
    }

    @Override
    public void deleteFiles() throws ProCoreException {
        deleteTree(folder);
    }

    /**
     * Acquires the exclusive lock file and marks the database as running.
     *
     * @throws ProCoreException if the folder is already locked by another
     *         process, or if the lock file cannot be created/opened.
     */
    @Override
    public void start() throws ProCoreException {
        Path lockFile = folder.resolve("lock");
        try {
            if (!Files.exists(lockFile))
                Files.createFile(lockFile);

            raLockFile = new RandomAccessFile(lockFile.toFile(), "rw");
            lock = raLockFile.getChannel().tryLock();
            if (lock == null) {
                // FIX: close the file handle before failing so it is not leaked.
                try {
                    raLockFile.close();
                } catch (IOException ignored) {
                    // best effort cleanup; the primary error is reported below
                }
                raLockFile = null;
                throw new ProCoreException("The database in folder " + folder.toAbsolutePath() + " is already in use!");
            }

            isRunning = true;

        } catch (IOException e) {
            // FIX: was printStackTrace() with silent continue, which left the
            // database in a half-started state; surface the failure instead.
            throw new ProCoreException("Failed to start database in folder " + folder.toAbsolutePath(), e);
        }
    }

    @Override
    public boolean isRunning() throws ProCoreException {
        return isRunning;
    }

    /**
     * Releases the lock and deletes the lock file.
     *
     * @return true if the database was stopped cleanly, false if releasing
     *         the lock or deleting the lock file failed.
     */
    @Override
    public boolean tryToStop() throws ProCoreException {
        try {
            // FIX: guard against tryToStop() before (successful) start();
            // previously this would throw NullPointerException.
            if (lock != null) {
                lock.release();
                lock = null;
            }
            if (raLockFile != null) {
                raLockFile.close();
                raLockFile = null;
            }

            Files.deleteIfExists(folder.resolve("lock"));

            isRunning = false;
            return true;

        } catch (IOException e) {
            // FIX: was printStackTrace() and returning true even on failure.
            Logger.defaultLogError("Failed to stop database. folder=" + folder, e);
            return false;
        }
    }

    @Override
    public void connect() throws ProCoreException {
    }

    @Override
    public boolean isConnected() throws ProCoreException {
        return isRunning;
    }

    @Override
    public String execute(String command) throws ProCoreException {
        throw new UnsupportedOperationException("execute(" + command + ")");
    }

    @Override
    public void disconnect() throws ProCoreException {
    }

    @Override
    public void clone(File to, int revision, boolean saveHistory) throws ProCoreException {
        // TODO: implement
        throw new UnsupportedOperationException();
    }

    @Override
    public Path createFromChangeSets(int revision) throws ProCoreException {
        // TODO: implement
        throw new UnsupportedOperationException();
    }

    @Override
    public void deleteGuard() throws ProCoreException {
        // TODO: implement
        throw new UnsupportedOperationException();
    }

    @Override
    public Path dumpChangeSets() throws ProCoreException {
        // TODO: implement
        throw new UnsupportedOperationException();
    }

    @Override
    public void purgeDatabase() throws ProCoreException {
        // TODO: implement
        throw new UnsupportedOperationException();
    }

    @Override
    public long serverGetTailChangeSetId() throws ProCoreException {
        // "We have it all"
        // But after purging we don't so beware.
        // TODO: beware for purge
        return 1;
    }

    @Override
    public Session newSession(ServiceLocator locator) throws ProCoreException {
        try {
            return new GraphClientImpl2(this, folder, locator);
        } catch (IOException e) {
            throw new ProCoreException(e);
        }
    }

    @Override
    public Journal getJournal() throws ProCoreException {
        // TODO: implement
        throw new UnsupportedOperationException();
    }

    /**
     * Recursively deletes the given directory tree, failing fast on the first
     * file or directory that cannot be removed.
     */
    private static void deleteTree(Path path) throws ProCoreException {
        if (!Files.exists(path))
            return;

        class Visitor extends SimpleFileVisitor<Path> {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                try {
                    Files.delete(file);
                } catch (IOException ioe) {
                    ioe.printStackTrace();
                    throw ioe;
                }
                return FileVisitResult.CONTINUE;
            }
            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException e) throws IOException {
                if (e == null) {
                    try {
                        Files.delete(dir);
                    } catch (IOException ioe) {
                        ioe.printStackTrace();
                        throw ioe;
                    }
                    return FileVisitResult.CONTINUE;
                }
                throw e;
            }
        }
        try {
            Visitor v = new Visitor();
            EnumSet<FileVisitOption> opts = EnumSet.noneOf(FileVisitOption.class);
            Files.walkFileTree(path, opts, Integer.MAX_VALUE, v);
        } catch (IOException e) {
            throw new ProCoreException("Could not delete " + path, e);
        }
    }

    @Override
    public String getCompression() {
        return "LZ4";
    }

}
b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Activator.java new file mode 100644 index 000000000..b6cb59b40 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Activator.java @@ -0,0 +1,62 @@ +/******************************************************************************* + * Copyright (c) 2007, 2010 Association for Decentralized Information Management + * in Industry THTH ry. + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * VTT Technical Research Centre of Finland - initial API and implementation + *******************************************************************************/ +package org.simantics.acorn.internal; + +import org.eclipse.core.runtime.Plugin; +import org.osgi.framework.BundleContext; + +/** + * @author Antti Villberg + */ +public class Activator extends Plugin { + + // The plug-in ID + public static final String BUNDLE_ID = "org.simantics.acorn"; //$NON-NLS-1$ + // The shared instance + private static Activator plugin; + + /** + * The constructor + */ + public Activator() { + } + + /* + * (non-Javadoc) + * @see org.eclipse.ui.plugin.AbstractUIPlugin#start(org.osgi.framework.BundleContext) + */ + @Override + public void start(BundleContext context) throws Exception { + super.start(context); + plugin = this; + } + + /* + * (non-Javadoc) + * @see org.eclipse.ui.plugin.AbstractUIPlugin#stop(org.osgi.framework.BundleContext) + */ + @Override + public void stop(BundleContext context) throws Exception { + plugin = null; + super.stop(context); + } + + /** + * Returns the shared instance + * + * @return the shared instance + */ + public static Activator getDefault() { + return plugin; + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/BijectionMap.java 
/*******************************************************************************
 * Copyright (c) 2007, 2010 Association for Decentralized Information Management
 * in Industry THTH ry.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     VTT Technical Research Centre of Finland - initial API and implementation
 *******************************************************************************/

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

/**
 * Bijection map is a Map that has no values or keys, only 1:1 mappings of
 * values. These value/keys will be called left and right side values.
 *
 * <p>Each value can exist only once on a side.
 *
 * <p>Not thread-safe; external synchronization is required for concurrent use.
 *
 * @author Toni Kalajainen
 */
public class BijectionMap<L, R> {

    /** Keys are left-side values; values are right-side values. */
    private final Map<L, R> tableLeft = new HashMap<L, R>();
    /** Keys are right-side values; values are left-side values. */
    private final Map<R, L> tableRight = new HashMap<R, L>();

    public boolean containsLeft(L leftValue) {
        return tableLeft.containsKey(leftValue);
    }

    public boolean containsRight(R rightValue) {
        return tableRight.containsKey(rightValue);
    }

    /**
     * Associates leftValue with rightValue, first removing any existing
     * association of either value so the 1:1 invariant is preserved.
     *
     * <p>FIX: the previous implementation removed the stale pairing of only
     * one side — when the left value was already mapped, an existing mapping
     * of the right value was left behind, leaving a stale left-to-right entry
     * and breaking the bijection (size() then over-counted).
     */
    public void map(L leftValue, R rightValue) {
        // Remove the old pairing of the left value, if any.
        R oldRight = tableLeft.remove(leftValue);
        if (oldRight != null)
            tableRight.remove(oldRight);
        // Remove the old pairing of the right value, if any.
        L oldLeft = tableRight.remove(rightValue);
        if (oldLeft != null)
            tableLeft.remove(oldLeft);

        tableLeft.put(leftValue, rightValue);
        tableRight.put(rightValue, leftValue);
    }

    /** @return the number of mappings in the bijection */
    public int size() {
        return tableLeft.size();
    }

    /** @return the left value mapped to rightValue, or null if none */
    public L getLeft(R rightValue) {
        return tableRight.get(rightValue);
    }

    /** @return the right value mapped to leftValue, or null if none */
    public R getRight(L leftValue) {
        return tableLeft.get(leftValue);
    }

    /**
     * Removes the mapping whose left side is leftValue.
     *
     * @return the right value that was mapped to leftValue, or null if none
     */
    public R removeWithLeft(L leftValue) {
        R rightValue = tableLeft.remove(leftValue);
        if (rightValue != null)
            tableRight.remove(rightValue);
        return rightValue;
    }

    /**
     * Removes the mapping whose right side is rightValue.
     *
     * @return the left value that was mapped to rightValue, or null if none
     */
    public L removeWithRight(R rightValue) {
        L leftValue = tableRight.remove(rightValue);
        if (leftValue != null)
            tableLeft.remove(leftValue);
        return leftValue;
    }

    public Set<L> getLeftSet() {
        return tableLeft.keySet();
    }

    public Set<R> getRightSet() {
        return tableRight.keySet();
    }

    public void clear() {
        tableLeft.clear();
        tableRight.clear();
    }
}
b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Change.java @@ -0,0 +1,70 @@ +package org.simantics.acorn.internal; + +import org.simantics.db.service.ClusterUID; + +final public class Change { + + byte op0; + int key0; + int key1; + int key2; + ClusterUID clusterUID1; + ClusterUID clusterUID2; + byte[] lookup1; + byte[] lookup2; + byte lookIndex1; + byte lookIndex2; + int lastArg = 0; + + @Override + public String toString() { + return "Change " + (key0&0xffff) + " " + (key1&0xffff) + " " + (key2&0xffff) + " " + clusterUID2 + " " + clusterUID2; + } + + public final void init() { + lastArg = 0; + } + + public final void initValue() { + lastArg = 0; + } + + final void addStatementIndex0(int key, byte op) { + assert (op != 0); + key0 = key; + op0 = op; + } + + final void addStatementIndex1(int key, ClusterUID clusterUID, byte lookIndex, byte[] lookup) { + key1 = key; + clusterUID1 = clusterUID; + lookIndex1 = lookIndex; + lookup1 = lookup; +// if(lookIndex > 0) +// System.err.println("statementIndex1 " + pos + " " + lookIndex); + } + + final void addStatementIndex2(int key, ClusterUID clusterUID, byte lookIndex, byte[] lookup) { + key2 = key; + clusterUID2 = clusterUID; + lookIndex2 = lookIndex; + lookup2 = lookup; + } + + final public void addStatementIndex(int key, ClusterUID clusterUID, byte op) { + + // new Exception("lastArg=" + lastArg).printStackTrace(); + + assert (lastArg < 3); + + if (0 == lastArg) + addStatementIndex0(key, op); + else if (1 == lastArg) + addStatementIndex1(key, clusterUID, (byte)0, null); + else if (2 == lastArg) + addStatementIndex2(key, clusterUID, (byte)0, null); + + lastArg++; + + } +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange.java new file mode 100644 index 000000000..b1fbb5d9c --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange.java @@ -0,0 +1,735 @@ 
+package org.simantics.acorn.internal; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.simantics.acorn.internal.ClusterStream.ClusterEnum; +import org.simantics.acorn.internal.ClusterStream.Data; +import org.simantics.acorn.internal.ClusterStream.DebugInfo; +import org.simantics.acorn.internal.ClusterStream.OpEnum; +import org.simantics.acorn.internal.ClusterStream.StmEnum; +import org.simantics.compressions.Compressions; +import org.simantics.db.exception.RuntimeDatabaseException; +import org.simantics.db.impl.ClusterTraitsBase; +import org.simantics.db.procore.cluster.ClusterTraits; +import org.simantics.db.procore.cluster.ClusterTraitsSmall; +import org.simantics.db.service.Bytes; +import org.simantics.db.service.ClusterUID; +import org.simantics.utils.datastructures.Pair; + +import gnu.trove.map.hash.TIntByteHashMap; +import gnu.trove.map.hash.TLongIntHashMap; + + +public final class ClusterChange { + + public static final int VERSION = 1; + public static final byte ADD_OPERATION = 2; + public static final byte REMOVE_OPERATION = 3; + public static final byte DELETE_OPERATION = 5; + + public static final boolean DEBUG = false; + public static final boolean DEBUG_STAT = false; + public static final boolean DEBUG_CCS = false; + + private static DebugInfo sum = new DebugInfo(); + + public final TIntByteHashMap foreignTable = new TIntByteHashMap(); + private final DebugInfo info; +// private final GraphSession graphSession; + public final ClusterUID clusterUID; + private final int SIZE_OFFSET; +// private final int HEADER_SIZE; + // How much buffer is used before stream is flushed to server. The bigger the better. 
+ public static final int MAX_FIXED_BYTES = (1<<15) + (1<<14); + private static final int MAX_FIXED_OPERATION_SIZE = 17 + 16; + private static final int MAX_FIXED_OPERATION_SIZE_AND_ROOM_FOR_ERROR = MAX_FIXED_OPERATION_SIZE + 36; + private int nextSize = MAX_FIXED_BYTES; + int byteIndex = 0; + private byte[] bytes = null; // Operation data. +// private final byte[] header; + private boolean flushed = false; + private ArrayList> stream; + +// public ClusterImpl clusterImpl; + + public ClusterChange( ArrayList> stream, ClusterUID clusterUID) { + this.clusterUID = clusterUID; + long[] longs = new long[ClusterUID.getLongLength()]; + clusterUID.toLong(longs, 0); + this.stream = stream; +// this.graphSession = clusterStream.graphSession; + info = new DebugInfo(); +// HEADER_SIZE = 8 + longs.length * 8; +// header = new byte[HEADER_SIZE]; + SIZE_OFFSET = 0; +// Bytes.writeLE(header, SIZE_OFFSET + 0, 0); // Correct byte vector size is set with setHeaderVectorSize() later. +// Bytes.writeLE(header, SIZE_OFFSET + 4, VERSION); +// for (int i=0, offset=8; i>>8); + } + void flushCollect(Change c) { + throw new UnsupportedOperationException(); +// flushInternal(graphSession, clusterUID); +// if (DEBUG) +// printlnd("Cluster change data was flushed."); +// if (null != c) { +// if (DEBUG) +// printlnd("Clearing lookup for " + c.toString()); +// c.lookup1 = null; +// c.lookup2 = null; +// } +// if (null != clusterImpl) { +// clusterImpl.foreignLookup = null; +// } + } + + private final boolean checkBufferSpace(Change c) { +// clusterStream.changes.checkFlush(); + if(bytes == null) initBuffer(); + if (MAX_FIXED_BYTES - byteIndex > MAX_FIXED_OPERATION_SIZE_AND_ROOM_FOR_ERROR) { + return false; + } + flush(); +// initBuffer(); + return true; + } + + private final void checkBufferSpace(int size) { + if(bytes == null) initBuffer(); + if (bytes.length - byteIndex >= size) + return; + nextSize = Math.max(MAX_FIXED_BYTES, size); + flush(); + initBuffer(); + } + + public final void 
addChange(Change c) { + checkInitialization(); + checkBufferSpace(c); + byte operation = c.op0; + if(operation == ADD_OPERATION) + addStm(c, StmEnum.Add); + else if (operation == REMOVE_OPERATION) + addStm(c, StmEnum.Remove); + else if (operation == DELETE_OPERATION) { + if (DEBUG) + printlnd("Delete value offset=" + byteIndex + " " + c); + addByte(OpEnum.Delete.getOrMask()); + addShort(ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0)); + } + c.lastArg = 0; + } + + private final void addForeignLong(short index, ClusterUID clusterUID) { + byteIndex = clusterUID.toByte(bytes, byteIndex); + bytes[byteIndex++] = (byte)(index & 0xFF); + bytes[byteIndex++] = (byte)(index >>> 8); + } + + private final ClusterEnum addIndexAndCluster(int key, ClusterUID clusterUID, byte lookIndex, byte[] lookup) { + assert(!clusterUID.equals(ClusterUID.Null)); + short resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(key); + if (clusterUID.equals(this.clusterUID)) { + bytes[byteIndex++] = (byte)(resourceIndex & 0xFF); + bytes[byteIndex++] = (byte)(resourceIndex >>> 8); + return ClusterEnum.Local; + } + + byte foreign = 0; + if(lookIndex > 0) { + if(lookup != null) + foreign = lookup[lookIndex]; + } else { + foreign = foreignTable.get(key); + } + if (0 != foreign) { + if (foreign > 256) + throw new RuntimeDatabaseException("Internal error, contact application support." + + "Too big foreing index=" + foreign + " max=256"); + --foreign; + bytes[byteIndex++] = foreign; + return ClusterEnum.ForeignShort; + } else { + byte position = (byte) (foreignTable.size() + 1); + if(lookup != null) + lookup[lookIndex] = position; + foreignTable.put(key, position); + if (DEBUG_STAT) + info.sForeign = foreignTable.size(); + if (clusterUID.equals(ClusterUID.Null)) + throw new RuntimeDatabaseException("Internal error, contact application support." 
+ + "Cluster unique id not defined for foreing cluster."); + addForeignLong(resourceIndex, clusterUID); + return ClusterEnum.ForeignLong; + } + } + + private final void addByte(byte b) { + bytes[byteIndex++] = b; + } + + private final void addShort(short s) { + bytes[byteIndex++] = (byte)(s & 0xFF); + bytes[byteIndex++] = (byte)(s >>> 8); + } + +// private final void addShort(int s) { +// bytes[byteIndex++] = (byte) (s & 0xFF); +// bytes[byteIndex++] = (byte) ((s >>> 8) & 0xFF); +// } + + private final void addInt(int i) { +// System.err.println("addInt " + i + " " + i); + bytes[byteIndex++] = (byte) (i & 0xFF); + bytes[byteIndex++] = (byte) ((i >>> 8) & 0xFF); + bytes[byteIndex++] = (byte) ((i >>> 16) & 0xFF); + bytes[byteIndex++] = (byte) ((i >>> 24) & 0xFF); + // buffer.asIntBuffer().put(i); + // buffer.position(buffer.position()+4); + } + +// private void addLong6(long l) { +//// System.err.println("addLong " + l); +// bytes[byteIndex++] = (byte) (l & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 8) & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 16) & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 24) & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 32) & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 40) & 0xFF); +// // buffer.asLongBuffer().put(l); +// // buffer.position(buffer.position() + 6); +// } + + private void addLong7(long l) { + bytes[byteIndex++] = (byte) (l & 0xFF); + bytes[byteIndex++] = (byte) ((l >>> 8) & 0xFF); + bytes[byteIndex++] = (byte) ((l >>> 16) & 0xFF); + bytes[byteIndex++] = (byte) ((l >>> 24) & 0xFF); + bytes[byteIndex++] = (byte) ((l >>> 32) & 0xFF); + bytes[byteIndex++] = (byte) ((l >>> 40) & 0xFF); + bytes[byteIndex++] = (byte) ((l >>> 48) & 0xFF); + // buffer.asLongBuffer().put(l); + // buffer.position(buffer.position() + 7); + } + +// private void addLong(long l) { +// bytes[byteIndex++] = (byte) (l & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 8) & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 16) & 0xFF); +// 
    // NOTE(review): large commented-out legacy helpers (addLong6/addLong and a
    // GraphSession-based modiValueBig/flush implementation) used to sit in this
    // region; recover them from version control history if ever needed.

    /** Pops and returns the most recently appended byte of the operation buffer. */
    private final byte bufferPop() {
        return bytes[--byteIndex];
    }

    /**
     * Debug-only snapshot of one statement change (add/remove), used by the
     * DEBUG and DEBUG_CCS tracing paths of addStm.
     */
    final class DebugStm {
        StmEnum e;     // Add or Remove
        int r;         // subject resource key
        int p;         // predicate resource key
        int o;         // object resource key
        ClusterUID pc; // predicate cluster
        ClusterUID oc; // object cluster

        DebugStm(StmEnum e, int r, int p, ClusterUID pc, int o, ClusterUID oc) {
            this.e = e;
            this.r = r;
            this.p = p;
            this.o = o;
            this.pc = pc;
            this.oc = oc;
        }

        @Override
        public String toString() {
            short ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(r);
            short pi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(p);
            short oi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(o);
            return "" + e + " rk=" + r + " ri=" + ri + " rc=" + clusterUID
                    + " pk=" + p + " pi=" + pi + " pc=" + pc
                    + " ok=" + o + " oi=" + oi + " oc=" + oc;
        }

        /** Key-level rendering, without index decoding. */
        public String toString2() {
            return "" + e + " r=" + r + " rc=" + clusterUID + " p=" + p
                    + " pc=" + pc + " o=" + o + " oc=" + oc;
        }

        /** Index-level rendering (resource indices within their clusters). */
        public String toString3() {
            short ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(r);
            short pi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(p);
            short oi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(o);
            return "" + e + " ri=" + ri
                    + " pi=" + pi + " pc=" + pc
                    + " oi=" + oi + " oc=" + oc;
        }
    }

    // Statements recorded for DEBUG_CCS tracing.
    // NOTE(review): raw List — generics were presumably stripped when this
    // source was pasted; likely List<DebugStm>. Confirm against history.
    private List debugStms = new ArrayList();

    /**
     * Encodes one statement change (add or remove) into the operation buffer.
     * Layout: one op byte (reserved up front, back-patched once the encoding
     * is known), the cluster references of the predicate and object ends, the
     * low byte of the subject resource index, and 0-2 packed payload bytes
     * carrying the remaining index bits plus popped 6-bit cluster fragments.
     */
    @SuppressWarnings("unused")
    private final void addStm(Change c, StmEnum stmEnum) {

        if (DEBUG_STAT)
            ++info.nStms;
        if (DEBUG || DEBUG_CCS) {
            DebugStm d = new DebugStm(stmEnum, c.key0, c.key1, c.clusterUID1, c.key2, c.clusterUID2);
            if (DEBUG_CCS)
                debugStms.add(d);
            if (DEBUG) {
                printlnd(d.toString3() + " offset=" + byteIndex);
            }
        }
        // Reserve one byte for the operation code; back-patched below.
        int opPos = byteIndex++;

        boolean done = true;

        // Predicate end. For non-ForeignShort kinds the topmost 6-bit fragment
        // is popped back off so it can be repacked into the payload below.
        // Reference widths: ForeignShort = byte, Local = short, ForeignLong = 8 bytes.
        ClusterEnum a = addIndexAndCluster(c.key1, c.clusterUID1, c.lookIndex1, c.lookup1);
        byte ab = 0;

        if (a != ClusterEnum.ForeignShort) {
            ab = bufferPop();
            done = false;
        }

        // Object end, same treatment.
        ClusterEnum b = addIndexAndCluster(c.key2, c.clusterUID2, c.lookIndex2, c.lookup2);
        byte bb = 0;
        if (b != ClusterEnum.ForeignShort) {
            bb = bufferPop();
            done = false;
        }

        int ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0);
        if (ClusterTraitsSmall.isIllegalResourceIndex(ri))
            throw new RuntimeDatabaseException("Assertion error. Illegal resource index=" + ri);
        bytes[byteIndex++] = (byte)ri; // index low byte
        if(!done) {
            // data.bits of the resource index travel inside the op byte; the
            // rest, plus the popped cluster fragments, go into payload bytes.
            Data data = ClusterEnum.getData(stmEnum, a, b);
            int left = 6 - data.bits;
            int op = ri >>> (8 + left);
            ri >>>= 8;
            ri &= (1 << left) - 1;
            if (a != ClusterEnum.ForeignShort) {
                // NOTE(review): ab is a signed byte, so ab << left sign-extends;
                // presumably safe given the 6-bit fragment invariant — confirm.
                ri |= ab << left;
                left += 6;
            }
            if (b != ClusterEnum.ForeignShort) {
                ri |= bb << left;
                left += 6;
            }
            switch (data.bytes) {
                default:
                    throw new RuntimeDatabaseException("Assertion error. Illegal number of bytes=" + data.bytes);
                case 2:
                    bytes[byteIndex++] = (byte)(ri & 0xFF);
                    bytes[byteIndex++] = (byte)((ri >>> 8) & 0xFF);
                    break;
                case 1:
                    bytes[byteIndex++] = (byte)(ri & 0xFF);
                    break;
                case 0:
                    break;
            }
            op |= data.mask;
            this.bytes[opPos] = (byte)op;
        } else {
            // Fast path: both ends ForeignShort; op byte carries the index
            // high bits plus 64 (Add) or 128 (Remove).
            if (stmEnum == StmEnum.Add)
                bytes[opPos] = (byte)((ri >>> 8) + 64);
            else
                bytes[opPos] = (byte)((ri >>> 8) + 128);
        }
        if (DEBUG_STAT) {
            if (a == ClusterEnum.Local && b == ClusterEnum.Local) {
                ++info.nLocal;
            } else if (a == ClusterEnum.Local || b == ClusterEnum.Local) {
                ++info.nPartly;
            } else {
                ++info.nForeign;
            }
        }
        // The foreign table is addressed with a single byte; flush before it
        // can overflow.
        if (foreignTable.size() > 252)
            flush();
    }

    /**
     * Emits a Modify-value operation: up to {@code size} bytes of value
     * content for resource index {@code ri}, starting at {@code value_offset}
     * within the value. Returns the number of payload bytes actually copied
     * into the operation buffer (may be less than {@code size} when the
     * buffer fills; the caller re-issues the remainder).
     */
    private final int modiValue(int ri, long value_offset, byte[] bytes, int offset, int size) {
        if (DEBUG)
            printlnd("Modify value ri=" + ri + " vo=" + value_offset + " size=" + size + " total=" + bytes.length);
        if (ClusterTraitsBase.isIllegalResourceIndex(ri))
            throw new RuntimeDatabaseException("Assertion error. Illegal resource index=" + ri);
        // NOTE(review): precedence — this evaluates as (1L << 57), not
        // ((1L << 58) - 1); probably meant the latter. Confirm before changing.
        if (value_offset > (1L << 58 - 1))
            throw new RuntimeDatabaseException("Illegal value offset="
                    + value_offset);
        if (size < 0 || size > MAX_FIXED_BYTES - 1)
            throw new RuntimeDatabaseException("Illegal value size=" + size);
        if (offset + size > bytes.length)
            throw new RuntimeDatabaseException("Illegal value size=" + size);
        checkBufferSpace(12 + size);
        addByte(OpEnum.Modify.getOrMask());
        ri |= (value_offset >>> 56) << 14; // top two offset bits ride in the index short
        addShort((short) ri);
        value_offset &= (1L << 56) - 1;
        addLong7(value_offset);
        addShort((short) size);
        if (DEBUG)
            System.out.println("Modify value fixed part end offset=" + byteIndex);
        int copied = Math.min(size, this.bytes.length - byteIndex);
        System.arraycopy(bytes, offset, this.bytes, byteIndex, copied);
        // NOTE(review): byteIndex advances by size even when only `copied`
        // bytes fit; setValueBig sizes its requests so the two agree — confirm.
        byteIndex += size;
        return copied;
    }

    /**
     * Writes a large value as a sequence of Modify operations, each sized to
     * the remaining buffer space after the 12-byte fixed header.
     * Returns the total number of bytes written.
     */
    private final int setValueBig(int ri, byte[] bytes, int length_) {
        checkBufferSpace(12);
        int sum = 0;
        int voffset = 0;
        int offset = 0;
        int left = length_;
        while (left > 0) {
            int length = Math.min(left, MAX_FIXED_BYTES - 12 - byteIndex);
            if (DEBUG)
                printlnd("Set big value ri=" + ri + " vo=" + voffset + " len=" + length);
            int written = modiValue(ri, voffset, bytes, offset, length);
            sum += written;
            voffset += written;
            offset += written;
            left -= written;
            checkBufferSpace(12);
        }
        return sum;
    }

    /**
     * Emits a single Set operation for a value that fits in one operation.
     * Values under 32 bytes use the short form (SetShort op byte carrying the
     * length high bits + 2 header bytes); longer ones the long form (Set op
     * byte + 4 header bytes). Returns the bytes appended to the buffer.
     */
    private final int setValueSmall(int ri, byte[] bytes, int length) {
        checkBufferSpace(5 + length);
        int pos = byteIndex;
        int i = length << 14 | ri; // length packed above the 14-bit resource index
        if (length < 32) {
            byte op = (byte) (OpEnum.SetShort.getOrMask() | length >>> 2);
            addByte(op);
            short s = (short) i;
            addShort(s);
        } else {
            addByte(OpEnum.Set.getOrMask());
            addInt(i);
        }
        System.arraycopy(bytes, 0, this.bytes, byteIndex, length);
        byteIndex += length;
        int len = byteIndex - pos;
        return len;
    }

    /** Convenience overload: writes the whole array as the value. */
    final void setValue(short index, byte[] bytes) {
        setValue(index, bytes, bytes.length);
    }

    /**
     * Writes a value for the resource at {@code index}, choosing the small or
     * big (chunked Modify) encoding based on the small-cluster value limit.
     */
    final public void setValue(short index, byte[] bytes, int length) {
        checkInitialization();
        if (ClusterTraitsBase.isIllegalResourceIndex(index))
            throw new RuntimeDatabaseException("Assertion error. Illegal resource index=" + index);
        if (DEBUG)
            printlnd("Set value ri=" + index
                    + " len=" + length
                    + " bytes=" + Arrays.toString(Arrays.copyOfRange(bytes, 0, Math.min(10, length))));
        int len;
        /*
         * The limit for the cluster stream is (1<<18)-1 but this avoids the
         * conversion to big cluster.
         */
        if (length > ClusterTraitsSmall.VALUE_SIZE_MAX)
            len = setValueBig(index, bytes, length);
        else
            len = setValueSmall(index, bytes, length);
        if (DEBUG_STAT) {
            ++info.nValues;
            info.sValues += len + length;
        }
    }

    // NOTE(review): the following three are vestigial hooks of the removed
    // ClusterChange2 delegation; they only validate initialization.
    final void setImmutable(boolean immutable) {
        checkInitialization();
    }

    final void undoValueEx(int resourceIndex) {
        checkInitialization();
    }

    final void setDeleted(boolean deleted) {
        checkInitialization();
    }

    /** Writes an invalid zero op code, deliberately corrupting the stream (test hook). */
    final void corrupt() {
        checkInitialization();
        addByte((byte)0);
    }

    /** Returns a copy of the operation bytes accumulated so far. */
    public byte[] getBytes() {
        byte[] copy = new byte[byteIndex];
        System.arraycopy(bytes, 0, copy, 0, byteIndex);
        return copy;
    }

    /**
     * Legacy GraphSession-era flush; permanently disabled — the live path is
     * {@link #flush()}.
     *
     * @param clusterUID target cluster
     * @return true if actually flushed something (never returns; always throws)
     */
    final boolean flush(/*GraphSession graphSession,*/ ClusterUID clusterUID) {
        throw new UnsupportedOperationException();
    }

    /** Legacy companion of {@link #flush(ClusterUID)}; also disabled. */
    final void flushInternal(ClusterUID clusterUID) {
        throw new UnsupportedOperationException();
    }

    /**
     * Maps (resource index, cluster id) of foreign resources to their position
     * in this change set's foreign reference table.
     */
    final class ForeignTable {
        private final TLongIntHashMap table = new TLongIntHashMap();

        // 48-bit cluster id and 14-bit resource index packed into one long key.
        private long createKey(short index, long cluster) {
            assert (cluster <= (1L << 48) - 1);
            return (cluster << 14) | index;
        }

        // NOTE(review): Trove returns 0 for a missing key, so 0 doubles as
        // "absent"; stored positions are presumably offset by one (the debug
        // print shows value - 1) — confirm.
        public int get(short index, long cluster) {
            int value = table.get(createKey(index, cluster));
            if (DEBUG)
                printlnd("ForeignTable get c=" + clusterUID + " i="
                        + (value - 1) + " r=" + index + " rc=" + cluster);
            return value;
        }

        public int put(short index, long cluster, int value) {
            if (DEBUG)
                printlnd("ForeignTable put c=" + clusterUID + " i="
                        + (value - 1) + " r=" + index + " rc=" + cluster);
            return table.put(createKey(index, cluster), value);
        }

        public int size() {
            return table.size();
        }

        public void clear() {
            table.clear();
        }
    }

    // Identity is by target cluster only: one ClusterChange per cluster.
    @Override
    public int hashCode() {
        return 31*clusterUID.hashCode();
    }

    @Override
    public boolean equals(Object object) {
        if (this == object)
            return true;
        else if (object == null)
            return false;
        else if (!(object instanceof ClusterChange))
            return false;
        ClusterChange r = (ClusterChange)object;
        return r.clusterUID.equals(clusterUID);
    }

    /**
     * Packages the accumulated operations into an LZ4-compressed update block
     * and appends it to the outgoing stream, then resets this change set.
     * No-op when nothing has been written.
     */
    public void flush() {

        if(byteIndex > 0) {

            final ClusterUID cuid = clusterUID;

            // Inner envelope: [1][clusterUID 16B][payload length][payload][0]
            byte[] block = getBytes();
            byte[] raw = new byte[block.length + 28];
            Bytes.writeLE(raw, 0, 1);
            System.arraycopy(cuid.asBytes(), 0, raw, 4, 16);
            Bytes.writeLE(raw, 20, block.length);
            System.arraycopy(block, 0, raw, 24, block.length);
            Bytes.writeLE(raw, 24+block.length, 0);

            // LZ4-compress; keep the raw bytes if compression does not help.
            ByteBuffer rawBB = ByteBuffer.wrap(raw);
            ByteBuffer outputBB = ByteBuffer.allocate(raw.length + raw.length/8);
            //outputBB.order(ByteOrder.LITTLE_ENDIAN);
            int compressedSize = Compressions.get(Compressions.LZ4).compressBuffer(rawBB, 0, raw.length, outputBB, 0);

            byte[] data_ = null;
            if(compressedSize < raw.length) {
                data_ = new byte[compressedSize];
                outputBB.get(data_,0,compressedSize);
            } else {
                data_ = raw;
            }

            // Outer header: uncompressed size (twice), compressed size, data.
            byte[] data = new byte[data_.length+24];
            Bytes.writeLE(data, 0, 0);
            Bytes.writeLE(data, 4, 0);
            Bytes.writeLE(data, 8, raw.length);
            Bytes.writeLE(data, 12, raw.length);
            Bytes.writeLE(data, 16, data_.length);
            System.arraycopy(data_, 0, data, 20, data_.length);
            Bytes.writeLE(data, 20+data_.length, 0);

            stream.add(Pair.make(clusterUID, data));
            clear();
            initBuffer();

        }
    }

}
/**
 * Version-2 extension of the Acorn cluster change protocol
 * (org.simantics.acorn.internal). Only the protocol constants remain live;
 * the original streaming implementation was fully commented out and has been
 * dropped — recover it from version control history if ever needed.
 */
public class ClusterChange2 {

    /** Protocol version tag written at the head of a version-2 change set. */
    public static final int VERSION = 2;

    /** Operation code: mark the cluster (im)mutable. */
    public static final byte SET_IMMUTABLE_OPERATION = 1;

    /** Operation code: undo the value of a resource. */
    public static final byte UNDO_VALUE_OPERATION = 2;

    // Growth step of the (legacy) operation buffer; kept for fidelity.
    @SuppressWarnings("unused")
    private static final int INCREMENT = 1 << 10;
}
/*******************************************************************************
 * Copyright (c) 2007, 2010 Association for Decentralized Information Management
 * in Industry THTH ry.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     VTT Technical Research Centre of Finland - initial API and implementation
 *******************************************************************************/

/**
 * Wire-format constants and statement-encoding tables for the Acorn cluster
 * change stream (org.simantics.acorn.internal). The legacy GraphSession-based
 * streaming implementation that used to live here was entirely commented out
 * and has been deleted — see version control history. What remains is the set
 * of operation codes and the lookup tables consumed by ClusterChange.
 */
public final class ClusterStream {

    public static final boolean DEBUG = false;

    // Operation codes of the cluster update wire format.
    public static final byte NULL_OPERATION = 0;
    public static final byte CREATE_OPERATION = 1;
    public static final byte SET_OPERATION = 4;
    public static final byte MODI_OPERATION = 6;
    public static final byte KILL_OPERATION = 7;

    /** Mutable bag of debug counters for stream statistics. */
    static class DebugInfo {
        long nStms;    // statements written
        long nLocal;   // statements with both ends in the local cluster
        long nPartly;  // statements with exactly one local end
        long nForeign; // statements with no local end
        long nValues;  // value operations
        long sValues;  // bytes of value payload
        long sForeign; // bytes spent on foreign cluster references
        long tot;      // total operation bytes

        /** Resets every counter to zero. */
        void clear() {
            nStms = 0;
            nLocal = 0;
            nPartly = 0;
            nForeign = 0;
            sForeign = 0;
            nValues = 0;
            sValues = 0;
            tot = 0;
        }

        /** Accumulates another DebugInfo's counters into this one. */
        void add(DebugInfo di) {
            nStms += di.nStms;
            nLocal += di.nLocal;
            nPartly += di.nPartly;
            nForeign += di.nForeign;
            sForeign += di.sForeign;
            nValues += di.nValues;
            sValues += di.sValues;
            tot += di.tot;
        }

        @Override
        public String toString() {
            return "val=" + nValues + " stm=" + nStms + " loc=" + nLocal
                    + " par=" + nPartly + " ful=" + nForeign + " for="
                    + sForeign + " vat=" + sValues + " tot=" + tot;
        }
    }

    /** Statement operation kind together with its or-mask for the op byte. */
    enum StmEnum {
        Add(0, (byte) 0),
        Remove(1, (byte) 0x20);

        public int ordinal;  // NOTE: deliberately shadows Enum.ordinal(); used as a table index
        private byte mask;

        StmEnum(int ordinal, byte mask) {
            this.ordinal = ordinal;
            this.mask = mask;
        }

        byte getOrMask() {
            return mask;
        }
    }

    /** Encoding descriptor for one (operation, subject-kind, object-kind) combination. */
    final static class Data {

        final byte mask;  // or-mask for the operation code (don't-care bits zero)
        final short bits; // resource-index bits carried in the op byte (0, 2, 4 or 6)
        final int bytes;  // extra payload bytes following the op byte

        Data(int mask, int bits, ClusterEnum a, ClusterEnum b) {
            this.mask = (byte) (mask << bits);
            this.bits = (short) bits;
            this.bytes = bytes(bits, a, b);
        }

        // Payload = 6 bits of resource index not in the op byte, plus 6 bits
        // for every statement end that is not a one-byte foreign reference,
        // rounded up to whole bytes.
        private static int bytes(int bits, ClusterEnum a, ClusterEnum b) {
            int payloadBits = 6 - bits;
            if (a != ClusterEnum.ForeignShort)
                payloadBits += 6;
            if (b != ClusterEnum.ForeignShort)
                payloadBits += 6;
            return (payloadBits + 7) >>> 3;
        }

    }

    /** How a statement end's cluster is referenced in the stream. */
    enum ClusterEnum {
        Local(0), ForeignShort(1), ForeignLong(2);

        public int ordinal;  // NOTE: deliberately shadows Enum.ordinal(); used as a table index

        ClusterEnum(int ordinal) {
            this.ordinal = ordinal;
        }

        /** Encoding table indexed by [StmEnum.ordinal][subject kind][object kind]. */
        static Data[][][] maps = new Data[2][3][3];

        private static void def(StmEnum e, ClusterEnum a, ClusterEnum b, int mask, int bits) {
            maps[e.ordinal][a.ordinal][b.ordinal] = new Data(mask, bits, a, b);
        }

        static {
            // Add operations (op-byte layout noted after each entry)
            def(StmEnum.Add, Local, Local, 0, 2);                  // 000000|r12-13
            def(StmEnum.Add, Local, ForeignShort, 12, 4);          // 1100|r10-13
            def(StmEnum.Add, Local, ForeignLong, 2, 2);            // 000010|r12-13
            def(StmEnum.Add, ForeignShort, Local, 13, 4);          // 1101|r10-13
            def(StmEnum.Add, ForeignShort, ForeignShort, 1, 6);    // 01|r8-13
            def(StmEnum.Add, ForeignShort, ForeignLong, 14, 4);    // 1110|r10-13
            def(StmEnum.Add, ForeignLong, Local, 4, 2);            // 000100|r12-13
            def(StmEnum.Add, ForeignLong, ForeignShort, 15, 4);    // 1111|r10-13
            def(StmEnum.Add, ForeignLong, ForeignLong, 6, 2);      // 000110|r12-13
            // Remove operations
            def(StmEnum.Remove, Local, Local, 1, 2);               // 000001|r12-13
            def(StmEnum.Remove, Local, ForeignShort, 49, 0);       // 01100001
            def(StmEnum.Remove, Local, ForeignLong, 3, 2);         // 000011|r12-13
            def(StmEnum.Remove, ForeignShort, Local, 2, 4);        // 0010|r10-13
            def(StmEnum.Remove, ForeignShort, ForeignShort, 2, 6); // 10|r8-13
            def(StmEnum.Remove, ForeignShort, ForeignLong, 50, 0); // 00110010
            def(StmEnum.Remove, ForeignLong, Local, 5, 2);         // 000101|r12-13
            def(StmEnum.Remove, ForeignLong, ForeignShort, 51, 0); // 00110011
            def(StmEnum.Remove, ForeignLong, ForeignLong, 7, 2);   // 000111|r12-13
        }

        /** Looks up the encoding for an operation and its two cluster kinds. */
        static Data getData(StmEnum s, ClusterEnum a, ClusterEnum b) {
            return maps[s.ordinal][a.ordinal][b.ordinal];
        }
    }

    /** Operation codes for value and life-cycle operations, exposed as or-masks. */
    enum OpEnum {
        Create((byte) 52),
        Set((byte) 53),
        SetShort((byte) 56),
        Delete((byte) 54),
        Modify((byte) 55);

        private final byte mask;

        OpEnum(byte mask) {
            this.mask = mask;
        }

        public byte getOrMask() {
            return mask;
        }
    }
}
+import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; + +import org.simantics.acorn.ClusterManager; +import org.simantics.acorn.exception.AcornAccessVerificationException; +import org.simantics.acorn.exception.IllegalAcornStateException; +import org.simantics.db.Session; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.impl.ClusterBase; +import org.simantics.db.impl.ClusterI; +import org.simantics.db.impl.ClusterSupport; +import org.simantics.db.impl.IClusterTable; +import org.simantics.db.service.ClusterUID; + +import gnu.trove.map.hash.TIntObjectHashMap; + +public class ClusterSupport2 implements ClusterSupport, IClusterTable { + + final private static boolean DEBUG = false; + + public ClusterManager impl; + + public TIntObjectHashMap uidCache = new TIntObjectHashMap(); + + public ClusterSupport2(ClusterManager impl) { + this.impl = impl; + } + + @Override + public int createClusterKeyByClusterUID(ClusterUID clusterUID, long clusterId) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) { + try { + return impl.getClusterByClusterUIDOrMake(clusterUID); + } catch (DatabaseException e) { + e.printStackTrace(); + return null; + } + } + + @Override + public ClusterBase getClusterByClusterId(long clusterId) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase getClusterByClusterKey(int clusterKey) { + throw new UnsupportedOperationException(); + } + + ReentrantReadWriteLock uidLock = new ReentrantReadWriteLock(); + ReadLock uidRead = uidLock.readLock(); + WriteLock uidWrite = uidLock.writeLock(); + + @Override + public ClusterUID getClusterUIDByResourceKey(int resourceKey) throws DatabaseException { + + ClusterUID cuid; + + uidRead.lock(); + cuid = uidCache.get(resourceKey >> 12); + 
uidRead.unlock(); + if(cuid != null) return cuid; + uidWrite.lock(); + cuid = uidCache.get(resourceKey >> 12); + if(cuid == null) { + cuid = impl.getClusterUIDByResourceKeyWithoutMutex(resourceKey); + uidCache.put(resourceKey >> 12, cuid); + } + uidWrite.unlock(); + + return cuid; + + } + + @Override + public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) { + try { + return impl.getClusterKeyByClusterUIDOrMakeWithoutMutex(clusterUID); + } catch (IllegalAcornStateException | AcornAccessVerificationException e) { + throw new RuntimeException(e); + } + } + + @Override + public int getClusterKeyByClusterUIDOrMake(long id1, long id2) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase getClusterByResourceKey(int resourceKey) { + throw new UnsupportedOperationException(); +// return impl.getClusterByResourceKey(resourceKey); + } + + @Override + public long getClusterIdOrCreate(ClusterUID clusterUID) { + return impl.getClusterIdOrCreate(clusterUID); + } + + @Override + public void addStatement(Object cluster) { + // nop + } + + @Override + public void cancelStatement(Object cluster) { + // nop + } + + @Override + public void removeStatement(Object cluster) { + // nop + } + + @Override + public void removeValue(Object cluster) { + // nop + } + + @Override + public void setImmutable(Object cluster, boolean immutable) { + // nop + } + + @Override + public void setDeleted(Object cluster, boolean deleted) { + // TODO Auto-generated method stub + + } + + + + @Override + public void cancelValue(Object cluster) { + throw new UnsupportedOperationException(); + } + + @Override + public void setValue(Object cluster, long clusterId, byte[] bytes, + int length) { + // nop + } + + @Override + public void modiValue(Object _cluster, long clusterId, long voffset, + int length, byte[] bytes, int offset) { + // nop + } + + @Override + public void createResource(Object cluster, short resourceIndex, + long clusterId) { + // No op + } + + @Override 
+ public void addStatementIndex(Object cluster, int resourceKey, + ClusterUID clusterUID, byte op) { + // No op + } + + @Override + public void setStreamOff(boolean setOff) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean getStreamOff() { + return true; + } + + + private static class ResourceSegment { + public long valueSize; + + public byte[] bytes; + + ResourceSegment(long valueSize, byte[] bytes) { + this.valueSize = valueSize; + this.bytes = bytes; + } + } + + public ResourceSegment getResourceSegment(int resourceIndex, ClusterUID clusterUID, long offset, short size) throws DatabaseException { + if (DEBUG) + System.out.println("DEBUG: getResourceSegment ri=" + resourceIndex + " cid=" + clusterUID + " offset=" + offset + " size=" + size); + + try { + org.simantics.db.Database.Session.ResourceSegment t = impl.getResourceSegment(clusterUID.asBytes(), resourceIndex, offset, size); + return new ResourceSegment(t.getValueSize(), t.getSegment()); + } catch (AcornAccessVerificationException | IllegalAcornStateException e) { + throw new DatabaseException(e); + } + } + + protected byte[] getValueBig(ClusterBase cluster, int resourceIndex, int offset, int length) throws DatabaseException { + + assert(offset == 0); + assert(length == 0); + + ClusterUID clusterUID = cluster.clusterUID; + + try { + return impl.getResourceFile(clusterUID.asBytes(), resourceIndex); + } catch (AcornAccessVerificationException | IllegalAcornStateException e) { + throw new DatabaseException(e); + } + } + + protected InputStream getValueStreamBig(ClusterBase cluster, final int resourceIndex, int offset, int length) throws DatabaseException { + + final ClusterUID clusterUID = cluster.clusterUID; + + if (DEBUG) + System.out.println("DEBUG: getResourceFile ri=" + resourceIndex + " cid=" + clusterUID + " off=" + offset + " len=" + length); + final int IMAX = 0xFFFF; + short slen = (short)Math.min(length != 0 ? 
length : IMAX, IMAX); + final ResourceSegment s = getResourceSegment(resourceIndex, clusterUID, offset, slen); + if (s.valueSize < 0) + throw new DatabaseException("Failed to get value for resource index=" + resourceIndex + + " cluster=" + clusterUID + " off=" + offset + " len=" + length + " (1)."); + int ilen = (int)slen & 0xFFFF; + assert(s.bytes.length <= ilen); + if (0 == length) { + if (s.valueSize > Integer.MAX_VALUE) + throw new DatabaseException("Failed to get value for resource index=" + resourceIndex + + " cluster=" + clusterUID + " off=" + offset + " len=" + length + + ". Value size=" + s.valueSize + " (2)."); + length = (int)s.valueSize; + } + long rSize = s.valueSize - offset; + if (rSize < length) + throw new DatabaseException("Failed to get value for resource index=" + resourceIndex + + " cluster=" + clusterUID + " off=" + offset + " len=" + length + + ". Value size=" + s.valueSize + " (3)."); + else if (length <= IMAX) + return new ByteArrayInputStream(s.bytes); + + final int finalLength = length; + + return new InputStream() { + + int left = finalLength; + long valueOffset = 0; + int offset = 0; + ResourceSegment _s = s; + + @Override + public int read() throws IOException { + + if(left <= 0) return -1; + + if(offset == _s.bytes.length) { + short slen = (short)Math.min(left, IMAX); + valueOffset += _s.bytes.length; + try { + _s = getResourceSegment(resourceIndex, clusterUID, valueOffset, slen); + } catch (DatabaseException e) { + throw new IOException(e); + } + offset = 0; + } + + left--; + int result = _s.bytes[offset++]; + if(result < 0) result += 256; + return result; + + } + + }; + + } + + @Override + public InputStream getValueStreamEx(int resourceIndex, long clusterId) + throws DatabaseException { + ClusterBase cluster = impl.getClusterByClusterUIDOrMakeProxy(ClusterUID.make(0, clusterId)); + return getValueStreamBig(cluster, resourceIndex, 0, 0); + } + + @Override + public byte[] getValueEx(int resourceIndex, long clusterId) + throws 
DatabaseException { + ClusterBase cluster = impl.getClusterByClusterUIDOrMakeProxy(ClusterUID.make(0, clusterId)); + return getValueBig(cluster, resourceIndex, 0, 0); + } + + @Override + public byte[] getValueEx(int resourceIndex, long clusterId, long voffset, + int length) throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public long getValueSizeEx(int resourceIndex, long clusterId) + throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public int wait4RequestsLess(int limit) throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public Session getSession() { + return null; + } + + @Override + public IClusterTable getClusterTable() { + return this; + } + + @Override + public T getClusterByClusterUIDOrMakeProxy(ClusterUID clusterUID) { + try { + return (T)impl.getClusterByClusterUIDOrMakeProxy(clusterUID); + } catch (DatabaseException e) { + e.printStackTrace(); + return null; + } + } + + @Override + public T getClusterProxyByResourceKey(int resourceKey) { + try { + return impl.getClusterProxyByResourceKey(resourceKey); + } catch (DatabaseException e) { + e.printStackTrace(); + return null; + } + } + + @Override + public int getClusterKeyByUID(long id1, long id2) throws DatabaseException { + return impl.getClusterKeyByUID(id1, id2); + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor.java new file mode 100644 index 000000000..d4381aeca --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor.java @@ -0,0 +1,86 @@ +package org.simantics.acorn.internal; + +import org.simantics.acorn.ClusterManager; +import org.simantics.acorn.cluster.ClusterImpl; +import org.simantics.acorn.exception.IllegalAcornStateException; +import 
org.simantics.acorn.lru.ClusterUpdateOperation; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.impl.ClusterSupport; +import org.simantics.db.service.ClusterUID; + +public class ClusterUpdateProcessor extends ClusterUpdateProcessorBase { + + final ClusterSupport support; + final ClusterUpdateOperation info; + private ClusterImpl cluster; + + public ClusterUpdateProcessor(ClusterManager client, ClusterSupport support, byte[] operations, ClusterUpdateOperation info) throws DatabaseException { + super(client, operations); + this.support = support; + this.info = info; + } + + @Override + void create() throws DatabaseException { + cluster.createResource(support); + } + + @Override + void delete(int ri) throws DatabaseException { + + boolean oldValueEx = cluster.isValueEx(ri); + byte[] old = cluster.getValue(ri, support); + if(old != null) cluster.removeValue(ri, support); + info.ccs.oldValueEx.add(oldValueEx ? (byte)1 : 0); + info.ccs.oldValues.add(old); + + } + + @Override + void modify(int resourceKey, long offset, int size, byte[] bytes, int pos) + throws DatabaseException { + + cluster = (ClusterImpl)cluster.modiValueEx(resourceKey, offset, size, bytes, pos, support); + manager.modiFileEx(cluster.getClusterUID(), resourceKey, offset, size, bytes, pos, support); + + } + + @Override + void set(int resourceKey, byte[] bytes, int length) + throws DatabaseException { + + byte[] old = cluster.getValue(resourceKey, support); + boolean oldValueEx = cluster.isValueEx(resourceKey); + cluster = (ClusterImpl)cluster.setValue(resourceKey, valueBuffer, length, support); + info.ccs.oldValueEx.add(oldValueEx ? 
(byte)1 : 0); + info.ccs.oldValues.add(old); + + } + + @Override + void claim(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid) + throws DatabaseException { + + ClusterImpl c = (ClusterImpl)cluster.addRelation(resourceKey, puid, predicateKey, ouid, objectKey, support); + if(c != null) cluster = c; + info.ccs.statementMask.add(c != null ? (byte)1 : 0); + + } + + @Override + void deny(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid) + throws DatabaseException { + + boolean modified = cluster.removeRelation(resourceKey, predicateKey, objectKey, support); + info.ccs.statementMask.add(modified ? (byte)1 : 0); + + } + + public ClusterImpl process(ClusterImpl cluster) throws IllegalAcornStateException { + this.cluster = cluster; + process(); + info.finish(); + return this.cluster; + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor2.java new file mode 100644 index 000000000..61a8a8a9d --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor2.java @@ -0,0 +1,31 @@ +package org.simantics.acorn.internal; + +import org.simantics.acorn.cluster.ClusterImpl; +import org.simantics.acorn.exception.IllegalAcornStateException; +import org.simantics.acorn.lru.ClusterUpdateOperation; +import org.simantics.db.impl.ClusterSupport; + +public class ClusterUpdateProcessor2 extends ClusterUpdateProcessorBase2 { + + final ClusterSupport support; + final ClusterUpdateOperation info; + private ClusterImpl cluster; + + public ClusterUpdateProcessor2(ClusterSupport support, byte[] operations, ClusterUpdateOperation info) { + super(operations); + this.support = support; + this.info = info; + } + + public void process(ClusterImpl cluster) throws IllegalAcornStateException { + this.cluster = cluster; + process(); + info.finish(); + } + + 
@Override + void setImmutable(boolean value) { + cluster.setImmutable(value, support); + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase.java new file mode 100644 index 000000000..cd8130d9c --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase.java @@ -0,0 +1,476 @@ +package org.simantics.acorn.internal; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import org.simantics.acorn.ClusterManager; +import org.simantics.acorn.exception.IllegalAcornStateException; +import org.simantics.acorn.internal.ClusterStream.ClusterEnum; +import org.simantics.acorn.internal.ClusterStream.Data; +import org.simantics.acorn.internal.ClusterStream.StmEnum; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.service.Bytes; +import org.simantics.db.service.ClusterUID; + +abstract public class ClusterUpdateProcessorBase { + + public final static boolean DEBUG = false; + + final protected ClusterManager manager; + final public byte[] bytes; + private int pos = 0; + final private int len; + final private ClusterUID uid; + final private int clusterKey; + final public int version; + + final Map clusterKeyCache = new HashMap(); + + public int getResourceKey(ClusterUID uid, int index) throws IllegalAcornStateException { + Integer match = clusterKeyCache.get(uid); + if(match != null) return match+index; + int key = manager.getResourceKeyWitoutMutex(uid, 0); + clusterKeyCache.put(uid, key); + return key+index; + } + + + public ClusterUpdateProcessorBase(ClusterManager client, byte[] operations) throws DatabaseException { + this.manager = client; + this.bytes = operations; + this.len = Bytes.readLE4(bytes, 0)+4; // whatta? 
+ version = Bytes.readLE4(bytes, 4); + long cuid1 = Bytes.readLE8(bytes, 8); + long cuid2 = Bytes.readLE8(bytes, 16); + uid = ClusterUID.make(cuid1, cuid2); + pos = 24; + client.clusterLRU.acquireMutex(); + try { + clusterKey = client.clusterLRU.getClusterKeyByUID(cuid1, cuid2) << 12; + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + client.clusterLRU.releaseMutex(); + } + } + + public ClusterUID getClusterUID() { + return uid; + } + + private void processCreate() { + int r = Bytes.readLE2(bytes, pos); + pos+=2; + if(DEBUG) System.err.println("DEBUG: New ri=" + r + " offset=" + (pos-3-24)); + try { + create(); + } catch (DatabaseException e) { + e.printStackTrace(); + } + + } + + private void processDelete() { + + int ri = Bytes.readLE2(bytes, pos); + pos += 2; + + if(DEBUG) System.err.println("DEBUG: Delete " + ri); + + try { + delete(ri); + } catch (DatabaseException e) { + e.printStackTrace(); + } + + } + + private void processModify(int op) { + + int ri = Bytes.readLE2(bytes, pos); + pos+=2; + long offset = Bytes.readLE7(bytes, pos); + pos+=7; + int size = Bytes.readLE2(bytes, pos); + pos+=2; + + offset += (ri>>14) << 56; + ri = ri & 0x3FFF; + + if(size < 0) + throw new IllegalStateException(); + if(ri < 1) + throw new IllegalStateException(); + if(ri > 4095) + throw new IllegalStateException(); + + if(DEBUG) System.err.println("DEBUG: Modify " + ri + " " + offset + " " + size + " offset=" + (pos-1-24) + " " + Arrays.toString(Arrays.copyOf(valueBuffer,size))); + + try { + modify(clusterKey + ri, offset, size, bytes, pos); + } catch (DatabaseException e) { + e.printStackTrace(); + } + + pos += size; + + } + + private void processSet(int op) { + + int s = Bytes.readLE4(bytes, pos); + int length = (s >> 14); + if(length < 1) + throw new IllegalStateException(); + int r = s & 0x3FFF; + + pos += 4; + System.arraycopy(bytes, pos, valueBuffer, 0, length); + pos += length; + + if(DEBUG) System.err.println("DEBUG: Set " + r + " " + length + 
" offset=" + (pos-1-24) + " " + Arrays.toString(Arrays.copyOf(valueBuffer,length))); + + try { + set(clusterKey+r, valueBuffer, length); + } catch (DatabaseException e) { + e.printStackTrace(); + } + + } + + byte[] valueBuffer = new byte[65536]; + + private void processSetShort(int op) { + + int s = Bytes.readLE2(bytes, pos); + int length = ((op&7)<<2) + (s >> 14); + if(length < 1) + throw new IllegalStateException(); + if(length > 31) + throw new IllegalStateException(); + int r = s & 0x3FFF; + + if(DEBUG) System.err.println("DEBUG: SetShort " + r + " " + length + " offset=" + (pos-1-24) + " " + Arrays.toString(Arrays.copyOf(valueBuffer,length))); + pos += 2; + + System.arraycopy(bytes, pos, valueBuffer, 0, length); + pos += length; + + try { + set(clusterKey+r, valueBuffer, length); + } catch (DatabaseException e) { + e.printStackTrace(); + } + + } + + private void processStatementResource(ClusterEnum enu, int pOrO) { + if(ClusterEnum.ForeignShort == enu) { + int fs = bytes[pos++]&0xff; + foreignRefs[pOrO] = fs; + } else if(ClusterEnum.Local == enu) { + int lo = bytes[pos++]&0xff; + lows[pOrO] = lo; + } else { + long l1 = Bytes.readLE8(bytes, pos); + pos += 8; + long l2 = Bytes.readLE8(bytes, pos); + pos += 8; + ClusterUID cuid = ClusterUID.make(l1, l2); + foreignClusters[foreignPos] = cuid; + int lo = bytes[pos++]&0xff; + foreignIndices[foreignPos] = lo; + foreignRefs[pOrO] = foreignPos; + foreignPos++; + lows[pOrO] = lo; + } + } + + ClusterUID[] foreignClusters = new ClusterUID[256]; + int[] foreignIndices = new int[256]; + int foreignPos = 0; + int lows[] = new int[2]; + int foreignRefs[] = new int[2]; + + private void processStatement(int op, StmEnum stmEnum, ClusterEnum p, ClusterEnum o) throws IllegalAcornStateException { + + int curPos = pos-1-24; + + processStatementResource(p, 0); + processStatementResource(o, 1); + + int ri = bytes[pos++]&0xff; + int pi = 0; + int oi = 0; + + ClusterUID puid = uid; + ClusterUID ouid = puid; + + 
if(ClusterEnum.ForeignShort == p && ClusterEnum.ForeignShort == o) { + ri |= (op&0x3F) << 8; + } else { + Data data = ClusterEnum.getData(stmEnum, p, o); + // data.left is the amount of bytes in last two bytes + if(data.bytes == 0) { + ri = ri | ((op&0x3F)<<8); + } else { + int extra = 0; + int opBits = data.bits; + int extraBits = 6-opBits; + if(data.bytes == 1) { + extra = bytes[pos++]&0xff; + int high = extra >> extraBits; + if(ClusterEnum.ForeignShort == p) { + oi = lows[1] + (high<<8); + } else { + pi = lows[0] + (high<<8); + } + } else { + extra = Bytes.readLE2(bytes, pos); + pos += 2; + int high1 = (extra >> extraBits)&((1<<6)-1); + int high2 = (extra >> (extraBits+6))&((1<<6)-1); + if(ClusterEnum.ForeignShort == p) { + oi = lows[1] + (high1<<8); + } else { + pi = lows[0] + (high1<<8); + oi = lows[1] + (high2<<8); + } + } + ri = ri | ((extra&((1< 4095) + throw new IllegalStateException(); + if(pi > 4095) + throw new IllegalStateException(); + if(oi > 4095) + throw new IllegalStateException(); + + if(StmEnum.Add == stmEnum) { + + if(DEBUG) + System.err.println("DEBUG: ClusterChange " + uid + ": Add ri=" + ri + " pi=" + pi + " oi=" + oi + " pc=" + puid + " oc=" + ouid + " offset=" + curPos + " " + p.ordinal + " " + o.ordinal); + + int predicateKey = getResourceKey(puid, pi); + int objectKey = getResourceKey(ouid, oi); + try { + claim(clusterKey+ri, predicateKey, objectKey, puid, ouid); + } catch (DatabaseException e) { + e.printStackTrace(); + } + + } else { + + if(DEBUG) + System.err.println("DEBUG: ClusterChange " + uid + ": Rem ri=" + ri + " pi=" + pi + " oi=" + oi + " pc=" + puid + " oc=" + ouid + " offset=" + curPos + " " + p.ordinal + " " + o.ordinal); + + int predicateKey = getResourceKey(puid, pi); + int objectKey = getResourceKey(ouid, oi); + try { + deny(clusterKey+ri, predicateKey, objectKey, puid, ouid); + } catch (DatabaseException e) { + e.printStackTrace(); + } + + } + + } + + public void process() throws IllegalAcornStateException { + + 
foreignPos = 0; + + if(DEBUG) System.err.println("DEBUG: process " + uid + " " + len); + + // op resolution for statement operation: + + // 2 first bits + // op: 01 | r8-13 + // op: 10 | r8-13 + + // 3 first bits (000) + // op: 000000 | r12-13 + // op: 000001 | r12-13 + // op: 000010 | r12-13 + // op: 000011 | r12-13 + // op: 000100 | r12-13 + // op: 000101 | r12-13 + // op: 000110 | r12-13 + // op: 000111 | r12-13 + + // 4 first bits + // op: 1100 | r10-13 + // op: 1101 | r10-13 + // op: 1110 | r10-13 + // op: 1111 | r10-13 + // op: 0010 | r10-13 + + // 6 bits + // op: 00110001 = 49 + // op: 00110010 = 50 + // op: 00110011 = 51 + // other: 0011xxxx + + while(pos < len) { + + int op = bytes[pos++]&0xff; + + // common prefix: 0011 + switch(op) { + + case 49: + processStatement(op, StmEnum.Remove, ClusterEnum.Local, ClusterEnum.ForeignShort); + break; + case 50: + processStatement(op, StmEnum.Remove, ClusterEnum.ForeignShort, ClusterEnum.ForeignLong); + break; + case 51: + processStatement(op, StmEnum.Remove, ClusterEnum.ForeignLong, ClusterEnum.ForeignShort); + break; + // 52 = 32+16+4 = 00110100 + case 52: + processCreate(); + break; + // 53 = 32+16+4+1 = 00110101 + case 53: + processSet(op); + break; + // 54 = 32+16+4+2 = 00110110 + case 54: + processDelete(); + break; + // 55 = 32+16+4+2+1 = 00110111 + case 55: + processModify(op); + break; + default: + + int bits6 = ((int)op)&0xC0; + switch(bits6) { + + case 0x40: + processStatement(op, StmEnum.Add, ClusterEnum.ForeignShort, ClusterEnum.ForeignShort); + break; + case 0x80: + processStatement(op, StmEnum.Remove, ClusterEnum.ForeignShort, ClusterEnum.ForeignShort); + break; + default: + + int bits5 = ((int)op)&0xE0; + if(bits5 == 0) { + + int bits2 = (((int)op)&0xFC) >> 2; + + // 3 top bits are 0 + // 6 bits of op + + switch(bits2) { + + case 0: + processStatement(op, StmEnum.Add, ClusterEnum.Local, ClusterEnum.Local); + break; + case 1: + processStatement(op, StmEnum.Remove, ClusterEnum.Local, ClusterEnum.Local); 
+ break; + case 2: + processStatement(op, StmEnum.Add, ClusterEnum.Local, ClusterEnum.ForeignLong); + break; + case 3: + processStatement(op, StmEnum.Remove, ClusterEnum.Local, ClusterEnum.ForeignLong); + break; + case 4: + processStatement(op, StmEnum.Add, ClusterEnum.ForeignLong, ClusterEnum.Local); + break; + case 5: + processStatement(op, StmEnum.Remove, ClusterEnum.ForeignLong, ClusterEnum.Local); + break; + case 6: + processStatement(op, StmEnum.Add, ClusterEnum.ForeignLong, ClusterEnum.ForeignLong); + break; + case 7: + processStatement(op, StmEnum.Remove, ClusterEnum.ForeignLong, ClusterEnum.ForeignLong); + break; + + } + + } else { + + // 4 top bits of op + // 4 low bits of payload + + int bits4 = (((int)op)&0xF0)>>4; + switch(bits4) { + case 0b1100: + processStatement(op, StmEnum.Add, ClusterEnum.Local, ClusterEnum.ForeignShort); + break; + case 0b1101: + processStatement(op, StmEnum.Add, ClusterEnum.ForeignShort, ClusterEnum.Local); + break; + case 0b1110: + processStatement(op, StmEnum.Add, ClusterEnum.ForeignShort, ClusterEnum.ForeignLong); + break; + case 0b1111: + processStatement(op, StmEnum.Add, ClusterEnum.ForeignLong, ClusterEnum.ForeignShort); + break; + case 0b0010: + processStatement(op, StmEnum.Remove, ClusterEnum.ForeignShort, ClusterEnum.Local); + break; + case 0b0011: + int bits3 = (((int)op)&0xF8)>>3; + if(bits3 == 7) + processSetShort(op); + break; + } + + } + + } + + } + + } + + } + + + abstract void create() throws DatabaseException; + abstract void delete(int resourceIndex) throws DatabaseException; + abstract void modify(int resourceKey, long offset, int size, byte[] bytes, int pos) throws DatabaseException; + abstract void set(int resourceKey, byte[] bytes, int length) throws DatabaseException; + + abstract void claim(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid) throws DatabaseException; + abstract void deny(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid) 
throws DatabaseException; + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase2.java new file mode 100644 index 000000000..502729c0b --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase2.java @@ -0,0 +1,62 @@ +package org.simantics.acorn.internal; + +import org.simantics.acorn.exception.IllegalAcornStateException; +import org.simantics.db.service.Bytes; +import org.simantics.db.service.ClusterUID; + +public abstract class ClusterUpdateProcessorBase2 { + + final private byte[] bytes; + private int pos = 0; + final private int len; + final private ClusterUID uid; + + public ClusterUpdateProcessorBase2(byte[] operations) { + this.bytes = operations; + this.len = Bytes.readLE4(bytes, 0) + 4; // whatta? + int version = Bytes.readLE4(bytes, 4); + assert(version == ClusterChange2.VERSION); + long cuid1 = Bytes.readLE8(bytes, 8); + long cuid2 = Bytes.readLE8(bytes, 16); + pos = 24; + uid = ClusterUID.make(cuid1, cuid2); + } + + public ClusterUID getClusterUID() { + return uid; + } + + private void processSetImmutable(int op) { + int value = bytes[pos++]&0xff; + setImmutable(value > 0); + } + + private void processUndoValue(int op) { + Bytes.readLE4(bytes, pos); + pos+=4; + } + + public void process() throws IllegalAcornStateException { + + while(pos < len) { + + int op = bytes[pos++]&0xff; + + switch(op) { + + case ClusterChange2.SET_IMMUTABLE_OPERATION: + processSetImmutable(op); + break; + case ClusterChange2.UNDO_VALUE_OPERATION: + processUndoValue(op); + break; + default: + throw new IllegalAcornStateException("Can not process cluster " + uid); + + } + } + } + + abstract void setImmutable(boolean value); + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/DebugPolicy.java 
b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/DebugPolicy.java new file mode 100644 index 000000000..d694abe83 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/DebugPolicy.java @@ -0,0 +1,19 @@ +package org.simantics.acorn.internal; + + +/** + * @author Antti Villberg + */ +public final class DebugPolicy { + + public static final boolean REPORT_RESOURCE_ID_ALLOCATION = false; + public static final boolean REPORT_CLUSTER_ID_ALLOCATION = false; + public static final boolean REPORT_CLUSTER_EVENTS = false; + public static final boolean REPORT_CLUSTER_LOADING = false; + public static final boolean REPORT_CLUSTER_LOADING_STACKS = false; + public static final boolean REPORT_CLUSTER_STREAM = false; + public static final boolean CLUSTER_COLLECTION = false; + public static final boolean LOG_SERVER_EVENTS = false; + public static final boolean SHOW_SERVER_EVENTS = false; // Requires LOG_SERVER_EVENTS to be true. + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/UndoClusterUpdateProcessor.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/UndoClusterUpdateProcessor.java new file mode 100644 index 000000000..8b3e4f066 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/UndoClusterUpdateProcessor.java @@ -0,0 +1,114 @@ +package org.simantics.acorn.internal; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.simantics.acorn.ClusterManager; +import org.simantics.acorn.exception.AcornAccessVerificationException; +import org.simantics.acorn.exception.IllegalAcornStateException; +import org.simantics.acorn.lru.ClusterChangeSet; +import org.simantics.acorn.lru.ClusterStreamChunk; +import org.simantics.acorn.lru.ClusterChangeSet.Entry; +import org.simantics.acorn.lru.ClusterChangeSet.Type; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.service.ClusterUID; + +public class 
UndoClusterUpdateProcessor extends ClusterUpdateProcessorBase { + + public final static boolean DEBUG = false; + + final private ClusterChangeSet ccs; + + private int oldValuesIndex = 0; + private int statementMaskIndex = 0; + + final public List entries = new ArrayList(); + + public UndoClusterUpdateProcessor(ClusterManager client, ClusterStreamChunk chunk, ClusterChangeSet ccs) throws DatabaseException { + super(client, readOperation(client, chunk, ccs)); + this.ccs = ccs; + } + + private static byte[] readOperation(ClusterManager manager, ClusterStreamChunk chunk, ClusterChangeSet ccs) throws AcornAccessVerificationException, IllegalAcornStateException { + +// ClusterStreamChunk chunk; +// manager.streamLRU.acquireMutex(); +// try { +// chunk = ccs.getChunk(manager); +// } catch (Throwable t) { +// throw new IllegalStateException(t); +// } finally { +// manager.streamLRU.releaseMutex(); +// } +// +// chunk.acquireMutex(); +// try { +// chunk.ve + chunk.makeResident(); + return chunk.getOperation(ccs.chunkOffset); +// } catch (Throwable t) { +// throw new IllegalStateException(t); +// } finally { +// chunk.releaseMutex(); +// } + } + + @Override + void create() throws DatabaseException { + } + + @Override + void delete(int ri) throws DatabaseException { + + byte[] old = ccs.oldValues.get(oldValuesIndex); + boolean oldValueEx = ccs.oldValueEx.get(oldValuesIndex) > 0; + oldValuesIndex++; + + if(old != null) { + entries.add(new Entry(ri, oldValueEx, old, null)); + } + + } + + @Override + void modify(int resourceKey, long offset, int size, byte[] bytes, int pos) + throws DatabaseException { + + } + + @Override + void set(int resourceKey, byte[] bytes, int length) + throws DatabaseException { + + byte[] old = ccs.oldValues.get(oldValuesIndex); + boolean oldValueEx = ccs.oldValueEx.get(oldValuesIndex) > 0; + oldValuesIndex++; + + entries.add(new Entry(resourceKey, oldValueEx, old, Arrays.copyOf(valueBuffer, length))); + + } + + @Override + void claim(int resourceKey, 
int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid) + throws DatabaseException { + + boolean add = ccs.statementMask.get(statementMaskIndex++) > 0; + if(add) { + entries.add(new Entry(Type.ADD, resourceKey, puid, predicateKey & 0xFFF, ouid, objectKey & 0xFFF)); + } + + } + + @Override + void deny(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid) + throws DatabaseException { + + boolean remove = ccs.statementMask.get(statementMaskIndex++) > 0; + if(remove) { + entries.add(new Entry(Type.REMOVE, resourceKey, puid, predicateKey & 0xFFF, ouid, objectKey & 0xFFF)); + } + + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/AccessTime.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/AccessTime.java new file mode 100644 index 000000000..8a32ef230 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/AccessTime.java @@ -0,0 +1,23 @@ +package org.simantics.acorn.lru; + +public class AccessTime { + + private long last = 0; + + private static AccessTime INSTANCE = new AccessTime(); + + private AccessTime() { + + } + + public static AccessTime getInstance() { + return INSTANCE; + } + + public synchronized long getAccessTime() { + long result = System.nanoTime(); + last = Math.max(result, last+1); + return last; + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/CachingClusterSupport.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/CachingClusterSupport.java new file mode 100644 index 000000000..a2c489901 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/CachingClusterSupport.java @@ -0,0 +1,160 @@ +package org.simantics.acorn.lru; + +import java.io.InputStream; + +import org.simantics.db.Session; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.impl.ClusterBase; +import org.simantics.db.impl.ClusterSupport; +import org.simantics.db.impl.IClusterTable; +import 
package org.simantics.acorn.lru;

import java.io.InputStream;

import org.simantics.db.Session;
import org.simantics.db.exception.DatabaseException;
import org.simantics.db.impl.ClusterBase;
import org.simantics.db.impl.ClusterSupport;
import org.simantics.db.impl.IClusterTable;
import org.simantics.db.service.ClusterUID;

/**
 * Minimal {@link ClusterSupport} adapter around another support instance.
 * Only {@link #createResource(Object, short, long)} is delegated to the
 * wrapped backend; every other operation is intentionally unsupported and
 * throws {@link UnsupportedOperationException}.
 */
public class CachingClusterSupport implements ClusterSupport {

    /** Support implementation that resource creation is forwarded to. */
    private ClusterSupport backend;

    public CachingClusterSupport(ClusterSupport backend) {
        this.backend = backend;
    }

    // ------------------------------------------------------------------
    // The single delegated operation.
    // ------------------------------------------------------------------

    @Override
    public void createResource(Object cluster, short resourceIndex, long clusterId) {
        backend.createResource(cluster, resourceIndex, clusterId);
    }

    // ------------------------------------------------------------------
    // Cluster lookup / creation: unsupported.
    // ------------------------------------------------------------------

    @Override
    public int createClusterKeyByClusterUID(ClusterUID clusterUID, long clusterId) {
        throw new UnsupportedOperationException();
    }

    @Override
    public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) {
        throw new UnsupportedOperationException();
    }

    @Override
    public ClusterBase getClusterByClusterId(long clusterId) {
        throw new UnsupportedOperationException();
    }

    @Override
    public ClusterBase getClusterByClusterKey(int clusterKey) {
        throw new UnsupportedOperationException();
    }

    @Override
    public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) {
        throw new UnsupportedOperationException();
    }

    @Override
    public int getClusterKeyByClusterUIDOrMake(long id1, long id2) {
        throw new UnsupportedOperationException();
    }

    @Override
    public ClusterBase getClusterByResourceKey(int resourceKey) {
        throw new UnsupportedOperationException();
    }

    @Override
    public long getClusterIdOrCreate(ClusterUID clusterUID) {
        throw new UnsupportedOperationException();
    }

    // ------------------------------------------------------------------
    // Statement / value mutation hooks: unsupported.
    // ------------------------------------------------------------------

    @Override
    public void addStatement(Object cluster) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void cancelStatement(Object cluster) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void removeStatement(Object cluster) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void cancelValue(Object cluster) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void removeValue(Object cluster) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void setValue(Object cluster, long clusterId, byte[] bytes, int length) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void modiValue(Object cluster, long clusterId, long voffset, int length, byte[] bytes, int offset) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void setImmutable(Object cluster, boolean immutable) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void setDeleted(Object cluster, boolean deleted) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void addStatementIndex(Object cluster, int resourceKey, ClusterUID clusterUID, byte op) {
        throw new UnsupportedOperationException();
    }

    // ------------------------------------------------------------------
    // Stream control and value access: unsupported.
    // ------------------------------------------------------------------

    @Override
    public void setStreamOff(boolean setOff) {
        throw new UnsupportedOperationException();
    }

    @Override
    public boolean getStreamOff() {
        throw new UnsupportedOperationException();
    }

    @Override
    public InputStream getValueStreamEx(int resourceIndex, long clusterId) throws DatabaseException {
        throw new UnsupportedOperationException();
    }

    @Override
    public byte[] getValueEx(int resourceIndex, long clusterId) throws DatabaseException {
        throw new UnsupportedOperationException();
    }

    @Override
    public byte[] getValueEx(int resourceIndex, long clusterId, long voffset, int length) throws DatabaseException {
        throw new UnsupportedOperationException();
    }

    @Override
    public long getValueSizeEx(int resourceIndex, long clusterId) throws DatabaseException {
        throw new UnsupportedOperationException();
    }

    @Override
    public int wait4RequestsLess(int limit) throws DatabaseException {
        throw new UnsupportedOperationException();
    }

    @Override
    public Session getSession() {
        throw new UnsupportedOperationException();
    }

    @Override
    public IClusterTable getClusterTable() {
        throw new UnsupportedOperationException();
    }

}
b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ChangeSetInfo.java new file mode 100644 index 000000000..a730e136d --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ChangeSetInfo.java @@ -0,0 +1,113 @@ +package org.simantics.acorn.lru; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; + +import org.simantics.acorn.exception.AcornAccessVerificationException; +import org.simantics.acorn.exception.IllegalAcornStateException; +import org.simantics.db.service.Bytes; +import org.simantics.utils.datastructures.Pair; + +import gnu.trove.list.array.TByteArrayList; + +public class ChangeSetInfo extends LRUObject { + + private byte[] metadataBytes; + private ArrayList clusterChangeSetIds; + + // Stub + public ChangeSetInfo(LRU LRU, Path readDir, Long revision, int offset, int length) throws AcornAccessVerificationException { + super(LRU, revision, readDir, "clusterStream", offset, length, false, false); + LRU.map(this); + } + + // New + public ChangeSetInfo(LRU LRU, Long revision, byte[] bytes, ArrayList clusterChangeSetIds) throws AcornAccessVerificationException { + super(LRU, revision, LRU.getDirectory(), "clusterStream", true, true); + this.metadataBytes = bytes; + this.metadataBytes = bytes; + this.clusterChangeSetIds = clusterChangeSetIds; + LRU.insert(this, accessTime); + } + + public ArrayList getCSSIds() throws AcornAccessVerificationException { + if(VERIFY) verifyAccess(); + return clusterChangeSetIds; + } + + public byte[] getMetadataBytes() throws AcornAccessVerificationException, IllegalAcornStateException { + if(VERIFY) + verifyAccess(); + + makeResident(); + return metadataBytes; + } + + private static void writeLE(TByteArrayList bytes, int value) { + + bytes.add( (byte) (value & 0xFF)); + bytes.add((byte) ((value >>> 8) & 0xFF)); + bytes.add((byte) ((value >>> 16) & 0xFF)); + bytes.add((byte) ((value >>> 24) & 0xFF)); + + } + + @Override + protected Pair toBytes() { + + TByteArrayList 
result = new TByteArrayList(); + writeLE(result, metadataBytes.length); + result.add(metadataBytes); + writeLE(result, clusterChangeSetIds.size()); + for(String id : clusterChangeSetIds) { + byte[] bb = id.getBytes(); + writeLE(result, bb.length); + result.add(bb); + } + + release(); + + byte[] ret = result.toArray(); + + return Pair.make(ret, ret.length); + + } + + @Override + void release() { + clusterChangeSetIds = null; + metadataBytes = null; + } + + @Override + public void fromFile(byte[] data) { + + clusterChangeSetIds = new ArrayList(); + + int metadataLength = Bytes.readLE4(data, 0); + metadataBytes = Arrays.copyOfRange(data, 4, 4+metadataLength); + int offset = 4+metadataLength; + int numberOfChangeSets = Bytes.readLE4(data, offset); + offset += 4; + for(int i=0;i oldValues = new ArrayList(); + + public ClusterChangeSet(String id ,ClusterUID cuid) { + this.id = id; + this.cuid = cuid; + String[] ss = id.split("\\."); + chunkKey = ss[0]; + chunkOffset = Integer.parseInt(ss[1]); + } + + public ClusterStreamChunk getChunk(ClusterManager manager) throws AcornAccessVerificationException { + return manager.streamLRU.get(chunkKey); + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java new file mode 100644 index 000000000..57dfe9f41 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java @@ -0,0 +1,342 @@ +package org.simantics.acorn.lru; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.Path; + +import org.simantics.acorn.ClusterManager; +import org.simantics.acorn.Persistable; +import org.simantics.acorn.cluster.ClusterImpl; +import org.simantics.acorn.cluster.ClusterSmall; +import org.simantics.acorn.exception.AcornAccessVerificationException; +import org.simantics.acorn.exception.IllegalAcornStateException; +import org.simantics.acorn.cluster.ClusterImpl.ClusterTables; +import 
org.simantics.acorn.internal.ClusterSupport2; +import org.simantics.compressions.CompressionCodec; +import org.simantics.compressions.Compressions; +import org.simantics.db.ClusterCreator; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.exception.SDBException; +import org.simantics.db.service.Bytes; +import org.simantics.db.service.ClusterUID; +import org.simantics.utils.datastructures.Pair; + +public class ClusterInfo extends LRUObject implements Persistable { + + final private ClusterManager manager; + private ClusterImpl cluster; + public int changeSetId; + private ClusterUpdateState updateState; + + public static final String COMPRESSION = "LZ4"; + + // Stub + public ClusterInfo(ClusterManager manager, LRU LRU, Path readDirectory, ClusterUID uid, int offset, int length) throws AcornAccessVerificationException { + super(LRU, uid, readDirectory, uid.toString() + ".cluster", offset, length, false, false); + this.manager = manager; + this.cluster = null; + LRU.map(this); + } + + // New + public ClusterInfo(ClusterManager manager, LRU LRU, ClusterImpl cluster) throws AcornAccessVerificationException, IllegalAcornStateException { + super(LRU, cluster.getClusterUID(), LRU.getDirectory(), cluster.getClusterUID().toString() + ".cluster", true, true); + this.manager = manager; + this.cluster = cluster; + LRU.insert(this, accessTime); + LRU.swap(getKey()); + } + + public T clone(ClusterUID uid, ClusterCreator creator) throws IOException, AcornAccessVerificationException, IllegalAcornStateException { + + // Updates have been ensured at this point + + acquireMutex(); + + try { + if(isResident()) { + ClusterTables tables = cluster.store(); + return creator.create(uid, tables.bytes, tables.ints, tables.longs); + } + } catch (IOException e) { + throw e; + } catch (Throwable t) { + throw new IllegalAcornStateException(t); + } finally { + releaseMutex(); + } + + // Ensure pending updates here - this may take some time + LRU.waitPending(this, 
false); + + acquireMutex(); + try { + + if(isResident()) { + ClusterTables tables = cluster.store(); + return creator.create(uid, tables.bytes, tables.ints, tables.longs); + } else { + byte[] data = readFile(); + ClusterTables tables = new ClusterTables(); + loadCluster(getKey(), manager.support, data, tables); + return creator.create(uid, tables.bytes, tables.ints, tables.longs); + } + + } catch (IOException e) { + throw e; + } catch (Throwable t) { + throw new IllegalAcornStateException(t); + } finally { + releaseMutex(); + } + + } + + static class ClusterDecompressor { + + byte[] decompressBuffer = new byte[1024*1024]; + + public synchronized ClusterTables readCluster(ClusterUID uid, byte[] compressed) throws IOException { + + int deflatedSize = Bytes.readLE4(compressed, compressed.length-4); + + if(decompressBuffer.length < deflatedSize) + decompressBuffer = new byte[Math.max(3*decompressBuffer.length / 2, deflatedSize)]; + + CompressionCodec codec = Compressions.get(Compressions.LZ4); + + ByteBuffer input = ByteBuffer.wrap(compressed); + ByteBuffer output = ByteBuffer.wrap(decompressBuffer); + + int decompressedSize = codec.decompressBuffer(input, 0, compressed.length-4, output, 0, decompressBuffer.length); + assert(decompressedSize <= decompressBuffer.length); + + int byteLength = Bytes.readLE4(decompressBuffer, 0); + int intLength = Bytes.readLE4(decompressBuffer, 4); + int longLength = Bytes.readLE4(decompressBuffer, 8); + + byte[] bytes = new byte[byteLength]; + int[] ints = new int[intLength]; + long[] longs = new long[longLength]; + + System.arraycopy(decompressBuffer, 12, bytes, 0, byteLength); + + int offset = 12+byteLength; + for(int i=0;i toBytes() throws IllegalAcornStateException { + try { + byte[] raw = null; + + if(cluster instanceof ClusterSmall) { + raw = cluster.storeBytes(); + } else { + + ClusterTables tables = cluster.store(); + + raw = new byte[12 + tables.bytes.length + (tables.ints.length<<2) + (tables.longs.length<<3)]; + + 
Bytes.writeLE(raw, 0, tables.bytes.length); + Bytes.writeLE(raw, 4, tables.ints.length); + Bytes.writeLE(raw, 8, tables.longs.length); + + System.arraycopy(tables.bytes, 0, raw, 12, tables.bytes.length); + int offset = 12+tables.bytes.length; + for(int i=0;i { + + final private BijectionMap clusterMapping = new BijectionMap(); + + public ClusterLRU(ClusterManager manager, String identifier, Path writeDir) { + super(manager, identifier, writeDir); + + clusterMapping.map(ClusterUID.make(0,2), clusterMapping.size() + 1); + } + + public ClusterInfo getOrCreate(ClusterUID uid, boolean makeIfNull) throws IllegalAcornStateException, AcornAccessVerificationException { + + try { + + acquireMutex(); + + ClusterInfo info = get(uid); + + if (info == null) { + + if(!makeIfNull) throw new IllegalAcornStateException("Asked for an existing cluster " + uid + " that was not found."); + + Integer clusterKey = clusterMapping.getRight(uid); + if (clusterKey == null) { + clusterKey = clusterMapping.size() + 1; + clusterMapping.map(uid, clusterKey); + } + + info = new ClusterInfo(manager, this, ClusterImpl.make(manager.support, + uid, clusterKey, manager.support)); + + } + + return info; + } catch (IllegalAcornStateException | AcornAccessVerificationException e) { + throw e; + } catch (Throwable t) { + throw new IllegalAcornStateException(t); + } finally { + + releaseMutex(); + + } + + } + + /* + * This method waits - we have no locks here + */ + public void ensureUpdates(ClusterUID uid) throws ClusterDoesNotExistException, AcornAccessVerificationException, IllegalAcornStateException { + + ClusterInfo info = getWithoutMutex(uid); + if(info == null) + throw new ClusterDoesNotExistException("Asked a cluster which does not exist: " + uid); + info.waitForUpdates(); + } + + public ClusterInfo get(ClusterUID uid, boolean makeIfNull, boolean ensureUpdates) throws AcornAccessVerificationException, IllegalAcornStateException { + + if (ensureUpdates) { + try { + ensureUpdates(uid); + } catch 
(ClusterDoesNotExistException e) { + if (makeIfNull) { + Logger.defaultLogError("For debug purposes, creating cluster which does not exist", e); + } else { + throw new IllegalAcornStateException(e); + } + } + } + return getOrCreate(uid, makeIfNull); + } + + public ClusterInfo get(ClusterUID uid, boolean makeIfNull) throws AcornAccessVerificationException, IllegalAcornStateException { + return get(uid, makeIfNull, true); + } + + public int getResourceKey(ClusterUID uid, int index) throws AcornAccessVerificationException { + + if(VERIFY) verifyAccess(); + + Integer i = clusterMapping.getRight(uid); + if (i == null) { + i = clusterMapping.size() + 1; + clusterMapping.map(uid, i); + } + return (i << 12) + index; + + } + + public int getResourceKeyWithoutMutex(ClusterUID uid, int index) throws IllegalAcornStateException { + + acquireMutex(); + try { + return getResourceKey(uid, index); + } catch (Throwable t) { + throw new IllegalAcornStateException(t); + } finally { + releaseMutex(); + } + } + + public int createClusterKeyByClusterUID(ClusterUID uid) throws AcornAccessVerificationException { + + if(VERIFY) verifyAccess(); + + Integer i = clusterMapping.getRight(uid); + if (i == null) { + i = clusterMapping.size() + 1; + clusterMapping.map(uid, i); + } + return i; + + } + + public ClusterBase getClusterByClusterUIDOrMake(ClusterUID uid) throws AcornAccessVerificationException, IllegalAcornStateException { + + if(VERIFY) verifyAccess(); + + int key = createClusterKeyByClusterUID(uid); + return getClusterByClusterKey(key); + + } + + public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) throws AcornAccessVerificationException { + + if(VERIFY) verifyAccess(); + + return createClusterKeyByClusterUID(clusterUID); + + } + + public int getClusterKeyByClusterUIDOrMakeWithoutMutex(ClusterUID clusterUID) throws IllegalAcornStateException, AcornAccessVerificationException { + acquireMutex(); + try { + return getClusterKeyByClusterUIDOrMake(clusterUID); + } catch 
(AcornAccessVerificationException e) { + throw e; + } catch (Throwable t) { + throw new IllegalAcornStateException(t); + } finally { + releaseMutex(); + } + } + + public ClusterBase getClusterByClusterKey(int clusterKey) throws AcornAccessVerificationException, IllegalAcornStateException { + + if(VERIFY) verifyAccess(); + + ClusterUID uid = clusterMapping.getLeft(clusterKey); + ClusterInfo info = get(uid, true); + info.acquireMutex(); + try { + return info.getCluster(); + } catch (IllegalAcornStateException | AcornAccessVerificationException e) { + throw e; + } catch (Throwable t) { + throw new IllegalAcornStateException(t); + } finally { + info.releaseMutex(); + } + } + + public ClusterUID getClusterUIDByResourceKey(int resourceKey) throws AcornAccessVerificationException { + + if(VERIFY) verifyAccess(); + + int clusterKey = resourceKey >> 12; + return clusterMapping.getLeft(clusterKey); + + } + + public ClusterUID getClusterUIDByResourceKeyWithoutMutex(int resourceKey) throws IllegalAcornStateException, AcornAccessVerificationException { + acquireMutex(); + try { + return getClusterUIDByResourceKey(resourceKey); + } finally { + releaseMutex(); + } + } + + @SuppressWarnings("unchecked") + public T getClusterByClusterUIDOrMakeProxy(ClusterUID uid) throws DatabaseException, AcornAccessVerificationException, IllegalAcornStateException { + return (T) getClusterByClusterUIDOrMake(uid); + } + + @SuppressWarnings("unchecked") + public T getClusterProxyByResourceKey(int resourceKey) throws DatabaseException, AcornAccessVerificationException, IllegalAcornStateException { + + if(VERIFY) verifyAccess(); + + return (T) getClusterByClusterKey(resourceKey >> 12); + + } + + public int getClusterKeyByUID(long id1, long id2) throws DatabaseException, AcornAccessVerificationException { + + if(VERIFY) verifyAccess(); + + return getClusterKeyByClusterUIDOrMake(ClusterUID.make(id1, id2)); + + } + + public int getClusterKeyByUIDWithoutMutex(long id1, long id2) throws DatabaseException, 
IllegalAcornStateException { + acquireMutex(); + try { + return getClusterKeyByClusterUIDOrMake(ClusterUID.make(id1, id2)); + } catch (Throwable t) { + throw new IllegalAcornStateException(t); + } finally { + releaseMutex(); + } + } + + + public static void main(String[] args) throws Exception { + + long start = System.nanoTime(); + + final TIntIntHashMap map = new TIntIntHashMap(0, 0.9f); + + AtomicInteger counter = new AtomicInteger(0); + AtomicBoolean written = new AtomicBoolean(false); + + //final Semaphore ws = new Semaphore(1); + + Thread write = new Thread() { + + @Override + public void run() { + try { + for(int i=0;i<100000000;i++) { + synchronized(map) { +// ws.acquire(); + map.put(i, i); +// ws.release(); + } + //if((i & 0xfffff) == 0) System.err.println("Write " + i); + counter.incrementAndGet(); + } + written.set(true); + } catch (Throwable e) { + e.printStackTrace(); + } + } + + }; + write.start(); + + Thread read = new Thread() { + + @Override + public void run() { + try { + while(!written.get()) { + double r = Math.random(); + double max = counter.get(); + int key = (int)(max*r); + int value = map.get(key); + if(key != value) { + //System.err.println("Read failed " + key + " vs. " + value); + //ws.acquire(); + synchronized(map) { + value = map.get(key); + if(key != value) { + System.err.println("Read failed for real " + key + " vs. 
" + value); + } + //ws.release(); + } + } + //if((key & 0xfffff) == 0) System.err.println("Read " + key); + } + } catch (Throwable e) { + e.printStackTrace(); + } + } + + }; + read.start(); + + write.join(); + read.join(); + + long duration = System.nanoTime() - start; + System.err.println("took " + 1e-9*duration + "s."); + + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterStreamChunk.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterStreamChunk.java new file mode 100644 index 000000000..57d6f6a04 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterStreamChunk.java @@ -0,0 +1,300 @@ +package org.simantics.acorn.lru; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.Path; +import java.util.ArrayList; + +import org.simantics.acorn.ClusterManager; +import org.simantics.acorn.Persistable; +import org.simantics.acorn.exception.AcornAccessVerificationException; +import org.simantics.acorn.exception.IllegalAcornStateException; +import org.simantics.acorn.internal.ClusterChange; +import org.simantics.acorn.internal.UndoClusterUpdateProcessor; +import org.simantics.compressions.CompressionCodec; +import org.simantics.compressions.Compressions; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.service.Bytes; +import org.simantics.utils.datastructures.Pair; + +import gnu.trove.list.array.TByteArrayList; + +public class ClusterStreamChunk extends LRUObject implements Persistable { + + // 500KB is a fine chunk + private static int MAX_CHUNK_SIZE = 500*1024; + + int size = 0; + final private ClusterManager manager; + private boolean committed = false; + + public int nextToProcess = 0; + + public ArrayList operations = new ArrayList(); + + // Stub + public ClusterStreamChunk(ClusterManager manager, LRU LRU, Path readDir, String id, int offset, int length) throws AcornAccessVerificationException { + super(LRU, id, readDir, 
"clusterStream", offset, length, false, false); + this.manager = manager; + LRU.map(this); + } + + // Creation + public ClusterStreamChunk(ClusterManager manager, LRU LRU, String id) throws AcornAccessVerificationException { + super(LRU, id, LRU.getDirectory(), "clusterStream", true, true); + this.manager = manager; + LRU.insert(this, accessTime); + } + + public UndoClusterUpdateProcessor getUndoProcessor(ClusterManager clusters, int chunkOffset, String ccsId) throws DatabaseException { + + if(VERIFY) verifyAccess(); + + makeResident(true); + + ClusterUpdateOperation op = operations.get(chunkOffset); + if(op == null) throw new IllegalAcornStateException("Cluster Update Operation " + ccsId + " was not found."); + if(op.ccs == null) throw new IllegalAcornStateException("Cluster ChangeSet " + ccsId + " was not found."); + + UndoClusterUpdateProcessor proc = new UndoClusterUpdateProcessor(clusters, this, op.ccs); + if(proc.version != ClusterChange.VERSION) + return null; + + // This cluster and CCS can still be under preparation => wait + clusters.clusterLRU.ensureUpdates(proc.getClusterUID()); + + proc.process(); + + cancelForceResident(); + + return proc; + + } + + public void addOperation(ClusterUpdateOperation op) throws IllegalAcornStateException { + if(committed) + throw new IllegalAcornStateException("Cannot add operation " + op + " to " + this + " if commited == true"); + operations.add(op); + size += op.data.length; +// if(isCommitted()) { +// LRU.refresh(this); +// } + } + + public byte[] getOperation(int index) { + return operations.get(index).data; + } + + public void commit() { + committed = true; + } + + public boolean isCommitted() { + if(size > MAX_CHUNK_SIZE) committed = true; + return committed; + } + + @Override + public boolean canBePersisted() throws AcornAccessVerificationException { + if(!super.canBePersisted()) return false; + if(!isCommitted()) return false; + for(ClusterUpdateOperation op : operations) { + if(!op.finished) return false; + } + 
return true; + } + + private static void writeLE(TByteArrayList bytes, int value) { + + bytes.add( (byte) (value & 0xFF)); + bytes.add((byte) ((value >>> 8) & 0xFF)); + bytes.add((byte) ((value >>> 16) & 0xFF)); + bytes.add((byte) ((value >>> 24) & 0xFF)); + + } + + final public static void writeLE8(TByteArrayList bytes, long value) { + + bytes.add( (byte) (value & 0xFF)); + bytes.add((byte) ((value >>> 8) & 0xFF)); + bytes.add((byte) ((value >>> 16) & 0xFF)); + bytes.add((byte) ((value >>> 24) & 0xFF)); + bytes.add((byte) ((value >>> 32) & 0xFF)); + bytes.add((byte) ((value >>> 40) & 0xFF)); + bytes.add((byte) ((value >>> 48) & 0xFF)); + bytes.add((byte) ((value >>> 56) & 0xFF)); + + } + + @Override + protected Pair toBytes() { + + assert(isCommitted()); + + TByteArrayList raw = new TByteArrayList(); + + writeLE(raw, operations.size()); + + for(ClusterUpdateOperation op : operations) { + + writeLE(raw, op.data.length); + raw.add(op.data); + op.data = null; + + writeLE(raw, op.ccs.statementMask.size()); + raw.add(op.ccs.statementMask.toArray()); + writeLE(raw, op.ccs.oldValueEx.size()); + raw.add(op.ccs.oldValueEx.toArray()); + writeLE(raw, op.ccs.oldValues.size()); + + for(byte[] oldValue : op.ccs.oldValues) { + int len = (oldValue != null ? 
oldValue.length : -1); + writeLE(raw, len); + if(oldValue != null) { + raw.add(oldValue); + } + } + + } + + byte[] raw_ = raw.toArray(); + CompressionCodec codec = Compressions.get(Compressions.LZ4); + ByteBuffer input = ByteBuffer.wrap(raw_); + ByteBuffer output = ByteBuffer.allocate(raw_.length + raw_.length/8); + int compressedSize = codec.compressBuffer(input, 0, raw_.length, output, 0); + + // We append inflated size - cannot prepend since decompression cannot handle offsets in input + final byte[] rawOutput = new byte[compressedSize+4]; + output.get(rawOutput,0,compressedSize); + Bytes.writeLE(rawOutput, compressedSize, raw_.length); + + release(); + + return Pair.make(rawOutput, rawOutput.length); + + } + + @Override + void release() { + + for(ClusterUpdateOperation op : operations) { + op.data = null; + op.ccs = null; + } + + } + + static class StreamDecompressor { + +// byte[] decompressBuffer = new byte[1024*1024]; + + public synchronized byte[] decompressBuffer(byte[] compressed) throws IOException { + + int deflatedSize = Bytes.readLE4(compressed, compressed.length-4); + + byte[] result = new byte[deflatedSize]; + +// if(decompressBuffer.length < deflatedSize) +// decompressBuffer = new byte[Math.max(3*decompressBuffer.length / 2, deflatedSize)]; + + CompressionCodec codec = Compressions.get(Compressions.LZ4); + + ByteBuffer input = ByteBuffer.wrap(compressed); + ByteBuffer output = ByteBuffer.wrap(result); + + int decompressedSize = codec.decompressBuffer(input, 0, compressed.length-4, output, 0, result.length); + assert(decompressedSize == deflatedSize); + + return result; + + } + + + } + + private static StreamDecompressor decompressor = new StreamDecompressor(); + + @Override + public void fromFile(byte[] data_) throws IllegalAcornStateException, AcornAccessVerificationException { + + try { + + byte[] data = decompressor.decompressBuffer(data_); + + operations = new ArrayList(); + + int offset = 0; + int opLen = Bytes.readLE4(data, offset); + offset 
+= 4; + + for(int i=0;i(oldValuesSize); + for(int j=0;j LRU, String id, int size) throws AcornAccessVerificationException { + super(LRU, id, LRU.getDirectory(), id.toString() + ".extFile", true, true); + this.bytes = new TByteArrayList(size); + LRU.insert(this, accessTime); + } + + public byte[] getResourceFile() throws AcornAccessVerificationException, IllegalAcornStateException { + + if(VERIFY) verifyAccess(); + + makeResident(); + return bytes.toArray(); + } + + + public ResourceSegment getResourceSegment(final byte[] clusterUID, final int resourceIndex, final long segmentOffset, short segmentSize) throws AcornAccessVerificationException, IllegalAcornStateException { + + if(VERIFY) verifyAccess(); + + makeResident(); + try { + int segSize = segmentSize; + if (segSize < 0) + segSize += 65536; + if (segmentSize == -1) + segSize = Math.min(65535, bytes.size()); + + final long valueSize = bytes.size(); + final byte[] segment = bytes.toArray((int) segmentOffset, segSize); + + return new ResourceSegment() { + + @Override + public long getValueSize() { + return valueSize; + } + + @Override + public byte[] getSegment() { + return segment; + } + + @Override + public int getResourceIndex() { + return resourceIndex; + } + + @Override + public long getOffset() { + return segmentOffset; + } + + @Override + public byte[] getClusterId() { + return clusterUID; + } + }; + } catch (Throwable t) { + throw new IllegalAcornStateException(t); + } + } + + public void updateData(byte[] newBytes, long offset, long pos, long size) throws AcornAccessVerificationException, IllegalAcornStateException { + + if(VERIFY) verifyAccess(); + makeResident(); + + if(size == 0) { + bytes.remove((int)offset, (int)(bytes.size()-offset)); + } else { + bytes.fill((int) (offset + size), (int) (offset + size), (byte) 0); + bytes.set((int) offset, newBytes, (int) pos, (int) size); + } + + setDirty(); + + } + + @Override + public Pair toBytes() { + byte[] result = bytes.toArray(); + release(); + return 
Pair.make(result, result.length); + } + + @Override + protected void release() { + bytes = null; + } + + @Override + public void fromFile(byte[] data) { + bytes = new TByteArrayList(data); + } + + @Override + protected String getExtension() { + return "extFile"; + } + + @Override + protected boolean overwrite() { + return true; + } + +} \ No newline at end of file diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRU.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRU.java new file mode 100644 index 000000000..323d66d3d --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRU.java @@ -0,0 +1,590 @@ +package org.simantics.acorn.lru; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.Semaphore; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; + +import org.simantics.acorn.ClusterManager; +import org.simantics.acorn.GraphClientImpl2; +import org.simantics.acorn.exception.AcornAccessVerificationException; +import org.simantics.acorn.exception.IllegalAcornStateException; +import org.simantics.db.common.utils.Logger; + +/* + * The order rule of synchronization for LRU and LRUObject is: + * § Always lock LRUObject first! 
+ * + */ + +public class LRU> { + + public static boolean VERIFY = true; + + final private long swapTime = 5L*1000000000L; + final private int swapSize = 200; + + final private HashMap map = new HashMap(); + final private TreeMap priorityQueue = new TreeMap(); + + final private Semaphore mutex = new Semaphore(1); + final private String identifier; + + private Path writeDir; + + private Thread mutexOwner; + + public Map pending = new HashMap(); + + protected final ClusterManager manager; + + public LRU(ClusterManager manager, String identifier, Path writeDir) { + this.manager = manager; + this.identifier = identifier; + this.writeDir = writeDir; + resume(); + } + + /* + * Public interface + */ + + public void acquireMutex() throws IllegalAcornStateException { + try { + while(!mutex.tryAcquire(3, TimeUnit.SECONDS)) { + System.err.println("Mutex is taking a long time to acquire - owner is " + mutexOwner); + } + if(VERIFY) + mutexOwner = Thread.currentThread(); + } catch (InterruptedException e) { + throw new IllegalAcornStateException(e); + } + } + + public void releaseMutex() { + mutex.release(); + mutexOwner = null; + } + + public void shutdown() { + if (GraphClientImpl2.DEBUG) + System.err.println("Shutting down LRU writers " + writers); + writers.shutdown(); + try { + writers.awaitTermination(60, TimeUnit.SECONDS); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + } + + public void resume() { + writers = new ScheduledThreadPoolExecutor(2, new ThreadFactory() { + + @Override + public Thread newThread(Runnable r) { + return new Thread(r, identifier + " File Writer"); + } + }); + if (GraphClientImpl2.DEBUG) + System.err.println("Resuming LRU writers " + writers); + } + + /* + * This method violates the synchronization order rule between LRU and MapVAlue + * External synchronization is used to ensure correct operation + */ + public void persist(ArrayList state) throws IllegalAcornStateException { + + acquireMutex(); + try { + for (MapValue value : 
values()) { + value.acquireMutex(); + // for debugging purposes + boolean persisted = false; + try { + // Persist the value if needed + persisted = value.persist(); + } finally { + // WriteRunnable may want to + value.releaseMutex(); + } + // Wait pending if value was actually persisted + waitPending(value, false); + // Take lock again + value.acquireMutex(); + try { + // Record the value + state.add(value.getStateKey()); + } finally { + value.releaseMutex(); + } + } + } catch (IllegalAcornStateException e) { + throw e; + } catch (IOException e) { + throw new IllegalAcornStateException("Unable to waitPending for " + this.identifier, e); + } catch (Throwable t) { + throw new IllegalAcornStateException("Fatal error occured for " + this.identifier, t); + } finally { + releaseMutex(); + } + } + + public MapValue getWithoutMutex(MapKey key) throws AcornAccessVerificationException, IllegalAcornStateException { + + acquireMutex(); + try { + return get(key); + } finally { + releaseMutex(); + } + } + + public MapValue get(MapKey key) throws AcornAccessVerificationException { + + if(VERIFY) verifyAccess(); + + return map.get(key); + } + + public void map(MapValue info) throws AcornAccessVerificationException { + + if(VERIFY) verifyAccess(); + + map.put(info.getKey(), info); + } + + public Collection values() throws AcornAccessVerificationException { + + if(VERIFY) verifyAccess(); + + return map.values(); + } + + public boolean swapForced() throws IllegalAcornStateException, AcornAccessVerificationException { + + acquireMutex(); + + try { + return swap(0, 0, null); + } finally { + releaseMutex(); + } + + } + + public boolean swap(long lifeTime, int targetSize) throws AcornAccessVerificationException, IllegalAcornStateException { + + if(VERIFY) verifyAccess(); + + return swap(lifeTime, targetSize, null); + } + + /* + * This is called under global lock + */ + public void setWriteDir(Path dir) { + + this.writeDir = dir; + } + + + /* + * Package access + */ + + void 
insert(MapValue info, long accessTime) throws AcornAccessVerificationException { + + if(VERIFY) verifyAccess(); + + map.put(info.getKey(), info); + priorityQueue.put(accessTime, info.getKey()); + } + + /* + * We have access to ClusterLRU - try to refresh value if available + */ + boolean tryRefresh(MapValue info) throws AcornAccessVerificationException, IllegalAcornStateException { + + if(VERIFY) verifyAccess(); + + if(!info.tryAcquireMutex()) + return false; + + try { + priorityQueue.remove(info.getLastAccessTime()); + info.accessed(); + map.put(info.getKey(), info); + priorityQueue.put(info.getLastAccessTime(), info.getKey()); + return true; + } finally { + info.releaseMutex(); + } + } + + /* + * We have access to MapValue and no access to clusterLRU + */ + void refresh(MapValue info, boolean needMutex) throws AcornAccessVerificationException, IllegalAcornStateException { + + if(VERIFY) { + if(!needMutex) verifyAccess(); + info.verifyAccess(); + } + + if(needMutex) + acquireMutex(); + + try { + + priorityQueue.remove(info.getLastAccessTime()); + info.accessed(); + map.put(info.getKey(), info); + priorityQueue.put(info.getLastAccessTime(), info.getKey()); + + } catch (AcornAccessVerificationException e) { + throw e; + } catch (Throwable t) { + throw new IllegalAcornStateException(t); + } finally { + if(needMutex) + releaseMutex(); + } + } + + /* + * Private implementation + */ + + int size() throws AcornAccessVerificationException { + if(VERIFY) verifyAccess(); + return priorityQueue.size(); + } + + boolean swap(MapKey excluded) throws AcornAccessVerificationException, IllegalAcornStateException { + if(VERIFY) verifyAccess(); + return swap(swapTime, swapSize, excluded); + } + + boolean swap(long lifeTime, int targetSize, MapKey excluded) throws AcornAccessVerificationException, IllegalAcornStateException { + + if(VERIFY) verifyAccess(); + + MapValue valueToSwap = getValueToSwap(lifeTime, targetSize, excluded); + if(valueToSwap != null) { + + 
if(valueToSwap.tryAcquireMutex()) { + try { + if(valueToSwap.canBePersisted()) { + valueToSwap.persist(); + return true; + } + } catch (Throwable t) { + throw new IllegalAcornStateException(t); + } finally { + valueToSwap.releaseMutex(); + } + } + } + return false; + } + + private MapValue getValueToSwap1(long lifeTime, int targetSize, MapKey excluded) throws AcornAccessVerificationException, IllegalAcornStateException { + + if(VERIFY) verifyAccess(); + + for(int i=0;i<10;i++) { + + long candidate = getSwapCandidate(lifeTime, targetSize); + if(candidate == 0) return null; + + MapKey key = priorityQueue.remove(candidate); + if(key.equals(excluded)) { + tryRefresh(map.get(key)); + continue; + } + + return map.get(key); + } + return null; + } + + + private MapValue getValueToSwap(long lifeTime, int targetSize, MapKey excluded) throws AcornAccessVerificationException, IllegalAcornStateException { + + if(VERIFY) verifyAccess(); + + for(int i=0;i<10;i++) { + + // Lock LRU and get a candidate + MapValue value = getValueToSwap1(lifeTime, targetSize, excluded); + if(value == null) return null; + + if(value.tryAcquireMutex()) { + + try { + // This may lock the object + if(value.canBePersisted()) + return value; + // Insert back the value + refresh(value, false); + } finally { + value.releaseMutex(); + } + } + } + return null; + } + + private long getSwapCandidate(long lifeTime, int targetSize) throws AcornAccessVerificationException { + + if(VERIFY) verifyAccess(); + + if(priorityQueue.isEmpty()) return 0; + + long currentTime = System.nanoTime(); + Long lowest = priorityQueue.firstKey(); + + if(currentTime - lowest > lifeTime || priorityQueue.size() > targetSize) { + return lowest; + } + + return 0; + + } + + /* + * Tries to persist this object. Can fail if the object cannot be persisted at this time. 
+ * + */ + boolean persist(Object object_) throws AcornAccessVerificationException { + + MapValue object = (MapValue)object_; + + if(VERIFY) object.verifyAccess(); + + if(object.isDirty()) { + // It is possible that this just became unpersistable. Fail here in this case. + if(!object.canBePersisted()) { + return false; + } + + assert(object.isResident()); + + Path f = writeDir.resolve(object.getFileName()); + + WriteRunnable runnable = new WriteRunnable(f, object); + + synchronized(pending) { + WriteRunnable existing = pending.put(object.getKey().toString(), runnable); + assert(existing == null); + } + + writers.execute(runnable); + + object.setResident(false); + object.setDirty(false); + + return true; + + } else if(object.isResident()) { + + object.release(); + object.setResident(false); + return false; + } + return false; + } + + int makeResident(Object object_, boolean keepResident) throws AcornAccessVerificationException, IllegalAcornStateException { + + MapValue object = (MapValue)object_; + + if(VERIFY) object.verifyAccess(); + + try { + object.setForceResident(keepResident); + + if(object.isResident()) { + refresh(object, true); + return 0; + } + + waitPending(object, true); + + byte[] data = object.readFile(); + + object.fromFile(data); + object.setResident(true); + + acquireMutex(); + try { + refresh(object, false); + swap(swapTime, swapSize, object.getKey()); + } finally { + releaseMutex(); + } + return data.length; + } catch (IOException e) { + throw new IllegalAcornStateException("Unable to makeResident " + identifier, e); + } + } + + static int readCounter = 0; + static int writeCounter = 0; + + ScheduledThreadPoolExecutor writers; + + void waitPending(MapValue value, boolean hasMutex) throws IOException, AcornAccessVerificationException, IllegalAcornStateException { + + WriteRunnable runnable = null; + boolean inProgress = false; + synchronized(pending) { + runnable = pending.get(value.getKey().toString()); + if(runnable != null) { + 
synchronized(runnable) { + if(runnable.committed) { + // just being written - just need to wait + inProgress = true; + } else { + runnable.committed = true; + // we do the writing + } + } + } + } + if(runnable != null) { + if(inProgress) { +// System.err.println("reader waits for WriteRunnable to finish"); + try { + if(hasMutex) { + runnable.borrowMutex = true; + } + runnable.s.acquire(); + } catch (InterruptedException e) { + throw new IllegalAcornStateException(e); + } + } else { +// System.err.println("reader took WriteRunnable"); + runnable.runReally(hasMutex); + } + } + } + + public class WriteRunnable implements Runnable { + + private Path bytes; + private MapValue impl; + private boolean committed = false; + private boolean borrowMutex = false; + private Semaphore s = new Semaphore(0); + + WriteRunnable(Path bytes, MapValue impl) { + this.bytes = bytes; + this.impl = impl; + } + + @Override + public void run() { + try { + synchronized(impl) { + + synchronized(this) { + + if(committed) + return; + + committed = true; + } + runReally(false); + } + } catch (Throwable t) { + if (t instanceof IllegalAcornStateException) { + manager.notSafeToMakeSnapshot((IllegalAcornStateException)t); + } else { + manager.notSafeToMakeSnapshot(new IllegalAcornStateException(t)); + } + t.printStackTrace(); + Logger.defaultLogError(t); + } + } + + public void runWithMutex() throws IOException, IllegalAcornStateException, AcornAccessVerificationException { + + try { + // These have been set in method persist + assert (!impl.isResident()); + assert (!impl.isDirty()); + + impl.toFile(bytes); + } finally { + synchronized (pending) { + pending.remove(impl.getKey().toString()); + s.release(Integer.MAX_VALUE); + } + } + + } + + // Fix WriteRunnable.runReally() to use LRU.MapValue mutex instead of + // borrowMutex + public void runReally(boolean hasMutex) throws IOException, IllegalAcornStateException, AcornAccessVerificationException { + + if (hasMutex) { + + runWithMutex(); + + } else { 
+ + boolean gotMutex = impl.tryAcquireMutex(); + + boolean done = false; + while (!done) { + + if (gotMutex || borrowMutex) { + runWithMutex(); + done = true; + } else { + System.err.println("Retry mutex acquire"); + gotMutex = impl.tryAcquireMutex(); + } + + } + + if (gotMutex) + impl.releaseMutex(); + + } + + } + } + + public Path getDirectory() { + return writeDir; + } + + /* + * Protected implementation + * + */ + + protected void verifyAccess() throws AcornAccessVerificationException { + if (mutex.availablePermits() != 0) + throw new AcornAccessVerificationException("identifier=" + identifier + " mutex has " + mutex.availablePermits() + " available permits, should be 0! Current mutexOwner is " + mutexOwner); + } + + /* + * Private implementation + * + */ + + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRUObject.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRUObject.java new file mode 100644 index 000000000..3194d591e --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRUObject.java @@ -0,0 +1,247 @@ +package org.simantics.acorn.lru; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; + +import org.simantics.acorn.FileIO; +import org.simantics.acorn.Persistable; +import org.simantics.acorn.exception.AcornAccessVerificationException; +import org.simantics.acorn.exception.IllegalAcornStateException; +import org.simantics.utils.datastructures.Pair; + +public abstract class LRUObject> implements Persistable { + + public static boolean VERIFY = true; + + // Final stuff + final protected LRU LRU; + final private Semaphore mutex = new Semaphore(1); + final private MapKey key; + final private String fileName; + + // Mutable stuff + protected long accessTime = AccessTime.getInstance().getAccessTime(); + private int offset; + private int length; + private boolean resident = true; + private boolean dirty = true; + 
private boolean forceResident = false; + + // DEBUG +// private boolean isForceResidentSetAfterLastGet = false; + + private Path readDirectory; + + private Thread mutexOwner; + + // for loading + public LRUObject(LRU LRU, MapKey key, Path readDirectory, String fileName, int offset, int length, boolean dirty, boolean resident) { + this.LRU = LRU; + this.key = key; + this.fileName = fileName; + this.offset = offset; + this.length = length; + this.readDirectory = readDirectory; + this.dirty = dirty; + this.resident = resident; + } + + // for creating + public LRUObject(LRU LRU, MapKey key, Path readDirectory, String fileName, boolean dirty, boolean resident) { + this(LRU, key, readDirectory, fileName, -1, -1, dirty, resident); + } + + /* + * Public interface + */ + public MapKey getKey() { + // This can be called without mutex + return key; + } + + public void acquireMutex() throws IllegalAcornStateException { + try { + while(!mutex.tryAcquire(3, TimeUnit.SECONDS)) { + System.err.println("Mutex is taking a long time to acquire - owner is " + mutexOwner); + } + + if(VERIFY) + mutexOwner = Thread.currentThread(); + + } catch (InterruptedException e) { + throw new IllegalAcornStateException(e); + } + } + + public boolean tryAcquireMutex() { + return mutex.tryAcquire(); + } + + public void releaseMutex() { + mutex.release(); + } + + @Override + public void toFile(Path bytes) throws IOException { + if(VERIFY) { + try { + verifyAccess(); + } catch (AcornAccessVerificationException e) { + throw new IOException("Exception occured during toFile for file " + fileName, e); + } + } + try { + Pair pair = toBytes(); + byte[] data = pair.first; + int length = pair.second; + FileIO fio = FileIO.get(bytes); + int offset = fio.saveBytes(data, length, overwrite()); + setPosition(offset, length); + } catch (AcornAccessVerificationException | IllegalAcornStateException e) { + throw new IOException("Exception occured during toFile for file " + fileName, e); + } + } + + public int 
makeResident() throws AcornAccessVerificationException, IllegalAcornStateException { + if(VERIFY) verifyAccess(); + return LRU.makeResident(this, false); + } + + public int makeResident(boolean keepResident) throws AcornAccessVerificationException, IllegalAcornStateException { + if(VERIFY) verifyAccess(); + return LRU.makeResident(this, true); + } + + /* + * Package implementation details + */ + + abstract void release(); + abstract String getExtension(); + + String getStateKey() throws IllegalAcornStateException, AcornAccessVerificationException { + String result = getKey().toString() + "#" + getDirectory().getFileName() + "#" + getOffset() + "#" + getLength(); + if(offset == -1) + throw new IllegalAcornStateException(result); + return result; + } + + long getLastAccessTime() throws AcornAccessVerificationException { + if(VERIFY) verifyAccess(); + return accessTime; + } + + void accessed() throws AcornAccessVerificationException { + if(VERIFY) verifyAccess(); + accessTime = AccessTime.getInstance().getAccessTime(); + } + + boolean persist() throws AcornAccessVerificationException { + if(VERIFY) verifyAccess(); + if(LRU.persist(this)) { + readDirectory = LRU.getDirectory(); + return true; + } else { + return false; + } + } + + void setForceResident(boolean value) throws AcornAccessVerificationException { + if(VERIFY) verifyAccess(); + forceResident = value; +// isForceResidentSetAfterLastGet = true; + } + + boolean canBePersisted() throws AcornAccessVerificationException { + if(VERIFY) verifyAccess(); +// isForceResidentSetAfterLastGet = false; + return !forceResident; + } + + boolean isDirty() throws AcornAccessVerificationException { + if(VERIFY) verifyAccess(); + return dirty; + } + + boolean isResident() throws AcornAccessVerificationException { + if(VERIFY) verifyAccess(); + return resident; + } + + String getFileName() throws AcornAccessVerificationException { + if(VERIFY) verifyAccess(); + return fileName; + } + + void setResident(boolean value) throws 
AcornAccessVerificationException { + if(VERIFY) verifyAccess(); + resident = value; + } + + void setDirty(boolean value) throws AcornAccessVerificationException { + if(VERIFY) verifyAccess(); + dirty = value; + } + + byte[] readFile() throws IOException, AcornAccessVerificationException { + if(VERIFY) verifyAccess(); + Path dir = getDirectory(); + Path f = dir.resolve(getFileName()); + FileIO fio = FileIO.get(f); + return fio.readBytes(getOffset(), getLength()); + } + + /* + * Protected implementation details + */ + + abstract protected boolean overwrite(); + + abstract protected Pair toBytes() throws IllegalAcornStateException; + + protected void setDirty() throws AcornAccessVerificationException { + if(VERIFY) verifyAccess(); + dirty = true; + } + + protected void verifyAccess() throws AcornAccessVerificationException { + if (mutex.availablePermits() != 0) + throw new AcornAccessVerificationException("fileName=" + fileName + " mutex has " + mutex.availablePermits() + " available permits, should be 0! 
Current mutexOwner is " + mutexOwner); + } + + protected synchronized void cancelForceResident() throws AcornAccessVerificationException { + setForceResident(false); + } + + /* + * Private implementation details + */ + + private int getOffset() throws AcornAccessVerificationException { + if(VERIFY) verifyAccess(); + return offset; + } + + private int getLength() throws AcornAccessVerificationException { + if(VERIFY) verifyAccess(); + return length; + } + + private void setPosition(int offset, int length) throws AcornAccessVerificationException, IllegalAcornStateException { + if(VERIFY) verifyAccess(); + if(offset == -1) + throw new IllegalAcornStateException("offset == -1 for " + fileName + " in " + readDirectory.toAbsolutePath() + ", dirty=" + dirty + ", resident=" + resident + ", forceResident=" + forceResident); + this.offset = offset; + this.length = length; + if(overwrite() && offset > 0) + throw new IllegalAcornStateException("overwrite() == true && offset > 0 for " + fileName + " in " + readDirectory.toAbsolutePath() + ", dirty=" + dirty + ", resident=" + resident + ", forceResident=" + forceResident); + } + + private Path getDirectory() throws AcornAccessVerificationException { + if(VERIFY) verifyAccess(); + return readDirectory; + } + +} \ No newline at end of file diff --git a/bundles/org.simantics.acorn/src/org/simantics/db/javacore/HeadState.java b/bundles/org.simantics.acorn/src/org/simantics/db/javacore/HeadState.java new file mode 100644 index 000000000..0fb29333b --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/db/javacore/HeadState.java @@ -0,0 +1,73 @@ +package org.simantics.db.javacore; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.Serializable; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; + +import 
org.simantics.acorn.exception.InvalidHeadStateException; + +public class HeadState implements Serializable { + + private static final long serialVersionUID = -4135031566499790077L; + + public int headChangeSetId = 0; + public long transactionId = 1; + public long reservedIds = 3; + + public ArrayList clusters = new ArrayList<>(); + public ArrayList files = new ArrayList<>(); + public ArrayList stream = new ArrayList<>(); + public ArrayList cs = new ArrayList<>(); +// public ArrayList ccs = new ArrayList(); + + public static HeadState load(Path directory) throws InvalidHeadStateException { + Path f = directory.resolve("head.state"); + try { + byte[] bytes = Files.readAllBytes(f); + MessageDigest sha1 = MessageDigest.getInstance("SHA-1"); + int digestLength = sha1.getDigestLength(); + sha1.update(bytes, digestLength, bytes.length - digestLength); + byte[] newChecksum = sha1.digest(); + if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) { + throw new InvalidHeadStateException( + "Checksum " + Arrays.toString(newChecksum) + " does not match excpected " + + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + f.toAbsolutePath()); + } + try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes, digestLength, bytes.length - digestLength))) { + HeadState state = (HeadState) ois.readObject(); + return state; + } + } catch (IOException i) { + return new HeadState(); + } catch (ClassNotFoundException c) { +// throw new Error("HeadState class not found", c); + return new HeadState(); + } catch (NoSuchAlgorithmException e) { + throw new Error("SHA-1 Algorithm not found", e); + } + } + + public static void validateHeadStateIntegrity(Path headState) throws InvalidHeadStateException, IOException { + try { + byte[] bytes = Files.readAllBytes(headState); + MessageDigest sha1 = MessageDigest.getInstance("SHA-1"); + int digestLength = sha1.getDigestLength(); + sha1.update(bytes, digestLength, bytes.length - 
digestLength); + byte[] newChecksum = sha1.digest(); + if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) { + throw new InvalidHeadStateException( + "Checksum " + Arrays.toString(newChecksum) + " does not match excpected " + + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + headState.toAbsolutePath()); + } + } catch (NoSuchAlgorithmException e) { + throw new Error("SHA-1 digest not found, should not happen", e); + } + } +} diff --git a/bundles/org.simantics.annotation.ui/META-INF/MANIFEST.MF b/bundles/org.simantics.annotation.ui/META-INF/MANIFEST.MF index e962c736e..9435da36d 100644 --- a/bundles/org.simantics.annotation.ui/META-INF/MANIFEST.MF +++ b/bundles/org.simantics.annotation.ui/META-INF/MANIFEST.MF @@ -16,7 +16,8 @@ Require-Bundle: org.eclipse.ui, org.simantics.browsing.ui.model;bundle-version="1.0.0", org.simantics.modeling.ui;bundle-version="1.1.1", org.simantics.graph.db;bundle-version="1.1.9", - org.simantics.views.swt.client;bundle-version="1.0.0" + org.simantics.views.swt.client;bundle-version="1.0.0", + org.slf4j.api;bundle-version="1.7.20" Bundle-ActivationPolicy: lazy Bundle-RequiredExecutionEnvironment: JavaSE-1.8 Bundle-Vendor: VTT Technical Research Centre of Finland diff --git a/bundles/org.simantics.annotation.ui/src/org/simantics/annotation/ui/SCL.java b/bundles/org.simantics.annotation.ui/src/org/simantics/annotation/ui/SCL.java index 7a7977f33..f4f99997c 100644 --- a/bundles/org.simantics.annotation.ui/src/org/simantics/annotation/ui/SCL.java +++ b/bundles/org.simantics.annotation.ui/src/org/simantics/annotation/ui/SCL.java @@ -34,7 +34,6 @@ import org.simantics.db.VirtualGraph; import org.simantics.db.WriteGraph; import org.simantics.db.common.request.UnaryRead; import org.simantics.db.common.request.WriteRequest; -import org.simantics.db.common.utils.Logger; import org.simantics.db.exception.DatabaseException; import org.simantics.db.layer0.util.Layer0Utils; import 
org.simantics.db.layer0.util.RemoverUtil; @@ -60,6 +59,8 @@ import org.simantics.utils.strings.AlphanumComparator; import org.simantics.utils.ui.ISelectionUtils; import org.simantics.views.swt.client.base.ISWTViewNode; import org.simantics.views.swt.client.impl.SWTExplorer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import gnu.trove.map.hash.THashMap; import gnu.trove.set.hash.THashSet; @@ -69,6 +70,8 @@ import gnu.trove.set.hash.THashSet; * @author Tuukka Lehtonen */ public class SCL { + + private static final Logger LOGGER = LoggerFactory.getLogger(SCL.class); final public static String EMPTY = ""; final public static String MAPPED = "Mapped"; @@ -493,7 +496,7 @@ public class SCL { try { doAdd((Variable)properties.input); } catch (DatabaseException e) { - Logger.defaultLogError(e); + LOGGER.error("newAnnotationModifier failed", e); } return null; @@ -670,7 +673,7 @@ public class SCL { String name = graph.getPossibleRelatedValue(r, L0.HasName, Bindings.STRING); if(name != null) { if (result.put(name, r) != null) - System.err.println(this + ": The database contains siblings with the same name " + name + " (resource=$" + resource.getResourceId() +")."); + LOGGER.error("The database contains siblings with the same name " + name + " (resource=$" + resource.getResourceId() +")."); } } return result; diff --git a/bundles/org.simantics.backup.ontology/build.properties b/bundles/org.simantics.backup.ontology/build.properties index 022de1735..ecdc7c302 100644 --- a/bundles/org.simantics.backup.ontology/build.properties +++ b/bundles/org.simantics.backup.ontology/build.properties @@ -1,6 +1,5 @@ source.. = src/ output.. 
= bin/ -bin.includes = plugin.xml,\ - META-INF/,\ +bin.includes = META-INF/,\ .,\ graph.tg diff --git a/bundles/org.simantics.charts/src/org/simantics/charts/Charts.java b/bundles/org.simantics.charts/src/org/simantics/charts/Charts.java index 1e503159e..1358c4023 100644 --- a/bundles/org.simantics.charts/src/org/simantics/charts/Charts.java +++ b/bundles/org.simantics.charts/src/org/simantics/charts/Charts.java @@ -1,75 +1,75 @@ -package org.simantics.charts; - -import java.util.Collections; -import java.util.List; - -import org.simantics.charts.editor.ChartData; -import org.simantics.charts.editor.ChartKeys; -import org.simantics.databoard.binding.error.BindingException; -import org.simantics.databoard.util.Bean; -import org.simantics.db.ReadGraph; -import org.simantics.db.Resource; -import org.simantics.db.common.request.PossibleIndexRoot; -import org.simantics.db.exception.DatabaseException; -import org.simantics.db.layer0.variable.Variable; -import org.simantics.history.HistoryException; -import org.simantics.history.HistorySamplerItem; -import org.simantics.history.ItemManager; -import org.simantics.history.util.subscription.SamplingFormat; -import org.simantics.modeling.subscription.SubscriptionItem; -import org.simantics.modeling.subscription.SubscriptionItemQuery; -import org.simantics.project.IProject; -import org.simantics.simulation.experiment.IExperiment; - -/** - * Main facade for externally dealing with the trending system. 
- * - * @author Tuukka Lehtonen - * @author Antti Villberg - * - */ -public final class Charts { - - public static void resetChartEditorData(IProject project, Resource model, ChartData editorData) { - if (editorData != null) { - project.setHint(ChartKeys.chartSourceKey(model), editorData); - } else { - project.removeHint(ChartKeys.chartSourceKey(model)); - } - } - - public static HistorySamplerItem createHistorySamplerItem(ReadGraph graph, Variable run, Resource subscriptionItem) throws DatabaseException { - IExperiment exp = (IExperiment) run.getPropertyValue(graph, "iExperiment"); - ITrendSupport support = exp.getService(ITrendSupport.class); - ChartData data = support.getChartData(); - return createHistorySamplerItem(graph, subscriptionItem, data); - } - - public static HistorySamplerItem createHistorySamplerItem(ReadGraph graph, Resource subscriptionItem, ChartData data) throws DatabaseException { - - try { - Resource model = graph.syncRequest(new PossibleIndexRoot(subscriptionItem)); - if (model == null) { - throw new DatabaseException("There is no model for " + subscriptionItem); - } - - ItemManager im = new ItemManager(data.history.getItems()); - - SubscriptionItem i = graph.syncRequest(new SubscriptionItemQuery(subscriptionItem)); - - List items = im.search("variableId", i.variableId); - Collections.sort(items, SamplingFormat.INTERVAL_COMPARATOR); - if (items.isEmpty()) - new DatabaseException("There is history item for " + i.variableId); - Bean config = items.get(0); - String historyId = (String) config.getFieldUnchecked("id"); - - return new HistorySamplerItem(data.collector, data.history, historyId, System.identityHashCode(data)); - } catch (HistoryException e) { - throw new DatabaseException(e); - } catch (BindingException e) { - throw new DatabaseException(e); - } - } - -} +package org.simantics.charts; + +import java.util.Collections; +import java.util.List; + +import org.simantics.charts.editor.ChartData; +import 
org.simantics.charts.editor.ChartKeys; +import org.simantics.databoard.binding.error.BindingException; +import org.simantics.databoard.util.Bean; +import org.simantics.db.ReadGraph; +import org.simantics.db.Resource; +import org.simantics.db.common.request.PossibleIndexRoot; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.layer0.variable.Variable; +import org.simantics.history.HistoryException; +import org.simantics.history.HistorySamplerItem; +import org.simantics.history.ItemManager; +import org.simantics.history.util.subscription.SamplingFormat; +import org.simantics.modeling.subscription.SubscriptionItem; +import org.simantics.modeling.subscription.SubscriptionItemQuery; +import org.simantics.project.IProject; +import org.simantics.simulation.experiment.IExperiment; + +/** + * Main facade for externally dealing with the trending system. + * + * @author Tuukka Lehtonen + * @author Antti Villberg + * + */ +public final class Charts { + + public static void resetChartEditorData(IProject project, Resource model, ChartData editorData) { + if (editorData != null) { + project.setHint(ChartKeys.chartSourceKey(model), editorData); + } else { + project.removeHint(ChartKeys.chartSourceKey(model)); + } + } + + public static HistorySamplerItem createHistorySamplerItem(ReadGraph graph, Variable run, Resource subscriptionItem) throws DatabaseException { + IExperiment exp = (IExperiment) run.getPropertyValue(graph, "iExperiment"); + ITrendSupport support = exp.getService(ITrendSupport.class); + ChartData data = support.getChartData(); + return createHistorySamplerItem(graph, subscriptionItem, data); + } + + public static HistorySamplerItem createHistorySamplerItem(ReadGraph graph, Resource subscriptionItem, ChartData data) throws DatabaseException { + + try { + Resource model = graph.syncRequest(new PossibleIndexRoot(subscriptionItem)); + if (model == null) { + throw new DatabaseException("There is no model for " + subscriptionItem); + } + + 
ItemManager im = new ItemManager(data.history.getItems()); + + SubscriptionItem i = graph.syncRequest(new SubscriptionItemQuery(subscriptionItem)); + + List items = im.search("variableId", i.variableId); + Collections.sort(items, SamplingFormat.INTERVAL_COMPARATOR); + if (items.isEmpty()) + new DatabaseException("There is history item for " + i.variableId); + Bean config = items.get(0); + String historyId = (String) config.getFieldUnchecked("id"); + + return new HistorySamplerItem(data.collector, data.history, historyId, System.identityHashCode(data)); + } catch (HistoryException e) { + throw new DatabaseException(e); + } catch (BindingException e) { + throw new DatabaseException(e); + } + } + +} diff --git a/bundles/org.simantics.compressions/src/org/simantics/compressions/impl/DecompressingInputStream.java b/bundles/org.simantics.compressions/src/org/simantics/compressions/impl/DecompressingInputStream.java index 8c7987177..9329d83c4 100644 --- a/bundles/org.simantics.compressions/src/org/simantics/compressions/impl/DecompressingInputStream.java +++ b/bundles/org.simantics.compressions/src/org/simantics/compressions/impl/DecompressingInputStream.java @@ -134,16 +134,20 @@ public abstract class DecompressingInputStream extends InputStream { return true; } - private static ByteBuffer ensureBufferSize(ByteBuffer buffer, int minCapacity) { + private ByteBuffer ensureBufferSize(ByteBuffer buffer, int minCapacity) { int oldCapacity = buffer != null ? 
buffer.capacity() : 0; if (buffer == null || oldCapacity < minCapacity) { int newCapacity = grow(oldCapacity, minCapacity); //System.out.println("ensureBufferSize(" + oldCapacity + ", " + minCapacity + "), new capacity " + newCapacity); - buffer = ByteBuffer.allocateDirect(newCapacity); + buffer = allocateBuffer(newCapacity); } return buffer; } + protected ByteBuffer allocateBuffer(int capacity) { + return ByteBuffer.allocateDirect(capacity); + } + /** * @param oldCapacity current capacity of a buffer * @param minCapacity diff --git a/bundles/org.simantics.databoard/src/org/simantics/databoard/adapter/AdapterFactory.java b/bundles/org.simantics.databoard/src/org/simantics/databoard/adapter/AdapterFactory.java index 0c07958d5..2dc51e402 100644 --- a/bundles/org.simantics.databoard/src/org/simantics/databoard/adapter/AdapterFactory.java +++ b/bundles/org.simantics.databoard/src/org/simantics/databoard/adapter/AdapterFactory.java @@ -15,6 +15,7 @@ import java.util.ArrayList; import java.util.Map; import org.apache.commons.collections.map.ReferenceMap; +import org.simantics.databoard.Bindings; import org.simantics.databoard.Units; import org.simantics.databoard.binding.ArrayBinding; import org.simantics.databoard.binding.Binding; @@ -854,6 +855,29 @@ public class AdapterFactory { return result; } + if (domain instanceof VariantBinding && !(range instanceof VariantBinding)) + { + // Make a recursive adaptation from a variant source + final VariantBinding domainBinding = (VariantBinding)domain; + final Binding rangeBinding = range; + AbstractAdapter result = new AbstractAdapter() { + @Override + public Object adapt(Object obj) throws AdaptException { + try { + Object value = domainBinding.getContent(obj); + Binding contentBinding = domainBinding.getContentBinding(obj); + AbstractAdapter adapter = (AbstractAdapter) getAdapter(contentBinding, rangeBinding, typeAdapter, mustClone); + return adapter.adapt(value); + } catch (BindingException | AdapterConstructionException e) 
{ + throw new AdaptException(e); + } + } + }; + result.clones = mustClone; + addToCache(req, result); + return result; + } + if (range instanceof VariantBinding && !(domain instanceof VariantBinding)) { // Default to just wrapping the domain type diff --git a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/factory/DefaultBindingFactory.java b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/factory/DefaultBindingFactory.java index 54c1160c5..73f2d2f16 100644 --- a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/factory/DefaultBindingFactory.java +++ b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/factory/DefaultBindingFactory.java @@ -20,6 +20,7 @@ import org.simantics.databoard.binding.impl.BooleanArrayBinding; import org.simantics.databoard.binding.impl.BooleanBindingDefault; import org.simantics.databoard.binding.impl.ByteArrayBinding; import org.simantics.databoard.binding.impl.ByteBindingDefault; +import org.simantics.databoard.binding.impl.DefaultMapBinding; import org.simantics.databoard.binding.impl.DoubleArrayBinding; import org.simantics.databoard.binding.impl.DoubleBindingDefault; import org.simantics.databoard.binding.impl.FloatArrayBinding; @@ -30,7 +31,6 @@ import org.simantics.databoard.binding.impl.LongArrayBinding; import org.simantics.databoard.binding.impl.LongBindingDefault; import org.simantics.databoard.binding.impl.ObjectArrayBinding; import org.simantics.databoard.binding.impl.StringBindingDefault; -import org.simantics.databoard.binding.impl.TreeMapBinding; import org.simantics.databoard.binding.mutable.ContainerOptionalBinding; import org.simantics.databoard.binding.mutable.UnionTaggedObjectBinding; import org.simantics.databoard.type.ArrayType; @@ -180,9 +180,9 @@ public class DefaultBindingFactory extends TypeBindingFactory { return binding; } - if (type instanceof MapType) { + if (type instanceof MapType) { MapType mapType = (MapType) type; - TreeMapBinding 
binding = new TreeMapBinding(mapType, null, null); + DefaultMapBinding binding = new DefaultMapBinding(mapType, null, null); inprogress.put(type, binding); binding.setKeyBinding( construct(mapType.keyType) ); binding.setValueBinding( construct(mapType.valueType) ); diff --git a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/factory/JavaUtilBindingsProvider.java b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/factory/JavaUtilBindingsProvider.java index e43b7bc73..b173cb6eb 100644 --- a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/factory/JavaUtilBindingsProvider.java +++ b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/factory/JavaUtilBindingsProvider.java @@ -28,6 +28,8 @@ import org.simantics.databoard.binding.error.BindingConstructionException; import org.simantics.databoard.binding.impl.ArrayListBinding; import org.simantics.databoard.binding.impl.BooleanArrayBinding; import org.simantics.databoard.binding.impl.ByteArrayBinding; +import org.simantics.databoard.binding.impl.DefaultMapBinding; +import org.simantics.databoard.binding.impl.DefaultSetBinding; import org.simantics.databoard.binding.impl.DoubleArrayBinding; import org.simantics.databoard.binding.impl.FloatArrayBinding; import org.simantics.databoard.binding.impl.HashMapBinding; @@ -101,7 +103,7 @@ public class JavaUtilBindingsProvider implements BindingProvider { if (Set.class.isAssignableFrom(request.getClazz())) { MapType type = new MapType(); type.valueType = Datatypes.VOID; - return new TreeSetBinding(type, null); + return new DefaultSetBinding(type, null); } if (TreeMap.class.isAssignableFrom(request.getClazz())) { @@ -113,7 +115,7 @@ public class JavaUtilBindingsProvider implements BindingProvider { } if (Map.class.isAssignableFrom(request.getClazz())) { - return new HashMapBinding(new MapType(), null, null); + return new DefaultMapBinding(new MapType(), null, null); } return null; diff --git 
a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/factory/TroveBindingsProvider.java b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/factory/TroveBindingsProvider.java index e635e12a0..0f9a4dc76 100644 --- a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/factory/TroveBindingsProvider.java +++ b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/factory/TroveBindingsProvider.java @@ -103,6 +103,9 @@ public class TroveBindingsProvider implements BindingProvider { @Override public Object create(Map initialMap) throws BindingException { + if (initialMap instanceof THashMap) + return initialMap; + // Replace with TreeMap. Create comparator from binding. THashMap result = new THashMap(); putAll(result, initialMap); @@ -134,10 +137,14 @@ public class TroveBindingsProvider implements BindingProvider { } public Object create(Set initialSet) throws BindingException { + if (initialSet instanceof THashSet) + return initialSet; + return new THashSet(initialSet); } - @Override + @SuppressWarnings({ "unchecked", "rawtypes" }) + @Override public Object create(Map initialMap) throws BindingException { return new THashSet(initialMap.keySet()); } diff --git a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/DefaultMapBinding.java b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/DefaultMapBinding.java new file mode 100644 index 000000000..43c1e97aa --- /dev/null +++ b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/DefaultMapBinding.java @@ -0,0 +1,326 @@ +/******************************************************************************* + * Copyright (c) 2010 Association for Decentralized Information Management in + * Industry THTH ry. + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * VTT Technical Research Centre of Finland - initial API and implementation + *******************************************************************************/ +package org.simantics.databoard.binding.impl; + +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeMap; + +import org.simantics.databoard.binding.ArrayBinding; +import org.simantics.databoard.binding.Binding; +import org.simantics.databoard.binding.MapBinding; +import org.simantics.databoard.binding.error.BindingException; +import org.simantics.databoard.type.MapType; + +/** + * Binds java.util.Map to MapType + * + * This Binding type accepts all java.util.Map instances, but creates + * java.util.TreeMap instances by default. 
+ * + * @author Reino Ruusu + */ +@SuppressWarnings("rawtypes") +public class DefaultMapBinding extends MapBinding { + + public DefaultMapBinding(Binding keyBinding, Binding valueBinding) { + super(keyBinding, valueBinding); + } + + public DefaultMapBinding(MapType mapType, Binding keyBinding, + Binding valueBinding) { + super(mapType, keyBinding, valueBinding); + } + + public void postConstruction() { + } + + @Override + public Object create() { + return new TreeMap( keyBinding ); + } + + @SuppressWarnings("unchecked") + @Override + public Object create(Object[] keys, Object[] values) { + if (keys.length != values.length) + throw new IllegalArgumentException("Equal length arrays expected"); + + int len = keys.length; + Map result = new TreeMap( keyBinding ); + + for (int i = 0; i < len; i++) { + Object key = keys[i]; + Object value = values[i]; + result.put(key, value); + } + + return result; + } + + + @SuppressWarnings("unchecked") + @Override + public Object create(List keys, List values) { + if (keys.size()!=values.size()) + throw new IllegalArgumentException("Equal length arrays expected"); + + int len = keys.size(); + Map result = new TreeMap( keyBinding ); + + for (int i=0; i map) { + return map; + } + + @Override + public void clear(Object map) { + ((Map) map).clear(); + } + + @Override + public boolean containsKey(Object map, Object key) { + Map m = ((Map) map); + return m.containsKey(key); + } + + @Override + public boolean containsValue(Object map, Object value) { + Map m = ((Map) map); + Binding vb = getValueBinding(); + for (Object v : m.values()) + { + if (vb.equals(v, value)) return true; + } + return false; + } + + @Override + public Object get(Object map, Object key) { + Map m = ((Map) map); + return m.get(key); + } + + @SuppressWarnings("unchecked") + @Override + public Object[] getKeys(Object map) { + Map m = ((Map) map); + return m.keySet().toArray(new Object[m.size()]); + } + + @SuppressWarnings("unchecked") + @Override + public void 
getKeys(Object map, Set keys) throws BindingException { + Map m = ((Map)map); + keys.addAll(m.keySet()); + } + + /** + * Count the number of entries between two keyes + * @param from + * @param fromInclusive + * @param end + * @param endInclusive + * @throws BindingException + */ + @SuppressWarnings("unchecked") + @Override + public int count(Object src, Object from, boolean fromInclusive, Object end, boolean endInclusive) throws BindingException { + // Assert end > from + if (keyBinding.compare(from, end)>0) return 0; + + if (src instanceof TreeMap) { + TreeMap m = (TreeMap) src; + Map sm = m.subMap(from, fromInclusive, end, endInclusive); + return sm.size(); + } + else { + int result = 0; + Map m = ((Map)src); + for (Object k : m.keySet()) { + int fk = keyBinding.compare(from, k); + int ek = keyBinding.compare(k, end); + boolean fromMatches = fromInclusive ? fk<=0 : fk<0; + boolean endMatches = endInclusive ? ek<=0 : ek <0; + if ( fromMatches && endMatches ) result++; + } + return result; + } + } + + /** + * Read a range of entries + * + * @param src + * @param from + * @param fromInclusive + * @param end + * @param endInclusive + * @param dstKeyArrayBinding + * @param dstKeyArray + * @param dstValueArrayBinding + * @param dstValueArray + * @throws BindingException + */ + public int getEntries(Object src, Object from, boolean fromInclusive, Object end, boolean endInclusive, ArrayBinding dstKeyArrayBinding, Object dstKeyArray, ArrayBinding dstValueArrayBinding, Object dstValueArray, int limit) throws BindingException { + if (src instanceof TreeMap) { + return new TreeMapBinding(keyBinding, valueBinding).getEntries(src, from, fromInclusive, end, endInclusive, dstKeyArrayBinding, dstKeyArray, dstValueArrayBinding, dstValueArray, limit); + } + else { + return new HashMapBinding(keyBinding, valueBinding).getEntries(src, from, fromInclusive, end, endInclusive, dstKeyArrayBinding, dstKeyArray, dstValueArrayBinding, dstValueArray, limit); + } + } + + + 
@SuppressWarnings("unchecked") + @Override + public Object[] getValues(Object map) { + Map m = ((Map) map); + return m.values().toArray(new Object[m.size()]); + } + + @Override + public void put(Object map, K key, V value) { + @SuppressWarnings("unchecked") + Map m = ((Map) map); + m.put(key, value); + } + + @Override + public void putAll(Object dstMap, Map srcMap) { + @SuppressWarnings("unchecked") + Map dst = ((Map) dstMap); + dst.putAll(srcMap); + } + + @SuppressWarnings("unchecked") + @Override + public void getAll(Object mapFrom, Map to) { + Map m = ((Map) mapFrom); + to.putAll(m); + } + + @SuppressWarnings("unchecked") + @Override + public void getAll(Object mapFrom, Object[] keys, Object[] values) { + Map m = (Map) mapFrom; + int i = 0; + for (Entry e : (Set>) m.entrySet()) { + keys[i] = e.getKey(); + values[i] = e.getValue(); + i++; + } + } + + @Override + public Object remove(Object map, Object key) { + Map m = ((Map) map); + return m.remove(key); + } + + @Override + public int size(Object map) { + Map m = ((Map) map); + return m.size(); + } + + @Override + public boolean isInstance(Object obj) { + return obj instanceof Map; + } + + @Override + public int deepHashValue(Object map, IdentityHashMap hashedObjects) throws BindingException { + int result = 0; + Map m = ((Map) map); + @SuppressWarnings("unchecked") + Set s = m.entrySet(); + for (Entry e : s) { + int keyTree = getKeyBinding().deepHashValue(e.getKey(), hashedObjects); + int valueTree = getValueBinding().deepHashValue(e.getValue(), hashedObjects); + result += (keyTree ^ valueTree); + } + return result; + } + + @Override + public Object getCeilingKey(Object map, Object key) { + if (map instanceof TreeMap) { + return new TreeMapBinding(keyBinding, valueBinding).getCeilingKey(map, key); + } + else { + return new HashMapBinding(keyBinding, valueBinding).getCeilingKey(map, key); + } + } + + @Override + public Object getFirstKey(Object map) { + if (map instanceof TreeMap) { + return new 
TreeMapBinding(keyBinding, valueBinding).getFirstKey(map); + } + else { + return new HashMapBinding(keyBinding, valueBinding).getFirstKey(map); + } + } + + @Override + public Object getFloorKey(Object map, Object key) { + if (map instanceof TreeMap) { + return new TreeMapBinding(keyBinding, valueBinding).getFloorKey(map, key); + } + else { + return new HashMapBinding(keyBinding, valueBinding).getFloorKey(map, key); + } + } + + @Override + public Object getHigherKey(Object map, Object key) { + if (map instanceof TreeMap) { + return new TreeMapBinding(keyBinding, valueBinding).getHigherKey(map, key); + } + else { + return new HashMapBinding(keyBinding, valueBinding).getHigherKey(map, key); + } + } + + @Override + public Object getLastKey(Object map) { + if (map instanceof TreeMap) { + return new TreeMapBinding(keyBinding, valueBinding).getLastKey(map); + } + else { + return new HashMapBinding(keyBinding, valueBinding).getLastKey(map); + } + } + + @Override + public Object getLowerKey(Object map, Object key) { + if (map instanceof TreeMap) { + return new TreeMapBinding(keyBinding, valueBinding).getLowerKey(map, key); + } + else { + return new HashMapBinding(keyBinding, valueBinding).getLowerKey(map, key); + } + } +} diff --git a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/DefaultSetBinding.java b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/DefaultSetBinding.java new file mode 100644 index 000000000..783f80b33 --- /dev/null +++ b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/DefaultSetBinding.java @@ -0,0 +1,239 @@ +/******************************************************************************* + * Copyright (c) 2010 Association for Decentralized Information Management in + * Industry THTH ry. + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * VTT Technical Research Centre of Finland - initial API and implementation + *******************************************************************************/ +package org.simantics.databoard.binding.impl; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +import org.simantics.databoard.binding.ArrayBinding; +import org.simantics.databoard.binding.Binding; +import org.simantics.databoard.binding.MapBinding; +import org.simantics.databoard.binding.error.BindingException; +import org.simantics.databoard.binding.reflection.VoidBinding; +import org.simantics.databoard.type.MapType; +import org.simantics.databoard.type.RecordType; + +/** + * Binds java.util.Set to Map(T, {}) + * + * This binding accepts all java.util.Set instances, but instantiates + * java.util.TreeSet objects. 
+ * + * @author Reino Ruusu + */ +@SuppressWarnings("unchecked") +public class DefaultSetBinding extends MapBinding { + + public DefaultSetBinding(MapType mapType, Binding elementBinding) { + super(mapType, elementBinding, VoidBinding.VOID_BINDING); + } + + public DefaultSetBinding(Binding elementBinding) { + super(new MapType(elementBinding.type(), RecordType.VOID_TYPE), elementBinding, VoidBinding.VOID_BINDING); + } + + @Override + public void clear(Object set) throws BindingException { + Set _set = (Set) set; + _set.clear(); + } + + @Override + public boolean containsKey(Object set, Object key) throws BindingException { + Set _set = (Set) set; + return _set.contains(key); + } + + @Override + public boolean containsValue(Object set, Object value) + throws BindingException { + return false; + } + + @Override + public Object create() throws BindingException { + return new TreeSet( keyBinding ); + } + + public Object create(Set initialSet) throws BindingException { + return initialSet; + } + + @Override + public Object create(Map initialMap) throws BindingException { + Set result = new TreeSet( keyBinding ); + result.addAll(initialMap.keySet()); + return result; + } + + @Override + public Object create(Object[] keys, Object[] values) + throws BindingException { + Set result = new TreeSet( keyBinding ); + for (int i=0; i keys, List values) { + Set result = new TreeSet( keyBinding ); + for (int i=0; i void getAll(Object setFrom, Map to) throws BindingException { + Map _to = (Map) to; + Set _setFrom = (Set) setFrom; + for (K k : _setFrom) + _to.put(k, null); + } + + @Override + public void getAll(Object setFrom, Object[] keys, Object[] values) + throws BindingException { + Set _setFrom = (Set) setFrom; + int i=0; + for (Object k : _setFrom) { + keys[i] = k; + values[i] = null; + } + } + + @Override + public Object[] getKeys(Object set) throws BindingException { + Set _set = (Set) set; + return _set.toArray(new Object[_set.size()]); + } + + @Override + public void 
getKeys(Object set, Set keys) throws BindingException { + Set s = (Set) set; + keys.addAll(s); + } + + @Override + public Object[] getValues(Object set) throws BindingException { + Set _set = (Set) set; + return new Object[_set.size()]; + } + + @Override + public int count(Object src, Object from, boolean fromInclusive, Object end, boolean endInclusive) throws BindingException { + if (src instanceof TreeSet) + return new TreeSetBinding(keyBinding).count(src, from, fromInclusive, end, endInclusive); + else + return new HashSetBinding(keyBinding).count(src, from, fromInclusive, end, endInclusive); + } + + @Override + public int getEntries(Object src, Object from, boolean fromInclusive, Object end, boolean endInclusive, ArrayBinding dstKeyArrayBinding, Object dstKeyArray, ArrayBinding dstValueArrayBinding, Object dstValueArray, int limit) throws BindingException { + return 0; + } + + @Override + public void put(Object set, Object key, Object value) + throws BindingException { + Set _set = (Set) set; + if (value!=null) throw new BindingException("Cannot put non-null to a Set"); + _set.add(key); + } + + public void putAll(Object setTo, Set from) { + Set _set = (Set) setTo; + _set.addAll(from); + } + + @Override + public void putAll(Object setTo, Map from) throws BindingException { + Set _set = (Set) setTo; + _set.addAll(from.keySet()); + } + + @Override + public Object remove(Object set, Object key) throws BindingException { + Set _set = (Set) set; + _set.remove(key); + return null; + } + + @Override + public int size(Object set) throws BindingException { + Set _set = (Set) set; + return _set.size(); + } + + @Override + public boolean isInstance(Object obj) { + return obj instanceof Set; + } + + @Override + public Object getCeilingKey(Object set, Object key) { + if (set instanceof TreeSet) + return new TreeSetBinding(keyBinding).getCeilingKey(set, key); + else + return new HashSetBinding(keyBinding).getCeilingKey(set, key); + } + + @Override + public Object 
getFirstKey(Object set) { + if (set instanceof TreeSet) + return new TreeSetBinding(keyBinding).getFirstKey(set); + else + return new HashSetBinding(keyBinding).getFirstKey(set); + } + + @Override + public Object getFloorKey(Object set, Object key) { + if (set instanceof TreeSet) + return new TreeSetBinding(keyBinding).getFloorKey(set, key); + else + return new HashSetBinding(keyBinding).getFloorKey(set, key); + } + + @Override + public Object getHigherKey(Object set, Object key) { + if (set instanceof TreeSet) + return new TreeSetBinding(keyBinding).getHigherKey(set, key); + else + return new HashSetBinding(keyBinding).getHigherKey(set, key); + } + + @Override + public Object getLastKey(Object set) { + if (set instanceof TreeSet) + return new TreeSetBinding(keyBinding).getLastKey(set); + else + return new HashSetBinding(keyBinding).getLastKey(set); + } + + @Override + public Object getLowerKey(Object set, Object key) { + if (set instanceof TreeSet) + return new TreeSetBinding(keyBinding).getLowerKey(set, key); + else + return new HashSetBinding(keyBinding).getLowerKey(set, key); + } + +} + diff --git a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/HashMapBinding.java b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/HashMapBinding.java index d3a9d8c75..76241a0c7 100644 --- a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/HashMapBinding.java +++ b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/HashMapBinding.java @@ -53,13 +53,11 @@ public class HashMapBinding extends MapBinding { super(mapType, keyBinding, valueBinding); } - public void postConstruction() {} - - @Override - public Object create() { - return new HashMap(); - } - + @Override + public Object create() { + return new HashMap(); + } + @Override public Object create(Object[] keys, Object[] values) { if (keys.length!=values.length) @@ -97,7 +95,10 @@ public class HashMapBinding extends MapBinding { } 
@Override - public Object create(Map initialMap) throws BindingException { + public Object create(Map initialMap) throws BindingException { + if (initialMap instanceof HashMap) + return initialMap; + // Replace with TreeMap. Create comparator from binding. HashMap result = new HashMap(); putAll(result, initialMap); diff --git a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/HashSetBinding.java b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/HashSetBinding.java index efeb38493..494b3d9e8 100644 --- a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/HashSetBinding.java +++ b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/HashSetBinding.java @@ -32,7 +32,7 @@ import org.simantics.databoard.type.RecordType; * * @author Toni Kalajainen */ -@SuppressWarnings("all") +@SuppressWarnings({"rawtypes", "unchecked"}) public class HashSetBinding extends MapBinding { public HashSetBinding(MapType mapType, Binding elementBinding) { @@ -43,7 +43,6 @@ public class HashSetBinding extends MapBinding { super(new MapType(elementBinding.type(), RecordType.VOID_TYPE), elementBinding, VoidBinding.VOID_BINDING); } - @SuppressWarnings("unchecked") @Override public void clear(Object set) throws BindingException { Set _set = (Set) set; @@ -139,7 +138,6 @@ public class HashSetBinding extends MapBinding { keys.addAll(s); } - @SuppressWarnings("unchecked") @Override public int count(Object src, Object from, boolean fromInclusive, Object end, boolean endInclusive) throws BindingException { @@ -183,7 +181,6 @@ public class HashSetBinding extends MapBinding { s.add(key); } - @SuppressWarnings("unchecked") Object getComparableKey(Object set, Object key) { // if (keyIsComparable) return key; @@ -201,11 +198,9 @@ public class HashSetBinding extends MapBinding { _set.addAll(from); } - @SuppressWarnings("unchecked") @Override public void putAll(Object setTo, Map from) throws BindingException { Set s = 
(Set) setTo; - Binding kb = getKeyBinding(); for (Entry e : (Set>) from.entrySet()) { Object k = getComparableKey(s, e.getKey()); s.remove(k); diff --git a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/TreeSetBinding.java b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/TreeSetBinding.java index 304b3cad4..50028b3a1 100644 --- a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/TreeSetBinding.java +++ b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/impl/TreeSetBinding.java @@ -58,12 +58,15 @@ public class TreeSetBinding extends MapBinding { return false; } - @Override - public Object create() throws BindingException { - return new TreeSet( getKeyBinding() ); - } - - public Object create(Set initialSet) throws BindingException { + @Override + public Object create() throws BindingException { + return new TreeSet( getKeyBinding() ); + } + + public Object create(Set initialSet) throws BindingException { + if (initialSet instanceof TreeSet && ((TreeSet) initialSet).comparator() == getKeyBinding()) + return initialSet; + TreeSet result = new TreeSet(getKeyBinding()); result.addAll(initialSet); return result; diff --git a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/reflection/BindingRequest.java b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/reflection/BindingRequest.java index 593eb308b..56cbe61dd 100644 --- a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/reflection/BindingRequest.java +++ b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/reflection/BindingRequest.java @@ -17,7 +17,7 @@ public class BindingRequest { { Annotation[] annotations = ClassBindingFactory.getFieldAnnotations(field); Class fieldClass = field.getType(); - return new BindingRequest(fieldClass, annotations); + return new BindingRequest(fieldClass, annotations); } /** Requested class */ @@ -33,8 +33,8 @@ public class 
BindingRequest { public final String signature; // eg. Ljava/util/Map; public final String descriptor; //eg. Ljava/util/Map; - public BindingRequest componentRequest; - public Binding componentBinding; + public BindingRequest[] componentRequests; + public Binding[] componentBindings; transient int hash; diff --git a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/reflection/ClassBindingFactory.java b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/reflection/ClassBindingFactory.java index 081e8b1ec..94d3ed5f6 100644 --- a/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/reflection/ClassBindingFactory.java +++ b/bundles/org.simantics.databoard/src/org/simantics/databoard/binding/reflection/ClassBindingFactory.java @@ -74,6 +74,7 @@ import org.simantics.databoard.type.DoubleType; import org.simantics.databoard.type.FloatType; import org.simantics.databoard.type.IntegerType; import org.simantics.databoard.type.LongType; +import org.simantics.databoard.type.MapType; import org.simantics.databoard.type.OptionalType; import org.simantics.databoard.type.RecordType; import org.simantics.databoard.type.StringType; @@ -391,15 +392,16 @@ public class ClassBindingFactory { lengths[i] = org.simantics.databoard.util.Range.valueOf(strs[i]); } - if ( binding.componentBinding==null && request.componentBinding!=null ) binding.componentBinding = request.componentBinding; + if ( binding.componentBinding==null && request.componentBindings!=null ) binding.componentBinding = request.componentBindings[0]; if ( binding.componentBinding == null) { - BindingRequest componentRequest = request.componentRequest; - if (componentClass!=null && componentRequest==null) { + BindingRequest componentRequest = request.componentRequests != null ? 
request.componentRequests[0] : null; + if (componentRequest==null) { + if (componentClass==null) { + componentClass = Object.class; + // throw new BindingConstructionException("Cannot determine array component type"); + } componentRequest = new BindingRequest(componentClass, componentAnnotations); } - if (componentRequest==null) { - throw new BindingConstructionException("Cannot determine array component type"); - } inprogress.put(request, binding); binding.componentBinding = construct( componentRequest ); @@ -426,44 +428,64 @@ public class ClassBindingFactory { Arguments argumentsAnnotation = request.getAnnotation(Arguments.class); Annotation[] componentAnnotations = request.dropAnnotations( 2 ); + Class[] arguments = argumentsAnnotation != null ? argumentsAnnotation.value() : null; BindingRequest keyRequest = null; BindingRequest valueRequest = null; + + Binding keyBinding = null; + Binding valueBinding = null; - if (binding.getKeyBinding() == null) { - Class keyClass = argumentsAnnotation.value()[0]; + if (binding.getKeyBinding() != null) { + keyBinding = binding.getKeyBinding(); + } + else if (request.componentBindings != null) { + keyBinding = request.componentBindings[0]; + } + else if (request.componentRequests != null) { + keyRequest = request.componentRequests[0]; + } + else { + Class keyClass = arguments != null && arguments.length >= 1 ? 
arguments[0] : null; if (keyClass==null) { - throw new BindingConstructionException("Cannot determine key class, use @Arguments annotation"); + keyClass = Object.class; + //throw new BindingConstructionException("Cannot determine key class, use @Arguments annotation"); } keyRequest = new BindingRequest(keyClass, componentAnnotations); } - else { - binding.type().keyType = binding.getKeyBinding().type(); - } - if (binding.getValueBinding() == null) { - Class valueClass = argumentsAnnotation.value()[1]; + if (binding.getValueBinding() != null) { + valueBinding = binding.getValueBinding(); + } + else if (request.componentBindings != null) { + valueBinding = request.componentBindings[1]; + } + else if (request.componentRequests != null) { + valueRequest = request.componentRequests[1]; + } + else { + Class valueClass = arguments != null && arguments.length >= 2 ? arguments[1] : null; if (valueClass==null) { - throw new BindingConstructionException("Cannot determine value class, use @Arguments annotation"); + valueClass = Object.class; + //throw new BindingConstructionException("Cannot determine value class, use @Arguments annotation"); } valueRequest = new BindingRequest(valueClass, componentAnnotations); } - else { - binding.type().valueType = binding.getValueBinding().type(); - } - + inprogress.put(request, result); if (keyRequest!=null) { - Binding keyBinding = construct( keyRequest ); - binding.type().keyType = keyBinding.type(); - binding.setKeyBinding(keyBinding); + keyBinding = construct( keyRequest ); } if (valueRequest!=null) { - Binding valueBinding = construct( valueRequest ); - binding.type().valueType = valueBinding.type(); - binding.setValueBinding(valueBinding); + valueBinding = construct( valueRequest ); } - inprogress.remove(request); + inprogress.remove(request); + + MapType type = binding.type(); + type.keyType = keyBinding.type(); + type.valueType = valueBinding.type(); + binding.setKeyBinding( keyBinding ); + binding.setValueBinding( valueBinding ); 
} /// Optional diff --git a/bundles/org.simantics.databoard/src/org/simantics/databoard/util/URIStringUtils.java b/bundles/org.simantics.databoard/src/org/simantics/databoard/util/URIStringUtils.java index dde498a2c..a11579f07 100644 --- a/bundles/org.simantics.databoard/src/org/simantics/databoard/util/URIStringUtils.java +++ b/bundles/org.simantics.databoard/src/org/simantics/databoard/util/URIStringUtils.java @@ -45,8 +45,7 @@ package org.simantics.databoard.util; -import java.nio.charset.Charset; -import java.util.ArrayList; +import java.util.Arrays; import java.util.List; @@ -194,13 +193,14 @@ public final class URIStringUtils { return name; } - final private static int HTTP_POSITION = "http://".length(); + final private static String HTTP_PREFIX = "http://"; + final private static int HTTP_POSITION = HTTP_PREFIX.length(); public static String[] splitURI(String uri) { int nextPathSeparator = uri.lastIndexOf(URIStringUtils.NAMESPACE_PATH_SEPARATOR); if (nextPathSeparator == -1) return null; if (nextPathSeparator == HTTP_POSITION - 1) { - if(uri.startsWith("http://")) return new String[] { "http://", uri.substring(HTTP_POSITION, uri.length()) }; + if(uri.startsWith(HTTP_PREFIX)) return new String[] { HTTP_PREFIX, uri.substring(HTTP_POSITION, uri.length()) }; else return null; } return new String[] { @@ -208,12 +208,10 @@ public final class URIStringUtils { uri.substring(nextPathSeparator + 1, uri.length()) }; } - + public static List splitURISCL(String uri) { String[] result = splitURI(uri); - ArrayList list = new ArrayList(result.length); - for(String s : result) list.add(s); - return list; + return Arrays.asList(result); } /** @@ -263,8 +261,7 @@ public final class URIStringUtils { public static String escapeURI(String localName) { if (localName == null) throw new NullPointerException("null local name"); - String result = encode(localName); - return result; + return encode(localName); } /** @@ -276,8 +273,7 @@ public final class URIStringUtils { * @return the 
joined namespace */ public static String appendURINamespace(String namespace, String suffix) { - //return namespace + NAMESPACE_PATH_SEPARATOR + suffix; - return new StringBuffer(namespace.length() + 1 + suffix.length()) + return new StringBuilder(namespace.length() + 1 + suffix.length()) .append(namespace) .append(NAMESPACE_PATH_SEPARATOR) .append(suffix) @@ -293,9 +289,8 @@ public final class URIStringUtils { * @return the joined URI */ public static String makeURI(String namespace, String localName) { - //return namespace + NAMESPACE_LOCAL_SEPARATOR + escapeURI(localName); String escapedLocalName = escapeURI(localName); - return new StringBuffer(namespace.length() + 1 + escapedLocalName.length()) + return new StringBuilder(namespace.length() + 1 + escapedLocalName.length()) .append(namespace) .append(NAMESPACE_LOCAL_SEPARATOR) .append(escapedLocalName) @@ -332,94 +327,59 @@ public final class URIStringUtils { } - final private static Charset UTF8 = Charset.forName("UTF-8"); - final private static Charset ASCII = Charset.forName("US-ASCII"); - - /* Copied and modified from Jena 2.4 com.hp.hpl.jena.util.URIref */ - private static String encode(String unicode) { - boolean needsEscapes = needsEscaping(unicode); - if (!needsEscapes) - return unicode; - - byte utf8[] = unicode.getBytes(UTF8); - byte rsltAscii[] = new byte[utf8.length * 6]; - int in = 0; - int out = 0; - while (in < utf8.length) { - switch (utf8[in]) { - case (byte)'a': case (byte)'b': case (byte)'c': case (byte)'d': case (byte)'e': case (byte)'f': case (byte)'g': case (byte)'h': case (byte)'i': case (byte)'j': case (byte)'k': case (byte)'l': case (byte)'m': case (byte)'n': case (byte)'o': case (byte)'p': case (byte)'q': case (byte)'r': case (byte)'s': case (byte)'t': case (byte)'u': case (byte)'v': case (byte)'w': case (byte)'x': case (byte)'y': case (byte)'z': - case (byte)'A': case (byte)'B': case (byte)'C': case (byte)'D': case (byte)'E': case (byte)'F': case (byte)'G': case (byte)'H': case 
(byte)'I': case (byte)'J': case (byte)'K': case (byte)'L': case (byte)'M': case (byte)'N': case (byte)'O': case (byte)'P': case (byte)'Q': case (byte)'R': case (byte)'S': case (byte)'T': case (byte)'U': case (byte)'V': case (byte)'W': case (byte)'X': case (byte)'Y': case (byte)'Z': - case (byte)'0': case (byte)'1': case (byte)'2': case (byte)'3': case (byte)'4': case (byte)'5': case (byte)'6': case (byte)'7': case (byte)'8': case (byte)'9': - case (byte)';': case (byte)'?': case (byte)':': case (byte)'@': case (byte)'=': case (byte)'+': case (byte)'$': case (byte)',': - case (byte)'-': case (byte)'_': case (byte)'.': case (byte)'!': case (byte)'~': case (byte)'*': case (byte)'\'': case (byte)'(': case (byte)')': - case (byte)'[': case (byte)']': - rsltAscii[out] = utf8[in]; - out++; - in++; - break; - case (byte)' ': - rsltAscii[out++] = (byte) '%'; - rsltAscii[out++] = '2'; - rsltAscii[out++] = '0'; - in++; - break; - case (byte) '%': - // [lehtonen] NOTE: all input needs to be escaped, i.e. "%01" should result in "%2501", not "%01". - // escape+unescape is a bijection, not an idempotent operation. - // Fall through to to escape '%' as '%25' - case (byte) '#': - case (byte) '/': - // Fall through to escape '/' - case (byte)'&': - // Fall through to escape '&' characters to avoid them - // being interpreted as SGML entities. - default: - rsltAscii[out++] = (byte) '%'; - // Get rid of sign ... - int c = (utf8[in]) & 255; - rsltAscii[out++] = hexEncode(c / 16); - rsltAscii[out++] = hexEncode(c % 16); - in++; - break; - } - } - return new String(rsltAscii, 0, out, ASCII); - } - /* * RFC 3986 section 2.2 Reserved Characters (January 2005) * !*'();:@&=+$,/?#[] */ - private static boolean needsEscaping(String unicode) { + private static boolean[] ESCAPED_US_ASCII_CHARS = new boolean[128]; + + static { + ESCAPED_US_ASCII_CHARS[' '] = true; + // IMPORTANT NOTE: every time escape is invoked, all input needs to be escaped, + // i.e. 
escape("%01") should result in "%2501", not "%01". + // escape and unescape form a bijection, where neither + // of them is an idempotent operation. + ESCAPED_US_ASCII_CHARS['%'] = true; + // '#' and '/' are URL segment/fragment delimiters, need to be escaped in names. + ESCAPED_US_ASCII_CHARS['#'] = true; + ESCAPED_US_ASCII_CHARS['/'] = true; + // Escape '&' characters to avoid them being interpreted as SGML entities. + ESCAPED_US_ASCII_CHARS['&'] = true; + } + + private static int needsEscaping(String unicode) { int len = unicode.length(); + int escapeCount = 0; for (int i = 0; i < len; ++i) { - switch (unicode.charAt(i)) { - case (byte)'!': - case (byte)'*': - case (byte)'\'': - case (byte)'(': - case (byte)')': - case (byte)';': - case (byte)':': - case (byte)'@': - case (byte)'=': - case (byte)'+': - case (byte)'$': - case (byte)',': - case (byte)'?': - case (byte)'~': - case (byte)'[': - case (byte)']': - break; - case (byte)' ': - case (byte) '#': - case (byte) '%': - case (byte) '/': - case (byte)'&': - return true; + char ch = unicode.charAt(i); + if (ch < 128 && ESCAPED_US_ASCII_CHARS[ch]) + ++escapeCount; + } + return escapeCount; + } + + private static String encode(String unicode) { + int needsEscapes = needsEscaping(unicode); + if (needsEscapes == 0) + return unicode; + + int len = unicode.length(); + char result[] = new char[(len - needsEscapes) + needsEscapes * 3]; + int in = 0; + int out = 0; + while (in < len) { + char inCh = unicode.charAt(in++); + if (inCh >= 128 || !ESCAPED_US_ASCII_CHARS[inCh]) { + result[out++] = inCh; + } else { + // Only selected 7-bit US-ASCII characters are escaped + int c = inCh & 255; + result[out++] = '%'; + result[out++] = (char) hexEncode(c / 16); + result[out++] = (char) hexEncode(c % 16); } } - return false; + return new String(result, 0, out); } private static boolean needsUnescaping(String unicode) { @@ -427,13 +387,12 @@ public final class URIStringUtils { } /** - * Convert a URI, in US-ASCII, with escaped 
characters taken from UTF-8, to - * the corresponding Unicode string. On ill-formed input the results are - * undefined, specifically if the unescaped version is not a UTF-8 String, - * some String will be returned. + * Convert a URI, in UTF-16 with escaped characters taken from US-ASCII, to + * the corresponding unescaped Unicode string. On ill-formed input the results are + * undefined. * * @param uri the uri, in characters specified by RFC 2396 + '#'. - * @return the corresponding Unicode String. + * @return the corresponding unescaped Unicode String. * @exception IllegalArgumentException if a % hex sequence is ill-formed. */ public static String unescape(String uri) { @@ -441,26 +400,29 @@ public final class URIStringUtils { if (!needsUnescaping(uri)) return uri; - byte ascii[] = uri.getBytes("US-ASCII"); - byte utf8[] = new byte[ascii.length]; + int len = uri.length(); + String unicode = uri; + char result[] = new char[len]; int in = 0; int out = 0; - while ( in < ascii.length ) { - if (ascii[in] == (byte) '%') { - in++; - utf8[out++] = (byte) (hexDecode(ascii[in]) * 16 | hexDecode(ascii[in + 1])); + while (in < len) { + char inCh = unicode.charAt(in++); + if (inCh == '%') { + char d1 = unicode.charAt(in); + char d2 = unicode.charAt(in+1); + if (d1 > 127 || d2 > 127) + throw new IllegalArgumentException("Invalid hex digit escape sequence in " + uri + " at " + in); + result[out++] = (char) (hexDecode((byte) d1) * 16 | hexDecode((byte) d2)); in += 2; } else { - utf8[out++] = ascii[in++]; + result[out++] = inCh; } } - return new String(utf8, 0, out, "UTF-8"); + return new String(result, 0, out); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Problem while unescaping string: " + uri, e); - } catch (java.io.UnsupportedEncodingException e) { - throw new Error("The JVM is required to support UTF-8 and US-ASCII encodings."); - } catch (ArrayIndexOutOfBoundsException ee) { - throw new IllegalArgumentException("Incomplete Hex escape sequence 
in " + uri); + } catch (IndexOutOfBoundsException ee) { + throw new IllegalArgumentException("Incomplete hex digit escape sequence in " + uri); } } @@ -491,38 +453,36 @@ public final class URIStringUtils { * @param args */ public static void main(String[] args) { - String s; - s = "http://www.vtt.fi%2FSome- %25 Namespace/Jotain"; - System.out.println(String.format("escape+unescape: %s -> %s -> %s", s, escape(s), unescape(escape(s)))); - s = "http://www.vtt.fi%2FPSK"; - System.out.println(String.format("unescape: %s -> %s", s, unescape(s))); - s = "http://www.vtt.fi%2FSome-Namespace/Jotain / Muuta"; - System.out.println(String.format("escape: %s -> %s", s, escape(s))); - s = "Jotain / Muuta"; - System.out.println(String.format("escape: %s -> %s", s, escape(s))); - - System.out.println("escapeURI: " + escapeURI("foo/bar/org%2Fnet")); - System.out.println("escapeURI('...#...'): " + escapeURI("foo/bar#org%2Fnet")); - s = makeURI("http://foo.bar.com/foo/bar", "baz/guuk/org%2Fnet"); + String s = makeURI("http://foo.bar.com/foo/bar", "baz/guuk/org%2Fnet"); System.out.println("escapeURI: " + s); System.out.println("getNamespace: " + getNamespace(s)); System.out.println("getLocalName: " + getLocalName(s)); + System.out.println("escapeURI: " + escapeURI("foo/bar/org%2Fnet")); + System.out.println("escapeURI('...#...'): " + escapeURI("foo/bar#org%2Fnet")); + testEscape("/", "%2F"); testEscape("#", "%23"); testEscape("%", "%25"); testEscape("%01", "%2501"); testEscape("%GG", "%25GG"); + testEscape("säätö venttiili", "säätö%20venttiili"); + testEscape("säätö", "säätö"); + testEscape("Something / Else", "Something%20%2F%20Else"); + testEscape("http://www.vtt.fi%2FSome- %25 Namespace/Something", "http:%2F%2Fwww.vtt.fi%252FSome-%20%2525%20Namespace%2FSomething"); + testEscape("http://www.vtt.fi/PSK", "http:%2F%2Fwww.vtt.fi%2FPSK"); + testEscape("http://www.vtt.fi%2FSome-Namespace/Something / Else", "http:%2F%2Fwww.vtt.fi%252FSome-Namespace%2FSomething%20%2F%20Else"); } private 
static void testEscape(String unescaped, String expectedEscaped) { String esc = escape(unescaped); String unesc = unescape(esc); - System.out.format("escape('%s')='%s', unescape('%s')='%s'\n", unescaped, esc, esc, unesc); + System.out.format("escape('%s') -> '%s', unescape('%s') -> '%s'", unescaped, esc, esc, unesc); if (!esc.equals(expectedEscaped)) throw new AssertionError("escape('" + unescaped + "') was expected to return '" + expectedEscaped + "' but returned '" + esc + "'"); if (!unesc.equals(unescaped)) throw new AssertionError("unescape(escape('" + unescaped + "'))=unescape(" + esc + ") was expected to return '" + unescaped + "' but returned '" + unesc + "'"); + System.out.println(" OK"); } } diff --git a/bundles/org.simantics.db.common/META-INF/MANIFEST.MF b/bundles/org.simantics.db.common/META-INF/MANIFEST.MF index 89395e6ea..5ba6379d6 100644 --- a/bundles/org.simantics.db.common/META-INF/MANIFEST.MF +++ b/bundles/org.simantics.db.common/META-INF/MANIFEST.MF @@ -14,7 +14,8 @@ Require-Bundle: org.simantics.db;bundle-version="1.1.0";visibility:=reexport, org.simantics.scl.runtime;bundle-version="0.1.4", org.simantics.user.ontology;bundle-version="1.0.0", org.simantics.layer0x.ontology;bundle-version="1.0.0", - org.simantics.issues.ontology;bundle-version="1.2.0" + org.simantics.issues.ontology;bundle-version="1.2.0", + org.slf4j.api Export-Package: org.simantics.db.common, org.simantics.db.common.adaption, org.simantics.db.common.auth, diff --git a/bundles/org.simantics.db.common/src/org/simantics/db/common/request/PropertyMapOfResource.java b/bundles/org.simantics.db.common/src/org/simantics/db/common/request/PropertyMapOfResource.java index 5a92ef62c..1d06cc33d 100644 --- a/bundles/org.simantics.db.common/src/org/simantics/db/common/request/PropertyMapOfResource.java +++ b/bundles/org.simantics.db.common/src/org/simantics/db/common/request/PropertyMapOfResource.java @@ -22,9 +22,13 @@ import org.simantics.db.ReadGraph; import org.simantics.db.Resource; 
import org.simantics.db.exception.DatabaseException; import org.simantics.layer0.Layer0; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class PropertyMapOfResource extends ResourceRead> { + private static final Logger LOGGER = LoggerFactory.getLogger(PropertyMapOfResource.class); + public PropertyMapOfResource(Resource resource) { super(resource); } @@ -41,7 +45,7 @@ public class PropertyMapOfResource extends ResourceRead> { if(name != null) { String escapedName = URIStringUtils.escape(name); if (result.put(escapedName, predicate) != null) - System.err.println(this + ": The database contains siblings with the same name " + name + " (resource=$" + resource.getResourceId() +")."); + LOGGER.error("The database contains siblings with the same name " + name + " (resource=$" + resource.getResourceId() +")."); } } } diff --git a/bundles/org.simantics.db.common/src/org/simantics/db/common/uri/EscapedChildMapOfResource.java b/bundles/org.simantics.db.common/src/org/simantics/db/common/uri/EscapedChildMapOfResource.java index 99b20ff73..1d92da9c8 100644 --- a/bundles/org.simantics.db.common/src/org/simantics/db/common/uri/EscapedChildMapOfResource.java +++ b/bundles/org.simantics.db.common/src/org/simantics/db/common/uri/EscapedChildMapOfResource.java @@ -20,12 +20,17 @@ import org.simantics.db.Resource; import org.simantics.db.common.ProcedureBarrier; import org.simantics.db.common.WriteBindings; import org.simantics.db.common.procedure.adapter.AsyncMultiProcedureAdapter; +import org.simantics.db.common.request.PropertyMapOfResource; import org.simantics.db.common.request.ResourceAsyncRead; import org.simantics.db.procedure.AsyncProcedure; import org.simantics.layer0.Layer0; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class EscapedChildMapOfResource extends ResourceAsyncRead> { + private static final Logger LOGGER = LoggerFactory.getLogger(EscapedChildMapOfResource.class); + public EscapedChildMapOfResource(Resource resource) { 
super(resource); } @@ -50,7 +55,7 @@ public class EscapedChildMapOfResource extends ResourceAsyncRead> { + private static final Logger LOGGER = LoggerFactory.getLogger(UnescapedChildMapOfResource.class); + public UnescapedChildMapOfResource(Resource resource) { super(resource); } @@ -40,10 +44,10 @@ public class UnescapedChildMapOfResource extends ResourceRead { - Object getValue(Node node); - void setValue(Node node, Object value); + Object getValue(Node node) throws NodeManagerException; + void setValue(Node node, Object value) throws NodeManagerException; String getName(Node node); Map getChildren(Node node); Map getProperties(Node node); diff --git a/bundles/org.simantics.db.layer0/src/org/simantics/db/layer0/adapter/impl/EntityInstances.java b/bundles/org.simantics.db.layer0/src/org/simantics/db/layer0/adapter/impl/EntityInstances.java index 428b96a5d..32a7d2a9f 100644 --- a/bundles/org.simantics.db.layer0/src/org/simantics/db/layer0/adapter/impl/EntityInstances.java +++ b/bundles/org.simantics.db.layer0/src/org/simantics/db/layer0/adapter/impl/EntityInstances.java @@ -174,8 +174,10 @@ public class EntityInstances implements Instances { public Collection find(ReadGraph graph, Resource index, String filter) throws DatabaseException { CollectionSupport coll = graph.getService(CollectionSupport.class); - List rec = findRec(graph, index, filter, new THashSet()); + THashSet visited = new THashSet<>(); + List rec = findRec(graph, index, filter, visited); for(Resource global : Layer0Utils.listGlobalOntologies(graph)) { + if(!visited.add(global)) continue; List rs = graph.syncRequest(new QueryIndex(global, type, filter), TransientCacheListener.>instance()); if(rec.isEmpty() && !rs.isEmpty()) { // TODO: rec could be an immutable empty list diff --git a/bundles/org.simantics.db.procore.ui/src/org/simantics/db/procore/ui/ProCoreUserAgent.java b/bundles/org.simantics.db.procore.ui/src/org/simantics/db/procore/ui/ProCoreUserAgent.java index 54b892070..9b0438042 100644 --- 
a/bundles/org.simantics.db.procore.ui/src/org/simantics/db/procore/ui/ProCoreUserAgent.java +++ b/bundles/org.simantics.db.procore.ui/src/org/simantics/db/procore/ui/ProCoreUserAgent.java @@ -8,38 +8,38 @@ import org.simantics.db.procore.ProCoreDriver; public final class ProCoreUserAgent implements DatabaseUserAgent { private static Shell getShell() { - Shell shell = null; - Display d = getDisplay(); - if (d == null) - return null; - shell = d.getActiveShell(); - if (null == shell) { - Shell[] shells = d.getShells(); - if (null != shells && shells.length > 0) - shell = shells[0]; - } - return shell; + Shell shell = null; + Display d = getDisplay(); + if (d == null) + return null; + shell = d.getActiveShell(); + if (null == shell) { + Shell[] shells = d.getShells(); + if (null != shells && shells.length > 0) + shell = shells[0]; + } + return shell; } private static Display getDisplay() { - Display d = Display.getCurrent(); - if (d == null) - d = Display.getDefault(); - return d; + Display d = Display.getCurrent(); + if (d == null) + d = Display.getDefault(); + return d; } - @Override - public boolean handleStart(InternalException exception) { - Shell shell = getShell(); - if (null == shell) - return false; // no can do - try { - return Auxiliary.handleStart(shell, exception); - } catch (InternalException e) { - return false; // no could do - } - } - - @Override - public String getId() { - return ProCoreDriver.ProCoreDriverName; - } -} \ No newline at end of file + @Override + public boolean handleStart(InternalException exception) { + Shell shell = getShell(); + if (null == shell) + return false; // no can do + try { + return Auxiliary.handleStart(shell, exception); + } catch (InternalException e) { + return false; // no could do + } + } + + @Override + public String getId() { + return ProCoreDriver.ProCoreDriverName; + } +} diff --git a/bundles/org.simantics.db.procore/src/fi/vtt/simantics/procore/internal/ClusterSetsSupportImpl.java 
b/bundles/org.simantics.db.procore/src/fi/vtt/simantics/procore/internal/ClusterSetsSupportImpl.java index 0f03141ea..c4ed5739c 100644 --- a/bundles/org.simantics.db.procore/src/fi/vtt/simantics/procore/internal/ClusterSetsSupportImpl.java +++ b/bundles/org.simantics.db.procore/src/fi/vtt/simantics/procore/internal/ClusterSetsSupportImpl.java @@ -70,14 +70,14 @@ public class ClusterSetsSupportImpl implements ClusterSetsSupport, Disposable { clusterSets.clear(); } - @Override - public void setReadDirectory(Path read) { - // TODO Auto-generated method stub - - } @Override public void updateWriteDirectory(Path write) { // Nothing to do here } + @Override + public void setReadDirectory(Path read) { + + } + } diff --git a/bundles/org.simantics.db.procore/src/fi/vtt/simantics/procore/internal/ClusterSetsSupportImpl2.java b/bundles/org.simantics.db.procore/src/fi/vtt/simantics/procore/internal/ClusterSetsSupportImpl2.java index db28af306..5da4e3761 100644 --- a/bundles/org.simantics.db.procore/src/fi/vtt/simantics/procore/internal/ClusterSetsSupportImpl2.java +++ b/bundles/org.simantics.db.procore/src/fi/vtt/simantics/procore/internal/ClusterSetsSupportImpl2.java @@ -77,7 +77,7 @@ public class ClusterSetsSupportImpl2 implements ClusterSetsSupport, Disposable { clusterSets.touch(); } } - + @Override public void setReadDirectory(Path read) { this.readDirectory = read; diff --git a/bundles/org.simantics.db.procore/src/fi/vtt/simantics/procore/internal/GraphSessionVirtual.java b/bundles/org.simantics.db.procore/src/fi/vtt/simantics/procore/internal/GraphSessionVirtual.java index 1dc1b015a..f6eba7eb8 100644 --- a/bundles/org.simantics.db.procore/src/fi/vtt/simantics/procore/internal/GraphSessionVirtual.java +++ b/bundles/org.simantics.db.procore/src/fi/vtt/simantics/procore/internal/GraphSessionVirtual.java @@ -214,4 +214,4 @@ class GraphSessionVirtual extends GraphSession { // Äsh! 
This extends relation id = virtualGraphServerSupport.createVirtual(); return id; } -} \ No newline at end of file +} diff --git a/bundles/org.simantics.db/src/org/simantics/db/service/ClusterSetsSupport.java b/bundles/org.simantics.db/src/org/simantics/db/service/ClusterSetsSupport.java index e79433870..d935a2b54 100644 --- a/bundles/org.simantics.db/src/org/simantics/db/service/ClusterSetsSupport.java +++ b/bundles/org.simantics.db/src/org/simantics/db/service/ClusterSetsSupport.java @@ -12,7 +12,7 @@ public interface ClusterSetsSupport { void put(long resourceId, long clusterId); void save() throws IOException; void clear(); - void updateWriteDirectory(Path write); void setReadDirectory(Path read); + void updateWriteDirectory(Path write); } diff --git a/bundles/org.simantics.diagram/META-INF/MANIFEST.MF b/bundles/org.simantics.diagram/META-INF/MANIFEST.MF index 6f879b7ed..1ff8d39e0 100644 --- a/bundles/org.simantics.diagram/META-INF/MANIFEST.MF +++ b/bundles/org.simantics.diagram/META-INF/MANIFEST.MF @@ -44,7 +44,8 @@ Require-Bundle: org.simantics.utils.thread.swt, org.eclipse.e4.ui.model.workbench, org.eclipse.e4.ui.services, org.eclipse.e4.core.contexts, - org.eclipse.e4.ui.workbench + org.eclipse.e4.ui.workbench, + org.slf4j.api;bundle-version="1.7.20" Export-Package: org.simantics.diagram, org.simantics.diagram.adapter, org.simantics.diagram.connection, diff --git a/bundles/org.simantics.diagram/src/org/simantics/diagram/ui/WorkbenchSelectionProvider.java b/bundles/org.simantics.diagram/src/org/simantics/diagram/ui/WorkbenchSelectionProvider.java index aa5076278..5bd73f97a 100644 --- a/bundles/org.simantics.diagram/src/org/simantics/diagram/ui/WorkbenchSelectionProvider.java +++ b/bundles/org.simantics.diagram/src/org/simantics/diagram/ui/WorkbenchSelectionProvider.java @@ -23,7 +23,6 @@ import org.eclipse.jface.viewers.ISelectionChangedListener; import org.eclipse.jface.viewers.SelectionChangedEvent; import org.eclipse.jface.viewers.StructuredSelection; 
import org.eclipse.ui.IWorkbenchPartSite; -import org.simantics.Logger; import org.simantics.db.exception.DatabaseException; import org.simantics.diagram.elements.AdaptableImmutableProxyElement; import org.simantics.g2d.canvas.ICanvasContext; @@ -35,10 +34,12 @@ import org.simantics.g2d.diagram.participant.Selection; import org.simantics.g2d.element.ElementHints; import org.simantics.g2d.element.IElement; import org.simantics.ui.selection.WorkbenchSelectionUtils; -import org.simantics.utils.datastructures.hints.IHintObservable; import org.simantics.utils.datastructures.hints.IHintContext.Key; +import org.simantics.utils.datastructures.hints.IHintObservable; import org.simantics.utils.threads.IThreadWorkQueue; import org.simantics.utils.threads.ThreadUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A canvas participant that listens to the #0 mouse selection and provides it @@ -48,6 +49,8 @@ import org.simantics.utils.threads.ThreadUtils; */ public class WorkbenchSelectionProvider extends AbstractCanvasParticipant implements IPostSelectionProvider, ElementJSON { + private static final Logger LOGGER = LoggerFactory.getLogger(WorkbenchSelectionProvider.class); + private static final long POST_SELECTION_DELAY = 300; @Dependency protected Selection selection; @@ -216,7 +219,7 @@ public class WorkbenchSelectionProvider extends AbstractCanvasParticipant implem try { return Optional.ofNullable( WorkbenchSelectionUtils.getPossibleJSON(sel) ); } catch (DatabaseException e) { - Logger.defaultLogError(e); + LOGGER.error("Unexpected failure while constructing JSON from element " + element, e); return Optional.empty(); } } diff --git a/bundles/org.simantics.document.server/META-INF/MANIFEST.MF b/bundles/org.simantics.document.server/META-INF/MANIFEST.MF index a345498be..184361779 100644 --- a/bundles/org.simantics.document.server/META-INF/MANIFEST.MF +++ b/bundles/org.simantics.document.server/META-INF/MANIFEST.MF @@ -22,7 +22,8 @@ Require-Bundle: 
org.simantics.document.base.ontology;bundle-version="1.1.0", org.simantics.modeling;bundle-version="1.1.1", org.simantics.document.server.io;visibility:=reexport, - org.simantics.scl.db;bundle-version="0.1.3" + org.simantics.scl.db;bundle-version="0.1.3", + org.slf4j.api Bundle-ActivationPolicy: lazy Bundle-Activator: org.simantics.document.server.Activator Export-Package: org.simantics.document.server, diff --git a/bundles/org.simantics.document.server/src/org/simantics/document/server/DocumentHistoryListener.java b/bundles/org.simantics.document.server/src/org/simantics/document/server/DocumentHistoryListener.java index ca4adef25..ef764bddc 100644 --- a/bundles/org.simantics.document.server/src/org/simantics/document/server/DocumentHistoryListener.java +++ b/bundles/org.simantics.document.server/src/org/simantics/document/server/DocumentHistoryListener.java @@ -2,10 +2,12 @@ package org.simantics.document.server; import java.util.List; -import org.simantics.Logger; import org.simantics.db.procedure.Listener; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class DocumentHistoryListener implements Listener> { + private static final Logger LOGGER = LoggerFactory.getLogger(DocumentHistoryListener.class); final private DocumentHistory history; @@ -32,7 +34,7 @@ public class DocumentHistoryListener implements Listener> { @Override public void exception(Throwable t) { - Logger.defaultLogError(t); + LOGGER.warn("DocumentHistoryListener received an exception.", t); } @Override diff --git a/bundles/org.simantics.document.server/src/org/simantics/document/server/request/NodeRequestUtils.java b/bundles/org.simantics.document.server/src/org/simantics/document/server/request/NodeRequestUtils.java index 26f88608f..2e67010eb 100644 --- a/bundles/org.simantics.document.server/src/org/simantics/document/server/request/NodeRequestUtils.java +++ b/bundles/org.simantics.document.server/src/org/simantics/document/server/request/NodeRequestUtils.java @@ -4,15 +4,17 @@ 
import java.io.ByteArrayOutputStream; import java.io.PrintStream; import java.io.UnsupportedEncodingException; -import org.simantics.Logger; import org.simantics.db.layer0.exception.MissingVariableException; import org.simantics.db.layer0.scl.SCLDatabaseException; import org.simantics.document.server.DocumentException; import org.simantics.scl.compiler.module.repository.ImportFailure; import org.simantics.scl.compiler.module.repository.ImportFailureException; import org.simantics.scl.compiler.top.NotFoundException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class NodeRequestUtils { + private static final Logger LOGGER = LoggerFactory.getLogger(NodeRequestUtils.class); public static String formatErrorMessage(String name, Throwable t) { @@ -36,8 +38,8 @@ public class NodeRequestUtils { sb.append(" " + f.moduleName + "\n"); return sb.toString(); } else { - Logger.defaultLogError(t); - + LOGGER.error("Node request error:", t); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintStream ps = new PrintStream(baos); t.printStackTrace(ps); diff --git a/bundles/org.simantics.export.core/META-INF/MANIFEST.MF b/bundles/org.simantics.export.core/META-INF/MANIFEST.MF index eadcf7e08..88dfc79c7 100644 --- a/bundles/org.simantics.export.core/META-INF/MANIFEST.MF +++ b/bundles/org.simantics.export.core/META-INF/MANIFEST.MF @@ -15,7 +15,8 @@ Require-Bundle: org.eclipse.core.runtime, org.simantics.utils.ui;bundle-version="1.1.0", org.simantics.graph.db;bundle-version="1.1.9", org.simantics;bundle-version="1.0.0", - com.lowagie.text;bundle-version="2.1.7";resolution:=optional + com.lowagie.text;bundle-version="2.1.7";resolution:=optional, + org.slf4j.api Bundle-RequiredExecutionEnvironment: JavaSE-1.8 Export-Package: org.simantics.export.core, org.simantics.export.core.error, diff --git a/bundles/org.simantics.export.core/src/org/simantics/export/core/pdf/ServiceBasedPdfExportPageEvent.java 
b/bundles/org.simantics.export.core/src/org/simantics/export/core/pdf/ServiceBasedPdfExportPageEvent.java index c513654df..65cb53d13 100644 --- a/bundles/org.simantics.export.core/src/org/simantics/export/core/pdf/ServiceBasedPdfExportPageEvent.java +++ b/bundles/org.simantics.export.core/src/org/simantics/export/core/pdf/ServiceBasedPdfExportPageEvent.java @@ -20,8 +20,9 @@ import java.util.function.Consumer; import org.osgi.framework.BundleContext; import org.osgi.framework.InvalidSyntaxException; import org.osgi.framework.ServiceReference; -import org.simantics.Logger; import org.simantics.export.core.internal.Activator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.lowagie.text.Document; import com.lowagie.text.Paragraph; @@ -34,6 +35,7 @@ import com.lowagie.text.pdf.PdfWriter; * @since 1.22.2 */ public class ServiceBasedPdfExportPageEvent extends PdfPageEventHelper { + private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBasedPdfExportPageEvent.class); Map events; @@ -64,7 +66,7 @@ public class ServiceBasedPdfExportPageEvent extends PdfPageEventHelper { try { r.accept(event); } catch (Exception e) { - Logger.defaultLogError("Failed to invoke PdfExportPageEvent::" + eventName + " for " + event.toString(), e); + LOGGER.error("Failed to invoke PdfExportPageEvent::" + eventName + " for " + event.toString(), e); } } diff --git a/bundles/org.simantics.fastlz/LICENSE b/bundles/org.simantics.fastlz/LICENSE index 299e8e19c..c7d105725 100644 --- a/bundles/org.simantics.fastlz/LICENSE +++ b/bundles/org.simantics.fastlz/LICENSE @@ -24,38 +24,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- - -LZ4 native/lz4{,hc}.{c,h} ---------------------------------------------------------------------- - LZ4 - Fast LZ compression algorithm - Copyright (C) 2011-2012, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html - - LZ4 source repository : http://code.google.com/p/lz4/ +THE SOFTWARE. 
\ No newline at end of file diff --git a/bundles/org.simantics.fastlz/README.txt b/bundles/org.simantics.fastlz/README.txt index 134b4e111..8f9e06cd3 100644 --- a/bundles/org.simantics.fastlz/README.txt +++ b/bundles/org.simantics.fastlz/README.txt @@ -1,12 +1,12 @@ The native/ -directory contains the sources for the -native parts of the FastLZ and LZ4 compressions algorithms. +native parts of the FastLZ compression algorithm. To compile in the native libraries follow these instructions: == Windows == -* Install MS Visual Studio 2010 with C++ support and Microsoft SDK 7.1 to get 64-bit compiler -* Open native/vs2010/fastlz.sln in visual studio +* Install MS Visual Studio 2012 with C++ support and Microsoft Platform SDK for Windows Server 2003 R2 to get 64-bit compiler +* Open native/vs2012/fastlz.sln in Visual Studio 2012 * Select ''Batch Build'' from the solution context menu, select Win32 + x64 Release and press ''Rebuild'' * The build will copy the resulting fastlz-windows-{x86,x86_64}.dll into src/ diff --git a/bundles/org.simantics.fastlz/native/Makefile b/bundles/org.simantics.fastlz/native/Makefile deleted file mode 100644 index 8a2696aef..000000000 --- a/bundles/org.simantics.fastlz/native/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -OS := $(shell uname) - -ifeq ($(OS),Linux) - OUTPUT32 = lz4demo32 - OUTPUT64 = lz4demo64 -else - OUTPUT32 = LZ4Demo32.exe - OUTPUT64 = LZ4Demo64.exe -endif - -all: lz4demo64 lz4demo32 - -lz4demo64: lz4.c lz4.h lz4hc.c lz4hc.h bench.c lz4demo.c - gcc -O3 -I. -std=c99 -Wall -W -Wundef -Wno-implicit-function-declaration lz4hc.c lz4.c bench.c lz4demo.c -o $(OUTPUT64) - -lz4demo32: lz4.c lz4.h lz4hc.c lz4hc.h bench.c lz4demo.c - gcc -m32 -Os -march=native -I. 
-std=c99 -Wall -W -Wundef -Wno-implicit-function-declaration lz4hc.c lz4.c bench.c lz4demo.c -o $(OUTPUT32) - -clean: - rm -f core *.o $(OUTPUT32) $(OUTPUT64) diff --git a/bundles/org.simantics.fastlz/native/compile-x64.bat b/bundles/org.simantics.fastlz/native/compile-x64.bat index c8628b88a..2eb226eb0 100644 --- a/bundles/org.simantics.fastlz/native/compile-x64.bat +++ b/bundles/org.simantics.fastlz/native/compile-x64.bat @@ -11,6 +11,6 @@ @rem *************************************************************************** @echo off -cl /O2 /Oi /GL /I "%JAVA_HOME%/include/win32" /I "%JAVA_HOME%/include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_USRDLL" /D "VC64_EXPORTS" /D "_UNICODE" /D "UNICODE" /D "_WINDLL" /EHsc /LD /Gy /GS- /W3 /nologo /c /Zi /TC /errorReport:prompt fastlz.c lz4.c lz4hc.c jniWrapper.c +cl /O2 /Oi /GL /I "%JAVA_HOME%/include/win32" /I "%JAVA_HOME%/include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_USRDLL" /D "VC64_EXPORTS" /D "_UNICODE" /D "UNICODE" /D "_WINDLL" /EHsc /LD /Gy /GS- /W3 /nologo /c /Zi /TC /errorReport:prompt fastlz.c jniWrapper.c -link /OUT:"..\src\fastlz-windows-x86_64.dll" /INCREMENTAL:NO /NOLOGO /DLL /MANIFEST /MANIFESTFILE:"..\fastlz-windows-x86_64.dll.intermediate.manifest" /MANIFESTUAC:"level='asInvoker' uiAccess='false'" /PDB:"fastlz-windows-x86_64.pdb" /SUBSYSTEM:WINDOWS /OPT:REF /OPT:ICF /LTCG /DYNAMICBASE /NXCOMPAT /MACHINE:X64 /ERRORREPORT:PROMPT kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib fastlz.obj lz4.obj lz4hc.obj jniWrapper.obj +link /OUT:"..\src\fastlz-windows-x86_64.dll" /INCREMENTAL:NO /NOLOGO /DLL /MANIFEST /MANIFESTFILE:"..\fastlz-windows-x86_64.dll.intermediate.manifest" /MANIFESTUAC:"level='asInvoker' uiAccess='false'" /PDB:"fastlz-windows-x86_64.pdb" /SUBSYSTEM:WINDOWS /OPT:REF /OPT:ICF /LTCG /DYNAMICBASE /NXCOMPAT /MACHINE:X64 /ERRORREPORT:PROMPT kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib 
advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib fastlz.obj jniWrapper.obj diff --git a/bundles/org.simantics.fastlz/native/compile-x86.bat b/bundles/org.simantics.fastlz/native/compile-x86.bat index 0e9570786..0141073b7 100644 --- a/bundles/org.simantics.fastlz/native/compile-x86.bat +++ b/bundles/org.simantics.fastlz/native/compile-x86.bat @@ -11,6 +11,6 @@ @rem *************************************************************************** @echo off -cl /O2 /Oi /GL /I "%JAVA_HOME%/include/win32" /I "%JAVA_HOME%/include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_USRDLL" /D "_UNICODE" /D "UNICODE" /D "_WINDLL" /EHsc /LD /Gy /GS- /W3 /nologo /c /Zi /TC /errorReport:prompt fastlz.c lz4.c lz4hc.c jniWrapper.c +cl /O2 /Oi /GL /I "%JAVA_HOME%/include/win32" /I "%JAVA_HOME%/include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_USRDLL" /D "_UNICODE" /D "UNICODE" /D "_WINDLL" /EHsc /LD /Gy /GS- /W3 /nologo /c /Zi /TC /errorReport:prompt fastlz.c jniWrapper.c -link /OUT:"..\src\fastlz-windows-x86.dll" /INCREMENTAL:NO /NOLOGO /DLL /MANIFEST /MANIFESTFILE:"..\fastlz-windows-x86.dll.intermediate.manifest" /MANIFESTUAC:"level='asInvoker' uiAccess='false'" /PDB:"fastlz-windows-x86.pdb" /SUBSYSTEM:WINDOWS /OPT:REF /OPT:ICF /LTCG /DYNAMICBASE /NXCOMPAT /MACHINE:X86 /ERRORREPORT:PROMPT kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib fastlz.obj lz4.obj lz4hc.obj jniWrapper.obj +link /OUT:"..\src\fastlz-windows-x86.dll" /INCREMENTAL:NO /NOLOGO /DLL /MANIFEST /MANIFESTFILE:"..\fastlz-windows-x86.dll.intermediate.manifest" /MANIFESTUAC:"level='asInvoker' uiAccess='false'" /PDB:"fastlz-windows-x86.pdb" /SUBSYSTEM:WINDOWS /OPT:REF /OPT:ICF /LTCG /DYNAMICBASE /NXCOMPAT /MACHINE:X86 /ERRORREPORT:PROMPT kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib fastlz.obj jniWrapper.obj diff 
--git a/bundles/org.simantics.fastlz/native/compile.bat b/bundles/org.simantics.fastlz/native/compile.bat index 22968b87c..7904b5198 100644 --- a/bundles/org.simantics.fastlz/native/compile.bat +++ b/bundles/org.simantics.fastlz/native/compile.bat @@ -9,6 +9,6 @@ @rem Contributors: @rem VTT Technical Research Centre of Finland - initial API and implementation @rem *************************************************************************** -rem gcc -mno-cygwin "-I$jdk/include" "-I$jdk$jdk/include/win32" -Wl,--add-stdcall-alias -shared -o ../src/fastlz-windows-x86.dll fastlz.c lz4.c lz4hc.c jniWrapper.c -gcc -mno-cygwin "-I%JAVA_HOME%/include" "-I%JAVA_HOME%/include/win32" -Wl,--add-stdcall-alias -shared -o ../src/fastlz-windows-x86.dll fastlz.c lz4.c lz4hc.c jniWrapper.c +rem gcc -mno-cygwin "-I$jdk/include" "-I$jdk$jdk/include/win32" -Wl,--add-stdcall-alias -shared -o ../src/fastlz-windows-x86.dll fastlz.c jniWrapper.c +gcc -mno-cygwin "-I%JAVA_HOME%/include" "-I%JAVA_HOME%/include/win32" -Wl,--add-stdcall-alias -shared -o ../src/fastlz-windows-x86.dll fastlz.c jniWrapper.c gcc -o fastlz_test.exe fastlz.c fastlz_read.c fastlz_write.c fastlz_test.c diff --git a/bundles/org.simantics.fastlz/native/compile.sh b/bundles/org.simantics.fastlz/native/compile.sh index 05aac11f9..21899d5dd 100755 --- a/bundles/org.simantics.fastlz/native/compile.sh +++ b/bundles/org.simantics.fastlz/native/compile.sh @@ -54,7 +54,7 @@ output="../src/libfastlz-${kernel}-${arch}" case $kernel in darwin*) - output="${output}.jnilib" + output="${output}.dylib" ;; *) output="${output}.so" @@ -65,7 +65,7 @@ echo "Architecture: $arch" echo "Output library: $output" echo "Compiler options: $options" -gcc ${options} -o ${output} fastlz.c lz4.c lz4hc.c jniWrapper.c +gcc ${options} -o ${output} fastlz.c jniWrapper.c size=`ls -l $output | cut -d " " -f 5` echo "library size before stripping: $size" diff --git a/bundles/org.simantics.fastlz/native/jniWrapper.c 
b/bundles/org.simantics.fastlz/native/jniWrapper.c index 0c696e1f5..6f497627a 100644 --- a/bundles/org.simantics.fastlz/native/jniWrapper.c +++ b/bundles/org.simantics.fastlz/native/jniWrapper.c @@ -87,71 +87,3 @@ JNIEXPORT jint JNICALL Java_org_simantics_fastlz_FastLZ_decompressCluster(JNIEnv } -JNIEXPORT jint JNICALL Java_org_simantics_fastlz_LZ4_compress(JNIEnv* env, jclass clazz, - jobject input, jint inputOffset, jint length, - jobject output, jint outputOffset) { - void* inputAddress = (char*)(*env)->GetDirectBufferAddress(env, input) + inputOffset; - void* outputAddress = (char*)(*env)->GetDirectBufferAddress(env, output) + outputOffset; - return LZ4_compress(inputAddress, outputAddress, length); -} - -JNIEXPORT jint JNICALL Java_org_simantics_fastlz_LZ4_decompress(JNIEnv* env, jclass clazz, - jobject input, jint inputOffset, jint length, - jobject output, jint outputOffset, jint maxout) { - void* inputAddress = (char*)(*env)->GetDirectBufferAddress(env, input) + inputOffset; - void* outputAddress = (char*)(*env)->GetDirectBufferAddress(env, output) + outputOffset; - return LZ4_uncompress_unknownOutputSize(inputAddress, outputAddress, length, maxout); -} - -JNIEXPORT jint JNICALL Java_org_simantics_fastlz_LZ4_decompressCluster(JNIEnv* env, jclass clazz, jobject deflated, jint deflatedSize, jint inflatedSize, jobjectArray arrays) { - - static char *inflateBuffer = 0; - static int inflateBufferSize = 0; - - int ll, il, bl; - - jlongArray longs; - jintArray ints; - jbyteArray bytes; - - char *input = (char*)(*env)->GetDirectBufferAddress(env, deflated); - char *address; - - if(inflateBufferSize < inflatedSize) { - if(!inflateBuffer) { - if(inflatedSize < INITIAL_SIZE) inflatedSize = INITIAL_SIZE; - inflateBuffer = malloc(inflatedSize); - inflateBufferSize = inflatedSize; - } else { - if(inflateBuffer) free(inflateBuffer); - inflateBuffer = malloc(inflatedSize); - inflateBufferSize = inflatedSize; - } - } - - address = inflateBuffer; - - 
LZ4_uncompress_unknownOutputSize(input, inflateBuffer, deflatedSize, inflateBufferSize); - - ll = *(int *)address; - longs = (*env)->NewLongArray(env, ll); - (*env)->SetLongArrayRegion(env, longs, 0, ll, (const jlong *)(address + 4)); - (*env)->SetObjectArrayElement(env, arrays, 0, longs); - - address += 4 + 8 * ll; - - il = *(int *)address; - ints = (*env)->NewIntArray(env, il); - (*env)->SetIntArrayRegion(env, ints, 0, il, (const jint *)(address + 4)); - (*env)->SetObjectArrayElement(env, arrays, 1, ints); - - address += 4 * il + 4; - - bl = *(int *)address; - bytes = (*env)->NewByteArray(env, bl); - (*env)->SetByteArrayRegion(env, bytes, 0, bl, (const jbyte *)(address + 4)); - (*env)->SetObjectArrayElement(env, arrays, 2, bytes); - - return 0; - -} diff --git a/bundles/org.simantics.fastlz/native/lz4.c b/bundles/org.simantics.fastlz/native/lz4.c deleted file mode 100644 index 06e282970..000000000 --- a/bundles/org.simantics.fastlz/native/lz4.c +++ /dev/null @@ -1,819 +0,0 @@ -/* - LZ4 - Fast LZ compression algorithm - Copyright (C) 2011-2012, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html - - LZ4 source repository : http://code.google.com/p/lz4/ -*/ - -//************************************** -// Tuning parameters -//************************************** -// COMPRESSIONLEVEL : -// Increasing this value improves compression ratio -// Lowering this value reduces memory usage -// Reduced memory usage typically improves speed, due to cache effect (ex : L1 32KB for Intel, L1 64KB for AMD) -// Memory usage formula : N->2^(N+2) Bytes (examples : 12 -> 16KB ; 17 -> 512KB) -#define COMPRESSIONLEVEL 12 - -// NOTCOMPRESSIBLE_CONFIRMATION : -// Decreasing this value will make the algorithm skip faster data segments considered "incompressible" -// This may decrease compression ratio dramatically, but will be faster on incompressible data -// Increasing this value will make the algorithm search more before declaring a segment "incompressible" -// This could improve compression a bit, but will be slower on incompressible data -// The default value (6) is recommended -#define NOTCOMPRESSIBLE_CONFIRMATION 6 - -// LZ4_COMPRESSMIN : -// Compression function will *fail* if it is not successful at compressing input by at least LZ4_COMPRESSMIN bytes -// Since the compression function stops working prematurely, it results in a speed gain -// The output however is unusable. Compression function result will be zero. 
-// Default : 0 = disabled -#define LZ4_COMPRESSMIN 0 - -// BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE : -// This will provide a boost to performance for big endian cpu, but the resulting compressed stream will be incompatible with little-endian CPU. -// You can set this option to 1 in situations where data will stay within closed environment -// This option is useless on Little_Endian CPU (such as x86) -//#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 - - - -//************************************** -// CPU Feature Detection -//************************************** -// 32 or 64 bits ? -#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || defined(__LP64__) || defined(_LP64) ) // Detects 64 bits mode -# define LZ4_ARCH64 1 -#else -# define LZ4_ARCH64 0 -#endif - -// Little Endian or Big Endian ? -// Note : overwrite the below #define if you know your architecture endianess -#if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || defined(__PPC) || defined(PPC) || defined(__powerpc__) || defined(__powerpc) || defined(powerpc) || ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))) ) -# define LZ4_BIG_ENDIAN 1 -#else -// Little Endian assumed. PDP Endian and other very rare endian format are unsupported. -#endif - -// Unaligned memory access is automatically enabled for "common" CPU, such as x86. 
-// For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected -// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance -#if defined(__ARM_FEATURE_UNALIGNED) -# define LZ4_FORCE_UNALIGNED_ACCESS 1 -#endif - -// Define this parameter if your target system or compiler does not support hardware bit count -#if defined(_MSC_VER) && defined(_WIN32_WCE) // Visual Studio for Windows CE does not support Hardware bit count -# define LZ4_FORCE_SW_BITCOUNT -#endif - - -//************************************** -// Compiler Options -//************************************** -#if __STDC_VERSION__ >= 199901L // C99 -/* "restrict" is a known keyword */ -#else -# define restrict // Disable restrict -#endif - -#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) - -#ifdef _MSC_VER // Visual Studio -# define inline __forceinline // Visual is not C99, but supports some kind of inline -# if LZ4_ARCH64 // 64-bit -# pragma intrinsic(_BitScanForward64) // For Visual 2005 -# pragma intrinsic(_BitScanReverse64) // For Visual 2005 -# else -# pragma intrinsic(_BitScanForward) // For Visual 2005 -# pragma intrinsic(_BitScanReverse) // For Visual 2005 -# endif -#endif - -#ifdef _MSC_VER -# define lz4_bswap16(x) _byteswap_ushort(x) -#else -# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))) -#endif - -#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__) -# define expect(expr,value) (__builtin_expect ((expr),(value)) ) -#else -# define expect(expr,value) (expr) -#endif - -#define likely(expr) expect((expr) != 0, 1) -#define unlikely(expr) expect((expr) != 0, 0) - - -//************************************** -// Includes -//************************************** -#include // for malloc -#include // for memset -#include "lz4.h" - - -//************************************** -// Basic Types 
-//************************************** -#if defined(_MSC_VER) // Visual Studio does not support 'stdint' natively -# define BYTE unsigned __int8 -# define U16 unsigned __int16 -# define U32 unsigned __int32 -# define S32 __int32 -# define U64 unsigned __int64 -#else -# include -# define BYTE uint8_t -# define U16 uint16_t -# define U32 uint32_t -# define S32 int32_t -# define U64 uint64_t -#endif - -#ifndef LZ4_FORCE_UNALIGNED_ACCESS -# pragma pack(push, 1) -#endif - -typedef struct _U16_S { U16 v; } U16_S; -typedef struct _U32_S { U32 v; } U32_S; -typedef struct _U64_S { U64 v; } U64_S; - -#ifndef LZ4_FORCE_UNALIGNED_ACCESS -# pragma pack(pop) -#endif - -#define A64(x) (((U64_S *)(x))->v) -#define A32(x) (((U32_S *)(x))->v) -#define A16(x) (((U16_S *)(x))->v) - - -//************************************** -// Constants -//************************************** -#define MINMATCH 4 - -#define HASH_LOG COMPRESSIONLEVEL -#define HASHTABLESIZE (1 << HASH_LOG) -#define HASH_MASK (HASHTABLESIZE - 1) - -#define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION>2?NOTCOMPRESSIBLE_CONFIRMATION:2) -#define STACKLIMIT 13 -#define HEAPMODE (HASH_LOG>STACKLIMIT) // Defines if memory is allocated into the stack (local variable), or into the heap (malloc()). 
-#define COPYLENGTH 8 -#define LASTLITERALS 5 -#define MFLIMIT (COPYLENGTH+MINMATCH) -#define MINLENGTH (MFLIMIT+1) - -#define MAXD_LOG 16 -#define MAX_DISTANCE ((1 << MAXD_LOG) - 1) - -#define ML_BITS 4 -#define ML_MASK ((1U<> ((MINMATCH*8)-HASH_LOG)) -#define LZ4_HASH_VALUE(p) LZ4_HASH_FUNCTION(A32(p)) -#define LZ4_WILDCOPY(s,d,e) do { LZ4_COPYPACKET(s,d) } while (d>3); - #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_clzll(val) >> 3); - #else - int r; - if (!(val>>32)) { r=4; } else { r=0; val>>=32; } - if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } - r += (!val); - return r; - #endif -#else - #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r = 0; - _BitScanForward64( &r, val ); - return (int)(r>>3); - #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_ctzll(val) >> 3); - #else - static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; - return DeBruijnBytePos[((U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58]; - #endif -#endif -} - -#else - -inline static int LZ4_NbCommonBytes (register U32 val) -{ -#if defined(LZ4_BIG_ENDIAN) - #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r = 0; - _BitScanReverse( &r, val ); - return (int)(r>>3); - #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_clz(val) >> 3); - #else - int r; - if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } - r += (!val); - return r; - #endif -#else - #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r = 0; - _BitScanForward( &r, val ); - return (int)(r>>3); - #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_ctz(val) >> 3); - 
#else - static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; - return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; - #endif -#endif -} - -#endif - - -//**************************** -// Public functions -//**************************** - -int LZ4_compressBound(int isize) -{ - return (isize + (isize/255) + 16); -} - - - -//****************************** -// Compression functions -//****************************** - -int LZ4_compressCtx(void** ctx, - const char* source, - char* dest, - int isize) -{ -#if HEAPMODE - struct refTables *srt = (struct refTables *) (*ctx); - HTYPE* HashTable; -#else - HTYPE HashTable[HASHTABLESIZE] = {0}; -#endif - - const BYTE* ip = (BYTE*) source; - INITBASE(base); - const BYTE* anchor = ip; - const BYTE* const iend = ip + isize; - const BYTE* const mflimit = iend - MFLIMIT; -#define matchlimit (iend - LASTLITERALS) - - BYTE* op = (BYTE*) dest; - - int len, length; - const int skipStrength = SKIPSTRENGTH; - U32 forwardH; - - - // Init - if (isizehashTable); - memset((void*)HashTable, 0, sizeof(srt->hashTable)); -#else - (void) ctx; -#endif - - - // First Byte - HashTable[LZ4_HASH_VALUE(ip)] = ip - base; - ip++; forwardH = LZ4_HASH_VALUE(ip); - - // Main Loop - for ( ; ; ) - { - int findMatchAttempts = (1U << skipStrength) + 3; - const BYTE* forwardIp = ip; - const BYTE* ref; - BYTE* token; - - // Find a match - do { - U32 h = forwardH; - int step = findMatchAttempts++ >> skipStrength; - ip = forwardIp; - forwardIp = ip + step; - - if unlikely(forwardIp > mflimit) { goto _last_literals; } - - forwardH = LZ4_HASH_VALUE(forwardIp); - ref = base + HashTable[h]; - HashTable[h] = ip - base; - - } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip))); - - // Catch up - while ((ip>anchor) && (ref>(BYTE*)source) && unlikely(ip[-1]==ref[-1])) { ip--; ref--; } - - // Encode Literal length - length = ip - anchor; - token = op++; - if 
(length>=(int)RUN_MASK) { *token=(RUN_MASK< 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; } - else *token = (length<=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; } - else *token += len; - - // Test end of chunk - if (ip > mflimit) { anchor = ip; break; } - - // Fill table - HashTable[LZ4_HASH_VALUE(ip-2)] = ip - 2 - base; - - // Test next position - ref = base + HashTable[LZ4_HASH_VALUE(ip)]; - HashTable[LZ4_HASH_VALUE(ip)] = ip - base; - if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; } - - // Prepare next loop - anchor = ip++; - forwardH = LZ4_HASH_VALUE(ip); - } - -_last_literals: - // Encode Last Literals - { - int lastRun = iend - anchor; - if ((LZ4_COMPRESSMIN>0) && (((op - (BYTE*)dest) + lastRun + 1 + ((lastRun-15)/255)) > isize - LZ4_COMPRESSMIN)) return 0; - if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK< 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } - else *op++ = (lastRun<> ((MINMATCH*8)-HASHLOG64K)) -#define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p)) -int LZ4_compress64kCtx(void** ctx, - const char* source, - char* dest, - int isize) -{ -#if HEAPMODE - struct refTables *srt = (struct refTables *) (*ctx); - U16* HashTable; -#else - U16 HashTable[HASH64KTABLESIZE] = {0}; -#endif - - const BYTE* ip = (BYTE*) source; - const BYTE* anchor = ip; - const BYTE* const base = ip; - const BYTE* const iend = ip + isize; - const BYTE* const mflimit = iend - MFLIMIT; -#define matchlimit (iend - LASTLITERALS) - - BYTE* op = (BYTE*) dest; - - int len, length; - const int skipStrength = SKIPSTRENGTH; - U32 forwardH; - - - // Init - if (isizehashTable); - memset((void*)HashTable, 0, sizeof(srt->hashTable)); -#else - (void) ctx; -#endif - - - // First Byte - ip++; forwardH = LZ4_HASH64K_VALUE(ip); - - // Main Loop - for ( ; ; ) - { - int findMatchAttempts = (1U << 
skipStrength) + 3; - const BYTE* forwardIp = ip; - const BYTE* ref; - BYTE* token; - - // Find a match - do { - U32 h = forwardH; - int step = findMatchAttempts++ >> skipStrength; - ip = forwardIp; - forwardIp = ip + step; - - if (forwardIp > mflimit) { goto _last_literals; } - - forwardH = LZ4_HASH64K_VALUE(forwardIp); - ref = base + HashTable[h]; - HashTable[h] = ip - base; - - } while (A32(ref) != A32(ip)); - - // Catch up - while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; } - - // Encode Literal length - length = ip - anchor; - token = op++; - if (length>=(int)RUN_MASK) { *token=(RUN_MASK< 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; } - else *token = (length<=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; } - else *token += len; - - // Test end of chunk - if (ip > mflimit) { anchor = ip; break; } - - // Fill table - HashTable[LZ4_HASH64K_VALUE(ip-2)] = ip - 2 - base; - - // Test next position - ref = base + HashTable[LZ4_HASH64K_VALUE(ip)]; - HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base; - if (A32(ref) == A32(ip)) { token = op++; *token=0; goto _next_match; } - - // Prepare next loop - anchor = ip++; - forwardH = LZ4_HASH64K_VALUE(ip); - } - -_last_literals: - // Encode Last Literals - { - int lastRun = iend - anchor; - if ((LZ4_COMPRESSMIN>0) && (((op - (BYTE*)dest) + lastRun + 1 + ((lastRun-15)/255)) > isize - LZ4_COMPRESSMIN)) return 0; - if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK< 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } - else *op++ = (lastRun<>ML_BITS)) == RUN_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; } - - // copy literals - cpy = op+length; - if unlikely(cpy>oend-COPYLENGTH) - { - if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer - memcpy(op, ip, length); - ip += length; - break; // Necessarily EOF - } - LZ4_WILDCOPY(ip, op, 
cpy); ip -= (op-cpy); op = cpy; - - // get offset - LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2; - if (ref < (BYTE* const)dest) goto _output_error; // Error : offset create reference outside destination buffer - - // get matchlength - if ((length=(token&ML_MASK)) == ML_MASK) { for (;*ip==255;length+=255) {ip++;} length += *ip++; } - - // copy repeated sequence - if unlikely(op-refoend-COPYLENGTH) - { - if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer - LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH)); - while(op>ML_BITS)) == RUN_MASK) { int s=255; while ((ipoend-COPYLENGTH) || (ip+length>iend-COPYLENGTH)) - { - if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer - if (ip+length > iend) goto _output_error; // Error : request to read beyond source buffer - memcpy(op, ip, length); - op += length; - ip += length; - if (ipoend-COPYLENGTH) - { - if (cpy > oend) goto _output_error; // Error : request to write outside of destination buffer - LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH)); - while(op= 255 -- 10 : (=280 - 15 - 255) ) remaining length to reach 280 - -Example 3 : A length of 15 will be represented as : -- 15 : value for the 4-bits High field -- 0 : (=15-15) yes, the zero must be output - -Following the token and optional length bytes, are the literals themselves. -They are exactly as numerous as previously decoded (length of literals). -It's possible that there are zero literal. - - -Following the literals is the match copy operation. - -It starts by the offset. -This is a 2 bytes value, in little endian format : -the lower byte is the first one in the stream. - -The offset represents the position of the match to be copied from. -1 means "current position - 1 byte". -The maximum offset value is 65535, 65536 cannot be coded. -Note that 0 is an invalid value, not used. - -Then we need to extract the match length. -For this, we use the second token field, the low 4-bits. 
-Value, obviously, ranges from 0 to 15. -However here, 0 means that the copy operation will be minimal. -The minimum length of a match, called minmatch, is 4. -As a consequence, a 0 value means 4 bytes, and a value of 15 means 19+ bytes. -Similar to literal length, on reaching the highest possible value (15), -we output additional bytes, one at a time, with values ranging from 0 to 255. -They are added to total to provide the final match length. -A 255 value means there is another byte to read and add. -There is no limit to the number of optional bytes that can be output this way. -(This points towards a maximum achievable compression ratio of ~250). - -With the offset and the matchlength, -the decoder can now proceed to copy the data from the already decoded buffer. -On decoding the matchlength, we reach the end of the compressed sequence, -and therefore start another one. - - --- Parsing restrictions -- - -There are specific parsing rules to respect in order to remain compatible -with assumptions made by the decoder : -1) The last 5 bytes are always literals -2) The last match must start at least 12 bytes before end of stream -Consequently, a file with less than 13 bytes cannot be compressed. -These rules are in place to ensure that the decoder -will never read beyond the input buffer, nor write beyond the output buffer. - -Note that the last sequence is also incomplete, -and stops right after literals. - - --- Additional notes -- - -There is no assumption nor limits to the way the compressor -searches and selects matches within the source stream. -It could be a fast scan, a multi-probe, a full search using BST, -standard hash chains or MMC, well whatever. - -Advanced parsing strategies can also be implemented, such as lazy match, -or full optimal parsing. - -All these trade-off offer distinctive speed/memory/compression advantages. 
-Whatever the method used by the compressor, its result will be decodable -by any LZ4 decoder if it follows the format specification described above. - diff --git a/bundles/org.simantics.fastlz/native/lz4hc.c b/bundles/org.simantics.fastlz/native/lz4hc.c deleted file mode 100644 index cca755c26..000000000 --- a/bundles/org.simantics.fastlz/native/lz4hc.c +++ /dev/null @@ -1,663 +0,0 @@ -/* - LZ4 HC - High Compression Mode of LZ4 - Copyright (C) 2011-2012, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - You can contact the author at : - - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html - - LZ4 source repository : http://code.google.com/p/lz4/ -*/ - - -//************************************** -// CPU Feature Detection -//************************************** -// 32 or 64 bits ? -#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || defined(__LP64__) || defined(_LP64) ) // Detects 64 bits mode -#define LZ4_ARCH64 1 -#else -#define LZ4_ARCH64 0 -#endif - -// Little Endian or Big Endian ? -#if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || defined(__PPC) || defined(PPC) || defined(__powerpc__) || defined(__powerpc) || defined(powerpc) || ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))) ) -#define LZ4_BIG_ENDIAN 1 -#else -// Little Endian assumed. PDP Endian and other very rare endian format are unsupported. -#endif - -// Unaligned memory access is automatically enabled for "common" CPU, such as x86. 
-// For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected -// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance -#if defined(__ARM_FEATURE_UNALIGNED) -#define LZ4_FORCE_UNALIGNED_ACCESS 1 -#endif - - -//************************************** -// Compiler Options -//************************************** -#if __STDC_VERSION__ >= 199901L // C99 - /* "restrict" is a known keyword */ -#else -#define restrict // Disable restrict -#endif - -#ifdef _MSC_VER -#define inline __forceinline // Visual is not C99, but supports some kind of inline -#endif - -#ifdef _MSC_VER // Visual Studio -#define bswap16(x) _byteswap_ushort(x) -#else -#define bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))) -#endif - - -//************************************** -// Includes -//************************************** -#include // calloc, free -#include // memset, memcpy -#include "lz4hc.h" - -#define ALLOCATOR(s) calloc(1,s) -#define FREEMEM free -#define MEM_INIT memset - - -//************************************** -// Basic Types -//************************************** -#if defined(_MSC_VER) // Visual Studio does not support 'stdint' natively -#define BYTE unsigned __int8 -#define U16 unsigned __int16 -#define U32 unsigned __int32 -#define S32 __int32 -#define U64 unsigned __int64 -#else -#include -#define BYTE uint8_t -#define U16 uint16_t -#define U32 uint32_t -#define S32 int32_t -#define U64 uint64_t -#endif - -#ifndef LZ4_FORCE_UNALIGNED_ACCESS -#pragma pack(push, 1) -#endif - -typedef struct _U16_S { U16 v; } U16_S; -typedef struct _U32_S { U32 v; } U32_S; -typedef struct _U64_S { U64 v; } U64_S; - -#ifndef LZ4_FORCE_UNALIGNED_ACCESS -#pragma pack(pop) -#endif - -#define A64(x) (((U64_S *)(x))->v) -#define A32(x) (((U32_S *)(x))->v) -#define A16(x) (((U16_S *)(x))->v) - - -//************************************** -// 
Constants -//************************************** -#define MINMATCH 4 - -#define DICTIONARY_LOGSIZE 16 -#define MAXD (1<> ((MINMATCH*8)-HASH_LOG)) -#define HASH_VALUE(p) HASH_FUNCTION(*(U32*)(p)) -#define HASH_POINTER(p) (HashTable[HASH_VALUE(p)] + base) -#define DELTANEXT(p) chainTable[(size_t)(p) & MAXD_MASK] -#define GETNEXT(p) ((p) - (size_t)DELTANEXT(p)) -#define ADD_HASH(p) { size_t delta = (p) - HASH_POINTER(p); if (delta>MAX_DISTANCE) delta = MAX_DISTANCE; DELTANEXT(p) = (U16)delta; HashTable[HASH_VALUE(p)] = (p) - base; } - - -//************************************** -// Private functions -//************************************** -#if LZ4_ARCH64 - -inline static int LZ4_NbCommonBytes (register U64 val) -{ -#if defined(LZ4_BIG_ENDIAN) - #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r = 0; - _BitScanReverse64( &r, val ); - return (int)(r>>3); - #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_clzll(val) >> 3); - #else - int r; - if (!(val>>32)) { r=4; } else { r=0; val>>=32; } - if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } - r += (!val); - return r; - #endif -#else - #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r = 0; - _BitScanForward64( &r, val ); - return (int)(r>>3); - #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_ctzll(val) >> 3); - #else - static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; - return DeBruijnBytePos[((U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58]; - #endif -#endif -} - -#else - -inline static int LZ4_NbCommonBytes (register U32 val) -{ -#if defined(LZ4_BIG_ENDIAN) - #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long 
r = 0; - _BitScanReverse( &r, val ); - return (int)(r>>3); - #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_clz(val) >> 3); - #else - int r; - if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } - r += (!val); - return r; - #endif -#else - #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r = 0; - _BitScanForward( &r, val ); - return (int)(r>>3); - #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_ctz(val) >> 3); - #else - static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; - return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; - #endif -#endif -} - -#endif - - -inline static int LZ4HC_Init (LZ4HC_Data_Structure* hc4, const BYTE* base) -{ - MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable)); - MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable)); - hc4->nextToUpdate = base + LZ4_ARCH64; - hc4->base = base; - return 1; -} - - -inline static void* LZ4HC_Create (const BYTE* base) -{ - void* hc4 = ALLOCATOR(sizeof(LZ4HC_Data_Structure)); - - LZ4HC_Init (hc4, base); - return hc4; -} - - -inline static int LZ4HC_Free (void** LZ4HC_Data) -{ - FREEMEM(*LZ4HC_Data); - *LZ4HC_Data = NULL; - return (1); -} - - -inline static void LZ4HC_Insert (LZ4HC_Data_Structure* hc4, const BYTE* ip) -{ - U16* chainTable = hc4->chainTable; - HTYPE* HashTable = hc4->hashTable; - INITBASE(base,hc4->base); - - while(hc4->nextToUpdate < ip) - { - ADD_HASH(hc4->nextToUpdate); - hc4->nextToUpdate++; - } -} - - -inline static int LZ4HC_InsertAndFindBestMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* const matchlimit, const BYTE** matchpos) -{ - U16* const chainTable = hc4->chainTable; - HTYPE* const HashTable = hc4->hashTable; - const BYTE* ref; - INITBASE(base,hc4->base); - int 
nbAttempts=MAX_NB_ATTEMPTS; - int ml=0; - - // HC4 match finder - LZ4HC_Insert(hc4, ip); - ref = HASH_POINTER(ip); - while ((ref > (ip-MAX_DISTANCE)) && (nbAttempts)) - { - nbAttempts--; - if (*(ref+ml) == *(ip+ml)) - if (*(U32*)ref == *(U32*)ip) - { - const BYTE* reft = ref+MINMATCH; - const BYTE* ipt = ip+MINMATCH; - - while (ipt ml) { ml = ipt-ip; *matchpos = ref; } - } - ref = GETNEXT(ref); - } - - return ml; -} - - -inline static int LZ4HC_InsertAndGetWiderMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* startLimit, const BYTE* matchlimit, int longest, const BYTE** matchpos, const BYTE** startpos) -{ - U16* const chainTable = hc4->chainTable; - HTYPE* const HashTable = hc4->hashTable; - INITBASE(base,hc4->base); - const BYTE* ref; - int nbAttempts = MAX_NB_ATTEMPTS; - int delta = ip-startLimit; - - // First Match - LZ4HC_Insert(hc4, ip); - ref = HASH_POINTER(ip); - - while ((ref > ip-MAX_DISTANCE) && (ref >= hc4->base) && (nbAttempts)) - { - nbAttempts--; - if (*(startLimit + longest) == *(ref - delta + longest)) - if (*(U32*)ref == *(U32*)ip) - { - const BYTE* reft = ref+MINMATCH; - const BYTE* ipt = ip+MINMATCH; - const BYTE* startt = ip; - - while (iptstartLimit) && (reft > hc4->base) && (startt[-1] == reft[-1])) {startt--; reft--;} - - if ((ipt-startt) > longest) - { - longest = ipt-startt; - *matchpos = reft; - *startpos = startt; - } - } - ref = GETNEXT(ref); - } - - return longest; -} - - -inline static int LZ4_encodeSequence(const BYTE** ip, BYTE** op, const BYTE** anchor, int ml, const BYTE* ref) -{ - int length, len; - BYTE* token; - - // Encode Literal length - length = *ip - *anchor; - token = (*op)++; - if (length>=(int)RUN_MASK) { *token=(RUN_MASK< 254 ; len-=255) *(*op)++ = 255; *(*op)++ = (BYTE)len; } - else *token = (length<=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *(*op)++ = 255; *(*op)++ = 255; } if (len > 254) { len-=255; *(*op)++ = 255; } *(*op)++ = (BYTE)len; } - else *token += len; - - // 
Prepare next loop - *ip += ml; - *anchor = *ip; - - return 0; -} - - -//**************************** -// Compression CODE -//**************************** - -int LZ4_compressHCCtx(LZ4HC_Data_Structure* ctx, - const char* source, - char* dest, - int isize) -{ - const BYTE* ip = (const BYTE*) source; - const BYTE* anchor = ip; - const BYTE* const iend = ip + isize; - const BYTE* const mflimit = iend - MFLIMIT; - const BYTE* const matchlimit = (iend - LASTLITERALS); - - BYTE* op = (BYTE*) dest; - - int ml, ml2, ml3, ml0; - const BYTE* ref=NULL; - const BYTE* start2=NULL; - const BYTE* ref2=NULL; - const BYTE* start3=NULL; - const BYTE* ref3=NULL; - const BYTE* start0; - const BYTE* ref0; - - ip++; - - // Main Loop - while (ip < mflimit) - { - ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, (&ref)); - if (!ml) { ip++; continue; } - - // saved, in case we would skip too much - start0 = ip; - ref0 = ref; - ml0 = ml; - -_Search2: - if (ip+ml < mflimit) - ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, ip + ml - 2, ip + 1, matchlimit, ml, &ref2, &start2); - else ml2=ml; - - if (ml2 == ml) // No better match - { - LZ4_encodeSequence(&ip, &op, &anchor, ml, ref); - continue; - } - - if (start0 < ip) - { - if (start2 < ip + ml0) // empirical - { - ip = start0; - ref = ref0; - ml = ml0; - } - } - - // Here, start0==ip - if ((start2 - ip) < 3) // First Match too small : removed - { - ml = ml2; - ip = start2; - ref =ref2; - goto _Search2; - } - -_Search3: - // Currently we have : - // ml2 > ml1, and - // ip1+3 <= ip2 (usually < ip1+ml1) - if ((start2 - ip) < OPTIMAL_ML) - { - int correction; - int new_ml = ml; - if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML; - if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = start2 - ip + ml2 - MINMATCH; - correction = new_ml - (start2 - ip); - if (correction > 0) - { - start2 += correction; - ref2 += correction; - ml2 -= correction; - } - } - // Now, we have start2 = ip+new_ml, with new_ml=min(ml, OPTIMAL_ML=18) - - if (start2 + ml2 < mflimit) - 
ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3); - else ml3=ml2; - - if (ml3 == ml2) // No better match : 2 sequences to encode - { - // ip & ref are known; Now for ml - if (start2 < ip+ml) - { - if ((start2 - ip) < OPTIMAL_ML) - { - int correction; - if (ml > OPTIMAL_ML) ml = OPTIMAL_ML; - if (ip+ml > start2 + ml2 - MINMATCH) ml = start2 - ip + ml2 - MINMATCH; - correction = ml - (start2 - ip); - if (correction > 0) - { - start2 += correction; - ref2 += correction; - ml2 -= correction; - } - } - else - { - ml = start2 - ip; - } - } - // Now, encode 2 sequences - LZ4_encodeSequence(&ip, &op, &anchor, ml, ref); - ip = start2; - LZ4_encodeSequence(&ip, &op, &anchor, ml2, ref2); - continue; - } - - if (start3 < ip+ml+3) // Not enough space for match 2 : remove it - { - if (start3 >= (ip+ml)) // can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 - { - if (start2 < ip+ml) - { - int correction = (ip+ml) - start2; - start2 += correction; - ref2 += correction; - ml2 -= correction; - if (ml2 < MINMATCH) - { - start2 = start3; - ref2 = ref3; - ml2 = ml3; - } - } - - LZ4_encodeSequence(&ip, &op, &anchor, ml, ref); - ip = start3; - ref = ref3; - ml = ml3; - - start0 = start2; - ref0 = ref2; - ml0 = ml2; - goto _Search2; - } - - start2 = start3; - ref2 = ref3; - ml2 = ml3; - goto _Search3; - } - - // OK, now we have 3 ascending matches; let's write at least the first one - // ip & ref are known; Now for ml - if (start2 < ip+ml) - { - if ((start2 - ip) < (int)ML_MASK) - { - int correction; - if (ml > OPTIMAL_ML) ml = OPTIMAL_ML; - if (ip + ml > start2 + ml2 - MINMATCH) ml = start2 - ip + ml2 - MINMATCH; - correction = ml - (start2 - ip); - if (correction > 0) - { - start2 += correction; - ref2 += correction; - ml2 -= correction; - } - } - else - { - ml = start2 - ip; - } - } - LZ4_encodeSequence(&ip, &op, &anchor, ml, ref); - - ip = start2; - ref = ref2; - ml = ml2; - - start2 = start3; - ref2 = ref3; - ml2 = 
ml3; - - goto _Search3; - - } - - // Encode Last Literals - { - int lastRun = iend - anchor; - if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK< 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } - else *op++ = (lastRun< + + + + Debug + Win32 + + + Debug + x64 + + + Release + Win32 + + + Release + x64 + + + + {2C249AD2-A0AE-4A88-8DCD-71F96133690E} + fastlz + Win32Proj + + + + DynamicLibrary + Unicode + true + v110 + + + DynamicLibrary + Unicode + v110 + + + DynamicLibrary + Unicode + true + v110 + + + DynamicLibrary + Unicode + v110 + + + + + + + + + + + + + + + + + + + <_ProjectFileVersion>10.0.30319.1 + $(SolutionDir)$(Configuration)\ + $(Configuration)\ + true + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + true + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + + + + Disabled + $(JAVA_HOME)\include;$(JAVA_HOME)\include\win32;%(AdditionalIncludeDirectories) + WIN32;_DEBUG;_WINDOWS;_USRDLL;FASTLZ_EXPORTS;%(PreprocessorDefinitions) + true + EnableFastChecks + MultiThreadedDebug + + + Level3 + EditAndContinue + + + true + Windows + MachineX86 + + + copy "$(TargetPath)" "$(ProjectDir)..\..\src\fastlz-windows-x86.dll" + + + + + X64 + + + Disabled + $(JAVA_HOME)\include;$(JAVA_HOME)\include\win32;%(AdditionalIncludeDirectories) + WIN32;_DEBUG;_WINDOWS;_USRDLL;FASTLZ_EXPORTS;%(PreprocessorDefinitions) + true + EnableFastChecks + MultiThreadedDebug + + + Level3 + ProgramDatabase + + + true + Windows + MachineX64 + + + copy "$(TargetPath)" "$(ProjectDir)..\..\src\fastlz-windows-x86_64.dll" + + + + + MaxSpeed + true + $(JAVA_HOME)\include;$(JAVA_HOME)\include\win32;%(AdditionalIncludeDirectories) + WIN32;NDEBUG;_WINDOWS;_USRDLL;FASTLZ_EXPORTS;%(PreprocessorDefinitions) + MultiThreaded + true + + + Level3 + ProgramDatabase + + + true + Windows + true + true + MachineX86 + + + copy "$(TargetPath)" 
"$(ProjectDir)..\..\src\fastlz-windows-x86.dll" + + + + + X64 + + + MaxSpeed + true + $(JAVA_HOME)\include;$(JAVA_HOME)\include\win32;%(AdditionalIncludeDirectories) + WIN32;NDEBUG;_WINDOWS;_USRDLL;FASTLZ_EXPORTS;%(PreprocessorDefinitions) + MultiThreaded + true + + + Level3 + ProgramDatabase + + + true + Windows + true + true + MachineX64 + + + copy "$(TargetPath)" "$(ProjectDir)..\..\src\fastlz-windows-x86_64.dll" + + + + + + + + + + + + + \ No newline at end of file diff --git a/bundles/org.simantics.fastlz/native/vs2012/fastlz.vcxproj.filters b/bundles/org.simantics.fastlz/native/vs2012/fastlz.vcxproj.filters new file mode 100644 index 000000000..114e17494 --- /dev/null +++ b/bundles/org.simantics.fastlz/native/vs2012/fastlz.vcxproj.filters @@ -0,0 +1,30 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hpp;hxx;hm;inl;inc;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav + + + + + Source Files + + + Source Files + + + + + Resource Files + + + \ No newline at end of file diff --git a/bundles/org.simantics.fastlz/src/libfastlz-darwin-x86_64.jnilib b/bundles/org.simantics.fastlz/src/libfastlz-darwin-x86_64.dylib similarity index 100% rename from bundles/org.simantics.fastlz/src/libfastlz-darwin-x86_64.jnilib rename to bundles/org.simantics.fastlz/src/libfastlz-darwin-x86_64.dylib diff --git a/bundles/org.simantics.fastlz/src/org/simantics/fastlz/java/FastLZJavaInputStream.java b/bundles/org.simantics.fastlz/src/org/simantics/fastlz/java/FastLZJavaInputStream.java index d6ac30896..290990b8d 100644 --- a/bundles/org.simantics.fastlz/src/org/simantics/fastlz/java/FastLZJavaInputStream.java +++ b/bundles/org.simantics.fastlz/src/org/simantics/fastlz/java/FastLZJavaInputStream.java @@ -43,6 +43,11 @@ public class FastLZJavaInputStream extends DecompressingInputStream { super(stream, channel); } + 
@Override + protected ByteBuffer allocateBuffer(int capacity) { + return ByteBuffer.allocate(capacity); + } + @Override public void decompress(ByteBuffer compressed, int compressedOffset, int compressedSize, ByteBuffer uncompressed, int uncompressedOffset, int uncompressedSize) throws IOException { diff --git a/bundles/org.simantics.fastlz/testcases/org/simantics/fastlz/FastLZBasicTests.java b/bundles/org.simantics.fastlz/testcases/org/simantics/fastlz/FastLZBasicTests.java index 177c8d1db..572ee1189 100644 --- a/bundles/org.simantics.fastlz/testcases/org/simantics/fastlz/FastLZBasicTests.java +++ b/bundles/org.simantics.fastlz/testcases/org/simantics/fastlz/FastLZBasicTests.java @@ -60,7 +60,6 @@ public class FastLZBasicTests { @Test public void validateCompress() throws IOException { validateCompress(testData1); - validateCompress(new File("grades.snp")); } private void validateCompress(File testData) throws IOException { diff --git a/bundles/org.simantics.graph.compiler/META-INF/MANIFEST.MF b/bundles/org.simantics.graph.compiler/META-INF/MANIFEST.MF index 6d785149f..d64d3d564 100644 --- a/bundles/org.simantics.graph.compiler/META-INF/MANIFEST.MF +++ b/bundles/org.simantics.graph.compiler/META-INF/MANIFEST.MF @@ -2,7 +2,7 @@ Manifest-Version: 1.0 Bundle-ManifestVersion: 2 Bundle-Name: Compiler Bundle-SymbolicName: org.simantics.graph.compiler;singleton:=true -Bundle-Version: 1.1.11.qualifier +Bundle-Version: 1.1.15.qualifier Bundle-RequiredExecutionEnvironment: JavaSE-1.8 Require-Bundle: org.simantics.graph;bundle-version="1.0.0";visibility:=reexport, org.simantics.ltk.antlr;bundle-version="1.0.0", diff --git a/bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/GraphCompiler.java b/bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/GraphCompiler.java index 21761a3b1..5f30082a8 100644 --- a/bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/GraphCompiler.java +++ 
b/bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/GraphCompiler.java @@ -124,7 +124,7 @@ public class GraphCompiler { run(new CreateInverseRelations(graph, store)); run(new AddConsistsOf(paths, store)); run(new ConvertPreValues(graph, store, errors)); - run(new ReportCollisions(errors, store)); + run(new ReportCollisions(preferences, errors, store)); if(preferences.validate) run(new ValidateGraph(graph, errors, store, preferences)); diff --git a/bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/internal/parsing/Graph.g b/bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/internal/parsing/Graph.g index 67b11fbaa..995960f94 100644 --- a/bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/internal/parsing/Graph.g +++ b/bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/internal/parsing/Graph.g @@ -85,7 +85,7 @@ public Token nextToken() { emit balancing number of DEDENT tokens. */ if(iStack.size() <= 1) - return Token.EOF_TOKEN; + return getEOFToken(); else { while(iStack.size() > 1) { iStack.removeAt(iStack.size()-1); @@ -450,4 +450,4 @@ mapAssignment : value '=' value -> ^(ASSIGNMENT value*) ; - \ No newline at end of file + diff --git a/bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/internal/parsing/GraphLexer.java b/bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/internal/parsing/GraphLexer.java index 98a10ec97..d75d8cc93 100644 --- a/bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/internal/parsing/GraphLexer.java +++ b/bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/internal/parsing/GraphLexer.java @@ -108,7 +108,7 @@ public class GraphLexer extends Lexer { emit balancing number of DEDENT tokens. 
*/ if(iStack.size() <= 1) - return Token.EOF_TOKEN; + return getEOFToken(); else { while(iStack.size() > 1) { iStack.removeAt(iStack.size()-1); @@ -2471,4 +2471,4 @@ public class GraphLexer extends Lexer { } -} \ No newline at end of file +} diff --git a/bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/internal/validation/ReportCollisions.java b/bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/internal/validation/ReportCollisions.java index e7346e945..277ddc7b1 100644 --- a/bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/internal/validation/ReportCollisions.java +++ b/bundles/org.simantics.graph.compiler/src/org/simantics/graph/compiler/internal/validation/ReportCollisions.java @@ -2,22 +2,34 @@ package org.simantics.graph.compiler.internal.validation; import java.util.Collection; +import org.simantics.graph.compiler.GraphCompilerPreferences; import org.simantics.graph.compiler.internal.store.LocationStore; import org.simantics.graph.compiler.internal.templates.TemplateDefinitionStore; +import org.simantics.graph.query.Res; import org.simantics.graph.store.GraphStore; +import org.simantics.graph.store.StatementCollision; import org.simantics.ltk.Problem; public class ReportCollisions implements Runnable { + GraphCompilerPreferences preferences; Collection problems; GraphStore store; public ReportCollisions( - Collection problems, + GraphCompilerPreferences preferences, Collection problems, GraphStore store) { + this.preferences = preferences; this.problems = problems; this.store = store; } + private static String abbreviateURI(Res res) { + if(res == null) + return "null"; + String uri = res.toString(); + return uri.replace("http://www.simantics.org/", ""); + } + @Override public void run() { LocationStore locations = store.getStore(LocationStore.class); @@ -32,7 +44,16 @@ public class ReportCollisions implements Runnable { for(int c : 
store.getStore(TemplateDefinitionStore.class).getCollisions().toArray()) problems.add(new Problem( locations.getLocation(c), - "Two tempalate definitions are given for the same resource.")); + "Two template definitions are given for the same resource.")); + if(preferences.validate) + for(StatementCollision collision : store.statements.getCollisions()) { + problems.add(new Problem( + locations.getLocation(collision.subject), + "The same statement is defined " + collision.count + " times: " + + abbreviateURI(store.idToRes(collision.subject)) + ", " + + abbreviateURI(store.idToRes(collision.predicate)) + ", " + + abbreviateURI(store.idToRes(collision.object)))); + } } } diff --git a/bundles/org.simantics.graph/META-INF/MANIFEST.MF b/bundles/org.simantics.graph/META-INF/MANIFEST.MF index 448d8556b..5f4404acc 100644 --- a/bundles/org.simantics.graph/META-INF/MANIFEST.MF +++ b/bundles/org.simantics.graph/META-INF/MANIFEST.MF @@ -2,7 +2,7 @@ Manifest-Version: 1.0 Bundle-ManifestVersion: 2 Bundle-Name: Transferable Graph Runtime Bundle-SymbolicName: org.simantics.graph -Bundle-Version: 1.1.11.qualifier +Bundle-Version: 1.1.15.qualifier Bundle-RequiredExecutionEnvironment: JavaSE-1.8 Require-Bundle: org.simantics.databoard;bundle-version="0.5.1", gnu.trove3;bundle-version="3.0.0";visibility:=reexport, diff --git a/bundles/org.simantics.graph/src/org/simantics/graph/store/StatementCollision.java b/bundles/org.simantics.graph/src/org/simantics/graph/store/StatementCollision.java new file mode 100644 index 000000000..3bcd4f0bf --- /dev/null +++ b/bundles/org.simantics.graph/src/org/simantics/graph/store/StatementCollision.java @@ -0,0 +1,16 @@ +package org.simantics.graph.store; + +public class StatementCollision { + public final int subject; + public final int predicate; + public final int object; + public final int count; + + public StatementCollision(int subject, int predicate, int object, int count) { + this.subject = subject; + this.predicate = predicate; + this.object = 
object; + this.count = count; + } +} + diff --git a/bundles/org.simantics.graph/src/org/simantics/graph/store/StatementStore.java b/bundles/org.simantics.graph/src/org/simantics/graph/store/StatementStore.java index 96b6b1c72..02dfddde8 100644 --- a/bundles/org.simantics.graph/src/org/simantics/graph/store/StatementStore.java +++ b/bundles/org.simantics.graph/src/org/simantics/graph/store/StatementStore.java @@ -1,5 +1,7 @@ package org.simantics.graph.store; +import java.util.ArrayList; + import gnu.trove.list.array.TIntArrayList; import gnu.trove.map.hash.TIntIntHashMap; import gnu.trove.map.hash.TIntObjectHashMap; @@ -10,7 +12,7 @@ import gnu.trove.set.hash.TIntHashSet; /** * Statement store indexes a set of statements. - * @author Hannu Niemistö + * @author Hannu Niemist� */ public class StatementStore implements IStore { @@ -287,5 +289,64 @@ public class StatementStore implements IStore { } }); return statements.toArray(); + } + + private static class CollisionSubjectProcedure implements TIntObjectProcedure> { + CollisionPredicateProcedure predicateProcedure; + + public CollisionSubjectProcedure(CollisionPredicateProcedure predicateProcedure) { + this.predicateProcedure = predicateProcedure; + } + + @Override + public boolean execute(int subject, TIntObjectHashMap predicateObjectMap) { + predicateProcedure.subject = subject; + predicateObjectMap.forEachEntry(predicateProcedure); + return true; + } + + } + + private static class CollisionPredicateProcedure implements TIntObjectProcedure { + ArrayList collisions; + int subject; + + public CollisionPredicateProcedure(ArrayList collisions) { + this.collisions = collisions; + } + + @Override + public boolean execute(int predicate, TIntArrayList objects) { + if(objects.size() > 1) { + objects.sort(); + int oldObject = objects.get(0); + int collisionCount = 1; + for(int i=1;i 1) { + collisions.add(new StatementCollision(subject, predicate, oldObject, collisionCount)); + collisionCount = 1; + } + oldObject = curObject; 
+ } + } + if(collisionCount > 1) + collisions.add(new StatementCollision(subject, predicate, oldObject, collisionCount)); + } + return true; + } + + } + + public ArrayList getCollisions() { + ArrayList collisions = new ArrayList(); + CollisionPredicateProcedure predicateProcedure = new CollisionPredicateProcedure(collisions); + CollisionSubjectProcedure subjectProcedure = new CollisionSubjectProcedure(predicateProcedure); + statements.forEachEntry(subjectProcedure); + return collisions; } } diff --git a/bundles/org.simantics.help.base/.classpath b/bundles/org.simantics.help.base/.classpath index 6cf33fb32..b1dabee38 100644 --- a/bundles/org.simantics.help.base/.classpath +++ b/bundles/org.simantics.help.base/.classpath @@ -1,9 +1,5 @@ - - - - diff --git a/bundles/org.simantics.help.base/META-INF/MANIFEST.MF b/bundles/org.simantics.help.base/META-INF/MANIFEST.MF index d9c6d4585..a20f8e475 100644 --- a/bundles/org.simantics.help.base/META-INF/MANIFEST.MF +++ b/bundles/org.simantics.help.base/META-INF/MANIFEST.MF @@ -8,11 +8,8 @@ Bundle-Vendor: Semantum Oy Require-Bundle: org.eclipse.core.runtime, org.eclipse.help.base, org.apache.commons.logging;bundle-version="1.0.4", - org.bouncycastle;bundle-version="1.47.0" + org.apache.pdfbox;bundle-version="2.0.2", + org.apache.pdfbox.fontbox;bundle-version="2.0.2" Bundle-RequiredExecutionEnvironment: JavaSE-1.7 Bundle-ActivationPolicy: lazy -Bundle-ClassPath: ., - xmpbox-1.8.10.jar, - jempbox-1.8.10.jar, - fontbox-1.8.10.jar, - pdfbox-1.8.10.jar +Bundle-ClassPath: . diff --git a/bundles/org.simantics.help.base/build.properties b/bundles/org.simantics.help.base/build.properties index 7904aba26..c125ab4d7 100644 --- a/bundles/org.simantics.help.base/build.properties +++ b/bundles/org.simantics.help.base/build.properties @@ -2,9 +2,5 @@ source.. = src/ output.. = bin/ bin.includes = META-INF/,\ .,\ - plugin.xml,\ - pdfbox-1.8.10.jar,\ - fontbox-1.8.10.jar,\ - jempbox-1.8.10.jar,\ - xmpbox-1.8.10.jar + plugin.xml source.. 
= src/ diff --git a/bundles/org.simantics.help.base/fontbox-1.8.10.jar b/bundles/org.simantics.help.base/fontbox-1.8.10.jar deleted file mode 100644 index 3284950c8..000000000 Binary files a/bundles/org.simantics.help.base/fontbox-1.8.10.jar and /dev/null differ diff --git a/bundles/org.simantics.help.base/jempbox-1.8.10.jar b/bundles/org.simantics.help.base/jempbox-1.8.10.jar deleted file mode 100644 index 48cc63375..000000000 Binary files a/bundles/org.simantics.help.base/jempbox-1.8.10.jar and /dev/null differ diff --git a/bundles/org.simantics.help.base/pdfbox-1.8.10-src.zip b/bundles/org.simantics.help.base/pdfbox-1.8.10-src.zip deleted file mode 100644 index e05aa634b..000000000 Binary files a/bundles/org.simantics.help.base/pdfbox-1.8.10-src.zip and /dev/null differ diff --git a/bundles/org.simantics.help.base/pdfbox-1.8.10.jar b/bundles/org.simantics.help.base/pdfbox-1.8.10.jar deleted file mode 100644 index 87bb9a704..000000000 Binary files a/bundles/org.simantics.help.base/pdfbox-1.8.10.jar and /dev/null differ diff --git a/bundles/org.simantics.help.base/src/org/simantics/help/base/internal/PDFUtil.java b/bundles/org.simantics.help.base/src/org/simantics/help/base/internal/PDFUtil.java index c12e56b4d..43fac6c70 100644 --- a/bundles/org.simantics.help.base/src/org/simantics/help/base/internal/PDFUtil.java +++ b/bundles/org.simantics.help.base/src/org/simantics/help/base/internal/PDFUtil.java @@ -1,14 +1,14 @@ package org.simantics.help.base.internal; import java.io.File; -import java.io.FileInputStream; import java.io.IOException; import org.apache.pdfbox.cos.COSDocument; +import org.apache.pdfbox.io.RandomAccessFile; import org.apache.pdfbox.pdfparser.PDFParser; import org.apache.pdfbox.pdmodel.PDDocument; import org.apache.pdfbox.pdmodel.PDDocumentInformation; -import org.apache.pdfbox.util.PDFTextStripper; +import org.apache.pdfbox.text.PDFTextStripper; import org.eclipse.help.search.ISearchDocument; /** @@ -17,7 +17,7 @@ import 
org.eclipse.help.search.ISearchDocument; public class PDFUtil { public static void stripText(File fromPdf, ISearchDocument doc) throws IOException { - PDFParser parser = new PDFParser(new FileInputStream(fromPdf)); + PDFParser parser = new PDFParser(new RandomAccessFile(fromPdf, "r")); parser.parse(); try (COSDocument cosDoc = parser.getDocument()) { diff --git a/bundles/org.simantics.help.base/xmpbox-1.8.10.jar b/bundles/org.simantics.help.base/xmpbox-1.8.10.jar deleted file mode 100644 index 7a8a46594..000000000 Binary files a/bundles/org.simantics.help.base/xmpbox-1.8.10.jar and /dev/null differ diff --git a/bundles/org.simantics.help.ui/META-INF/MANIFEST.MF b/bundles/org.simantics.help.ui/META-INF/MANIFEST.MF index e9f8ba1b9..0bc7d909d 100644 --- a/bundles/org.simantics.help.ui/META-INF/MANIFEST.MF +++ b/bundles/org.simantics.help.ui/META-INF/MANIFEST.MF @@ -12,7 +12,8 @@ Require-Bundle: org.eclipse.ui, org.eclipse.ui.editors;bundle-version="3.9.0", org.eclipse.jface.text;bundle-version="3.10.0", org.eclipse.core.resources;bundle-version="3.10.1", - org.simantics.help.core + org.simantics.help.core, + org.slf4j.api Bundle-RequiredExecutionEnvironment: JavaSE-1.8 Bundle-ActivationPolicy: lazy Bundle-ClassPath: . 
diff --git a/bundles/org.simantics.help.ui/src/org/simantics/help/ui/OpenHelpFileAdapter.java b/bundles/org.simantics.help.ui/src/org/simantics/help/ui/OpenHelpFileAdapter.java index c5847ce0d..6a5e2d968 100644 --- a/bundles/org.simantics.help.ui/src/org/simantics/help/ui/OpenHelpFileAdapter.java +++ b/bundles/org.simantics.help.ui/src/org/simantics/help/ui/OpenHelpFileAdapter.java @@ -2,7 +2,6 @@ package org.simantics.help.ui; import org.eclipse.ui.PartInitException; import org.eclipse.ui.PlatformUI; -import org.simantics.Logger; import org.simantics.Simantics; import org.simantics.db.ReadGraph; import org.simantics.db.Resource; @@ -16,8 +15,11 @@ import org.simantics.help.HelpResources; import org.simantics.ui.workbench.ResourceEditorInput2; import org.simantics.ui.workbench.editor.AbstractResourceEditorAdapter; import org.simantics.utils.ui.workbench.WorkbenchUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class OpenHelpFileAdapter extends AbstractResourceEditorAdapter { + private static final Logger LOGGER = LoggerFactory.getLogger(OpenHelpFileAdapter.class); public OpenHelpFileAdapter() { super("Help File Editor"); @@ -49,7 +51,7 @@ public class OpenHelpFileAdapter extends AbstractResourceEditorAdapter { String editorId = getEditorId(); WorkbenchUtils.openEditor(editorId, new ResourceEditorInput2(editorId, input, model, rvi)); } catch (PartInitException e) { - Logger.defaultLogError(e); + LOGGER.error("Failed to open an editor for help file.", e); } } }); diff --git a/bundles/org.simantics.image.ui/scl/Simantics/Image.scl b/bundles/org.simantics.image.ui/scl/Simantics/Image.scl index 74d197f2f..df9ba8360 100644 --- a/bundles/org.simantics.image.ui/scl/Simantics/Image.scl +++ b/bundles/org.simantics.image.ui/scl/Simantics/Image.scl @@ -8,6 +8,7 @@ type Image = Resource importJava "org.simantics.image.ui.SCLImage" where @JavaName importImageFromFile importImage :: File -> Library -> Image - - @JavaName linkImage - linkImage :: SVGImage 
-> Image -> () \ No newline at end of file + +// HN: does not exist +// @JavaName linkImage +// linkImage :: SVGImage -> Image -> () \ No newline at end of file diff --git a/bundles/org.simantics.issues.common/META-INF/MANIFEST.MF b/bundles/org.simantics.issues.common/META-INF/MANIFEST.MF index 421606517..23b688bcd 100644 --- a/bundles/org.simantics.issues.common/META-INF/MANIFEST.MF +++ b/bundles/org.simantics.issues.common/META-INF/MANIFEST.MF @@ -11,7 +11,8 @@ Require-Bundle: org.simantics.issues;bundle-version="1.1.0", org.simantics.layer0.utils;bundle-version="0.6.2", org.simantics.db.services;bundle-version="0.6.2", org.simantics;bundle-version="1.0.0", - org.simantics.issues.ui.ontology;bundle-version="1.0.0" + org.simantics.issues.ui.ontology;bundle-version="1.0.0", + org.slf4j.api Export-Package: org.simantics.issues.common, org.simantics.issues.common.preferences Bundle-Vendor: VTT Technical Research Centre of Finland diff --git a/bundles/org.simantics.issues.common/src/org/simantics/issues/common/DependencyIssueValidator2.java b/bundles/org.simantics.issues.common/src/org/simantics/issues/common/DependencyIssueValidator2.java index 7c6cdeccf..2f8c42b90 100644 --- a/bundles/org.simantics.issues.common/src/org/simantics/issues/common/DependencyIssueValidator2.java +++ b/bundles/org.simantics.issues.common/src/org/simantics/issues/common/DependencyIssueValidator2.java @@ -5,7 +5,6 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import org.simantics.Logger; import org.simantics.db.Issue; import org.simantics.db.ReadGraph; import org.simantics.db.Resource; @@ -17,8 +16,11 @@ import org.simantics.db.common.utils.NameUtils; import org.simantics.db.exception.DatabaseException; import org.simantics.issues.ontology.IssueResource; import org.simantics.layer0.Layer0; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class DependencyIssueValidator2 extends ResourceRead3 { + private static final Logger LOGGER = 
LoggerFactory.getLogger(DependencyIssueValidator2.class); public static final boolean DEBUG = false; @@ -65,7 +67,7 @@ public class DependencyIssueValidator2 extends ResourceRead3 { if(DEBUG) System.err.println("Validator found: " + contexts.size() + " issues (" + contexts + ")"); return contexts; } catch (DatabaseException e) { - Logger.defaultLogError(e); + LOGGER.error("Reading a constraint validator failed", e); return Collections.emptySet(); } diff --git a/bundles/org.simantics.issues.common/src/org/simantics/issues/common/IssueUtils.java b/bundles/org.simantics.issues.common/src/org/simantics/issues/common/IssueUtils.java index 26f0167ad..f1dcadc89 100644 --- a/bundles/org.simantics.issues.common/src/org/simantics/issues/common/IssueUtils.java +++ b/bundles/org.simantics.issues.common/src/org/simantics/issues/common/IssueUtils.java @@ -25,7 +25,6 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; -import org.simantics.Logger; import org.simantics.Simantics; import org.simantics.databoard.Bindings; import org.simantics.databoard.util.URIStringUtils; @@ -57,11 +56,14 @@ import org.simantics.layer0.Layer0; import org.simantics.operation.Layer0X; import org.simantics.scl.runtime.function.FunctionImpl2; import org.simantics.utils.datastructures.Pair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * @author Tuukka Lehtonen */ public class IssueUtils { + private static final Logger LOGGER = LoggerFactory.getLogger(IssueUtils.class); public static Resource toSeverityResource(IssueResource ISSUE, Severity severity) { switch (severity) { @@ -113,7 +115,7 @@ public class IssueUtils { }); } } catch (DatabaseException e) { - Logger.defaultLogError(e); + LOGGER.error("Updating issue source failed.", e); } } else { Session session = Simantics.getSession(); @@ -173,7 +175,7 @@ public class IssueUtils { @Override public void exception(ReadGraph graph, Throwable throwable) throws DatabaseException { - 
Logger.defaultLogError(throwable); + LOGGER.error("IssueValidityListener received an exception.", throwable); } } @@ -207,7 +209,7 @@ public class IssueUtils { @Override public void exception(ReadGraph graph, Throwable t) { - Logger.defaultLogError(t); + LOGGER.error("IssueSourceManagedIssuesListener received an exception.", t); } @Override @@ -263,7 +265,7 @@ public class IssueUtils { @Override public void exception(ReadGraph graph, Throwable t) { - Logger.defaultLogError(t); + LOGGER.error("ActiveIssueSourceListener received an exception.", t); } @Override diff --git a/bundles/org.simantics.logback.configuration/.classpath b/bundles/org.simantics.logback.configuration/.classpath new file mode 100644 index 000000000..eca7bdba8 --- /dev/null +++ b/bundles/org.simantics.logback.configuration/.classpath @@ -0,0 +1,7 @@ + + + + + + + diff --git a/bundles/org.simantics.logback.configuration/.project b/bundles/org.simantics.logback.configuration/.project new file mode 100644 index 000000000..3fdee5609 --- /dev/null +++ b/bundles/org.simantics.logback.configuration/.project @@ -0,0 +1,28 @@ + + + org.simantics.logback.configuration + + + + + + org.eclipse.jdt.core.javabuilder + + + + + org.eclipse.pde.ManifestBuilder + + + + + org.eclipse.pde.SchemaBuilder + + + + + + org.eclipse.pde.PluginNature + org.eclipse.jdt.core.javanature + + diff --git a/bundles/org.simantics.logback.configuration/.settings/org.eclipse.jdt.core.prefs b/bundles/org.simantics.logback.configuration/.settings/org.eclipse.jdt.core.prefs new file mode 100644 index 000000000..0c68a61dc --- /dev/null +++ b/bundles/org.simantics.logback.configuration/.settings/org.eclipse.jdt.core.prefs @@ -0,0 +1,7 @@ +eclipse.preferences.version=1 +org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled +org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8 +org.eclipse.jdt.core.compiler.compliance=1.8 +org.eclipse.jdt.core.compiler.problem.assertIdentifier=error 
+org.eclipse.jdt.core.compiler.problem.enumIdentifier=error +org.eclipse.jdt.core.compiler.source=1.8 diff --git a/bundles/org.simantics.logback.configuration/META-INF/MANIFEST.MF b/bundles/org.simantics.logback.configuration/META-INF/MANIFEST.MF new file mode 100644 index 000000000..ae85b8ace --- /dev/null +++ b/bundles/org.simantics.logback.configuration/META-INF/MANIFEST.MF @@ -0,0 +1,9 @@ +Manifest-Version: 1.0 +Bundle-ManifestVersion: 2 +Bundle-Name: Simantics Logback Configuration +Bundle-SymbolicName: org.simantics.logback.configuration +Bundle-Version: 1.0.0.qualifier +Fragment-Host: ch.qos.logback.classic;bundle-version="1.1.7" +Bundle-RequiredExecutionEnvironment: JavaSE-1.8 +Require-Bundle: ch.qos.logback.core;bundle-version="1.1.7" +Bundle-Vendor: Semantum Oy diff --git a/bundles/org.simantics.logback.configuration/build.properties b/bundles/org.simantics.logback.configuration/build.properties new file mode 100644 index 000000000..f9c250446 --- /dev/null +++ b/bundles/org.simantics.logback.configuration/build.properties @@ -0,0 +1,5 @@ +source.. = src/ +output.. 
= bin/ +bin.includes = META-INF/,\ + .,\ + logback.xml diff --git a/bundles/org.simantics.logback.configuration/logback.xml b/bundles/org.simantics.logback.configuration/logback.xml new file mode 100644 index 000000000..38dc39338 --- /dev/null +++ b/bundles/org.simantics.logback.configuration/logback.xml @@ -0,0 +1,18 @@ + + + + + + + %-5p [%d] %c: %m%n%rEx + + + + + + + + + + + \ No newline at end of file diff --git a/bundles/org.simantics.logback.configuration/src/.keep b/bundles/org.simantics.logback.configuration/src/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/bundles/org.simantics.ltk.antlr/META-INF/MANIFEST.MF b/bundles/org.simantics.ltk.antlr/META-INF/MANIFEST.MF index 99da36f06..0be675325 100644 --- a/bundles/org.simantics.ltk.antlr/META-INF/MANIFEST.MF +++ b/bundles/org.simantics.ltk.antlr/META-INF/MANIFEST.MF @@ -5,5 +5,5 @@ Bundle-SymbolicName: org.simantics.ltk.antlr Bundle-Version: 1.1.10.qualifier Bundle-RequiredExecutionEnvironment: JavaSE-1.8 Require-Bundle: org.simantics.ltk;bundle-version="1.0.0";visibility:=reexport, - org.antlr.runtime;bundle-version="3.2.0";visibility:=reexport + org.antlr.runtime;bundle-version="[3.2.0,4.0.0)";visibility:=reexport Export-Package: org.simantics.ltk.antlr diff --git a/bundles/org.simantics.modeling.ui/META-INF/MANIFEST.MF b/bundles/org.simantics.modeling.ui/META-INF/MANIFEST.MF index 10ad43d9a..f396a5815 100644 --- a/bundles/org.simantics.modeling.ui/META-INF/MANIFEST.MF +++ b/bundles/org.simantics.modeling.ui/META-INF/MANIFEST.MF @@ -32,7 +32,7 @@ Require-Bundle: org.simantics.project;bundle-version="1.0.0", org.simantics.issues;bundle-version="1.1.0", org.simantics.document;bundle-version="1.0.0", org.simantics.graph.db;bundle-version="1.1.9", - org.bouncycastle;bundle-version="1.47.0", + org.bouncycastle.bcprov-jdk14;bundle-version="1.38.0", org.simantics.image2.ontology;bundle-version="1.1.0", org.simantics.scl.compiler;bundle-version="0.4.0", 
org.simantics.scl.compiler.dummy;bundle-version="1.0.0", @@ -61,7 +61,8 @@ Require-Bundle: org.simantics.project;bundle-version="1.0.0", org.simantics.db.layer0, org.simantics.silk.ontology;bundle-version="1.1.0", org.simantics.image.ui;bundle-version="1.0.0", - org.simantics.export.core;bundle-version="1.0.0" + org.simantics.export.core;bundle-version="1.0.0", + org.slf4j.api Export-Package: org.simantics.modeling.ui, org.simantics.modeling.ui.actions, org.simantics.modeling.ui.chart.property, diff --git a/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/actions/CompilePGraphs.java b/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/actions/CompilePGraphs.java index 533c8fa3a..282c1d07d 100644 --- a/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/actions/CompilePGraphs.java +++ b/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/actions/CompilePGraphs.java @@ -20,7 +20,6 @@ import java.util.Map; import java.util.Set; import org.eclipse.core.runtime.FileLocator; -import org.eclipse.core.runtime.Platform; import org.eclipse.jface.dialogs.MessageDialog; import org.eclipse.jface.layout.GridDataFactory; import org.eclipse.jface.layout.GridLayoutFactory; @@ -29,6 +28,7 @@ import org.eclipse.swt.widgets.Composite; import org.eclipse.swt.widgets.Control; import org.eclipse.swt.widgets.Display; import org.eclipse.swt.widgets.Shell; +import org.osgi.framework.Bundle; import org.simantics.PlatformException; import org.simantics.Simantics; import org.simantics.databoard.Bindings; @@ -71,6 +71,7 @@ import org.simantics.graph.representation.TransferableGraph1; import org.simantics.layer0.Layer0; import org.simantics.ltk.ISource; import org.simantics.ltk.Problem; +import org.simantics.modeling.ui.Activator; import org.simantics.utils.datastructures.Pair; /** @@ -160,29 +161,12 @@ public class CompilePGraphs implements ActionFactory { final Collection sources = new ArrayList(); Collection dependencies = new ArrayList(); - 
File L0GraphFile = url2file(FileLocator.resolve(Platform.getBundle("org.simantics.layer0").getEntry("/graph.tg")), "L0_graph.tg"); - File L0XGraphFile = url2file(FileLocator.resolve(Platform.getBundle("org.simantics.layer0x.ontology").getEntry("/graph.tg")), "L0X_graph.tg"); - File DiagramGraphFile = url2file(FileLocator.resolve(Platform.getBundle("org.simantics.diagram.ontology").getEntry("/graph.tg")), "DIA_graph.tg"); - File G2DGraphFile = url2file(FileLocator.resolve(Platform.getBundle("org.simantics.g2d.ontology").getEntry("/graph.tg")), "G2D_graph.tg"); - File StructuralGraphFile = url2file(FileLocator.resolve(Platform.getBundle("org.simantics.structural.ontology").getEntry("/graph.tg")), "ST_graph.tg"); - File ModelingGraphFile = url2file(FileLocator.resolve(Platform.getBundle("org.simantics.modeling.ontology").getEntry("/graph.tg")), "MOD_graph.tg"); - File SimulationGraphFile = url2file(FileLocator.resolve(Platform.getBundle("org.simantics.simulation.ontology").getEntry("/graph.tg")), "SIMU_graph.tg"); - File DocumentGraphFile = url2file(FileLocator.resolve(Platform.getBundle("org.simantics.document.ontology").getEntry("/graph.tg")), "DOC_graph.tg"); - File SpreadsheetGraphFile = url2file(FileLocator.resolve(Platform.getBundle("org.simantics.spreadsheet.ontology").getEntry("/graph.tg")), "SHEET_graph.tg"); - File ProjectGraphFile = url2file(FileLocator.resolve(Platform.getBundle("org.simantics.project.ontology").getEntry("/graph.tg")), "PROJ_graph.tg"); - File SelectionViewGraphFile = url2file(FileLocator.resolve(Platform.getBundle("org.simantics.selectionview.ontology").getEntry("/graph.tg")), "SEL_graph.tg"); - - dependencies.add(GraphCompiler.read(L0GraphFile)); - dependencies.add(GraphCompiler.read(L0XGraphFile)); - dependencies.add(GraphCompiler.read(DiagramGraphFile)); - dependencies.add(GraphCompiler.read(G2DGraphFile)); - dependencies.add(GraphCompiler.read(StructuralGraphFile)); - dependencies.add(GraphCompiler.read(ModelingGraphFile)); - 
dependencies.add(GraphCompiler.read(SimulationGraphFile)); - dependencies.add(GraphCompiler.read(DocumentGraphFile)); - dependencies.add(GraphCompiler.read(SpreadsheetGraphFile)); - dependencies.add(GraphCompiler.read(ProjectGraphFile)); - dependencies.add(GraphCompiler.read(SelectionViewGraphFile)); + for(Bundle b : Activator.getContext().getBundles()) { + URL tg = b.getEntry("/graph.tg"); + if(tg == null) continue; + File f = url2file(FileLocator.resolve(tg), b.getSymbolicName()); + dependencies.add(GraphCompiler.read(f)); + } final TransferableGraph1 thisOntology = Simantics.sync(new UniqueRead() { @@ -191,7 +175,6 @@ public class CompilePGraphs implements ActionFactory { Layer0 L0 = Layer0.getInstance(graph); Resource parent = graph.getSingleObject(r, L0.PartOf); - String name = graph.getRelatedValue(r, L0.HasName, Bindings.STRING); CopyHandler ch = new DefaultCopyHandler(r) { @@ -282,8 +265,6 @@ public class CompilePGraphs implements ActionFactory { }); - - final StringBuilder errorStringBuilder = new StringBuilder(); GraphCompilerPreferences prefs = new GraphCompilerPreferences(); prefs.validate = true; @@ -378,7 +359,6 @@ public class CompilePGraphs implements ActionFactory { final DefaultPasteImportAdvisor advisor = new DefaultPasteImportAdvisor(r); - // final ImportAdvisor advisor = new ImportAdvisor(r, modelName); DefaultPasteHandler.defaultExecute(result.getGraph(), r, advisor); Simantics.sync(new WriteRequest() { @@ -398,9 +378,6 @@ public class CompilePGraphs implements ActionFactory { } - - - } catch (Exception e) { Logger.defaultLogError(e); } diff --git a/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/actions/ModeledActions.java b/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/actions/ModeledActions.java index 47d5643a1..ea50134d7 100644 --- a/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/actions/ModeledActions.java +++ 
b/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/actions/ModeledActions.java @@ -33,7 +33,6 @@ import org.eclipse.jface.action.ActionContributionItem; import org.eclipse.jface.action.IContributionItem; import org.eclipse.jface.action.MenuManager; import org.eclipse.jface.action.Separator; -import org.simantics.Logger; import org.simantics.browsing.ui.NodeContext; import org.simantics.browsing.ui.common.NodeContextBuilder; import org.simantics.browsing.ui.model.InvalidContribution; @@ -42,13 +41,17 @@ import org.simantics.browsing.ui.model.actions.IActionCategory; import org.simantics.db.ReadGraph; import org.simantics.db.Resource; import org.simantics.db.exception.DatabaseException; +import org.simantics.issues.common.IssueUtils; import org.simantics.modeling.ui.Activator; import org.simantics.project.ontology.ProjectResource; import org.simantics.ui.contribution.DynamicMenuContribution; import org.simantics.ui.selection.WorkbenchSelectionElement; import org.simantics.ui.selection.WorkbenchSelectionUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class ModeledActions extends DynamicMenuContribution implements IExecutableExtension { + private static final Logger LOGGER = LoggerFactory.getLogger(IssueUtils.class); public static final Set defaultBrowseContexts = Collections.singleton(ProjectResource.URIs.ProjectActionContext); @@ -121,7 +124,7 @@ public class ModeledActions extends DynamicMenuContribution implements IExecutab result.add(NodeContextBuilder.buildWithInput(res)); } } catch (DatabaseException e) { - Logger.defaultLogError(e); + LOGGER.error("Failed to get node contexts for selection.", e); } } } diff --git a/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/componentTypeEditor/ConfigurationPropertiesSection.java b/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/componentTypeEditor/ConfigurationPropertiesSection.java index 1ad4cd539..477a5543b 100644 --- 
a/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/componentTypeEditor/ConfigurationPropertiesSection.java +++ b/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/componentTypeEditor/ConfigurationPropertiesSection.java @@ -38,7 +38,6 @@ import org.eclipse.ui.PlatformUI; import org.eclipse.ui.forms.widgets.Form; import org.eclipse.ui.forms.widgets.FormToolkit; import org.eclipse.ui.forms.widgets.Section; -import org.simantics.Logger; import org.simantics.Simantics; import org.simantics.databoard.Bindings; import org.simantics.db.ReadGraph; @@ -63,9 +62,13 @@ import org.simantics.modeling.userComponent.ComponentTypeCommands; import org.simantics.selectionview.SelectionViewResources; import org.simantics.structural.stubs.StructuralResource2; import org.simantics.utils.datastructures.Pair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class ConfigurationPropertiesSection implements ComponentTypeViewerSection { + private static final Logger LOGGER = LoggerFactory.getLogger(ConfigurationPropertiesSection.class); + private static final String[] COLUMN_NAMES = new String[] {"Name", "Type", "Default Value", "Unit", "Range", "Label", "Description"}; private static final int[] COLUMN_LENGTHS = @@ -443,7 +446,7 @@ public class ConfigurationPropertiesSection implements ComponentTypeViewerSectio } catch (DatabaseException e1) { - Logger.defaultLogError(e1); + LOGGER.error("Lifting properties failed", e1); return; } @@ -565,7 +568,7 @@ public class ConfigurationPropertiesSection implements ComponentTypeViewerSectio } }); } catch (DatabaseException e) { - Logger.defaultLogError(e); + LOGGER.error("Finding UserDefinedProperties failed.", e); return Collections.emptyMap(); } } diff --git a/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/diagramEditor/OpenSheetAdapter.java b/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/diagramEditor/OpenSheetAdapter.java index 207257baa..0a4ca49b4 100644 --- 
a/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/diagramEditor/OpenSheetAdapter.java +++ b/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/diagramEditor/OpenSheetAdapter.java @@ -13,7 +13,6 @@ package org.simantics.modeling.ui.diagramEditor; import org.eclipse.ui.PartInitException; import org.eclipse.ui.PlatformUI; -import org.simantics.Logger; import org.simantics.db.ReadGraph; import org.simantics.db.Resource; import org.simantics.db.common.request.PossibleIndexRoot; @@ -28,8 +27,11 @@ import org.simantics.ui.SimanticsUI; import org.simantics.ui.workbench.ResourceEditorInput2; import org.simantics.ui.workbench.editor.AbstractResourceEditorAdapter; import org.simantics.utils.ui.workbench.WorkbenchUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class OpenSheetAdapter extends AbstractResourceEditorAdapter { + private static final Logger LOGGER = LoggerFactory.getLogger(OpenSheetAdapter.class); private static final String EDITOR_ID = "org.simantics.spreadsheet.ui.editor2"; @@ -63,7 +65,7 @@ public class OpenSheetAdapter extends AbstractResourceEditorAdapter { String editorId = getEditorId(); WorkbenchUtils.openEditor(editorId, new ResourceEditorInput2(editorId, r, model, rvi)); } catch (PartInitException e) { - Logger.defaultLogError(e); + LOGGER.error("Failed to open the spreadsheet editor.", e); } } }); diff --git a/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/diagramEditor/PopulateElementMonitorDropParticipant.java b/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/diagramEditor/PopulateElementMonitorDropParticipant.java index 0deeb1be1..959bbbe6f 100644 --- a/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/diagramEditor/PopulateElementMonitorDropParticipant.java +++ b/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/diagramEditor/PopulateElementMonitorDropParticipant.java @@ -11,8 +11,6 @@ 
*******************************************************************************/ package org.simantics.modeling.ui.diagramEditor; -import gnu.trove.set.hash.THashSet; - import java.awt.datatransfer.Transferable; import java.awt.datatransfer.UnsupportedFlavorException; import java.awt.dnd.DnDConstants; @@ -25,7 +23,6 @@ import java.util.HashMap; import java.util.List; import org.eclipse.jface.viewers.IStructuredSelection; -import org.simantics.Logger; import org.simantics.db.ReadGraph; import org.simantics.db.RequestProcessor; import org.simantics.db.Resource; @@ -64,8 +61,13 @@ import org.simantics.ui.selection.WorkbenchSelectionUtils; import org.simantics.utils.datastructures.Triple; import org.simantics.utils.datastructures.hints.IHintContext; import org.simantics.utils.ui.ISelectionUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import gnu.trove.set.hash.THashSet; public class PopulateElementMonitorDropParticipant extends PopulateElementDropParticipant implements IDropTargetParticipant { + private static final Logger LOGGER = LoggerFactory.getLogger(PopulateElementMonitorDropParticipant.class); private static final boolean DEBUG = false; @@ -129,14 +131,9 @@ public class PopulateElementMonitorDropParticipant extends PopulateElementDropPa dp.getHints().setHint(DnDHints.KEY_DND_GRID_COLUMNS, Integer.valueOf(1)); } - } catch (UnsupportedFlavorException e) { - Logger.defaultLogError(e); - } catch (IOException e) { - Logger.defaultLogError(e); - } catch (DatabaseException e) { - Logger.defaultLogError(e); + } catch (UnsupportedFlavorException|IOException|DatabaseException e) { + LOGGER.error("dragEnter failed", e); } - } dtde.acceptDrag(DnDConstants.ACTION_COPY); diff --git a/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/viewpoint/VariablePropertyRule.java b/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/viewpoint/VariablePropertyRule.java index 76ec9b919..0323cfad4 100644 --- 
a/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/viewpoint/VariablePropertyRule.java +++ b/bundles/org.simantics.modeling.ui/src/org/simantics/modeling/ui/viewpoint/VariablePropertyRule.java @@ -76,6 +76,9 @@ public class VariablePropertyRule implements ChildRule { if (isUnder(graph, L0, SEL, property, propertiesPredicates)) continue props; + Boolean hidden = property.getPossiblePropertyValue(graph, SEL.hidden, Bindings.BOOLEAN); + if(hidden != null && hidden) continue props; + for(String req : requiredProperties) if(property.getPossibleProperty(graph, req) == null) continue props; for(String req : filteredProperties) if(property.getName(graph).equals(req)) continue props; diff --git a/bundles/org.simantics.modeling/META-INF/MANIFEST.MF b/bundles/org.simantics.modeling/META-INF/MANIFEST.MF index 249d9ec7c..5245ed47e 100644 --- a/bundles/org.simantics.modeling/META-INF/MANIFEST.MF +++ b/bundles/org.simantics.modeling/META-INF/MANIFEST.MF @@ -36,7 +36,8 @@ Require-Bundle: org.simantics.simulation;bundle-version="1.0.0", org.simantics.scenegraph.profile;bundle-version="1.0.0", org.simantics.scl.db;bundle-version="0.1.3", org.simantics.selectionview.ontology;bundle-version="1.2.0", - org.simantics.scl.ui;bundle-version="0.5.0" + org.simantics.scl.ui;bundle-version="0.5.0", + org.slf4j.api Export-Package: org.simantics.modeling, org.simantics.modeling.actions, org.simantics.modeling.adapters, diff --git a/bundles/org.simantics.modeling/src/org/simantics/modeling/SCL.java b/bundles/org.simantics.modeling/src/org/simantics/modeling/SCL.java index 52909e018..24e50c0f7 100644 --- a/bundles/org.simantics.modeling/src/org/simantics/modeling/SCL.java +++ b/bundles/org.simantics.modeling/src/org/simantics/modeling/SCL.java @@ -10,7 +10,6 @@ import org.eclipse.core.runtime.Platform; import org.eclipse.core.runtime.jobs.IJobManager; import org.eclipse.core.runtime.jobs.Job; import org.simantics.DatabaseJob; -import org.simantics.Logger; import 
org.simantics.Simantics; import org.simantics.SimanticsPlatform; import org.simantics.SimanticsPlatform.OntologyRecoveryPolicy; @@ -24,8 +23,12 @@ import org.simantics.db.layer0.util.RemoverUtil; import org.simantics.db.layer0.util.SimanticsClipboard; import org.simantics.db.service.DebugSupport; import org.simantics.db.service.ServiceActivityMonitor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class SCL { + private static final Logger LOGGER = LoggerFactory.getLogger(SCL.class); + public static void killPlatformWrite(WriteGraph graph) throws DatabaseException { // Currently not supported. // Would be relatively easy to support the desired functionality. @@ -113,7 +116,7 @@ public class SCL { try { Thread.sleep(ms); } catch (InterruptedException e) { - Logger.defaultLogError(e); + LOGGER.warn("Sleep was interrupted.", e); } } diff --git a/bundles/org.simantics.modeling/src/org/simantics/modeling/scl/SCLRealm.java b/bundles/org.simantics.modeling/src/org/simantics/modeling/scl/SCLRealm.java index 99ac161a5..8ed8c3e0e 100644 --- a/bundles/org.simantics.modeling/src/org/simantics/modeling/scl/SCLRealm.java +++ b/bundles/org.simantics.modeling/src/org/simantics/modeling/scl/SCLRealm.java @@ -1,7 +1,5 @@ package org.simantics.modeling.scl; -import gnu.trove.map.hash.THashMap; - import java.io.IOException; import java.util.Map; import java.util.concurrent.ExecutorService; @@ -11,7 +9,6 @@ import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import org.simantics.Logger; import org.simantics.databoard.Bindings; import org.simantics.databoard.binding.Binding; import org.simantics.databoard.binding.mutable.Variant; @@ -24,8 +21,14 @@ import org.simantics.scl.runtime.function.Function; import org.simantics.scl.runtime.tuple.Tuple0; import org.simantics.simulator.variable.NodeManager; import org.simantics.simulator.variable.Realm; +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; + +import gnu.trove.map.hash.THashMap; public class SCLRealm implements Realm { + private static final Logger LOGGER = LoggerFactory.getLogger(SCLRealm.class); + public static final String SCL = "scl"; THashMap contextTypes = new THashMap(); @@ -118,7 +121,7 @@ public class SCLRealm implements Realm { try { runnable.run(); } catch (Throwable t) { - Logger.defaultLogError(t); + LOGGER.error("Runnable failed in syncExec.", t); } finally { } return; @@ -132,7 +135,7 @@ public class SCLRealm implements Realm { try { runnable.run(); } catch (Throwable t) { - Logger.defaultLogError(t); + LOGGER.error("Runnable failed in syncExec.", t); } finally { executorThread = oldThread; endSyncExec.release(); @@ -146,7 +149,7 @@ public class SCLRealm implements Realm { try { runnable.run(); } catch (Throwable t) { - Logger.defaultLogError(t); + LOGGER.error("Runnable failed in asyncExec.", t); } finally { } return; diff --git a/bundles/org.simantics.modeling/src/org/simantics/modeling/typicals/SyncTypicalTemplatesToInstances.java b/bundles/org.simantics.modeling/src/org/simantics/modeling/typicals/SyncTypicalTemplatesToInstances.java index 83407bb04..44b2abcfe 100644 --- a/bundles/org.simantics.modeling/src/org/simantics/modeling/typicals/SyncTypicalTemplatesToInstances.java +++ b/bundles/org.simantics.modeling/src/org/simantics/modeling/typicals/SyncTypicalTemplatesToInstances.java @@ -24,7 +24,6 @@ import java.util.Map; import java.util.Set; import org.eclipse.core.runtime.IProgressMonitor; -import org.simantics.Logger; import org.simantics.Simantics; import org.simantics.databoard.Bindings; import org.simantics.db.ReadGraph; @@ -82,6 +81,8 @@ import org.simantics.utils.datastructures.MapSet; import org.simantics.utils.strings.AlphanumComparator; import org.simantics.utils.strings.EString; import org.simantics.utils.ui.ErrorLogger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import gnu.trove.map.hash.THashMap; import 
gnu.trove.set.hash.THashSet; @@ -103,6 +104,7 @@ import gnu.trove.set.hash.THashSet; * @see TypicalSynchronizationMetadata */ public class SyncTypicalTemplatesToInstances extends WriteRequest { + private static final Logger LOGGER = LoggerFactory.getLogger(SyncTypicalTemplatesToInstances.class); /** * A constant used as the second argument to @@ -424,8 +426,7 @@ public class SyncTypicalTemplatesToInstances extends WriteRequest { syncInstance(graph, template, instance, templateElements); } } catch (Exception e) { - Logger.defaultLogError(e); - e.printStackTrace(); + LOGGER.error("Template synchronization failed.", e); } finally { this.temporaryDiagram.removeHint(DiagramModelHints.KEY_DIAGRAM_RESOURCE); } diff --git a/bundles/org.simantics.project/META-INF/MANIFEST.MF b/bundles/org.simantics.project/META-INF/MANIFEST.MF index ec287cffc..42e503aab 100644 --- a/bundles/org.simantics.project/META-INF/MANIFEST.MF +++ b/bundles/org.simantics.project/META-INF/MANIFEST.MF @@ -33,12 +33,12 @@ Require-Bundle: org.simantics.db.layer0;bundle-version="[0.8.0,2.0.0)", org.eclipse.equinox.p2.touchpoint.natives;bundle-version="1.0.200", org.eclipse.equinox.p2.transport.ecf;bundle-version="1.0.0", org.simantics.graph.db;bundle-version="1.0.0", - org.apache.log4j;bundle-version="1.2.15", org.simantics.db.procore;bundle-version="1.1.0", org.eclipse.swt;bundle-version="3.6.0";resolution:=optional, org.eclipse.core.resources;bundle-version="3.6.0";resolution:=optional, org.simantics.graph.compiler;bundle-version="1.1.10", - org.simantics.ltk;bundle-version="1.1.10" + org.simantics.ltk;bundle-version="1.1.10", + org.slf4j.api Export-Package: org.eclipse.equinox.internal.p2.ui.query, org.eclipse.equinox.internal.provisional.p2.installer, org.simantics.project, diff --git a/bundles/org.simantics.project/src/org/simantics/project/management/ServerManager.java b/bundles/org.simantics.project/src/org/simantics/project/management/ServerManager.java index 56cf3254c..205706c48 100644 --- 
a/bundles/org.simantics.project/src/org/simantics/project/management/ServerManager.java +++ b/bundles/org.simantics.project/src/org/simantics/project/management/ServerManager.java @@ -22,7 +22,6 @@ import java.util.HashMap; import java.util.Map; import java.util.Properties; -import org.apache.log4j.Logger; import org.simantics.databoard.util.StreamUtil; import org.simantics.db.Driver; import org.simantics.db.Driver.Management; @@ -40,6 +39,8 @@ import org.simantics.db.service.XSupport; import org.simantics.graph.db.CoreInitialization; import org.simantics.layer0.DatabaseManagementResource; import org.simantics.layer0.Layer0; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Server Manager handles starting and pooling of ProCore server instances. @@ -47,7 +48,8 @@ import org.simantics.layer0.Layer0; * @author Toni Kalajainen */ public class ServerManager { - + private static final Logger LOGGER = LoggerFactory.getLogger(ServerManager.class); + /** Default properties with default user and password */ public static final Properties DEFAULT; @@ -81,8 +83,7 @@ public class ServerManager { */ public Session createDatabase(File databaseDirectory) throws DatabaseException { try { - Logger myLogger = Logger.getLogger(ServerManager.class); - myLogger.debug("Creating database to "+ databaseDirectory); + LOGGER.debug("Creating database to "+ databaseDirectory); Session session = null; ServerEx server1 = getServer(databaseDirectory); @@ -206,8 +207,7 @@ public class ServerManager { if (server.isActive()) server.stop(); } catch (DatabaseException e) { - Logger myLogger = Logger.getLogger(ServerManager.class); - myLogger.error(e); + LOGGER.error("Failed to stop database server.", e); } } servers.clear(); diff --git a/bundles/org.simantics.project/src/org/simantics/project/management/ServerManagerFactory.java b/bundles/org.simantics.project/src/org/simantics/project/management/ServerManagerFactory.java index bbeb7541e..65190614c 100644 --- 
a/bundles/org.simantics.project/src/org/simantics/project/management/ServerManagerFactory.java +++ b/bundles/org.simantics.project/src/org/simantics/project/management/ServerManagerFactory.java @@ -22,19 +22,21 @@ import java.net.URLDecoder; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; -import org.apache.log4j.Logger; import org.simantics.db.DatabaseUserAgent; import org.simantics.db.Driver; import org.simantics.db.Manager; import org.simantics.db.exception.DatabaseException; -import org.simantics.utils.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class ServerManagerFactory { + private static final Logger LOGGER = LoggerFactory.getLogger(ServerManagerFactory.class); + public static ServerManager create(String databaseDriverId, String address) throws IOException, DatabaseException { Driver driver = Manager.getDriver(databaseDriverId); if (driver == null) - throw new IllegalArgumentException("Database driver for ID " + databaseDriverId + " Could not be found!"); - System.out.println("ServerManagerFactory.create called with databaseId=" + databaseDriverId + " and driver is " + driver.toString()); + throw new IllegalArgumentException("Database driver with ID " + databaseDriverId + " could not be found!"); + LOGGER.info("ServerManagerFactory.create called with id {}, driver is {}.", databaseDriverId, driver); DatabaseUserAgent agent = Manager.getUserAgent(databaseDriverId); if (agent != null) driver.setDatabaseUserAgent(address, agent); @@ -166,7 +168,6 @@ public class ServerManagerFactory { * @throws IOException */ private static void extractZip(InputStream zipInput, File dst) throws IOException { - Logger myLogger = Logger.getLogger(FileUtils.class); byte[] buf = new byte[8192]; ZipInputStream zis = new ZipInputStream(zipInput); ZipEntry entry; @@ -175,7 +176,7 @@ public class ServerManagerFactory { while (entry != null) { // for each entry to be extracted String name = entry.getName(); - 
myLogger.debug("Extracting "+name); + LOGGER.debug("Extracting "+name); File file = new File(dst, name); if (entry.isDirectory()) diff --git a/bundles/org.simantics.scl.compiler/META-INF/MANIFEST.MF b/bundles/org.simantics.scl.compiler/META-INF/MANIFEST.MF index 389381c96..916e66cfd 100755 --- a/bundles/org.simantics.scl.compiler/META-INF/MANIFEST.MF +++ b/bundles/org.simantics.scl.compiler/META-INF/MANIFEST.MF @@ -10,7 +10,8 @@ Require-Bundle: gnu.trove3;bundle-version="3.0.0", org.objectweb.asm;bundle-version="[5.0.0,6.0.0)", org.objectweb.asm.commons;bundle-version="[5.0.0,6.0.0)", org.objectweb.asm.util;bundle-version="[5.0.0,6.0.0)" -Export-Package: org.simantics.scl.compiler.commands, +Export-Package: org.cojen.classfile, + org.simantics.scl.compiler.commands, org.simantics.scl.compiler.common.datatypes, org.simantics.scl.compiler.common.exceptions, org.simantics.scl.compiler.common.names, @@ -38,12 +39,15 @@ Export-Package: org.simantics.scl.compiler.commands, org.simantics.scl.compiler.environment.filter, org.simantics.scl.compiler.environment.specification, org.simantics.scl.compiler.errors, + org.simantics.scl.compiler.internal.elaboration.constraints2, + org.simantics.scl.compiler.internal.elaboration.subsumption, org.simantics.scl.compiler.internal.parsing, org.simantics.scl.compiler.internal.parsing.exceptions, org.simantics.scl.compiler.internal.parsing.parser, org.simantics.scl.compiler.markdown.html, org.simantics.scl.compiler.markdown.inlines, org.simantics.scl.compiler.markdown.internal, + org.simantics.scl.compiler.markdown.nodes, org.simantics.scl.compiler.module, org.simantics.scl.compiler.module.coverage, org.simantics.scl.compiler.module.options, diff --git a/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/constants/JavaMathOperation.java b/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/constants/JavaMathOperation.java index e13c18563..caf212a61 100644 --- 
a/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/constants/JavaMathOperation.java +++ b/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/constants/JavaMathOperation.java @@ -61,6 +61,16 @@ public class JavaMathOperation extends FunctionValue { public static final JavaMathOperation SOR = new JavaMathOperation(Opcodes.IOR, Types.SHORT, Types.SHORT, Types.SHORT); public static final JavaMathOperation SXOR = new JavaMathOperation(Opcodes.IXOR, Types.SHORT, Types.SHORT, Types.SHORT); + public static final JavaMathOperation BADD = new JavaMathOperation(Opcodes.IADD, Types.BYTE, Types.BYTE, Types.BYTE); + public static final JavaMathOperation BSUB = new JavaMathOperation(Opcodes.ISUB, Types.BYTE, Types.BYTE, Types.BYTE); + public static final JavaMathOperation BMUL = new JavaMathOperation(Opcodes.IMUL, Types.BYTE, Types.BYTE, Types.BYTE); + public static final JavaMathOperation BDIV = new JavaMathOperation(Opcodes.IDIV, Types.BYTE, Types.BYTE, Types.BYTE); + public static final JavaMathOperation BREM = new JavaMathOperation(Opcodes.IREM, Types.BYTE, Types.BYTE, Types.BYTE); + public static final JavaMathOperation BNEG = new JavaMathOperation(Opcodes.INEG, Types.BYTE, Types.BYTE); + public static final JavaMathOperation BAND = new JavaMathOperation(Opcodes.IAND, Types.BYTE, Types.BYTE, Types.BYTE); + public static final JavaMathOperation BOR = new JavaMathOperation(Opcodes.IOR, Types.BYTE, Types.BYTE, Types.BYTE); + public static final JavaMathOperation BXOR = new JavaMathOperation(Opcodes.IXOR, Types.BYTE, Types.BYTE, Types.BYTE); + public static final JavaMathOperation CADD = new JavaMathOperation(Opcodes.IADD, Types.CHARACTER, Types.CHARACTER, Types.INTEGER); public static final JavaMathOperation CSUB = new JavaMathOperation(Opcodes.ISUB, Types.INTEGER, Types.CHARACTER, Types.CHARACTER); diff --git a/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/elaboration/expressions/EBlock.java 
b/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/elaboration/expressions/EBlock.java index 295f3c176..ff1312540 100755 --- a/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/elaboration/expressions/EBlock.java +++ b/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/elaboration/expressions/EBlock.java @@ -1,98 +1,100 @@ -package org.simantics.scl.compiler.elaboration.expressions; - -import java.util.ArrayList; -import java.util.LinkedList; -import java.util.List; - -import org.simantics.scl.compiler.common.exceptions.InternalCompilerError; -import org.simantics.scl.compiler.elaboration.contexts.TranslationContext; -import org.simantics.scl.compiler.elaboration.expressions.block.GuardStatement; -import org.simantics.scl.compiler.elaboration.expressions.block.LetStatement; -import org.simantics.scl.compiler.elaboration.expressions.block.RuleStatement; -import org.simantics.scl.compiler.elaboration.expressions.block.Statement; -import org.simantics.scl.compiler.errors.Locations; - -public class EBlock extends ASTExpression { - - LinkedList statements = new LinkedList(); - boolean monadic; - - public EBlock() { - } - - public void addStatement(Statement statement) { - statements.add(statement); - } - - public void setMonadic(boolean monadic) { - this.monadic = monadic; - } - - public LinkedList getStatements() { - return statements; - } - - @Override - public Expression resolve(TranslationContext context) { - if(statements.isEmpty()) - throw new InternalCompilerError(); - int i = statements.size()-1; - Statement last = statements.get(i); - if(!(last instanceof GuardStatement)) { - context.getErrorLog().log(last.location, "Block should end with an expression"); - return new EError(location); - } - - Expression in = ((GuardStatement)last).value; - while(--i >= 0) { - Statement cur = statements.get(i); - if(cur instanceof RuleStatement) { - int endId = i+1; - while(i>0 && statements.get(i-1) instanceof RuleStatement) - 
--i; - in = extractRules(i, endId, in); - } - else if(cur instanceof LetStatement && ((LetStatement)cur).pattern.isFunctionPattern()) { - int endId = i+1; - while(i>0 && (cur = statements.get(i-1)) instanceof LetStatement && - ((LetStatement)cur).pattern.isFunctionPattern()) - --i; - in = extractLet(i, endId, in); - } - else - in = cur.toExpression(context, monadic, in); - } - return in.resolve(context); - } - - private Expression extractRules(int begin, int end, Expression in) { - return new EPreRuleset(statements.subList(begin, end).toArray(new RuleStatement[end-begin]), in); - } - - @SuppressWarnings("unchecked") - private Expression extractLet(int begin, int end, Expression in) { - return new EPreLet((List)(List)statements.subList(begin, end), in); - } - - public static Expression create(ArrayList statements) { - EBlock block = new EBlock(); - for(Expression statement : statements) - block.addStatement(new GuardStatement(statement)); - return block; - } - - @Override - public void setLocationDeep(long loc) { - if(location == Locations.NO_LOCATION) { - location = loc; - for(Statement statement : statements) - statement.setLocationDeep(loc); - } - } - - @Override - public Expression accept(ExpressionTransformer transformer) { - return transformer.transform(this); - } - -} +package org.simantics.scl.compiler.elaboration.expressions; + +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; + +import org.simantics.scl.compiler.common.exceptions.InternalCompilerError; +import org.simantics.scl.compiler.elaboration.contexts.TranslationContext; +import org.simantics.scl.compiler.elaboration.expressions.block.GuardStatement; +import org.simantics.scl.compiler.elaboration.expressions.block.LetStatement; +import org.simantics.scl.compiler.elaboration.expressions.block.RuleStatement; +import org.simantics.scl.compiler.elaboration.expressions.block.Statement; +import org.simantics.scl.compiler.errors.Locations; + +public class EBlock extends 
ASTExpression { + + LinkedList statements = new LinkedList(); + boolean monadic; + + public EBlock() { + } + + public void addStatement(Statement statement) { + statements.add(statement); + } + + public void setMonadic(boolean monadic) { + this.monadic = monadic; + } + + public LinkedList getStatements() { + return statements; + } + + @Override + public Expression resolve(TranslationContext context) { + if(statements.isEmpty()) { + context.getErrorLog().log(location, "Block must contain at least one statement."); + return new EError(location); + } + int i = statements.size()-1; + Statement last = statements.get(i); + if(!(last instanceof GuardStatement)) { + context.getErrorLog().log(last.location, "Block should end with an expression"); + return new EError(location); + } + + Expression in = ((GuardStatement)last).value; + while(--i >= 0) { + Statement cur = statements.get(i); + if(cur instanceof RuleStatement) { + int endId = i+1; + while(i>0 && statements.get(i-1) instanceof RuleStatement) + --i; + in = extractRules(i, endId, in); + } + else if(cur instanceof LetStatement && ((LetStatement)cur).pattern.isFunctionPattern()) { + int endId = i+1; + while(i>0 && (cur = statements.get(i-1)) instanceof LetStatement && + ((LetStatement)cur).pattern.isFunctionPattern()) + --i; + in = extractLet(i, endId, in); + } + else + in = cur.toExpression(context, monadic, in); + } + return in.resolve(context); + } + + private Expression extractRules(int begin, int end, Expression in) { + return new EPreRuleset(statements.subList(begin, end).toArray(new RuleStatement[end-begin]), in); + } + + @SuppressWarnings("unchecked") + private Expression extractLet(int begin, int end, Expression in) { + return new EPreLet((List)(List)statements.subList(begin, end), in); + } + + public static Expression create(ArrayList statements) { + EBlock block = new EBlock(); + for(Expression statement : statements) + block.addStatement(new GuardStatement(statement)); + return block; + } + + @Override + 
public void setLocationDeep(long loc) { + if(location == Locations.NO_LOCATION) { + location = loc; + for(Statement statement : statements) + statement.setLocationDeep(loc); + } + } + + @Override + public Expression accept(ExpressionTransformer transformer) { + return transformer.transform(this); + } + +} diff --git a/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/elaboration/expressions/Expression.java b/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/elaboration/expressions/Expression.java index edf93841b..e01098c12 100755 --- a/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/elaboration/expressions/Expression.java +++ b/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/elaboration/expressions/Expression.java @@ -207,8 +207,6 @@ public abstract class Expression extends Symbol implements Typed { public abstract void collectFreeVariables(THashSet vars); public Expression simplify(SimplificationContext context) { - System.out.println("#############################"); - System.out.println(this); throw new InternalCompilerError(location, getClass().getSimpleName() + " does not support simplify method."); } diff --git a/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/elaboration/java/JavaModule.java b/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/elaboration/java/JavaModule.java index 39c2dbec6..cb39346e4 100755 --- a/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/elaboration/java/JavaModule.java +++ b/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/elaboration/java/JavaModule.java @@ -56,6 +56,16 @@ public class JavaModule extends ConcreteModule { addValue("sor", JavaMathOperation.SOR); addValue("sxor", JavaMathOperation.SXOR); + addValue("badd", JavaMathOperation.BADD); + addValue("bsub", JavaMathOperation.BSUB); + addValue("bmul", JavaMathOperation.BMUL); + addValue("bdiv", JavaMathOperation.BDIV); + addValue("brem", 
JavaMathOperation.BREM); + addValue("bneg", JavaMathOperation.BNEG); + addValue("band", JavaMathOperation.BAND); + addValue("bor", JavaMathOperation.BOR); + addValue("bxor", JavaMathOperation.BXOR); + // Conversions for(JavaConversionOperation operation : JavaConversionOperation.OPCODES) addValue(operation.getMnemonic(), operation); @@ -75,6 +85,13 @@ public class JavaModule extends ConcreteModule { addValue("lcmpgt", new JavaComparisonOperation(">", Types.LONG)); addValue("lcmpge", new JavaComparisonOperation(">=", Types.LONG)); + addValue("bcmpeq", new JavaComparisonOperation("==", Types.BYTE)); + addValue("bcmpne", new JavaComparisonOperation("!=", Types.BYTE)); + addValue("bcmplt", new JavaComparisonOperation("<", Types.BYTE)); + addValue("bcmple", new JavaComparisonOperation("<=", Types.BYTE)); + addValue("bcmpgt", new JavaComparisonOperation(">", Types.BYTE)); + addValue("bcmpge", new JavaComparisonOperation(">=", Types.BYTE)); + addValue("scmpeq", new JavaComparisonOperation("==", Types.SHORT)); addValue("scmpne", new JavaComparisonOperation("!=", Types.SHORT)); addValue("scmplt", new JavaComparisonOperation("<", Types.SHORT)); @@ -112,7 +129,6 @@ public class JavaModule extends ConcreteModule { addValue("ifge", new JavaComparisonToZeroOperation(">=")); TVar A = Types.var(Kinds.STAR); - TVar B = Types.var(Kinds.STAR); addValue("unsafeCoerce", UnsafeCoerce.INSTANCE); addValue("equals", new JavaMethod(true, diff --git a/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/elaboration/relations/TransitiveClosureRelation.java b/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/elaboration/relations/TransitiveClosureRelation.java index 26111e0b4..c9fa469d2 100644 --- a/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/elaboration/relations/TransitiveClosureRelation.java +++ b/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/elaboration/relations/TransitiveClosureRelation.java @@ -67,13 +67,9 @@ public 
class TransitiveClosureRelation extends AbstractRelation implements Compo type = type.replace(getTypeVariables(), typeParameters); Expression continuation = context.getContinuation(); - System.out.println("continuation = " + continuation + " :: " + continuation.getType()); Variable set = new Variable("set", Types.apply(Types.con("MSet", "T"), type)); Variable f = new Variable("f", Types.functionE(type, Types.PROC, continuation.getType())); Variable innerSolved = new Variable("tcTemp", solved.getType()); - System.out.println("set :: " + set.getType()); - System.out.println("f :: " + f.getType()); - System.out.println("tcTemp :: " + innerSolved.getType()); QueryCompilationContext newContext = context.createSubcontext(new EApply( new EVariable(f), new EVariable(innerSolved) diff --git a/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/internal/elaboration/constraints/ReduceSerializable.java b/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/internal/elaboration/constraints/ReduceSerializable.java index ea0276e35..3b7ef9d1c 100644 --- a/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/internal/elaboration/constraints/ReduceSerializable.java +++ b/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/internal/elaboration/constraints/ReduceSerializable.java @@ -98,8 +98,8 @@ public class ReduceSerializable { private static final THashMap BINDING_CONSTANTS2 = new THashMap(); static { - BINDING_CONSTANTS2.put(MAP, new JavaConstructor("org/simantics/databoard/binding/impl/HashMapBinding", Types.NO_EFFECTS, Types.pred(Types.SERIALIZABLE, Types.apply(MAP, A, B)), Types.pred(Types.SERIALIZABLE, A), Types.pred(Types.SERIALIZABLE, B))); - BINDING_CONSTANTS2.put(MMAP, new JavaConstructor("org/simantics/databoard/binding/impl/HashMapBinding", Types.NO_EFFECTS, Types.pred(Types.SERIALIZABLE, Types.apply(MMAP, A, B)), Types.pred(Types.SERIALIZABLE, A), Types.pred(Types.SERIALIZABLE, B))); + BINDING_CONSTANTS2.put(MAP, 
new JavaConstructor("org/simantics/databoard/binding/impl/DefaultMapBinding", Types.NO_EFFECTS, Types.pred(Types.SERIALIZABLE, Types.apply(MAP, A, B)), Types.pred(Types.SERIALIZABLE, A), Types.pred(Types.SERIALIZABLE, B))); + BINDING_CONSTANTS2.put(MMAP, new JavaConstructor("org/simantics/databoard/binding/impl/DefaultMapBinding", Types.NO_EFFECTS, Types.pred(Types.SERIALIZABLE, Types.apply(MMAP, A, B)), Types.pred(Types.SERIALIZABLE, A), Types.pred(Types.SERIALIZABLE, B))); } public static Reduction reduceSerializable(Type parameter) { diff --git a/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/top/ExpressionEvaluator.java b/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/top/ExpressionEvaluator.java index 5c8f59bb2..39f0ac721 100644 --- a/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/top/ExpressionEvaluator.java +++ b/bundles/org.simantics.scl.compiler/src/org/simantics/scl/compiler/top/ExpressionEvaluator.java @@ -182,7 +182,7 @@ public class ExpressionEvaluator { final Environment environment = runtimeEnvironment.getEnvironment(); // Parse expression - if(expressionText != null) { + if(expressionText != null && !expressionText.trim().isEmpty()) { try { switch(parseMode) { case BLOCK: { diff --git a/bundles/org.simantics.scl.data/scl/Data/Json.md b/bundles/org.simantics.scl.data/scl/Data/Json.md new file mode 100644 index 000000000..fe8e7218f --- /dev/null +++ b/bundles/org.simantics.scl.data/scl/Data/Json.md @@ -0,0 +1,42 @@ +# Basic functions + +::value[toJsonString, fromJsonString] + +# Supported value types + +This module supports the following value types: + +``` +instance Json String +instance Json Short +instance Json Integer +instance Json Long +instance Json Float +instance Json Double + +instance (Json a) => Json [a] +instance (Json a) => Json (Maybe a) + +instance Json () +instance (Json a, Json b) => Json (a, b) +instance (Json a, Json b, Json c) => Json (a, b, c) +instance (Json a, Json b, 
Json c, Json d) => Json (a, b, c, d) +instance (Json a, Json b, Json c, Json d, Json e) => Json (a, b, c, d, e) + +instance Json Json +``` + +# Generic JSON Type + +::data[Json, JsonField] + +# Adding support for additional value types + +::data[JsonGenerator, JsonParser] +::class[Json] + +It is enough to implement `toJson` and `fromJson`. + +# Undocumented entities + +::undocumented[] \ No newline at end of file diff --git a/bundles/org.simantics.scl.data/scl/Data/Json.scl.skip b/bundles/org.simantics.scl.data/scl/Data/Json.scl.skip new file mode 100644 index 000000000..fb034cd57 --- /dev/null +++ b/bundles/org.simantics.scl.data/scl/Data/Json.scl.skip @@ -0,0 +1,421 @@ +import "StandardLibrary" +import "Data/Writer" +import "JavaBuiltin" as Java + +importJava "com.fasterxml.jackson.core.JsonGenerator" where + data JsonGenerator + +@private +importJava "com.fasterxml.jackson.core.JsonGenerator" where + writeNull :: JsonGenerator -> () + + writeStartArray :: JsonGenerator -> () + @JavaName writeStartArray + writeStartArrayN :: JsonGenerator -> Integer -> () + writeEndArray :: JsonGenerator -> () + + writeStartObject :: JsonGenerator -> () + writeFieldName :: JsonGenerator -> String -> () + writeEndObject :: JsonGenerator -> () + + writeBoolean :: JsonGenerator -> Boolean -> () + + writeString :: JsonGenerator -> String -> () + + @JavaName writeNumber + writeNumberDouble :: JsonGenerator -> Double -> () + @JavaName writeNumber + writeNumberInteger :: JsonGenerator -> Integer -> () + @JavaName writeNumber + writeNumberLong :: JsonGenerator -> Long -> () + @JavaName writeNumber + writeNumberShort :: JsonGenerator -> Short -> () + @JavaName writeNumber + writeNumberFloat :: JsonGenerator -> Float -> () + + @JavaName close + closeGenerator :: JsonGenerator -> () + +@private +importJava "com.fasterxml.jackson.core.JsonToken" where + data JsonToken + END_ARRAY :: JsonToken + END_OBJECT :: JsonToken + FIELD_NAME :: JsonToken + NOT_AVAILABLE :: JsonToken + START_ARRAY :: 
JsonToken + START_OBJECT :: JsonToken + VALUE_EMBEDDED_OBJECT :: JsonToken + VALUE_FALSE :: JsonToken + VALUE_NULL :: JsonToken + VALUE_NUMBER_FLOAT :: JsonToken + VALUE_NUMBER_INT :: JsonToken + VALUE_STRING :: JsonToken + VALUE_TRUE :: JsonToken +instance Eq JsonToken where + (==) = Java.equals + +importJava "com.fasterxml.jackson.core.JsonParser" where + data JsonParser + +@private +importJava "com.fasterxml.jackson.core.JsonParser" where + nextToken :: JsonParser -> JsonToken + currentToken :: JsonParser -> JsonToken + getDoubleValue :: JsonParser -> Double + getIntValue :: JsonParser -> Integer + getText :: JsonParser -> String + getShortValue :: JsonParser -> Short + getFloatValue :: JsonParser -> Float + getLongValue :: JsonParser -> Long + nextFieldName :: JsonParser -> Maybe String + +@private +importJava "com.fasterxml.jackson.core.JsonFactory" where + data JsonFactory + + @JavaName "" + createJsonFactory :: JsonFactory + + @JavaName createGenerator + createWriterGenerator :: JsonFactory -> Writer -> JsonGenerator + + @JavaName createParser + createStringParser :: JsonFactory -> String -> JsonParser + +@private +defaultFactory = createJsonFactory + +@private +@inline +assertStartArray :: JsonParser -> () +assertStartArray p = if currentToken p == START_ARRAY + then () + else fail "Expected START_ARRAY token." + +@private +@inline +assertEndArray :: JsonParser -> () +assertEndArray p = if nextToken p == END_ARRAY + then () + else fail "Expected END_ARRAY token." 
+ +// *** Json type class ******************************************************** + +class Json a where + writeJson :: JsonGenerator -> a -> () + readJson :: JsonParser -> a + toJson :: a -> Json + fromJson :: Json -> a + + writeJson g v = writeJson g (toJson v) + readJson p = fromJson (readJson p) + +@private +readNextJson :: Json a => JsonParser -> a +readNextJson p = do + nextToken p + readJson p + +""" +Converts the value to a string encoded with JSON +""" +toJsonString :: Json a => a -> String +toJsonString v = runProc do + writer = createStringWriter + generator = createWriterGenerator defaultFactory (toWriter writer) + writeJson generator v + closeGenerator generator + resultOfStringWriter writer + +""" +Parses a JSON encoded string into a value +""" +fromJsonString :: Json a => String -> a +fromJsonString str = runProc do + parser = createStringParser defaultFactory str + readNextJson parser + +instance Json String where + writeJson = writeString + readJson = getText + toJson = JsonString + fromJson (JsonString value) = value + +instance Json Boolean where + writeJson = writeBoolean + readJson p = + if currentToken p == VALUE_TRUE + then True + else False + toJson = JsonBoolean + fromJson (JsonBoolean value) = value + +instance Json Double where + writeJson = writeNumberDouble + readJson = getDoubleValue + toJson = JsonDouble + fromJson (JsonDouble value) = value + +instance Json Float where + writeJson = writeNumberFloat + readJson = getFloatValue + toJson = JsonDouble . toDouble + fromJson (JsonDouble value) = fromDouble value + +instance Json Integer where + writeJson = writeNumberInteger + readJson = getIntValue + toJson = JsonLong . fromInteger + fromJson (JsonLong value) = Java.l2i value + +instance Json Long where + writeJson = writeNumberLong + readJson = getLongValue + toJson = JsonLong + fromJson (JsonLong value) = value + +instance Json Short where + writeJson = writeNumberShort + readJson = getShortValue + toJson = JsonLong . Java.i2l . 
Java.s2i + fromJson (JsonLong value) = Java.i2s (Java.l2i value) + +instance (Json a) => Json (Maybe a) where + writeJson g (Just v) = writeJson g v + writeJson g Nothing = writeNull g + readJson p = + if currentToken p == VALUE_NULL + then Nothing + else Just (readJson p) + toJson (Just value) = toJson value + toJson Nothing = JsonNull + fromJson JsonNull = Nothing + fromJson json = Just (fromJson json) + +instance (Json a) => Json [a] where + writeJson g l = do + writeStartArray g + iter (writeJson g) l + writeEndArray g + readJson p = MList.freeze result + where + result = MList.create () + assertStartArray p + while (nextToken p != END_ARRAY) + (MList.add result $ readJson p) + toJson l = JsonArray (map toJson l) + fromJson (JsonArray l) = map fromJson l + +instance Json () where + writeJson g _ = do + writeStartArray g + writeEndArray g + readJson p = do + assertStartArray p + assertEndArray p + () + toJson _ = JsonArray [] + fromJson (JsonArray []) = () + +instance (Json a, Json b) => Json (a, b) where + writeJson g (a, b) = do + writeStartArray g + writeJson g a + writeJson g b + writeEndArray g + readJson p = (a, b) + where + assertStartArray p + a = readNextJson p + b = readNextJson p + assertEndArray p + toJson (a, b) = JsonArray [toJson a, toJson b] + fromJson (JsonArray [a, b]) = (fromJson a, fromJson b) + +instance (Json a, Json b, Json c) => Json (a, b, c) where + writeJson g (a, b, c) = do + writeStartArray g + writeJson g a + writeJson g b + writeJson g c + writeEndArray g + readJson p = (a, b, c) + where + assertStartArray p + a = readNextJson p + b = readNextJson p + c = readNextJson p + assertEndArray p + toJson (a, b, c) = JsonArray [toJson a, toJson b, toJson c] + fromJson (JsonArray [a, b, c]) = (fromJson a, fromJson b, fromJson c) + +instance (Json a, Json b, Json c, Json d) => Json (a, b, c, d) where + writeJson g (a, b, c, d) = do + writeStartArray g + writeJson g a + writeJson g b + writeJson g c + writeJson g d + writeEndArray g + 
readJson p = (a, b, c, d) + where + assertStartArray p + a = readNextJson p + b = readNextJson p + c = readNextJson p + d = readNextJson p + assertEndArray p + toJson (a, b, c, d) = JsonArray [toJson a, toJson b, toJson c, toJson d] + fromJson (JsonArray [a, b, c, d]) = (fromJson a, fromJson b, fromJson c, fromJson d) + +instance (Json a, Json b, Json c, Json d, Json e) => Json (a, b, c, d, e) where + writeJson g (a, b, c, d, e) = do + writeStartArray g + writeJson g a + writeJson g b + writeJson g c + writeJson g d + writeJson g e + writeEndArray g + readJson p = (a, b, c, d, e) + where + assertStartArray p + a = readNextJson p + b = readNextJson p + c = readNextJson p + d = readNextJson p + e = readNextJson p + assertEndArray p + toJson (a, b, c, d, e) = JsonArray [toJson a, toJson b, toJson c, toJson d, toJson e] + fromJson (JsonArray [a, b, c, d, e]) = (fromJson a, fromJson b, fromJson c, fromJson d, fromJson e) + +data Json = + JsonString String + | JsonDouble Double + | JsonLong Long + | JsonArray [Json] + | JsonBoolean Boolean + | JsonNull + | JsonObject [JsonField] +data JsonField = JsonField String Json + +deriving instance Show Json +deriving instance Eq Json +deriving instance Show JsonField +deriving instance Eq JsonField + +instance Json Json where + writeJson g (JsonString value) = writeString g value + writeJson g (JsonDouble value) = writeNumberDouble g value + writeJson g (JsonLong value) = writeNumberLong g value + writeJson g (JsonBoolean value) = writeBoolean g value + writeJson g JsonNull = writeNull g + writeJson g (JsonArray values) = do + writeStartArray g + iter (writeJson g) values + writeEndArray g + writeJson g (JsonObject fields) = do + writeStartObject g + iter (\(JsonField name value) -> do + writeFieldName g name + writeJson g value) fields + writeEndObject g + + readJson p = do + token = currentToken p + if token == VALUE_STRING + then JsonString (getText p) + else if token == VALUE_NUMBER_FLOAT + then JsonDouble (getDoubleValue p) 
+ else if token == VALUE_NUMBER_INT + then JsonLong (getLongValue p) + else if token == VALUE_TRUE + then JsonBoolean True + else if token == VALUE_FALSE + then JsonBoolean False + else if token == VALUE_NULL + then JsonNull + else if token == START_ARRAY + then do + result = MList.create () + while (nextToken p != END_ARRAY) + (MList.add result $ readJson p) + JsonArray (MList.freeze result) + else if token == START_OBJECT + then do + result = MList.create () + readJsonObjectContents result p + JsonObject (MList.freeze result) + else fail "Unsupported token type." + toJson = id + fromJson = id + +@private +readJsonObjectContents :: MList.T JsonField -> JsonParser -> () +readJsonObjectContents result p = + match nextFieldName p with + Just name -> do + MList.add result $ JsonField name (readNextJson p) + readJsonObjectContents result p + Nothing -> () + +/* +@private +makeTypeEqual :: a -> a -> () +makeTypeEqual _ _ = () + +@private +testValue :: Json a => Show a => Eq a => a -> () +testValue v1 = do + v2 = toJsonString v1 + v3 = fromJsonString v2 + makeTypeEqual v1 v3 + print "\(v1) -> \(v2) -> \(v3)" + if v1 != v3 + then fail "Values differ" + else () + +testGenericJson :: String -> () +testGenericJson v1 = do + v2 = fromJsonString v1 :: Json + v3 = toJsonString v2 + print "\(v1) -> \(v2) -> \(v3)" + if v1 != v3 + then fail "Values differ" + else () + +testIt :: () +testIt = do + testValue "asd" + testValue True + testValue False + testValue (123 :: Short) + testValue (123 :: Integer) + testValue (123 :: Long) + testValue (123 :: Double) + testValue (123 :: Float) + testValue (Nothing :: Maybe String) + testValue (Just "asd") + testValue ["a", "b", "c"] + testValue [[],["a"],["b","c"]] + testValue () + testValue ("a", "b") + testValue ("a", "b", "c") + testValue ("a", "b", "c", "d") + testValue [Just "a", Nothing] + testValue [("a", "b"), ("c", "d")] + testValue (("a", "b"), ("c", "d")) + + testGenericJson "\"asd\"" + testGenericJson "123" + testGenericJson 
"123.0" + testGenericJson "true" + testGenericJson "false" + testGenericJson "null" + testGenericJson "[1,2,3]" + testGenericJson "[[1],[2,3],[]]" + testGenericJson "{}" + testGenericJson "{\"a\":123,\"b\":[]}" + testGenericJson "{\"a\":{}}" +*/ \ No newline at end of file diff --git a/bundles/org.simantics.scl.data/scl/Data/Writer.scl b/bundles/org.simantics.scl.data/scl/Data/Writer.scl new file mode 100644 index 000000000..4c526f2c1 --- /dev/null +++ b/bundles/org.simantics.scl.data/scl/Data/Writer.scl @@ -0,0 +1,18 @@ +import "JavaBuiltin" as Java + +importJava "java.io.Writer" where + data Writer + +importJava "java.io.StringWriter" where + data StringWriter + + @JavaName "" + createStringWriter :: StringWriter + + @JavaName toString + resultOfStringWriter :: StringWriter -> String + +class WriterLike a where + toWriter :: a -> Writer +instance WriterLike StringWriter where + toWriter = Java.unsafeCoerce \ No newline at end of file diff --git a/bundles/org.simantics.scl.db/scl/Simantics/DB.scl b/bundles/org.simantics.scl.db/scl/Simantics/DB.scl index 023960678..74bc42204 100644 --- a/bundles/org.simantics.scl.db/scl/Simantics/DB.scl +++ b/bundles/org.simantics.scl.db/scl/Simantics/DB.scl @@ -11,31 +11,6 @@ effect WriteGraph "graph" "org.simantics.db.WriteGraph" -importJava "org.simantics.databoard.type.Datatype" where - data Datatype - @JavaName toString - showDatatype :: Datatype -> String - -instance Show Datatype where - show = showDatatype - -importJava "org.simantics.databoard.Bindings" where - @JavaName getBinding - datatypeBinding :: Datatype -> Binding Dynamic - -importJava "org.simantics.databoard.binding.mutable.Variant" where - @JavaName "" - createVariant_ :: Binding Dynamic -> Dynamic -> Variant - @JavaName "type" - variantDatatype :: Variant -> Datatype - -createVariant :: Datatype -> Dynamic -> Variant -createVariant dt v = createVariant_ (datatypeBinding dt) v - -importJava "org.simantics.databoard.Datatypes" where - @JavaName translate - 
translateDatatype :: String -> Datatype - importJava "org.simantics.db.Resource" where "A resource is a node in a semantic graph." data Resource diff --git a/bundles/org.simantics.scl.runtime/META-INF/MANIFEST.MF b/bundles/org.simantics.scl.runtime/META-INF/MANIFEST.MF index 64d5ac63d..7274b0bf2 100755 --- a/bundles/org.simantics.scl.runtime/META-INF/MANIFEST.MF +++ b/bundles/org.simantics.scl.runtime/META-INF/MANIFEST.MF @@ -21,4 +21,5 @@ Export-Package: org.simantics.scl.runtime, org.simantics.scl.runtime.xml Require-Bundle: org.junit;bundle-version="4.12.0";resolution:=optional, gnu.trove3;bundle-version="3.0.0", - org.simantics.databoard;bundle-version="0.6.5";visibility:=reexport + org.simantics.databoard;bundle-version="0.6.5";visibility:=reexport, + org.slf4j.api;bundle-version="1.7.20" diff --git a/bundles/org.simantics.scl.runtime/scl/Databoard.scl b/bundles/org.simantics.scl.runtime/scl/Databoard.scl new file mode 100644 index 000000000..f81376621 --- /dev/null +++ b/bundles/org.simantics.scl.runtime/scl/Databoard.scl @@ -0,0 +1,279 @@ +import "Prelude" +import "Random" + +/// Datatype /// + +"A data type component with component name and data type" +@JavaType "org.simantics.databoard.type.Component" +@FieldNames [name, "type"] +data DatatypeComponent = DatatypeComponent String Datatype + +"""A data type that represents the data types supported by the Simantics +Databoard plugin.""" +@JavaType "org.simantics.databoard.type.Datatype" +data Datatype = + @JavaType "org.simantics.databoard.type.BooleanType" + BooleanType + | @JavaType "org.simantics.databoard.type.ByteType" + ByteType + | @JavaType "org.simantics.databoard.type.IntegerType" + IntegerType + | @JavaType "org.simantics.databoard.type.LongType" + LongType + | @JavaType "org.simantics.databoard.type.FloatType" + FloatType + | @JavaType "org.simantics.databoard.type.DoubleType" + DoubleType + | @JavaType "org.simantics.databoard.type.StringType" + StringType + | @JavaType 
"org.simantics.databoard.type.ArrayType" + @FieldNames [componentType] + ArrayType Datatype + | @JavaType "org.simantics.databoard.type.OptionalType" + @FieldNames [componentType] + OptionalType Datatype + | @JavaType "org.simantics.databoard.type.MapType" + @FieldNames [keyType, valueType] + MapType Datatype Datatype + | @JavaType "org.simantics.databoard.type.RecordType" + @FieldNames [components] + RecordType (Vector DatatypeComponent) + | @JavaType "org.simantics.databoard.type.UntionType" + @FieldNames [components] + UnionType (Vector DatatypeComponent) + | @JavaType "org.simantics.databoard.type.VariantType" + VariantType + +importJava "org.simantics.databoard.type.Datatype" where + @private + @JavaName toString + showDatatype :: Datatype -> String + + "Get the number of type components in an data type" + @JavaName getComponentCount + datatypeCompnentCount :: Datatype -> Integer + + "Get a component type of a composite data type" + @JavaName getComponentType + datatypeComponentType :: Datatype -> ChildReference -> Datatype + + @private + @JavaName equals + datatypeEquals :: Datatype -> Datatype -> Boolean + +instance Show Datatype where + show = showDatatype + +instance Eq Datatype where + (==) = datatypeEquals + +/// Binding /// + +importJava "org.simantics.databoard.binding.Binding" where + "Check whether a dynamic object is an instance of a given binding" + @JavaName isInstance + isBindingInstance :: Binding Dynamic -> Dynamic -> Boolean + + "Create a serializable object from a textual representation" + parseValueDefinition :: Serializable a => String -> a + + "Compare two serializable objects\n\nResult is -1, 0 or 1 depending the partial ordering of the objects." 
+ @JavaName compare + compareObjects :: Serializable a => a -> a -> Integer + + "Return true, if two serializable values are equal" + @JavaName equals + serializableEq :: Serializable a => a -> a -> Boolean + + "The default value of a serializable type" + @JavaName createDefault + serializableDefaultValue :: Serializable a => a + + "Create a random value of a serializable type" + @JavaName createRandom + serializableRandomValue :: Serializable a => a + + "Get a textual representation of a serializable value" + @JavaName toString + showSerializable :: Serializable a => a -> String + + @private + @JavaName getComponent + getSerializableComponent_ :: Serializable a => a -> ChildReference -> Binding b -> b + + "Get a component binding" + @JavaName getComponentBinding + getComponentBinding :: Binding a -> ChildReference -> Binding b + + @private + @JavaName equals + bindingEquals :: Binding a -> Binding a -> Boolean + +instance Eq (Binding a) where + (==) = bindingEquals + +"Get a child data component of a composite serializable value" +getSerializableComponent :: Serializable a => Serializable b => a -> ChildReference -> b +getSerializableComponent object ref = getSerializableComponent_ object ref binding + +/// Serializer /// + +importJava "org.simantics.databoard.serialization.Serializer" where + "A data serializer for SCL type a" + data Serializer a + + @private + @JavaName "serialize" + serialize_ :: Serializer a -> a -> ByteArray + + @private + @JavaName "deserialize" + deserialize_ :: Serializer a -> ByteArray -> a + +importJava "org.simantics.databoard.Bindings" where + @private + @JavaName "getSerializer" + serializerOf :: Binding a -> Serializer a + + @private + @JavaName toString + bindingToString :: Binding a -> String + + "Adapt between types using explicitly provided binding objects: `adapt_ value from to`" + @JavaName adapt + adapt_ :: a -> Binding a -> Binding b -> b + +"Adapt value from one serializable type to another" +adapt :: Serializable a => 
Serializable b => a -> b +adapt x = adapt_ x binding binding + +instance Show (Binding a) where + show = bindingToString + +"Serializes a value to a byte array using default serializer." +serialize :: Serializable a => a -> ByteArray +serialize v = serialize_ (serializerOf binding) v + +"Deserializes a value from a byte array using default serializer." +deserialize :: Serializable a => ByteArray -> a +deserialize ba = deserialize_ (serializerOf binding) ba + +importJava "org.simantics.databoard.Bindings" where + "Get a default binding for a given data type" + @JavaName getBinding + datatypeBinding :: Datatype -> Binding Dynamic + +importJava "org.simantics.databoard.Datatypes" where + "Get a data type from a string representation" + @JavaName translate + translateDatatype :: String -> Datatype + +importJava "org.simantics.databoard.binding.mutable.Variant" where + // data Variant (in Builtins) + "Create a variant using an explicitly provided binding value (unchecked cast)" + @JavaName "" + createVariant_ :: Binding Dynamic -> Dynamic -> Variant + + "Get the data type of a variant object" + @JavaName "type" + variantDatatype :: Variant -> Datatype + + "Get raw value contained by a variant (unchecked cast)" + @JavaName getValue + rawVariantValue :: Variant -> a + + "Create a variant from a raw object (based on Java class)" + @JavaName ofInstance + variantOf :: a -> Variant + + "Create a variant with explicitly provided binding and value" + @JavaName "" + variant_ :: Binding a -> a -> Variant + + "Get value from a variant using a given binding" + @JavaName getValue + variantValue_ :: Variant -> Binding a -> a + + @private + @JavaName toString + showVariant :: Variant -> String + + "Get a component of compound data value in a variant" + @JavaName getComponent + variantComponent :: Variant -> ChildReference -> Variant + +"Create a variant of a given data type from an object in the default binding (unchecked, use with extreme caution)" +createVariant :: Datatype -> 
Dynamic -> Variant +createVariant dt v = createVariant_ (datatypeBinding dt) v + +"Create a variant from a serializable value" +variant :: Serializable a => a -> Variant +variant v = variant_ binding v + +"Get the value of a variant in a serializable type" +variantValue :: Serializable a => Variant -> a +variantValue v = variantValue_ v binding + +instance Show Variant where + show = showVariant + +"Get an element of a compound variant value using an index reference" +variantElement :: Serializable a => Variant -> Integer -> a +variantElement v i = variantValue (variantComponent v (indexReference i)) + +importJava "org.simantics.databoard.accessor.reference.ChildReference" where + "A reference to a child element in a composite data type/binding or value" + data ChildReference + + "Combine a list of child data object references into a single path reference" + @JavaName compile + compileReference :: [ChildReference] -> ChildReference + +importJava "org.simantics.databoard.accessor.reference.IndexReference" where + """Get a reference to a child data object using an index (zero-based) +* Element index of an array object +* Field index of a record or union type +* 0: + * Key component of a map type/binding + * Component of any single-component type/binding (optional, array) + * Contained value/type of any single-element object (optional, union, variant) +* 1: + * Value component of a map type/binding + """ + @JavaName "" + indexReference :: Integer -> ChildReference + +importJava "org.simantics.databoard.accessor.reference.KeyReference" where + """Get a reference to a MapType child data object using a given key value +* Contained value of a map object for a given key value + """ + @JavaName "" + keyReference :: Variant -> ChildReference + +importJava "org.simantics.databoard.accessor.reference.NameReference" where + """Get a reference to a child data object using a field name +* A component name of a record or union data type/binding +* "key": The key component of a map 
data type/binding +* "value": The value component of a map data type/binding + """ + @JavaName "" + nameReference :: String -> ChildReference + +importJava "org.simantics.databoard.accessor.reference.LabelReference" where + """Get a reference to a child data object using a label +* A component name of a record or union data type/binding +* A string representation of the index of a record or union data type/binding component +* "v": The component type of an array/optional data type/binding +* "0"/"key": The key component of a map data type/binding +* "1"/"value": The value component of a map data type/binding + """ + @JavaName "" + labelReference :: String -> ChildReference + +importJava "org.simantics.databoard.accessor.reference.ComponentReference" where + """Get a reference to a component child data object +* Component of an array/optional data type/binding +* Contained value of an optional/variant/union object + """ + @JavaName "" + componentReference :: ChildReference diff --git a/bundles/org.simantics.scl.runtime/scl/Expressions/Equations.scl b/bundles/org.simantics.scl.runtime/scl/Expressions/Equations.scl deleted file mode 100644 index e695a0089..000000000 --- a/bundles/org.simantics.scl.runtime/scl/Expressions/Equations.scl +++ /dev/null @@ -1,13 +0,0 @@ -effect Equation - "equation" - "org.simantics.scl.runtime.equations.EquationContext" - -importJava "org.simantics.scl.runtime.equations.EquationContext" where - data EquationContext - - listenEquationVariable :: String -> (a -> ()) -> () - setEquationVariable :: String -> a -> () - applyEquationContext :: ( a) -> EquationContext -> a - -importJava "org.simantics.scl.runtime.equations.TestEquationContext" where - solveEquations :: ( a) -> [(String,String)] \ No newline at end of file diff --git a/bundles/org.simantics.scl.runtime/scl/Prelude.scl b/bundles/org.simantics.scl.runtime/scl/Prelude.scl index 0dcd1b750..21658862d 100644 --- a/bundles/org.simantics.scl.runtime/scl/Prelude.scl +++ 
b/bundles/org.simantics.scl.runtime/scl/Prelude.scl @@ -558,6 +558,43 @@ importJava "java.lang.Math" where /// Integer /// +@private +importJava "java.lang.Byte" where + @JavaName toString + showByte :: Byte -> String + + @JavaName parseByte + readByte :: String -> Byte + +instance Eq Byte where + (==) = Java.bcmpeq + (!=) = Java.bcmpne + +instance Ord Byte where + (<) = Java.bcmplt + (<=) = Java.bcmple + (>) = Java.bcmpgt + (>=) = Java.bcmpge + +instance Additive Byte where + zero = Java.i2b Java.iconst_0 + (+) = Java.badd + +instance Ring Byte where + neg = Java.bneg + (-) = Java.bsub + one = Java.i2b Java.iconst_1 + (*) = Java.bmul + fromInteger = Java.i2b + +instance Show Byte where + show = showByte + precedence v = if v >= 0 then 0 else 100 + +instance Read Byte where + read = readByte + + @private importJava "java.lang.Short" where @JavaName toString @@ -1266,7 +1303,7 @@ instance MonadZero Maybe where mzero = Nothing instance MonadOr Maybe where - morelse (Just a) _ = Just a + morelse a@(Just _) _ = a morelse _ b = b "`execJust v f` executes the function `f` with parameter value `x`, if `v=Just x`. If `v=Nothing`, the function does nothing." 
@@ -2240,45 +2277,7 @@ instance Show TypeRep where isSpecialType (TCon "Builtin" "(,,,)") = True isSpecialType (TApply a _) = isSpecialType a */ -// Serializable - -importJava "org.simantics.databoard.serialization.Serializer" where - data Serializer a - - @private - @JavaName "serialize" - serialize_ :: Serializer a -> a -> ByteArray - - @private - @JavaName "deserialize" - deserialize_ :: Serializer a -> ByteArray -> a - -importJava "org.simantics.databoard.Bindings" where - @private - @JavaName "getSerializer" - serializerOf :: Binding a -> Serializer a - - @private - @JavaName toString - bindingToString :: Binding a -> String - - @JavaName adapt - adapt_ :: a -> Binding a -> Binding b -> b - -adapt :: Serializable a => Serializable b => a -> b -adapt x = adapt_ x binding binding - -instance Show (Binding a) where - show = bindingToString -"Serializes a value to a byte array." -serialize :: Serializable a => a -> ByteArray -serialize v = serialize_ (serializerOf binding) v - -"Deserializes a value from a byte array." 
-deserialize :: Serializable a => ByteArray -> a -deserialize ba = deserialize_ (serializerOf binding) ba - // ByteArray importJava "java.util.Arrays" where @@ -2289,53 +2288,6 @@ importJava "java.util.Arrays" where instance Show ByteArray where show = byteArrayToString -importJava "org.simantics.databoard.binding.mutable.Variant" where - // data Variant (in Builtins) - @JavaName getValue - rawVariantValue :: Variant -> a - @JavaName ofInstance - variantOf :: a -> Variant - @JavaName "" - variantOfWithBinding :: Binding a -> a -> Variant - @JavaName getValue - variantValue_ :: Variant -> Binding a -> a - @JavaName toString - showVariant :: Variant -> String - - @JavaName getComponent - variantComponent :: Variant -> ChildReference -> Variant - -variantValue :: Serializable a => Variant -> a -variantValue v = variantValue_ v binding - -instance Show Variant where - show = showVariant - -variantElement :: Serializable a => Variant -> Integer -> a -variantElement v i = variantValue (variantComponent v (indexReference i)) - -importJava "org.simantics.databoard.accessor.reference.ChildReference" where - data ChildReference - - @JavaName compile - compileReference :: [ChildReference] -> ChildReference - -importJava "org.simantics.databoard.accessor.reference.IndexReference" where - @JavaName "" - indexReference :: Integer -> ChildReference - -importJava "org.simantics.databoard.accessor.reference.KeyReference" where - @JavaName "" - keyReference :: Variant -> ChildReference - -importJava "org.simantics.databoard.accessor.reference.NameReference" where - @JavaName "" - nameReference :: String -> ChildReference - -importJava "org.simantics.databoard.accessor.reference.LabelReference" where - @JavaName "" - labelReference :: String -> ChildReference - // Type @private diff --git a/bundles/org.simantics.scl.runtime/scl/StandardLibrary.scl b/bundles/org.simantics.scl.runtime/scl/StandardLibrary.scl index 6b43166e3..122fcd02b 100644 --- 
a/bundles/org.simantics.scl.runtime/scl/StandardLibrary.scl +++ b/bundles/org.simantics.scl.runtime/scl/StandardLibrary.scl @@ -4,6 +4,7 @@ include "BigInteger" include "ArrayList" as ArrayList include "String" as String include "Vector" +include "Databoard" include "Debug" as Debug include "Lazy" as Lazy include "File" as File @@ -15,7 +16,7 @@ include "MSet" as MSet include "MList" as MList include "MMultiMap" as MMultiMap include "Coercion" -include "Json2" +//include "Json2" include "IterN" as Extra diff --git a/bundles/org.simantics.scl.runtime/src/org/simantics/scl/runtime/equations/EquationContext.java b/bundles/org.simantics.scl.runtime/src/org/simantics/scl/runtime/equations/EquationContext.java deleted file mode 100644 index f20dd6fb3..000000000 --- a/bundles/org.simantics.scl.runtime/src/org/simantics/scl/runtime/equations/EquationContext.java +++ /dev/null @@ -1,20 +0,0 @@ -package org.simantics.scl.runtime.equations; - -import org.simantics.scl.runtime.SCLContext; -import org.simantics.scl.runtime.function.Function; -import org.simantics.scl.runtime.tuple.Tuple0; - -public interface EquationContext { - void listenEquationVariable(String variableName, Function callback); - void setEquationVariable(String variableName, Object value); - - public static Object applyEquationContext(Function f, EquationContext equationContext) { - SCLContext context = SCLContext.getCurrent(); - Object oldEquationContext = context.put("equation", equationContext); - try { - return f.apply(Tuple0.INSTANCE); - } finally { - context.put("equation", oldEquationContext); - } - } -} diff --git a/bundles/org.simantics.scl.runtime/src/org/simantics/scl/runtime/equations/TestEquationContext.java b/bundles/org.simantics.scl.runtime/src/org/simantics/scl/runtime/equations/TestEquationContext.java deleted file mode 100644 index 856ef2870..000000000 --- a/bundles/org.simantics.scl.runtime/src/org/simantics/scl/runtime/equations/TestEquationContext.java +++ /dev/null @@ -1,95 +0,0 @@ 
-package org.simantics.scl.runtime.equations; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -import org.simantics.scl.runtime.SCLContext; -import org.simantics.scl.runtime.function.Function; -import org.simantics.scl.runtime.tuple.Tuple0; -import org.simantics.scl.runtime.tuple.Tuple2; - -import gnu.trove.map.hash.THashMap; -import gnu.trove.procedure.TObjectObjectProcedure; - -public class TestEquationContext implements EquationContext { - - public static final boolean TRACE = true; - - THashMap values = new THashMap(); - THashMap> listenerMap = new THashMap>(); - - @Override - public void listenEquationVariable(String variableName, Function listener) { - if(TRACE) - System.out.println("listenEquationVariable(" + variableName + ", " + listener + ")"); - if(values.containsKey(variableName)) { - Object value = values.get(variableName); - if(TRACE) - System.out.println(" apply " + value); - listener.apply(value); - } - else { - if(TRACE) - System.out.println(" add listener"); - ArrayList listeners = listenerMap.get(variableName); - if(listeners == null) { - listeners = new ArrayList(); - listenerMap.put(variableName, listeners); - } - listeners.add(listener); - } - } - - @Override - public void setEquationVariable(String variableName, Object value) { - if(TRACE) - System.out.println("setEquationVariable(" + variableName + ", " + value + ")"); - if(values.containsKey(variableName)) - throw new IllegalStateException("Value for " + variableName + " already defined (oldValue=" + values.get(variableName) + - ", newValue=" + value + ")."); - values.put(variableName, value); - ArrayList listeners = listenerMap.remove(variableName); - SCLContext context = SCLContext.getCurrent(); - if(listeners != null) { - Object oldEquationContex = context.put("equation", this); - try { - for(Function listener : listeners) { - if(TRACE) - System.out.println(" apply " + listener + " " + value); - listener.apply(value); - } - } finally { - 
context.put("equation", oldEquationContex); - } - } - } - - public static List solveEquations(Function f) { - TestEquationContext equationContext = new TestEquationContext(); - SCLContext context = SCLContext.getCurrent(); - Object oldEquationContext = context.put("equation", equationContext); - try { - f.apply(Tuple0.INSTANCE); - } finally { - context.put("equation", oldEquationContext); - } - ArrayList result = new ArrayList(equationContext.values.size()); - equationContext.values.forEachEntry(new TObjectObjectProcedure() { - @Override - public boolean execute(String a, Object b) { - result.add(new Tuple2(a, String.valueOf(b))); - return true; - } - }); - Collections.sort(result, (t1, t2) -> { - return ((String)t1.c0).compareTo((String)t2.c0); - }); - return result; - } - - public THashMap getValues() { - return values; - } - -} diff --git a/bundles/org.simantics.scl.runtime/src/org/simantics/scl/runtime/reporting/SCLReportingHandler.java b/bundles/org.simantics.scl.runtime/src/org/simantics/scl/runtime/reporting/SCLReportingHandler.java index a53595646..91e71d652 100755 --- a/bundles/org.simantics.scl.runtime/src/org/simantics/scl/runtime/reporting/SCLReportingHandler.java +++ b/bundles/org.simantics.scl.runtime/src/org/simantics/scl/runtime/reporting/SCLReportingHandler.java @@ -1,5 +1,8 @@ package org.simantics.scl.runtime.reporting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** *

An interface that is used to handle printing commands from SCL * such as Prelude.print. This interface is typically stored to @@ -11,6 +14,7 @@ package org.simantics.scl.runtime.reporting; * @author Hannu Niemistö */ public interface SCLReportingHandler { + public static final Logger LOGGER = LoggerFactory.getLogger(SCLReportingHandler.class); public static final String REPORTING_HANDLER = "reportingHandler"; public void print(String text); @@ -19,6 +23,34 @@ public interface SCLReportingHandler { public void didWork(double amount); public static final SCLReportingHandler DEFAULT = new AbstractSCLReportingHandler() { + @Override + public void print(String text) { + LOGGER.info(text); + } + + @Override + public void printError(String error) { + LOGGER.error(error); + } + }; + + public static final SCLReportingHandler DEFAULT_WITHOUT_ECHO = new AbstractSCLReportingHandler() { + @Override + public void print(String text) { + LOGGER.info(text); + } + + @Override + public void printError(String error) { + LOGGER.error(error); + } + + @Override + public void printCommand(String command) { + } + }; + + public static final SCLReportingHandler SYSOUT = new AbstractSCLReportingHandler() { @Override public void print(String text) { System.out.println(text); @@ -30,7 +62,7 @@ public interface SCLReportingHandler { } }; - public static final SCLReportingHandler DEFAULT_WITHOUT_ECHO = new AbstractSCLReportingHandler() { + public static final SCLReportingHandler SYSOUT_WITHOUT_ECHO = new AbstractSCLReportingHandler() { @Override public void print(String text) { System.out.println(text); diff --git a/bundles/org.simantics.scl.ui/src/org/simantics/scl/ui/editor2/OpenDeclaration.java b/bundles/org.simantics.scl.ui/src/org/simantics/scl/ui/editor2/OpenDeclaration.java index f8ea0c20d..522b8d3ed 100644 --- a/bundles/org.simantics.scl.ui/src/org/simantics/scl/ui/editor2/OpenDeclaration.java +++ b/bundles/org.simantics.scl.ui/src/org/simantics/scl/ui/editor2/OpenDeclaration.java @@ 
-46,14 +46,27 @@ public class OpenDeclaration extends AbstractHandler { return text.substring(startPos, endPos); } - public static String extractAt(String text, int caretPos) { + public static String extractIdentifierOrSymbolAt(String text, int caretPos) { String result = extractIdentifierAt(text, caretPos); if(!result.isEmpty()) return result; return extractSymbolAt(text, caretPos); } - + private static String extractLineAt(String text, int caretPos) { + int startPos = caretPos; + while(startPos > 0 && !isNewline(text.charAt(startPos-1))) + --startPos; + int endPos = caretPos; + while(endPos < text.length() && !isNewline(text.charAt(endPos))) + ++endPos; + return text.substring(startPos, endPos); + } + + private static boolean isNewline(char c) { + return c=='\n' || c=='\r'; + } + @Override public Object execute(ExecutionEvent event) throws ExecutionException { IEditorPart editor = @@ -62,14 +75,28 @@ public class OpenDeclaration extends AbstractHandler { return null; SCLModuleEditor2 moduleEditor = (SCLModuleEditor2)editor; StyledText styledText = (StyledText)moduleEditor.getAdapter(Control.class); - String identifierAtCaret = extractAt(styledText.getText(), styledText.getCaretOffset()); - if(identifierAtCaret.isEmpty()) - return null; - SCLTextEditorEnvironment editorEnvironment = moduleEditor.getSCLTextEditorEnvironment(); - editorEnvironment.updateEnvironment(moduleEditor.getDocument()); - SCLValue value = editorEnvironment.getValue(identifierAtCaret); - if(value != null) - OpenSCLDefinition.openDefinition(value); + String text = styledText.getText(); + int caretOffset = styledText.getCaretOffset(); + + // Find the line where the caret is + String lineAtCaret = extractLineAt(text, caretOffset); + if(lineAtCaret.startsWith("import ") || lineAtCaret.startsWith("include ")) { + int p1 = lineAtCaret.indexOf('"', 6); + int p2 = lineAtCaret.indexOf('"', p1+1); + String moduleName = lineAtCaret.substring(p1+1, p2); + OpenSCLModule.openModule(moduleName); + } + else { 
+ // Try to find an identifier at caret + String identifierAtCaret = extractIdentifierOrSymbolAt(text, caretOffset); + if(identifierAtCaret.isEmpty()) + return null; + SCLTextEditorEnvironment editorEnvironment = moduleEditor.getSCLTextEditorEnvironment(); + editorEnvironment.updateEnvironment(moduleEditor.getDocument()); + SCLValue value = editorEnvironment.getValue(identifierAtCaret); + if(value != null) + OpenSCLDefinition.openDefinition(value); + } return null; } diff --git a/bundles/org.simantics.scl.ui/src/org/simantics/scl/ui/editor2/OpenSCLModule.java b/bundles/org.simantics.scl.ui/src/org/simantics/scl/ui/editor2/OpenSCLModule.java index a9792a4d0..a0208e5b2 100644 --- a/bundles/org.simantics.scl.ui/src/org/simantics/scl/ui/editor2/OpenSCLModule.java +++ b/bundles/org.simantics.scl.ui/src/org/simantics/scl/ui/editor2/OpenSCLModule.java @@ -1,31 +1,35 @@ -package org.simantics.scl.ui.editor2; - -import org.eclipse.core.commands.AbstractHandler; -import org.eclipse.core.commands.ExecutionEvent; -import org.eclipse.core.commands.ExecutionException; -import org.eclipse.ui.IWorkbenchPage; -import org.eclipse.ui.PartInitException; -import org.eclipse.ui.PlatformUI; - -public class OpenSCLModule extends AbstractHandler { - - @Override - public Object execute(ExecutionEvent event) throws ExecutionException { - SCLModuleSelectionDialog dialog = new SCLModuleSelectionDialog( - PlatformUI.getWorkbench().getActiveWorkbenchWindow().getShell()); - if(dialog.open() == SCLModuleSelectionDialog.OK) { - String moduleName = (String)dialog.getFirstResult(); - IWorkbenchPage page = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getActivePage(); - if(page == null) - return null; - SCLModuleEditorInput input = new StandardSCLModuleEditorInput(moduleName); - try { - page.openEditor(input, "org.simantics.scl.ui.editor2"); - } catch (PartInitException e) { - e.printStackTrace(); - } - } - return null; - } - -} +package org.simantics.scl.ui.editor2; + +import 
org.eclipse.core.commands.AbstractHandler; +import org.eclipse.core.commands.ExecutionEvent; +import org.eclipse.core.commands.ExecutionException; +import org.eclipse.ui.IWorkbenchPage; +import org.eclipse.ui.PartInitException; +import org.eclipse.ui.PlatformUI; + +public class OpenSCLModule extends AbstractHandler { + + @Override + public Object execute(ExecutionEvent event) throws ExecutionException { + SCLModuleSelectionDialog dialog = new SCLModuleSelectionDialog( + PlatformUI.getWorkbench().getActiveWorkbenchWindow().getShell()); + if(dialog.open() == SCLModuleSelectionDialog.OK) { + String moduleName = (String)dialog.getFirstResult(); + openModule(moduleName); + } + return null; + } + + public static void openModule(String moduleName) { + IWorkbenchPage page = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getActivePage(); + if(page == null) + return; + SCLModuleEditorInput input = new StandardSCLModuleEditorInput(moduleName); + try { + page.openEditor(input, "org.simantics.scl.ui.editor2"); + } catch (PartInitException e) { + e.printStackTrace(); + } + } + +} diff --git a/bundles/org.simantics.selectionview.ontology/graph.tg b/bundles/org.simantics.selectionview.ontology/graph.tg index 757158d3b..49a3f113f 100644 Binary files a/bundles/org.simantics.selectionview.ontology/graph.tg and b/bundles/org.simantics.selectionview.ontology/graph.tg differ diff --git a/bundles/org.simantics.selectionview.ontology/graph/Selectionview.pgraph b/bundles/org.simantics.selectionview.ontology/graph/Selectionview.pgraph index f9eb00666..8905c6252 100644 --- a/bundles/org.simantics.selectionview.ontology/graph/Selectionview.pgraph +++ b/bundles/org.simantics.selectionview.ontology/graph/Selectionview.pgraph @@ -99,6 +99,8 @@ SEL.SortingName ==> "String" "Boolean" "Boolean"