/*******************************************************************************
- * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * Copyright (c) 2007, 2018 Association for Decentralized Information Management
* in Industry THTH ry.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
-import java.time.Instant;
-import java.time.ZoneId;
-import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Set;
import java.util.TreeMap;
import java.util.UUID;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipFile;
import org.eclipse.core.runtime.ILog;
import org.eclipse.core.runtime.IProduct;
import org.eclipse.core.runtime.Platform;
import org.eclipse.core.runtime.Status;
import org.eclipse.core.runtime.SubMonitor;
+import org.eclipse.osgi.service.datalocation.Location;
import org.eclipse.osgi.service.resolver.BundleDescription;
import org.ini4j.Ini;
import org.ini4j.InvalidFileFormatException;
import org.simantics.db.UndoContext;
import org.simantics.db.VirtualGraph;
import org.simantics.db.WriteGraph;
+import org.simantics.db.common.processor.MergingDelayedWriteProcessor;
+import org.simantics.db.common.processor.MergingGraphRequestProcessor;
import org.simantics.db.common.request.ObjectsWithType;
-import org.simantics.db.common.request.Queries;
-import org.simantics.db.common.request.WriteRequest;
import org.simantics.db.common.request.WriteResultRequest;
import org.simantics.db.common.utils.Transaction;
import org.simantics.db.exception.ClusterSetExistException;
import org.simantics.db.exception.DatabaseException;
-import org.simantics.db.exception.ResourceNotFoundException;
import org.simantics.db.indexing.DatabaseIndexing;
import org.simantics.db.layer0.genericrelation.DependenciesRelation;
+import org.simantics.db.layer0.genericrelation.IndexException;
+import org.simantics.db.layer0.genericrelation.IndexedRelations;
+import org.simantics.db.layer0.request.PossibleResource;
import org.simantics.db.layer0.util.SimanticsClipboardImpl;
import org.simantics.db.layer0.util.SimanticsKeys;
import org.simantics.db.layer0.util.TGTransferableGraphSource;
import org.simantics.db.layer0.variable.VariableRepository;
import org.simantics.db.management.SessionContext;
import org.simantics.db.request.Read;
+import org.simantics.db.request.Write;
import org.simantics.db.service.LifecycleSupport.LifecycleListener;
import org.simantics.db.service.LifecycleSupport.LifecycleState;
import org.simantics.db.service.QueryControl;
import org.simantics.db.service.UndoRedoSupport;
import org.simantics.db.service.VirtualGraphSupport;
import org.simantics.db.service.XSupport;
+import org.simantics.db.services.GlobalServiceInitializer;
import org.simantics.graph.db.GraphDependencyAnalyzer;
import org.simantics.graph.db.GraphDependencyAnalyzer.IU;
import org.simantics.graph.db.GraphDependencyAnalyzer.IdentityNode;
import org.simantics.graph.diff.Diff;
import org.simantics.graph.diff.TransferableGraphDelta1;
import org.simantics.internal.Activator;
+import org.simantics.internal.TimedSessionCache;
import org.simantics.internal.startup.StartupExtensions;
import org.simantics.layer0.Layer0;
import org.simantics.operation.Layer0X;
import org.simantics.project.ProjectFeatures;
import org.simantics.project.ProjectKeys;
import org.simantics.project.Projects;
+import org.simantics.project.SessionDescriptor;
import org.simantics.project.exception.ProjectException;
import org.simantics.project.features.registry.GroupReference;
import org.simantics.project.management.DatabaseManagement;
import org.simantics.project.management.ServerManager;
import org.simantics.project.management.ServerManagerFactory;
import org.simantics.project.management.WorkspaceUtil;
+import org.simantics.scl.compiler.module.repository.ModuleRepository;
+import org.simantics.scl.osgi.SCLOsgi;
import org.simantics.utils.FileUtils;
import org.simantics.utils.datastructures.Pair;
-import org.simantics.utils.logging.TimeLogger;
import org.simantics.utils.strings.EString;
+import org.simantics.utils.threads.ExecutorWorker;
+import org.simantics.utils.threads.ThreadUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
* It is applied when the ontology in the database of a workspace doesn't match
* a newer ontology in the Eclipse workspace.
*/
- public static enum OntologyRecoveryPolicy { ThrowError, Merge, ReinstallDatabase }
+ public static enum OntologyRecoveryPolicy { ThrowError, Merge, ReinstallDatabase, Bypass}
/**
* This policy dictates how the Simantics platform startup should react if
/** Session specific bindings */
public SimanticsBindings simanticsBindings;
- public SimanticsBindings simanticsBindings2;
public Thread mainThread;
+ // Safety net registered via Runtime.addShutdownHook() during startUp():
+ // if the JVM exits without shutdown() having been invoked, this hook runs
+ // the platform shutdown so the workspace/database is not left improperly
+ // closed. Failures are reported both to SLF4J and the Eclipse platform log.
+ private Thread shutdownHook = new Thread() {
+ @Override
+ public void run() {
+ try {
+ LOGGER.warn("Simantics platform was not properly shut down. Executing safety shutdown hook.");
+ // No progress monitor available in a shutdown hook, hence null.
+ shutdown(null, false);
+ } catch (PlatformException e) {
+ LOGGER.error("Simantics Platform shutdown hook execution failed.", e);
+ log.log(new Status(IStatus.ERROR, Activator.PLUGIN_ID, "Simantics Platform shutdown hook execution failed.", e));
+ }
+ }
+ };
+
/**
* The {@link IProject} activated by
* {@link #startUp(IProgressMonitor, RecoveryPolicy, OntologyRecoveryPolicy, ServerAddress, PlatformUserAgent)}
return application != null ? application : UUID.randomUUID().toString();
}
- private Session setupDatabase(String databaseDriverId, IProgressMonitor progressMonitor, RecoveryPolicy workspacePolicy, PlatformUserAgent userAgent) throws PlatformException {
+ private SessionDescriptor setupDatabase(String databaseDriverId, IProgressMonitor progressMonitor, RecoveryPolicy workspacePolicy, PlatformUserAgent userAgent) throws PlatformException {
if (progressMonitor == null)
progressMonitor = new NullProgressMonitor();
Path workspaceLocation = Platform.getLocation().toFile().toPath();
public void synchronizeOntologies(IProgressMonitor progressMonitor, OntologyRecoveryPolicy ontologyPolicy, boolean requireSynchronize) throws PlatformException {
- if (progressMonitor == null) progressMonitor = new NullProgressMonitor();
-
- final DatabaseManagement mgmt = new DatabaseManagement();
+ SubMonitor monitor = SubMonitor.convert(progressMonitor, 100);
+ monitor.setTaskName("Compile dynamic ontologies");
PlatformUtil.compileAllDynamicOntologies();
- progressMonitor.setTaskName("Asserting all ontologies are installed");
- final Map<GraphBundleRef, GraphBundleEx> platformTGs = new HashMap<GraphBundleRef, GraphBundleEx>();
+ String message = "Asserting all ontologies are installed";
+ LOGGER.info(message);
+ monitor.setTaskName(message);
+
+ DatabaseManagement mgmt = new DatabaseManagement();
+ Map<GraphBundleRef, GraphBundleEx> platformTGs = new HashMap<>();
try {
// Get a list of bundles installed into the database
- progressMonitor.subTask("find installed bundles from database");
- Map<GraphBundleRef, GraphBundleEx> installedTGs = new HashMap<GraphBundleRef, GraphBundleEx>();
+ message = "find installed bundles from database";
+ monitor.subTask(message);
+ LOGGER.info(message);
+ Map<GraphBundleRef, GraphBundleEx> installedTGs = new HashMap<>();
for (GraphBundle b : session.syncRequest( mgmt.GraphBundleQuery )) {
installedTGs.put(GraphBundleRef.of(b), GraphBundleEx.extend(b));
}
// if(installedTGs.size() > 1) return;
// Get a list of all bundles in the platform (Bundle Context)
- List<GraphBundle> tgs = new ArrayList<GraphBundle>();
- progressMonitor.subTask("load all transferable graphs from platform");
- PlatformUtil.getAllGraphs(tgs);
- progressMonitor.subTask("extend bundles to compile versions");
+ message = "load all transferable graphs from platform";
+ monitor.subTask(message);
+ LOGGER.info(message);
+ Collection<GraphBundle> tgs = PlatformUtil.getAllGraphs();
+ message = "extend bundles to compile versions";
+ monitor.subTask(message);
+ LOGGER.info(message);
for (GraphBundle b : tgs) {
GraphBundleEx gbe = GraphBundleEx.extend(b);
gbe.build();
}
// Compile a list of TGs that need to be installed or reinstalled in the database
- progressMonitor.subTask("check bundle reinstallation demand");
- List<GraphBundleEx> installTGs = new ArrayList<GraphBundleEx>();
+ message = "check bundle reinstallation demand";
+ monitor.subTask(message);
+ LOGGER.info(message);
+ List<GraphBundleEx> installTGs = new ArrayList<>();
// Create list of TGs to update, <newTg, oldTg>
- Map<GraphBundleEx,GraphBundleEx> reinstallTGs = new TreeMap<GraphBundleEx,GraphBundleEx>();
+ Map<GraphBundleEx,GraphBundleEx> reinstallTGs = new TreeMap<>();
for (Entry<GraphBundleRef, GraphBundleEx> e : platformTGs.entrySet()) {
GraphBundleRef key = e.getKey();
GraphBundleEx platformBundle = e.getValue();
session.getService(XSupport.class).setServiceMode(true, true);
// Throw error
- if (ontologyPolicy == OntologyRecoveryPolicy.ThrowError) {
+ if (ontologyPolicy == OntologyRecoveryPolicy.ThrowError || ontologyPolicy == OntologyRecoveryPolicy.Bypass) {
StringBuilder sb = new StringBuilder("The following graphs are not installed in the database: ");
if (!installTGs.isEmpty()) {
int i = 0;
sb.append(" Database/Platform Bundle version mismatch.\n");
}
sb.append("Hint: Use -fixErrors to install the graphs.");
- throw new PlatformException(sb.toString());
+ if (ontologyPolicy == OntologyRecoveryPolicy.ThrowError)
+ throw new PlatformException(sb.toString());
+ else
+ log.log(new Status(IStatus.WARNING, Activator.PLUGIN_ID, sb.toString()));
}
+
// Reinstall database
if (ontologyPolicy == OntologyRecoveryPolicy.ReinstallDatabase) {
log.log(new Status(IStatus.INFO, Activator.PLUGIN_ID, "Reinstalling the database."));
}
if (ontologyPolicy == OntologyRecoveryPolicy.Merge) {
- progressMonitor.subTask("Merging ontology changes");
+ message = "Merging ontology changes";
+ monitor.subTask(message);
+ LOGGER.info(message);
// Sort missing TGs into install order
GraphDependencyAnalyzer<GraphBundle> analyzer = new GraphDependencyAnalyzer<GraphBundle>();
for(GraphBundle tg : installTGs) analyzer.addGraph(tg, tg.getGraph());
sb.append("Conflict with "+problem.first+" and "+problem.second+".\n");
}
throw new PlatformException(sb.toString());
- }
- else if(!session.syncRequest( analyzer.queryExternalDependenciesSatisfied )) {
+ } else if(!session.syncRequest( analyzer.queryExternalDependenciesSatisfied )) {
Collection<IdentityNode> unsatisfiedDependencies = analyzer.getUnsatisfiedDependencies();
StringBuilder sb = new StringBuilder();
for (IdentityNode dep: unsatisfiedDependencies) {
}
throw new PlatformException(sb.toString());
}
-
+
+ message = "Analyzed graph bundles";
+ monitor.subTask(message);
+ LOGGER.info(message);
+
List<GraphBundle> sortedBundles = analyzer.getSortedGraphs();
if(!sortedBundles.isEmpty()) {
-
- session.syncRequest(new WriteRequest() {
- @Override
- public void perform(WriteGraph graph) throws DatabaseException {
- try {
- graph.newClusterSet(graph.getRootLibrary());
- } catch (ClusterSetExistException e) {
- // Cluster set exist already, no problem.
- }
- graph.setClusterSet4NewResource(graph.getRootLibrary());
- graph.flushCluster();
+
+ session.syncRequest((Write) graph -> {
+ try {
+ graph.newClusterSet(graph.getRootLibrary());
+ } catch (ClusterSetExistException e) {
+ // Cluster set exist already, no problem.
}
+ graph.setClusterSet4NewResource(graph.getRootLibrary());
+ graph.flushCluster();
});
boolean mergedOntologies = false;
log.log(new Status(IStatus.INFO, Activator.PLUGIN_ID, "Merging new version of "+tg.toString()));
startTransaction(session, true);
-
+
//delta.print();
try {
-
-
long[] resourceArray = TransferableGraphs.applyDelta(writeGraph(), oldResources, delta);
tg.setResourceArray(resourceArray);
mgmt.setGraphBundleEntry(tg);
}
}
}
-
- session.syncRequest(new WriteRequest() {
- @Override
- public void perform(WriteGraph graph) throws DatabaseException {
- graph.setClusterSet4NewResource(graph.getRootLibrary());
- graph.flushCluster();
- }
+
+ session.syncRequest((Write) graph -> {
+ graph.setClusterSet4NewResource(graph.getRootLibrary());
+ graph.flushCluster();
});
if (mergedOntologies)
DatabaseIndexing.deleteAllIndexes();
}
-
- TimeLogger.log("Ontologies synchronized.");
-
}
session.getService(XSupport.class).setServiceMode(false, false);
}
- progressMonitor.worked(20);
+ message = "Ontologies synchronized";
+ monitor.subTask(message);
+ LOGGER.info(message);
+ monitor.worked(100);
} catch (IOException e) {
throw new PlatformException(e);
} catch (DatabaseException e) {
public boolean assertProject(IProgressMonitor progressMonitor, RecoveryPolicy workspacePolicy, boolean installProject) throws PlatformException {
- if (progressMonitor == null) progressMonitor = new NullProgressMonitor();
+ SubMonitor monitor = SubMonitor.convert(progressMonitor, 10);
final DatabaseManagement mgmt = new DatabaseManagement();
- progressMonitor.setTaskName("Asserting project resource exists in the database");
+ monitor.setTaskName("Asserting project resource exists in the database");
try {
- projectResource = session.syncRequest( Queries.resource( projectURI ) );
- } catch (ResourceNotFoundException nfe) {
- // Project was not found
- if (workspacePolicy == RecoveryPolicy.ThrowError)
- throw new PlatformException("Project Resource "+projectURI+" is not found in the database.");
- // Create empty project with no features
- try {
- Transaction.startTransaction(session, true);
+ projectResource = session.syncRequest(new PossibleResource(projectURI));
+ if (projectResource == null) {
+ // Project was not found
+ if (workspacePolicy == RecoveryPolicy.ThrowError)
+ throw new PlatformException("Project Resource "+projectURI+" is not found in the database.");
+ // Create empty project with no features
try {
- // The project needs to be created mutable.
- session.getService(XSupport.class).setServiceMode(true, false);
+ Transaction.startTransaction(session, true);
+ try {
+ // The project needs to be created mutable.
+ session.getService(XSupport.class).setServiceMode(true, false);
- ArrayList<String> empty = new ArrayList<String>();
- projectResource = mgmt.createProject(projectName, empty);
- installProject |= true;
+ ArrayList<String> empty = new ArrayList<String>();
+ projectResource = mgmt.createProject(projectName, empty);
+ installProject |= true;
- session.getService(XSupport.class).setServiceMode(false, false);
- Transaction.commit();
- } finally {
- Transaction.endTransaction();
+ session.getService(XSupport.class).setServiceMode(false, false);
+ Transaction.commit();
+ } finally {
+ Transaction.endTransaction();
+ }
+ //session.getService( LifecycleSupport.class ).save();
+ } catch (DatabaseException e) {
+ throw new PlatformException("Failed to create "+projectURI, e);
}
- //session.getService( LifecycleSupport.class ).save();
- } catch (DatabaseException e) {
- throw new PlatformException("Failed to create "+projectURI, e);
}
} catch (DatabaseException e) {
throw new PlatformException("Failed to create "+projectURI, e);
}
- progressMonitor.worked(10);
+ monitor.worked(10);
return installProject;
Set<GroupReference> publishedFeatureGroups = ProjectFeatures.getInstallGroupsOfPublishedFeatures();
Collection<GroupReference> groupsWithoutVersion = GroupReference.stripVersions(publishedFeatureGroups);
- // final List<String> Platform_Features = new ArrayList<String>();
- //
- // // Convert graph instances
- // Collection<TransferableGraph1> platformGraphs = new ArrayList<TransferableGraph1>();
- // for (GraphBundleEx e : platformTGs.values()) platformGraphs.add( e.getGraph() );
- // IGraph graph = Graphs.createGraph(platformGraphs);
- //
- // Res PublishedProjectFeatures = UriUtils.uriToPath( ProjectResource.URIs.PublishedProjectFeatures );
- // Path HasFeature = UriUtils.uriToPath( ProjectResource.URIs.HasFeature );
- // for(Res feature : graph.getObjects(PublishedProjectFeatures, HasFeature)) {
- // System.out.println("Installing Project Feature: "+feature.toString());
- // Platform_Features.add( feature.toString() );
- // }
-
try {
- Transaction.startTransaction(session, true);
- try {
- // for (String feature : Platform_Features) {
- // try {
- // getResource(feature);
- // } catch(ResourceNotFoundException e) {
- // System.out.println(feature+" not found");
- // }
- // mgmt.installFeature(projectResource, feature);
- // }
- Projects.setProjectInstalledGroups(writeGraph(), projectResource, groupsWithoutVersion);
- Transaction.commit();
- } finally {
- Transaction.endTransaction();
- }
- //session.getService( LifecycleSupport.class ).save();
- } catch(DatabaseException ae) {
+ session.syncRequest(
+ (Write) graph ->
+ Projects.setProjectInstalledGroups(graph, projectResource, groupsWithoutVersion));
+ } catch (DatabaseException ae) {
throw new PlatformException("Failed to install features", ae);
}
progressMonitor.worked(10);
}
public void resetDatabase(IProgressMonitor monitor) throws PlatformException {
+ // TODO: fix this to use Path APIs
File dbLocation = Platform.getLocation().append("db").toFile();
if(!dbLocation.exists()) return;
try { // Load driver
- Driver driver = Manager.getDriver("procore");
+ Driver driver = Manager.getDriver("acorn");
Management management = driver.getManagement(dbLocation.getAbsolutePath(), null);
management.delete();
} catch (DatabaseException e) {
resetDatabase(monitor);
}
- public boolean handleBaselineDatabase() throws PlatformException {
- Path workspaceLocation = Platform.getLocation().toFile().toPath();
- Path baselineIndicatorFile = workspaceLocation.resolve(".baselined");
- if (Files.isRegularFile(baselineIndicatorFile)) {
- // This means that the workspace has already been initialized from
- // a database baseline and further initialization is not necessary.
- return true;
- }
+ /**
+ * Returns the Eclipse platform install location as a file system path,
+ * or <code>null</code> if no install location is available.
+ *
+ * NOTE(review): Location.getURL().getPath() is used verbatim, so
+ * percent-encoded characters in the URL (e.g. spaces as %20) are not
+ * decoded — TODO confirm install paths containing special characters
+ * resolve correctly.
+ */
+ private static Path tryGetInstallLocation() {
+ Location l = Platform.getInstallLocation();
+ return l == null ? null : new File(l.getURL().getPath()).toPath();
+ }
+ /**
+ * Resolves the database baseline archive configured through the
+ * <code>org.simantics.db.baseline</code> system property.
+ *
+ * Absolute paths are used as-is. Relative paths are resolved first
+ * against the platform install location and then against the current
+ * working directory.
+ *
+ * @return path to the existing baseline archive, or <code>null</code>
+ *         if the system property is not set
+ * @throws PlatformException if a baseline is configured but the archive
+ *         cannot be found at any of the resolved locations
+ */
+ private Path resolveBaselineFile() throws PlatformException {
String dbBaselineArchive = System.getProperty("org.simantics.db.baseline", null);
if (dbBaselineArchive == null)
- return false;
+ return null;
Path baseline = Paths.get(dbBaselineArchive);
- if (!Files.isRegularFile(baseline))
- throw new PlatformException("Specified database baseline archive " + baseline + " does not exist. Cannot initialize workspace database.");
-
- validateBaselineFile(baseline);
- validateWorkspaceForBaselineInitialization(workspaceLocation);
-
- try {
- Files.createDirectories(workspaceLocation);
- FileUtils.extractZip(baseline.toFile(), workspaceLocation.toFile());
- Files.write(baselineIndicatorFile, baselineIndicatorContents(baselineIndicatorFile));
- return true;
- } catch (IOException e) {
- throw new PlatformException(e);
+ if (baseline.isAbsolute()) {
+ if (!Files.isRegularFile(baseline))
+ throw new PlatformException("Specified database baseline archive " + baseline
+ + " does not exist. Cannot initialize workspace database from baseline.");
+ return baseline;
}
- }
- private static final DateTimeFormatter TIMESTAMP_FORMAT = DateTimeFormatter.ofPattern("d. MMM yyyy HH:mm:ss");
-
- private static byte[] baselineIndicatorContents(Path path) throws IOException {
- return String.format("%s%n%s%n",
- path.toString(),
- Instant.now().atZone(ZoneId.systemDefault()).format(TIMESTAMP_FORMAT))
- .getBytes("UTF-8");
+ // Relative path resolution order:
+ // 1. from the platform "install location"
+ // 2. from working directory
+ Path installLocation = tryGetInstallLocation();
+ if (installLocation != null) {
+ Path installedBaseline = installLocation.resolve(dbBaselineArchive);
+ if (Files.isRegularFile(installedBaseline))
+ return installedBaseline;
+ }
+ if (!Files.isRegularFile(baseline))
+ throw new PlatformException("Specified database baseline archive " + baseline
+ + " does not exist in either the install location (" + installLocation
+ + ") or the working directory (" + Paths.get(".").toAbsolutePath()
+ + "). Cannot initialize workspace database.");
+ // BUGFIX: the archive was found relative to the working directory;
+ // return it instead of null (returning null here silently ignored a
+ // baseline that exists in the working directory, contradicting the
+ // documented resolution order and the error message above).
+ return baseline;
}
- private void validateWorkspaceForBaselineInitialization(Path workspaceLocation) throws PlatformException {
- try {
- Path db = workspaceLocation.resolve("db");
- if (Files.exists(db))
- throw new PlatformException("Database location " + db + " already exists. Cannot re-initialize workspace from baseline.");
- Path index = workspaceLocation.resolve(".metadata/.plugins/org.simantics.db.indexing");
- if (!Files.exists(index) || !isEmptyDirectory(index))
- throw new PlatformException("Index location " + index + " already exists. Cannot re-initialize workspace from baseline.");
- } catch (IOException e) {
- throw new PlatformException("Failed to validate workspace for baseline initialization", e);
+ /**
+ * Initializes the workspace database from a baseline archive if one is
+ * configured (see {@link #resolveBaselineFile()}) and the workspace has
+ * not already been baseline-initialized.
+ *
+ * The ".baselined" marker file in the workspace root records a completed
+ * initialization so it is performed at most once per workspace.
+ *
+ * @return <code>true</code> if the workspace is (or already was)
+ *         initialized from a baseline, <code>false</code> if no baseline
+ *         is configured
+ * @throws PlatformException if baseline validation or extraction fails
+ */
+ private boolean handleBaselineDatabase() throws PlatformException {
+ Path workspaceLocation = Platform.getLocation().toFile().toPath();
+ Path baselineIndicatorFile = workspaceLocation.resolve(".baselined");
+ if (Files.isRegularFile(baselineIndicatorFile)) {
+ // This means that the workspace has already been initialized from
+ // a database baseline and further initialization is not necessary.
+ return true;
+ }
- }
- private static boolean isEmptyDirectory(Path dir) throws IOException {
- return Files.walk(dir).count() == 1;
- }
+ Path baseline = resolveBaselineFile();
+ if (baseline == null)
+ return false;
- private void validateBaselineFile(Path baseline) throws PlatformException {
- try (ZipFile zip = new ZipFile(baseline.toFile())) {
- ZipEntry db = zip.getEntry("db");
- if (db == null)
- throw new PlatformException("Baseline archive does not contain database directory 'db'");
- ZipEntry index = zip.getEntry(".metadata/.plugins/org.simantics.db.indexing");
- if (index == null)
- throw new PlatformException("Baseline archive does not contain database index directory '.metadata/.plugins/org.simantics.db.indexing'");
- } catch (IOException e) {
- throw new PlatformException("Failed to validate baseline archive " + baseline, e);
- }
+ // Validation and extraction are delegated to DatabaseBaselines
+ // (replaces the removed local helper methods above).
+ DatabaseBaselines.validateBaselineFile(baseline);
+ DatabaseBaselines.validateWorkspaceForBaselineInitialization(workspaceLocation);
+ DatabaseBaselines.initializeWorkspaceWithBaseline(baseline, workspaceLocation, baselineIndicatorFile);
+ return true;
}
/**
* installing project features.
* <p>
*
- * In SWB this is handled in SimanticsWorkbenchAdvisor#openWindows().
+ * In Simantics Workbench this is handled in
+ * <code>SimanticsWorkbenchAdvisor#openWindows()</code>.
* <p>
*
* If remote server is given, simantics plaform takes connection there
* startup or <code>null</code> to resort to default measures
* @throws PlatformException
*/
- public SessionContext startUp(String databaseDriverId, IProgressMonitor progressMonitor, RecoveryPolicy workspacePolicy,
+ public synchronized SessionContext startUp(String databaseDriverId, IProgressMonitor progressMonitor, RecoveryPolicy workspacePolicy,
OntologyRecoveryPolicy ontologyPolicy, boolean requireSynchronize, PlatformUserAgent userAgent)
throws PlatformException
{
assert(!running);
- TimeLogger.log("Beginning of SimanticsPlatform.startUp");
-
LOGGER.info("Beginning of SimanticsPlatform.startUp");
-
- if (progressMonitor == null) progressMonitor = new NullProgressMonitor();
+
+ SubMonitor monitor = SubMonitor.convert(progressMonitor, 1000);
// For debugging on what kind of platform automatic tests are running in
// case there are problems.
// 0. Consult all startup extensions before doing anything with the workspace.
StartupExtensions.consultStartupExtensions();
- TimeLogger.log("Consulted platform pre-startup extensions");
+ LOGGER.info("Consulted platform pre-startup extensions");
// 0.1. Clear all temporary files
Simantics.clearTemporaryDirectory();
- TimeLogger.log("Cleared temporary directory");
+ LOGGER.info("Cleared temporary directory");
// 0.2 Clear VariableRepository.repository static map which holds references to SessionImplDb
VariableRepository.clear();
-
+
+ // 0.2.1 Activate org.simantics.scl.osgi to prime the SCL compiler early.
+ @SuppressWarnings("unused")
+ ModuleRepository modRepo = SCLOsgi.MODULE_REPOSITORY;
+
// 0.3 Handle baseline database before opening db
+ @SuppressWarnings("unused")
boolean usingBaseline = handleBaselineDatabase();
-
+
// 1. Assert there is a database at <workspace>/db
- session = setupDatabase(databaseDriverId, progressMonitor, workspacePolicy, userAgent);
- TimeLogger.log("Database setup complete");
-
- // 1.1
+ SessionDescriptor sessionDescriptor = setupDatabase(databaseDriverId, monitor.newChild(200, SubMonitor.SUPPRESS_NONE), workspacePolicy, userAgent);
+ session = sessionDescriptor.getSession();
+ LOGGER.info("Database setup complete");
+
+ // 2. Delete all indexes if we cannot be certain they are up-to-date
+ // A full index rebuild will be done later, before project activation.
XSupport support = session.getService(XSupport.class);
if (support.rolledback()) {
try {
throw new PlatformException(e);
}
}
-
- // 2. Assert all graphs, and correct versions, are installed to the database
- if(!usingBaseline) {
- synchronizeOntologies(progressMonitor, ontologyPolicy, requireSynchronize);
- TimeLogger.log("Synchronized ontologies");
- }
+
+ // 3. Assert all graphs, and correct versions, are installed to the database
+ synchronizeOntologies(monitor.newChild(400, SubMonitor.SUPPRESS_NONE), ontologyPolicy, requireSynchronize);
// 4. Assert simantics.cfg exists
- boolean installProject = assertConfiguration(progressMonitor,workspacePolicy);
+ boolean installProject = assertConfiguration(monitor.newChild(25, SubMonitor.SUPPRESS_NONE),workspacePolicy);
// 5. Assert Project Resource is installed in the database
- installProject = assertProject(progressMonitor, workspacePolicy, installProject);
+ installProject = assertProject(monitor.newChild(25, SubMonitor.SUPPRESS_NONE), workspacePolicy, installProject);
// 6. Install all features into project, if in debug mode
- updateInstalledGroups(progressMonitor, installProject);
- TimeLogger.log("Installed all features into project");
+ updateInstalledGroups(monitor.newChild(25), true); //installProject);
+ LOGGER.info("Installed all features into project");
// 7. Assert L0.Session in database for this session
- assertSessionModel(progressMonitor);
+ assertSessionModel(monitor.newChild(25, SubMonitor.SUPPRESS_NONE));
session.getService(XSupport.class).setServiceMode(false, false);
try {
- session.sync(new WriteRequest() {
-
- @Override
- public void perform(WriteGraph graph) throws DatabaseException {
- QueryControl qc = graph.getService(QueryControl.class);
- qc.flush(graph);
- }
-
+ String message = "Flush query cache";
+ monitor.setTaskName(message);
+ LOGGER.info(message);
+ session.syncRequest((Write) graph -> {
+ QueryControl qc = graph.getService(QueryControl.class);
+ qc.flush(graph);
});
- TimeLogger.log("Flushed queries");
} catch (DatabaseException e) {
LOGGER.error("Flushing queries failed.", e);
}
boolean loadProject = true;
try {
-
+ String message = "Open database session";
+ monitor.setTaskName(message);
+ LOGGER.info(message);
sessionContext = SimanticsPlatform.INSTANCE.createSessionContext(true);
// This must be before setSessionContext since some listeners might query this
sessionContext.setHint(SimanticsKeys.KEY_PROJECT, SimanticsPlatform.INSTANCE.projectResource);
Simantics.setSessionContext(sessionContext);
// 1. Put ResourceBinding that throws an exception to General Bindings
- simanticsBindings = new SimanticsBindings( null );
+ message = "Put ResourceBinding that throws an exception to General Bindings";
+ LOGGER.info(message);
+ simanticsBindings = new SimanticsBindings();
Bindings.classBindingFactory.addFactory( simanticsBindings );
-
- // 2. Create session-specific second Binding context (Databoard) and
- // put that to Session as a service
Session session = sessionContext.getSession();
- Databoard sessionDataboard = new Databoard();
- session.registerService(Databoard.class, sessionDataboard);
- simanticsBindings2 = new SimanticsBindings( session );
- sessionDataboard.classBindingFactory.addFactory( simanticsBindings2 );
+ session.registerService(Databoard.class, Bindings.databoard);
// Register datatype bindings
+ message = "Register datatype bindings";
+ LOGGER.info(message);
Bindings.defaultBindingFactory.getRepository().put(RGB.Integer.BINDING.type(), RGB.Integer.BINDING);
Bindings.defaultBindingFactory.getRepository().put(Font.BINDING.type(), Font.BINDING);
- if(loadProject) {
+ if (support.rolledback() || sessionDescriptor.isFreshDatabase()) {
+ message = "Rebuilding all indexes";
+ LOGGER.info(message);
+ monitor.setTaskName(message);
+ try {
+ session.getService(IndexedRelations.class).fullRebuild(monitor.newChild(100), session);
+ } catch (IndexException e) {
+ LOGGER.error("Failed to re-build all indexes", e);
+ }
+ } else {
+ monitor.worked(100);
+ }
- TimeLogger.log("Load projects");
+ if(loadProject) {
+ message = "Load project";
+ monitor.setTaskName(message);
+ LOGGER.info(message);
project = Projects.loadProject(sessionContext.getSession(), SimanticsPlatform.INSTANCE.projectResource);
-
sessionContext.setHint(ProjectKeys.KEY_PROJECT, project);
- TimeLogger.log("Loading projects complete");
+ monitor.worked(100);
+ message = "Loading projects complete";
+ LOGGER.info(message);
+ message = "Activate project";
+ monitor.setTaskName(message);
+ LOGGER.info(message);
project.activate();
- TimeLogger.log("Project activated");
+ monitor.worked(100);
+ LOGGER.info("Project activated");
}
} catch (DatabaseException e) {
running = true;
+ // #7650: improve shutdown robustness in all applications that use the platform
+ Runtime.getRuntime().addShutdownHook(shutdownHook);
+
+ // Discard database session undo history at this point to prevent
+ // the user from undoing any initialization operations performed
+ // by the platform startup.
+ SimanticsPlatform.INSTANCE.discardSessionUndoHistory();
+ LOGGER.info("Discarded session undo history");
+
return sessionContext;
}
+ /**
+ * Registers platform-global and merging request-processor services on the
+ * database session.
+ *
+ * NOTE(review): the <code>context</code> parameter is currently unused —
+ * all services are registered on the <code>session</code> field. Confirm
+ * whether <code>context.getSession()</code> was intended instead.
+ */
+ public void registerServices(SessionContext context) {
+ new GlobalServiceInitializer().initialize(session);
+ session.registerService(MergingGraphRequestProcessor.class, new MergingGraphRequestProcessor("SessionService", session, 20));
+ session.registerService(MergingDelayedWriteProcessor.class, new MergingDelayedWriteProcessor(session, 20));
+ }
+
+
public SessionContext createSessionContext(boolean init) throws PlatformException {
try {
// Construct and initialize SessionContext from Session.
SessionContext sessionContext = SessionContext.create(session, init);
- TimeLogger.log("Session context created");
+ String message = "Session context created";
+ LOGGER.info(message);
if (init) {
- sessionContext.registerServices();
- TimeLogger.log("Session services registered");
+ registerServices(sessionContext);
+ message = "Session services registered";
+ LOGGER.info(message);
}
return sessionContext;
} catch (DatabaseException e) {
}
}
-// private static File getIgnorePrerequisitesFile(URL workspaceUrl) {
-// if (workspaceUrl == null)
-// return null;
-// return new File(workspaceUrl.getPath(), ".ignorePrerequisites");
-// }
-//
-// private void ensurePrerequisites(IProgressMonitor progressMonitor, PlatformUserAgent userAgent) throws PlatformException {
-// Location loc = Platform.getInstanceLocation();
-// File ignorePrerequisites = getIgnorePrerequisitesFile(loc.getURL());
-// if (loc.isSet() && ignorePrerequisites != null) {
-// if (ignorePrerequisites.exists() || ignorePrerequisites.isFile())
-// return;
-// }
-//
-// try {
-// ServerEnvironment.ensureServerDependenciesMet();
-// } catch (ExecutionEnvironmentException e) {
-// // Not installed properly, ask user whether to try installation.
-// try {
-// StringBuilder msg = new StringBuilder();
-// msg.append("Your system seems to be missing the following prerequisites for running this application:\n\n");
-// for (Product product : e.requiredProducts)
-// msg.append("\t" + product.getDescription() + "\n");
-// msg.append("\nYou can either install the missing components now or ignore and attempt to start the application without them. Ignore Always will ignore this question for this workspace.");
-// msg.append("\n\nSelecting Cancel will close the application.");
-//
-// int selection = 0;
-// if (userAgent != null) {
-// selection = userAgent.showPrompt("Missing Prerequisites", msg.toString(), new String[] {
-// "Install Pre-requisites",
-// "Ignore Now",
-// "Ignore Always",
-// "Cancel"
-// }, selection);
-// }
-// boolean tryInstall = false;
-// switch (selection) {
-// case 0:
-// tryInstall = true;
-// break;
-// case 2:
-// ignorePrerequisites.createNewFile();
-// case 1:
-// break;
-// case 3:
-// case -1:
-// throw new CancelStartupException();
-// }
-//
-// if (tryInstall) {
-// // Try to install it and check for success afterwards.
-// ServerEnvironment.tryInstallDependencies(progressMonitor);
-// ServerEnvironment.ensureServerDependenciesMet();
-// }
-// } catch (InstallException ie) {
-// throw new PlatformException(ie);
-// } catch (ExecutionEnvironmentException eee) {
-// throw new PlatformException(eee);
-// } catch (IOException ie) {
-// throw new PlatformException(ie);
-// }
-// }
-// }
+ /**
+ * Perform normal shutdown for the Simantics Platform.
+ *
+ * @param progressMonitor optional progress monitor
+     * @throws PlatformException if any step of the shutdown procedure fails
+     * @see #shutdown(IProgressMonitor, boolean)
+ */
+ public synchronized void shutdown(IProgressMonitor progressMonitor) throws PlatformException {
+ shutdown(progressMonitor, true);
+ }
/**
* Shutdown Simantics Platform.
*
- * In SWB this is handled in SimanticsWorkbenchAdvisor#disconnectFromWorkspace.
+ * In Simantics Workbench this is handled in
+ * <code>SimanticsWorkbenchAdvisor#disconnectFromWorkspace</code>.
*
- * @param progressMonitor optional progress monitor
+ * @param progressMonitor
+ * optional progress monitor
+ * @param clearTemporaryFiles
+ * allow or prevent deletion of temporary files at the end of the
+ * shutdown procedure
* @throws PlatformException
*/
- public void shutdown(IProgressMonitor progressMonitor) throws PlatformException
+ public synchronized void shutdown(IProgressMonitor progressMonitor, boolean clearTemporaryFiles) throws PlatformException
{
SubMonitor progress = SubMonitor.convert(progressMonitor, 100);
PlatformException platformException = null;
}
progress.worked(10);
+        // TimedSessionCache.close() is currently a no-op; invoked here so cache
+        // disposal is already wired in when it gains real behavior
+ TimedSessionCache.close();
+
+ progress.subTask("Thread pools");
+ ThreadUtils.shutdown();
+ ExecutorWorker.shutdown();
+ progress.worked(5);
+
running = false;
progress.subTask("Close Database Session");
- Databoard databoard = null;
if (sessionContext != null) {
Session s = sessionContext.peekSession();
if (s != null) {
- databoard = s.peekService(Databoard.class);
-
progress.subTask("Flushing Index Caches");
try {
Simantics.flushIndexCaches(progress.newChild(20), s);
Bindings.classBindingFactory.removeFactory( simanticsBindings );
simanticsBindings = null;
}
- if (databoard != null) {
- if (simanticsBindings2 != null) {
- databoard.classBindingFactory.removeFactory( simanticsBindings2 );
- simanticsBindings2 = null;
- }
- databoard.clear();
- }
// Make sure Simantics clipboard doesn't store unwanted session data references.
Simantics.setClipboard(new SimanticsClipboardImpl());
- progress.worked(30);
+ progress.worked(50);
session = null;
projectResource = null;
}
progress.worked(10);
- progress.subTask("Clearing Workspace Temporary Directory");
+ progress.subTask("Clear index status");
try {
- Simantics.clearTemporaryDirectory();
- } catch (Throwable t) {
- LOGGER.error("Failed to clear the temporary directory.", t);
+ // Everything ok, clear index dirty state.
+ DatabaseIndexing.clearAllDirty();
+ } catch (IOException e) {
+ LOGGER.error("Problems encountered while refreshing database index states, see exception for details.", e);
+ }
+ progress.worked(5);
+
+ if (clearTemporaryFiles) {
+ progress.subTask("Clearing Workspace Temporary Directory");
+ try {
+ Simantics.clearTemporaryDirectory();
+ } catch (Throwable t) {
+ LOGGER.error("Failed to clear the temporary directory.", t);
+ }
}
progress.worked(10);
if (null != platformException)
throw platformException;
+
+        // #7650: normal shutdown completed — unregister the emergency JVM
+        // shutdown hook installed at startup so it cannot run twice
+ Runtime.getRuntime().removeShutdownHook(shutdownHook);
}
// TODO: consider removing this in the future ??