*******************************************************************************/
package org.simantics.db.indexing;
-import java.io.File;
import java.nio.file.Path;
import org.eclipse.core.runtime.IPath;
*/
public final class DatabaseIndexing {
- private static final boolean DEBUG = IndexPolicy.TRACE_INDEX_MANAGEMENT;
private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(DatabaseIndexing.class);
public static Path getIndexBaseLocation() {
*******************************************************************************/
package org.simantics.db.indexing;
-import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.WeakHashMap;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.simantics.db.ReadGraph;
import org.simantics.db.RequestProcessor;
import org.simantics.db.Resource;
+import org.simantics.db.common.request.ReadRequest;
import org.simantics.db.common.request.UniqueRead;
+import org.simantics.db.common.utils.NameUtils;
import org.simantics.db.exception.DatabaseException;
import org.simantics.db.exception.InvalidResourceReferenceException;
import org.simantics.db.indexing.IndexedRelationsSearcherBase.State;
import org.simantics.db.layer0.adapter.GenericRelation;
import org.simantics.db.layer0.genericrelation.IndexException;
import org.simantics.db.layer0.genericrelation.IndexedRelations;
+import org.simantics.db.layer0.util.Layer0Utils;
import org.simantics.db.service.QueryControl;
import org.simantics.db.service.SerialisationSupport;
+import org.simantics.operation.Layer0X;
import org.simantics.utils.datastructures.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
return rwlock.tryLock(processor, write);
}
- private IndexedRelationsSearcherBase makeSearcher(final RequestProcessor processor, final Resource relation, final Resource input) {
+ private static IndexedRelationsSearcherBase makeSearcher(final RequestProcessor processor, final Resource relation, final Resource input) {
try {
return processor.syncRequest(new UniqueRead<IndexedRelationsSearcherBase>() {
}
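+ /**
+  * Rebuilds every index from scratch inside a single read transaction.
+  * Used at startup when the on-disk indexes cannot be trusted, e.g. after
+  * a database rollback or with a freshly created database.
+  */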
+ @Override
+ public void fullRebuild(IProgressMonitor monitor, RequestProcessor processor) throws IndexException {
+ try {
+ processor.syncRequest(new ReadRequest() {
+ @Override
+ public void run(ReadGraph graph) throws DatabaseException {
+ try {
+ fullRebuild(monitor, graph);
+ } catch (IOException e) {
+ throw new DatabaseException(e);
+ }
+ }
+ });
+ } catch (DatabaseException e) {
+ throw new IndexException(e);
+ }
+ }
+
+ private void fullRebuild(IProgressMonitor monitor, ReadGraph graph) throws DatabaseException, IOException {
+ long startTime = System.currentTimeMillis();
+ Resource relation = Layer0X.getInstance(graph).DependenciesRelation;
+ SerialisationSupport ss = graph.getService(SerialisationSupport.class);
+ Set<Resource> indexRoots = Layer0Utils.listIndexRoots(graph);
+ List<CompletableFuture<?>> waitFor = new ArrayList<>(indexRoots.size());
+ SubMonitor mon = SubMonitor.convert(monitor, indexRoots.size()*2);
+
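+ // For each index root: realize the indexed content in this read
+ // transaction, then hand the Lucene write off to the common ForkJoinPool
+ // so the next root can be realized while the previous one is written.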
+ for (Resource indexRoot : indexRoots) {
+ mon.subTask(NameUtils.getSafeName(graph, indexRoot));
+
+ IndexedRelationsSearcherBase searcher = makeSearcher(graph, relation, indexRoot);
+
+ GenericRelation r = graph.adapt(relation, GenericRelation.class);
+ if (r == null)
+ throw new DatabaseException("Given resource " + relation + " could not be adapted to GenericRelation.");
+
+ Object[] bound = new Object[] { ss.getRandomAccessId(indexRoot) };
+ GenericRelation selection = r.select(IndexedRelationsSearcherBase.getPattern(r, bound.length), bound);
+
+ long relStart = System.currentTimeMillis();
+ List<Object[]> results = selection.realize(graph);
+ if (LOGGER.isDebugEnabled()) {
+ long relEnd = System.currentTimeMillis() - relStart;
+ LOGGER.debug("{} realized in {} ms", indexRoot, relEnd);
+ }
+ mon.worked(1);
+
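+ // Completed by the pool task when the index write finishes, or
+ // completed exceptionally if it fails.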
+ CompletableFuture<?> result = new CompletableFuture<>();
+ waitFor.add(result);
+ ForkJoinPool.commonPool().submit(() -> {
+ long startTime1 = System.currentTimeMillis();
+ try {
+ searcher.initializeIndexImpl(result, mon.newChild(1, SubMonitor.SUPPRESS_ALL_LABELS), r, results, bound, true);
+ searcher.setReady();
+ } catch (IOException e) {
+ result.completeExceptionally(e);
+ LOGGER.error("Could not initialize index", e);
+ } finally {
+ if (LOGGER.isDebugEnabled())
+ LOGGER.debug("{} initialized in {} ms", indexRoot, System.currentTimeMillis() - startTime1);
+ }
+ });
+ }
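+ // Wait for every index write to finish, surfacing the first failure.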
+ for (CompletableFuture<?> fut : waitFor) {
+ try {
+ fut.get();
+ } catch (InterruptedException | ExecutionException e) {
+ Throwable cause = e.getCause();
+ if (cause instanceof IOException)
+ throw (IOException) cause;
+ throw new DatabaseException(e);
+ }
+ }
+ if (LOGGER.isInfoEnabled()) {
+ long endTime = System.currentTimeMillis() - startTime;
+ LOGGER.info("All indexes rebuilt in {}", endTime);
+ }
+ }
+
}
*******************************************************************************/
package org.simantics.db.indexing;
-import gnu.trove.map.hash.THashMap;
-import gnu.trove.set.hash.TLongHashSet;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import gnu.trove.map.hash.THashMap;
+import gnu.trove.set.hash.TLongHashSet;
+
/**
* @author Tuukka Lehtonen
* @author Antti Villberg
super.removeIndex(monitor, r, processor, key, keyValues);
}
- public List<Object[]> allDocs(IProgressMonitor monitor, Session session) throws ParseException, IOException,
- DatabaseException {
+ public List<Object[]> allDocs(IProgressMonitor monitor, Session session) throws IOException {
Query query = new MatchAllDocsQuery();
}
for(ScoreDoc scoreDoc:scoreDocs) {
-
- try {
-
- Document doc = reader.document(scoreDoc.doc);
- List<IndexableField> fs = doc.getFields();
- Object[] o = new Object[fs.size()];
- int index = 0;
- for (IndexableField f : fs) {
- String clazz = classMap.get(f.name());
- if ("Long".equals(clazz)) {
- o[index++] = Long.parseLong(f.stringValue());
- } else {
- o[index++] = f.stringValue();
- }
+ Document doc = reader.document(scoreDoc.doc);
+ List<IndexableField> fs = doc.getFields();
+ Object[] o = new Object[fs.size()];
+ int index = 0;
+ for (IndexableField f : fs) {
+ String clazz = classMap.get(f.name());
+ if ("Long".equals(clazz)) {
+ o[index++] = Long.parseLong(f.stringValue());
+ } else {
+ o[index++] = f.stringValue();
}
- result.add(o);
- } catch (CorruptIndexException e) {
- throw new DatabaseException(e);
- } catch (IOException e) {
- throw new DatabaseException(e);
}
-
+ result.add(o);
}
changeState(monitor, session, State.READY);
import java.util.Iterator;
import java.util.List;
import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;
-import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.lucene.document.Document;
import org.simantics.db.service.SerialisationSupport;
import org.simantics.utils.FileUtils;
import org.simantics.utils.datastructures.Pair;
+import org.simantics.utils.threads.ThreadUtils;
import org.slf4j.Logger;
import gnu.trove.map.hash.THashMap;
}
}
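+ // Builds a selection pattern of boundCount 'b' markers followed by a
+ // single 'f', e.g. one bound argument yields "bf", two yield "bbf".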
- private static String getPattern(GenericRelation relation, int boundCount) {
+ public static String getPattern(GenericRelation relation, int boundCount) {
String result = "";
for (int i = 0; i < boundCount; i++)
result += "b";
result += "f";
return result;
}
-
- private static final int INDEXING_THREAD_COUNT = 2;
-
- private static final ExecutorService executor = Executors.newFixedThreadPool(INDEXING_THREAD_COUNT, new ThreadFactory() {
- @Override
- public Thread newThread(Runnable r) {
- Thread t = new Thread(r, "Lucene Index Creator");
- if (!t.isDaemon())
- t.setDaemon(true);
- if (t.getPriority() != Thread.NORM_PRIORITY)
- t.setPriority(Thread.NORM_PRIORITY);
- return t;
- }
- });
void initializeIndex(IProgressMonitor monitor, ReadGraph graph, Object[] bound, boolean overwrite)
throws IOException, DatabaseException
"Reindexing " + NameUtils.getSafeLabel(graph, input),
mon -> {
try {
- initializeIndexImpl(mon, graph, bound, overwrite);
+ GenericRelation r = graph.adapt(relation, GenericRelation.class);
+ if (r == null)
+ throw new DatabaseException("Given resource " + relation + " could not be adapted to GenericRelation.");
+
+ GenericRelation selection = r.select(getPattern(r, bound.length), bound);
+
+ List<Object[]> results = selection.realize(graph);
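+ // Note: initializeIndexImpl reports failures through the passed future;
+ // the throw-away instance here means failures surface via the logger only.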
+ initializeIndexImpl(new CompletableFuture<>(), mon, r, results, bound, overwrite);
} catch (IOException e) {
getLogger().error("Index is in problematic state! {}", this, e);
throw new DatabaseException(e);
});
}
- void initializeIndexImpl(IProgressMonitor monitor, ReadGraph graph, final Object[] bound, boolean overwrite) throws IOException,
- DatabaseException {
-
- final SubMonitor mon = SubMonitor.convert(monitor, 100);
-
- if (IndexPolicy.TRACE_INDEX_INIT)
- System.out.println(getDescriptor() + "Initializing index at " + indexPath + " (overwrite = " + overwrite + ")");
- mon.beginTask("Initializing Index", 100);
-
- if (overwrite) {
- if (Files.exists(indexPath)) {
- mon.subTask("Erasing previous index");
- if (getLogger().isDebugEnabled())
- getLogger().debug("Erasing previous index {}", indexPath.toAbsolutePath());
- FileUtils.delete(indexPath);
- }
- }
-
- final AtomicReference<FSDirectory> directory = new AtomicReference<FSDirectory>();
- final AtomicReference<IndexWriter> writer = new AtomicReference<IndexWriter>();
+ private static final int INDEXING_THREAD_COUNT = 2; // a good parallelism level for Lucene index writing
+ void initializeIndexImpl(CompletableFuture<?> result, IProgressMonitor monitor, GenericRelation r, List<Object[]> results, final Object[] bound, boolean overwrite) throws IOException {
try {
- mon.subTask("Start index write");
- createDirectory(indexPath);
-
- directory.set(FSDirectory.open(indexPath.toFile()));
- IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_4_9, Queries.getAnalyzer()).setOpenMode(OpenMode.CREATE);
- writer.set(new IndexWriter(directory.get(), conf));
-
- mon.worked(5);
-
- final GenericRelation r = graph.adapt(relation, GenericRelation.class);
- if (r == null)
- throw new DatabaseException("Given resource " + graph.syncRequest(new SafeName(relation))
- + "could not be adapted to GenericRelation.");
-
- long realizeStart = 0;
- if (IndexPolicy.PERF_INDEX_INIT)
- realizeStart = System.nanoTime();
-
- mon.subTask("Calculating indexed content");
- GenericRelation selection = r.select(getPattern(r, bound.length), bound);
- mon.worked(5);
- List<Object[]> results = selection.realize(graph);
- mon.worked(40);
-
- if (IndexPolicy.PERF_INDEX_INIT)
- System.out.println(getDescriptor() + "Realized index with " + results.size() + " entries at " + indexPath + " in " + (1e-9 * (System.nanoTime()-realizeStart)) + " seconds.");
+ final SubMonitor mon = SubMonitor.convert(monitor, 100);
+
if (IndexPolicy.TRACE_INDEX_INIT)
- System.out.println(getDescriptor() + "Indexed relation " + r + " produced " + results.size() + " results");
-
- long start = IndexPolicy.PERF_INDEX_INIT ? System.nanoTime() : 0;
-
- mon.subTask("Indexing content");
- final Semaphore s = new Semaphore(0);
- mon.setWorkRemaining(results.size());
-
- for (int i = 0; i < INDEXING_THREAD_COUNT; i++) {
- final int startIndex = i;
- executor.submit(() -> {
- try {
- Document document = new Document();
- Field[] fs = makeFieldsForRelation(r, bound.length, document);
-
- for (int index = startIndex; index < results.size(); index += INDEXING_THREAD_COUNT) {
- if (setFields(fs, results.get(index)) == null)
- continue;
- try {
- writer.get().addDocument(document);
- } catch (CorruptIndexException e) {
- getLogger().error("Index is corrupted! {}", this, e);
- throw new IllegalStateException(e);
- } catch (IOException e) {
- getLogger().error("Index is in problematic state! {}", this, e);
- throw new IllegalStateException(e);
- } finally {
- synchronized (mon) {
- mon.worked(1);
+ System.out.println(getDescriptor() + "Initializing index at " + indexPath + " (overwrite = " + overwrite + ")");
+ mon.beginTask("Initializing Index", 100);
+
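+ // In overwrite mode any previous on-disk index is erased first; the
+ // IndexWriter below is then opened with OpenMode.CREATE.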
+ if (overwrite) {
+ if (Files.exists(indexPath)) {
+ mon.subTask("Erasing previous index");
+ if (getLogger().isDebugEnabled())
+ getLogger().debug("Erasing previous index {}", indexPath.toAbsolutePath());
+ FileUtils.delete(indexPath);
+ }
+ }
+
+ final AtomicReference<FSDirectory> directory = new AtomicReference<FSDirectory>();
+ final AtomicReference<IndexWriter> writer = new AtomicReference<IndexWriter>();
+
+ try {
+ mon.subTask("Start index write");
+ createDirectory(indexPath);
+
+ directory.set(FSDirectory.open(indexPath.toFile()));
+ IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_4_9, Queries.getAnalyzer()).setOpenMode(OpenMode.CREATE);
+ writer.set(new IndexWriter(directory.get(), conf));
+
+ mon.worked(5);
+
+ long realizeStart = 0;
+ if (IndexPolicy.PERF_INDEX_INIT)
+ realizeStart = System.nanoTime();
+
+ mon.subTask("Calculating indexed content");
+ mon.worked(5);
+
+ mon.worked(40);
+
+ if (IndexPolicy.PERF_INDEX_INIT)
+ System.out.println(getDescriptor() + "Realized index with " + results.size() + " entries at " + indexPath + " in " + (1e-9 * (System.nanoTime()-realizeStart)) + " seconds.");
+
+ if (IndexPolicy.TRACE_INDEX_INIT)
+ System.out.println(getDescriptor() + "Indexed relation " + r + " produced " + results.size() + " results");
+
+ long start = IndexPolicy.PERF_INDEX_INIT ? System.nanoTime() : 0;
+
+ mon.subTask("Indexing content");
+ final Semaphore s = new Semaphore(0);
+ mon.setWorkRemaining(results.size());
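+ // Stripe the rows across INDEXING_THREAD_COUNT workers: worker i indexes
+ // rows i, i + INDEXING_THREAD_COUNT, ..., each reusing a single Document
+ // whose fields are rewritten per row before adding it to the shared writer.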
+ for (int i = 0; i < INDEXING_THREAD_COUNT; i++) {
+ final int startIndex = i;
+ ThreadUtils.getBlockingWorkExecutor().submit(() -> {
+ try {
+ Document document = new Document();
+ Field[] fs = makeFieldsForRelation(r, bound.length, document);
+
+ for (int index = startIndex; index < results.size(); index += INDEXING_THREAD_COUNT) {
+ if (setFields(fs, results.get(index)) == null)
+ continue;
+ try {
+ writer.get().addDocument(document);
+ } catch (CorruptIndexException e) {
+ getLogger().error("Index is corrupted! {}", this, e);
+ throw new IllegalStateException(e);
+ } catch (IOException e) {
+ getLogger().error("Index is in problematic state! {}", this, e);
+ throw new IllegalStateException(e);
+ } finally {
+ synchronized (mon) {
+ mon.worked(1);
+ }
}
}
+ } catch (DatabaseException e) {
+ throw new IllegalStateException(e);
+ } finally {
+ s.release();
}
- } catch (DatabaseException e) {
- throw new IllegalStateException(e);
- } finally {
- s.release();
- }
- });
- }
-
- try {
- s.acquire(INDEXING_THREAD_COUNT);
- } catch (InterruptedException e) {
- getLogger().error("Could not initialize index {}", this, e);
- }
-
- // http://www.gossamer-threads.com/lists/lucene/java-dev/47895
- // and http://lucene.apache.org/java/docs/index.html#27+November+2011+-+Lucene+Core+3.5.0
- // advise against calling optimize at all. So let's not do it anymore.
- //writer.get().optimize();
- //writer.get().commit();
-
- mon.subTask("Flushing");
-
- if (IndexPolicy.PERF_INDEX_INIT)
- System.out.println(getDescriptor() + "Wrote index at " + indexPath + " in " + (1e-9 * (System.nanoTime()-start)) + " seconds.");
-
- } catch (DatabaseException e) {
- getLogger().error("Could not initialize index due to db {}", this, e);
- } finally {
- try {
- closeWriter(writer.getAndSet(null));
+ });
+ }
+
+ try {
+ s.acquire(INDEXING_THREAD_COUNT);
+ } catch (InterruptedException e) {
+ getLogger().error("Could not initialize index {}", this, e);
+ }
+
+ // http://www.gossamer-threads.com/lists/lucene/java-dev/47895
+ // and http://lucene.apache.org/java/docs/index.html#27+November+2011+-+Lucene+Core+3.5.0
+ // advise against calling optimize at all. So let's not do it anymore.
+ //writer.get().optimize();
+ //writer.get().commit();
+
+ mon.subTask("Flushing");
+
+ if (IndexPolicy.PERF_INDEX_INIT)
+ System.out.println(getDescriptor() + "Wrote index at " + indexPath + " in " + (1e-9 * (System.nanoTime()-start)) + " seconds.");
+
+ result.complete(null);
} finally {
- FileUtils.uncheckedClose(directory.getAndSet(null));
+ try {
+ closeWriter(writer.getAndSet(null));
+ } finally {
+ FileUtils.uncheckedClose(directory.getAndSet(null));
+ }
}
+ } catch (Throwable t) {
+ getLogger().error("Could not initialize index", t);
+ result.completeExceptionally(t);
}
}
import java.io.IOException;
import java.nio.file.Path;
-import java.util.HashMap;
+import java.util.Collection;
import java.util.List;
import java.util.Map;
-import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Semaphore;
+import java.util.stream.Stream;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexWriter;
private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(MemoryIndexing.class);
- final private Session session;
-
- final Map<String,Map<String,List<Map<String, Object>>>> persistentCache = new HashMap<String,Map<String,List<Map<String, Object>>>>();
- final Map<String,Map<String,List<Resource>>> persistentCacheResources = new HashMap<String,Map<String,List<Resource>>>();
-
- final private Map<String,RAMDirectory> directories = new HashMap<String,RAMDirectory>();
-
- final private Map<String,IndexedRelationsSearcherBase> immutableSearchers = new HashMap<String,IndexedRelationsSearcherBase>();
- final private Map<String,IndexedRelationsSearcher> searchers = new HashMap<String,IndexedRelationsSearcher>();
+ final private Session session;
+
+ final Map<String,Map<String,List<Map<String, Object>>>> persistentCache = new ConcurrentHashMap<>();
+ final Map<String,Map<String,List<Resource>>> persistentCacheResources = new ConcurrentHashMap<>();
+
+ final private ConcurrentHashMap<String,RAMDirectory> directories = new ConcurrentHashMap<>();
+
+ final private ConcurrentHashMap<String,IndexedRelationsSearcherBase> immutableSearchers = new ConcurrentHashMap<>();
+ final private ConcurrentHashMap<String,IndexedRelationsSearcher> searchers = new ConcurrentHashMap<>();
public MemoryIndexing(Session session) {
this.session = session;
}
-
+
protected Path getIndexDirectory(Resource relation, Resource input) {
return DatabaseIndexing.getIndexLocation(session, relation, input);
}
-
+
public IndexedRelationsSearcher get(RequestProcessor processor, Resource relation, Resource input) {
Path location = getIndexDirectory(relation, input);
- try {
- String key = location.toAbsolutePath().toString();
- IndexedRelationsSearcher searcher = searchers.get(key);
- if (searcher == null) {
+ String key = location.toAbsolutePath().toString();
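+ // A null result from the mapping function leaves the map unchanged, so a
+ // failed construction is retried on the next lookup.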
+ return searchers.computeIfAbsent(key, t -> {
+ try {
GenericRelation r = processor.sync(new Adapt<GenericRelation>(relation, GenericRelation.class));
- searcher = new IndexedRelationsSearcher(processor, relation, input, r);
- searchers.put(key, searcher);
+ return new IndexedRelationsSearcher(processor, relation, input, r);
+ } catch (Exception e) {
+ LOGGER.error("Could not get searcher for relation {} and input {} in location {}", relation, input, location, e);
+ return null;
}
- return searcher;
- } catch (Exception e) {
- LOGGER.error("Could not get searcher for relation {} and input {} in location {}", relation, input, location, e);
- return null;
- }
+ });
}
public IndexedRelationsSearcherBase getImmutable(RequestProcessor processor, Resource relation, Resource input) {
Path location = getIndexDirectory(relation, input);
- try {
- String key = location.toAbsolutePath().toString();
- IndexedRelationsSearcherBase searcher = immutableSearchers.get(key);
- if (searcher == null) {
- searcher = new ImmutableIndexedRelationsSearcher(processor, relation, input);
- immutableSearchers.put(key, searcher);
+ String key = location.toAbsolutePath().toString();
+ return immutableSearchers.computeIfAbsent(key, t -> {
+ try {
+ return new ImmutableIndexedRelationsSearcher(processor, relation, input);
+ } catch (Exception e) {
+ LOGGER.error("Could not get searcher base for relation {} and input {} in location {}", relation, input, location, e);
+ return null;
}
- return searcher;
- } catch (Exception e) {
- LOGGER.error("Could not get searcher base for relation {} and input {} in location {}", relation, input, location, e);
- return null;
- }
+ });
}
public static MemoryIndexing getInstance(Session session) {
- MemoryIndexing ret = session.peekService(MemoryIndexing.class);
+ MemoryIndexing ret = session.peekService(MemoryIndexing.class);
if(ret == null) {
- ret = new MemoryIndexing(session);
+ ret = new MemoryIndexing(session);
session.registerService(MemoryIndexing.class, ret);
}
return ret;
}
- public synchronized Directory getDirectory(String path, Analyzer analyzer) throws IOException {
- RAMDirectory directory = directories.get(path);
- if (directory == null) {
- synchronized (directories) {
- directory = directories.get(path);
- if (directory == null) {
- directory = new RAMDirectory();
+ public Directory getDirectory(String path, Analyzer analyzer) throws IOException {
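+ // computeIfAbsent cannot throw checked exceptions, so IOExceptions are
+ // tunneled out in a RuntimeException and unwrapped below.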
+ try {
+ return directories.computeIfAbsent(path, t -> {
+ try {
+ RAMDirectory directory = new RAMDirectory();
IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_4_9, analyzer);
new IndexWriter(directory, config.setOpenMode(OpenMode.CREATE)).close();
- directories.put(path, directory);
+ return directory;
+ } catch (IOException e) {
+ throw new RuntimeException(e);
}
- }
+ });
+ } catch (RuntimeException e) {
+ if (e.getCause() instanceof IOException)
+ throw (IOException) e.getCause();
+ throw e;
+ }
- return directory;
-
}
-
+
public void remove(String path) {
directories.remove(path);
}
-
+
public void flush(IProgressMonitor progress) throws Exception {
-
+ long startTime = System.currentTimeMillis();
SubMonitor monitor = SubMonitor.convert(progress);
-
- Set<Map.Entry<String, IndexedRelationsSearcher>> set = searchers.entrySet();
- Set<Map.Entry<String, IndexedRelationsSearcherBase>> iset = immutableSearchers.entrySet();
-
- monitor.setWorkRemaining(set.size()+iset.size());
-
- for(Map.Entry<String, IndexedRelationsSearcher> entry : set) {
-
- IndexedRelationsSearcher persistent = entry.getValue();
- IndexedRelationsMemorySearcher searcher = persistent.cache;
-
- if(persistent.isIndexAvailable()) {
- List<Object[]> os = searcher.allDocs(monitor, session);
- persistent.applyChanges(monitor, session, searcher.r, os);
- }
-
- monitor.worked(1);
- entry.getValue().changeState(monitor, session, State.READY);
-
- }
-
- for(Map.Entry<String, IndexedRelationsSearcherBase> entry : iset) {
-
- entry.getValue().changeState(monitor, session, State.READY);
- monitor.worked(1);
-
- }
-
+ Collection<IndexedRelationsSearcher> searcherEntries = searchers.values();
+ Collection<IndexedRelationsSearcherBase> immutableSearcherEntries = immutableSearchers.values();
+ int count = searcherEntries.size() + immutableSearcherEntries.size();
+ Semaphore sema = new Semaphore(0);
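+ // Flush all searchers in parallel; each one releases the semaphore when
+ // done, and all permits are awaited before the elapsed time is logged.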
+ Stream.concat(searcherEntries.stream(), immutableSearcherEntries.stream()).parallel().forEach(base -> {
+ try {
+ if (base.isIndexAvailable()) {
+ if (base instanceof IndexedRelationsSearcher) {
+ IndexedRelationsMemorySearcher searcher = ((IndexedRelationsSearcher) base).cache;
+ try {
+ List<Object[]> os = searcher.allDocs(monitor, session);
+ ((IndexedRelationsSearcher) base).applyChanges(monitor, session, searcher.r, os);
+ } catch (Exception e) {
+ LOGGER.error("Could not flush", e);
+ }
+ }
+ }
+ monitor.worked(1);
+ base.changeState(monitor, session, State.READY);
+ } finally {
+ sema.release();
+ }
+ });
+ sema.acquire(count);
+ long totalTime = System.currentTimeMillis() - startTime;
+ LOGGER.info("index flush " + totalTime);
}
}
public void reset(IProgressMonitor monitor, RequestProcessor processor, Resource resource, Resource input)
throws IndexException;
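+ /**
+  * Rebuilds every index managed by this service from scratch.
+  *
+  * @throws IndexException if the rebuild fails
+  */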
+ public void fullRebuild(IProgressMonitor monitor, RequestProcessor processor) throws IndexException;
+
}
--- /dev/null
+package org.simantics.project;
+
+import org.simantics.db.Session;
+
+/**
+ * @author Jani Simomaa
+ * @since 1.34.0
+ */
+public class SessionDescriptor {
+
+ private final Session session;
+ private final boolean fresh;
+
+ public SessionDescriptor(Session session, boolean fresh) {
+ this.session = session;
+ this.fresh = fresh;
+ }
+
+ public Session getSession() {
+ return session;
+ }
+
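+ /** @return {@code true} if the database was created from scratch for this session. */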
+ public boolean isFreshDatabase() {
+ return fresh;
+ }
+
+}
\ No newline at end of file
import java.net.URL;
import java.net.URLDecoder;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collection;
import java.util.Enumeration;
import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
import java.util.jar.Attributes;
import java.util.jar.Manifest;
+import java.util.stream.Collectors;
import org.eclipse.core.internal.runtime.PlatformActivator;
import org.eclipse.core.runtime.FileLocator;
import org.eclipse.equinox.p2.metadata.Version;
import org.eclipse.equinox.p2.metadata.VersionedId;
import org.osgi.framework.Bundle;
-import org.simantics.databoard.adapter.AdaptException;
import org.simantics.databoard.binding.Binding;
import org.simantics.databoard.binding.mutable.Variant;
import org.simantics.databoard.container.DataContainer;
import org.simantics.databoard.container.DataContainers;
-import org.simantics.databoard.serialization.SerializationException;
import org.simantics.graph.compiler.CompilationResult;
import org.simantics.graph.compiler.GraphCompiler;
import org.simantics.graph.compiler.GraphCompilerPreferences;
* @param collection
* @throws IOException
*/
- public static void getAllGraphs(Collection<GraphBundle> collection) throws IOException {
- for (Bundle bundle : getBundles()) {
- GraphBundle entry = getGraph(bundle);
- if (entry!=null) collection.add(entry);
- }
- }
+ public static Collection<GraphBundle> getAllGraphs() throws IOException {
+ CompletableFuture<Object> f = new CompletableFuture<>();
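+ // Records the first IOException raised while reading a bundle graph so it
+ // can be rethrown once the whole stream has been processed.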
+ Bundle[] bundles = getBundles();
+ Collection<GraphBundle> gbundles = Arrays.stream(bundles).map(t -> { // this could be done in parallel in the future?
+ if (f.isCompletedExceptionally())
+ return null;
+ try {
+ return PlatformUtil.getGraph(t);
+ } catch (IOException e) {
+ if (LOGGER.isDebugEnabled())
+ LOGGER.debug("Could not get graph {}", t, e);
+ f.completeExceptionally(e);
+ return null;
+ }
+ }).filter(Objects::nonNull).collect(Collectors.toList());
+ if (f.isCompletedExceptionally()) {
+ try {
+ f.get();
+ } catch (ExecutionException | InterruptedException e) {
+ Throwable cause = e.getCause();
+ if (cause instanceof IOException)
+ throw (IOException) cause;
+ throw new IOException(e);
+ }
+ }
+ return gbundles;
+ }
/**
* Get bundle
import org.simantics.graph.db.CoreInitialization;
import org.simantics.layer0.DatabaseManagementResource;
import org.simantics.layer0.Layer0;
+import org.simantics.project.SessionDescriptor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
* @param initialGraphs initialGraphs to install
* @throws DatabaseException
*/
- public Session createDatabase(File databaseDirectory) throws DatabaseException {
+ public SessionDescriptor createDatabase(File databaseDirectory) throws DatabaseException {
try {
LOGGER.debug("Creating database to "+ databaseDirectory);
}
};
session.syncRequest(req);
- return session;
+ return new SessionDescriptor(session, false);
}
CoreInitialization.initializeBuiltins(session);
// This will try to initialize Builtins.class but because there is no statements
mgt.createGraphBundle(graph, l0ex);
graph.flushCluster();
}});
- return session;
+ return new SessionDescriptor(session, true);
} finally {
if (null == session)
server1.stop();
import org.simantics.SimanticsPlatform;
import org.simantics.SimanticsPlatform.OntologyRecoveryPolicy;
import org.simantics.SimanticsPlatform.RecoveryPolicy;
+import org.simantics.TimingProgressMonitor;
import org.simantics.application.arguments.IArguments;
import org.simantics.application.arguments.SimanticsArguments;
import org.simantics.db.common.Indexing;
import org.simantics.db.exception.ResourceNotFoundException;
import org.simantics.db.indexing.DatabaseIndexing;
import org.simantics.db.layer0.genericrelation.DependenciesRelation;
+import org.simantics.db.layer0.genericrelation.IndexException;
+import org.simantics.db.layer0.genericrelation.IndexedRelations;
import org.simantics.db.layer0.util.SimanticsClipboardImpl;
import org.simantics.db.layer0.util.SimanticsKeys;
import org.simantics.db.layer0.util.TGTransferableGraphSource;
import org.simantics.project.ProjectFeatures;
import org.simantics.project.ProjectKeys;
import org.simantics.project.Projects;
+import org.simantics.project.SessionDescriptor;
import org.simantics.project.exception.ProjectException;
import org.simantics.project.features.registry.GroupReference;
import org.simantics.project.management.DatabaseManagement;
return application != null ? application : UUID.randomUUID().toString();
}
- private Session setupDatabase(String databaseDriverId, IProgressMonitor progressMonitor, RecoveryPolicy workspacePolicy, PlatformUserAgent userAgent) throws PlatformException {
+ private SessionDescriptor setupDatabase(String databaseDriverId, IProgressMonitor progressMonitor, RecoveryPolicy workspacePolicy, PlatformUserAgent userAgent) throws PlatformException {
if (progressMonitor == null)
progressMonitor = new NullProgressMonitor();
Path workspaceLocation = Platform.getLocation().toFile().toPath();
PlatformUtil.compileAllDynamicOntologies();
- progressMonitor.setTaskName("Asserting all ontologies are installed");
+ String message = "Asserting all ontologies are installed";
+ LOGGER.info(message);
+ progressMonitor.setTaskName(message);
final Map<GraphBundleRef, GraphBundleEx> platformTGs = new HashMap<GraphBundleRef, GraphBundleEx>();
try {
// Get a list of bundles installed into the database
- progressMonitor.subTask("find installed bundles from database");
+ message = "find installed bundles from database";
+ progressMonitor.subTask(message);
+ LOGGER.info(message);
Map<GraphBundleRef, GraphBundleEx> installedTGs = new HashMap<GraphBundleRef, GraphBundleEx>();
for (GraphBundle b : session.syncRequest( mgmt.GraphBundleQuery )) {
installedTGs.put(GraphBundleRef.of(b), GraphBundleEx.extend(b));
// if(installedTGs.size() > 1) return;
// Get a list of all bundles in the platform (Bundle Context)
- List<GraphBundle> tgs = new ArrayList<GraphBundle>();
- progressMonitor.subTask("load all transferable graphs from platform");
- PlatformUtil.getAllGraphs(tgs);
- progressMonitor.subTask("extend bundles to compile versions");
+ message = "load all transferable graphs from platform";
+ progressMonitor.subTask(message);
+ LOGGER.info(message);
+ Collection<GraphBundle> tgs = PlatformUtil.getAllGraphs();
+ message = "extend bundles to compile versions";
+ progressMonitor.subTask(message);
+ LOGGER.info(message);
for (GraphBundle b : tgs) {
GraphBundleEx gbe = GraphBundleEx.extend(b);
gbe.build();
}
// Compile a list of TGs that need to be installed or reinstalled in the database
- progressMonitor.subTask("check bundle reinstallation demand");
+ message = "check bundle reinstallation demand";
+ progressMonitor.subTask(message);
+ LOGGER.info(message);
List<GraphBundleEx> installTGs = new ArrayList<GraphBundleEx>();
// Create list of TGs to update, <newTg, oldTg>
Map<GraphBundleEx,GraphBundleEx> reinstallTGs = new TreeMap<GraphBundleEx,GraphBundleEx>();
}
if (ontologyPolicy == OntologyRecoveryPolicy.Merge) {
- progressMonitor.subTask("Merging ontology changes");
+ message = "Merging ontology changes";
+ progressMonitor.subTask(message);
+ LOGGER.info(message);
// Sort missing TGs into install order
GraphDependencyAnalyzer<GraphBundle> analyzer = new GraphDependencyAnalyzer<GraphBundle>();
for(GraphBundle tg : installTGs) analyzer.addGraph(tg, tg.getGraph());
public boolean assertProject(IProgressMonitor progressMonitor, RecoveryPolicy workspacePolicy, boolean installProject) throws PlatformException {
- if (progressMonitor == null) progressMonitor = new NullProgressMonitor();
+ SubMonitor monitor = SubMonitor.convert(progressMonitor, 10);
final DatabaseManagement mgmt = new DatabaseManagement();
- progressMonitor.setTaskName("Asserting project resource exists in the database");
+ monitor.setTaskName("Asserting project resource exists in the database");
try {
projectResource = session.syncRequest( Queries.resource( projectURI ) );
} catch (ResourceNotFoundException nfe) {
} catch (DatabaseException e) {
throw new PlatformException("Failed to create "+projectURI, e);
}
- progressMonitor.worked(10);
+ monitor.worked(10);
return installProject;
TimeLogger.log("Beginning of SimanticsPlatform.startUp");
LOGGER.info("Beginning of SimanticsPlatform.startUp");
-
- if (progressMonitor == null) progressMonitor = new NullProgressMonitor();
+
+ SubMonitor monitor = SubMonitor.convert(progressMonitor, 1000);
// For debugging on what kind of platform automatic tests are running in
// case there are problems.
boolean usingBaseline = handleBaselineDatabase();
// 1. Assert there is a database at <workspace>/db
- session = setupDatabase(databaseDriverId, progressMonitor, workspacePolicy, userAgent);
+ SessionDescriptor sessionDescriptor = setupDatabase(databaseDriverId, monitor.newChild(200, SubMonitor.SUPPRESS_NONE), workspacePolicy, userAgent);
+ session = sessionDescriptor.getSession();
TimeLogger.log("Database setup complete");
- // 1.1
+ // 1.1 Delete all indexes if we cannot be certain they are up-to-date
+ // A full index rebuild will be done later, before project activation.
XSupport support = session.getService(XSupport.class);
if (support.rolledback()) {
try {
// 2. Assert all graphs, and correct versions, are installed to the database
if(!usingBaseline) {
- synchronizeOntologies(progressMonitor, ontologyPolicy, requireSynchronize);
+ synchronizeOntologies(monitor.newChild(400, SubMonitor.SUPPRESS_NONE), ontologyPolicy, requireSynchronize);
TimeLogger.log("Synchronized ontologies");
}
// 4. Assert simantics.cfg exists
- boolean installProject = assertConfiguration(progressMonitor,workspacePolicy);
+ boolean installProject = assertConfiguration(monitor.newChild(25, SubMonitor.SUPPRESS_NONE),workspacePolicy);
// 5. Assert Project Resource is installed in the database
- installProject = assertProject(progressMonitor, workspacePolicy, installProject);
+ installProject = assertProject(monitor.newChild(25, SubMonitor.SUPPRESS_NONE), workspacePolicy, installProject);
// 6. Install all features into project, if in debug mode
- updateInstalledGroups(progressMonitor, true); //installProject);
+ updateInstalledGroups(monitor.newChild(25), true); //installProject);
TimeLogger.log("Installed all features into project");
// 7. Assert L0.Session in database for this session
- assertSessionModel(progressMonitor);
+ assertSessionModel(monitor.newChild(25, SubMonitor.SUPPRESS_NONE));
session.getService(XSupport.class).setServiceMode(false, false);
try {
+ monitor.setTaskName("Flush query cache");
session.syncRequest((Write) graph -> {
QueryControl qc = graph.getService(QueryControl.class);
qc.flush(graph);
boolean loadProject = true;
try {
+ monitor.setTaskName("Open database session");
sessionContext = SimanticsPlatform.INSTANCE.createSessionContext(true);
// This must be before setSessionContext since some listeners might query this
sessionContext.setHint(SimanticsKeys.KEY_PROJECT, SimanticsPlatform.INSTANCE.projectResource);
Bindings.defaultBindingFactory.getRepository().put(RGB.Integer.BINDING.type(), RGB.Integer.BINDING);
Bindings.defaultBindingFactory.getRepository().put(Font.BINDING.type(), Font.BINDING);
- if(loadProject) {
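+ // Rebuild all indexes before loading the project: either the database was
+ // rolled back (indexes were deleted in step 1.1 above) or it is a fresh
+ // database with no indexes yet.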
+ if (support.rolledback() || sessionDescriptor.isFreshDatabase()) {
+ monitor.setTaskName("Rebuilding all indexes");
+ try {
+ session.getService(IndexedRelations.class).fullRebuild(monitor.newChild(100), session);
+ } catch (IndexException e) {
+ LOGGER.error("Failed to re-build all indexes", e);
+ }
+ } else {
+ monitor.worked(100);
+ }
+ if(loadProject) {
TimeLogger.log("Load projects");
project = Projects.loadProject(sessionContext.getSession(), SimanticsPlatform.INSTANCE.projectResource);
+ monitor.worked(100);
sessionContext.setHint(ProjectKeys.KEY_PROJECT, project);
TimeLogger.log("Loading projects complete");
project.activate();
TimeLogger.log("Project activated");
+ monitor.worked(100);
}
} catch (DatabaseException e) {
* Contributors:
* VTT Technical Research Centre of Finland - initial API and implementation
*******************************************************************************/
-package org.simantics.workbench.internal;
+package org.simantics;
import org.eclipse.core.runtime.NullProgressMonitor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* @author Tuukka Lehtonen
*/
-class TimingProgressMonitor extends NullProgressMonitor {
+public class TimingProgressMonitor extends NullProgressMonitor {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(TimingProgressMonitor.class);
private final long start;
this.start = System.nanoTime();
}
+ private void log(String msg) {
+ LOGGER.info("{} @ {} s", msg, (System.nanoTime() - start) * 1e-9);
+ }
+
@Override
public void worked(int work) {
- System.out.println("worked(" + work + ") @ " + (System.nanoTime() - start)*1e-9);
+ log("worked(" + work + ")");
}
@Override
public void subTask(String name) {
- System.out.println("subTask(" + name + ") @ " + (System.nanoTime() - start)*1e-9);
+ log("subTask(" + name + ")");
}
@Override
public void setTaskName(String name) {
- System.out.println("setTaskName(" + name + ") @ " + (System.nanoTime() - start)*1e-9);
+ log("setTaskName(" + name + ")");
}
@Override
public void setCanceled(boolean value) {
- System.out.println("setCanceled(" + value+ ") @ " + (System.nanoTime() - start)*1e-9);
+ log("setCanceled(" + value+ ")");
super.setCanceled(value);
}
@Override
public boolean isCanceled() {
- System.out.println("isCanceled() @ " + (System.nanoTime() - start)*1e-9);
+ log("isCanceled()");
return super.isCanceled();
}
@Override
public void internalWorked(double work) {
- System.out.println("internalWorked(" + work + ") @ " + (System.nanoTime() - start)*1e-9);
+ log("internalWorked(" + work + ")");
}
@Override
public void done() {
- System.out.println("done() @ " + (System.nanoTime() - start)*1e-9);
+ log("done()");
}
@Override
public void beginTask(String name, int totalWork) {
- System.out.println("beginTask(" + name + ", " + totalWork + ") @ " + (System.nanoTime() - start)*1e-9);
+ log("beginTask(" + name + ", " + totalWork + ")");
}
}
\ No newline at end of file