1 /*******************************************************************************
2 * Copyright (c) 2007, 2010 Association for Decentralized Information Management
4 * All rights reserved. This program and the accompanying materials
5 * are made available under the terms of the Eclipse Public License v1.0
6 * which accompanies this distribution, and is available at
7 * http://www.eclipse.org/legal/epl-v10.html
10 * VTT Technical Research Centre of Finland - initial API and implementation
11 *******************************************************************************/
12 package org.simantics.acorn;
14 import java.io.IOException;
15 import java.nio.file.Path;
16 import java.util.ArrayList;
17 import java.util.LinkedList;
18 import java.util.concurrent.ExecutorService;
19 import java.util.concurrent.Executors;
20 import java.util.concurrent.Semaphore;
21 import java.util.concurrent.ThreadFactory;
22 import java.util.concurrent.TimeUnit;
24 import org.simantics.acorn.internal.ClusterChange;
25 import org.simantics.acorn.internal.ClusterUpdateProcessorBase;
26 import org.simantics.acorn.internal.UndoClusterUpdateProcessor;
27 import org.simantics.acorn.lru.ClusterInfo;
28 import org.simantics.acorn.lru.ClusterStreamChunk;
29 import org.simantics.acorn.lru.ClusterUpdateOperation;
30 import org.simantics.acorn.lru.ClusterChangeSet.Entry;
31 import org.simantics.db.ClusterCreator;
32 import org.simantics.db.Database;
33 import org.simantics.db.ServiceLocator;
34 import org.simantics.db.common.utils.Logger;
35 import org.simantics.db.exception.DatabaseException;
36 import org.simantics.db.exception.SDBException;
37 import org.simantics.db.server.ProCoreException;
38 import org.simantics.db.service.ClusterSetsSupport;
39 import org.simantics.db.service.ClusterUID;
40 import org.simantics.utils.datastructures.Pair;
41 import org.simantics.utils.logging.TimeLogger;
43 import gnu.trove.map.hash.TLongObjectHashMap;
// Acorn implementation of Database.Session built on top of ClusterManager.
// NOTE(review): this listing embeds original file line numbers and omits lines;
// comments below describe only what is visible here.
45 public class GraphClientImpl2 implements Database.Session {
47 public static final boolean DEBUG = false;
// Cluster storage manager shared with MainProgram and the transaction machinery below.
49 public final ClusterManager clusters;
// Serializes read/write transaction admission (see inner TransactionManager).
51 private TransactionManager transactionManager = new TransactionManager();
// Single non-daemon thread that runs MainProgram for the life of the session.
52 private ExecutorService executor = Executors.newSingleThreadExecutor(new ClientThreadFactory("Core Main Program", false));
// Single daemon thread used for best-effort snapshot saving (see tryMakeSnapshot).
53 private ExecutorService saver = Executors.newSingleThreadExecutor(new ClientThreadFactory("Core Snapshot Saver", true));
// NOTE(review): mutable static singleton, exposed via getInstance() — verify lifecycle/assignment (elided here).
55 private static GraphClientImpl2 INSTANCE;
56 private Path dbFolder;
57 private final Database database;
58 private ServiceLocator locator;
59 private MainProgram mainProgram;
// Thread factory that gives each thread a fixed name and daemon flag.
61 static class ClientThreadFactory implements ThreadFactory {
// (name/daemon field declarations are elided in this listing)
66 public ClientThreadFactory(String name, boolean daemon) {
72 public Thread newThread(Runnable r) {
73 Thread thread = new Thread(r, name);
// Daemon flag from the factory: "Core Snapshot Saver" threads do not block JVM exit.
74 thread.setDaemon(daemon);
// (return of the created thread is elided in this listing)
// Builds the client: wires a ClusterManager to dbFolder, points the
// ClusterSetsSupport service at the manager's directories, and starts
// MainProgram on the dedicated executor thread.
79 public GraphClientImpl2(Database database, Path dbFolder, ServiceLocator locator) throws IOException {
80 this.database = database;
81 this.dbFolder = dbFolder;
82 this.locator = locator;
83 this.clusters = new ClusterManager(dbFolder);
// (cluster load/initialization lines are elided here in this listing)
85 ClusterSetsSupport cssi = locator.getService(ClusterSetsSupport.class);
86 cssi.updateReadAndWriteDirectories(clusters.lastSessionDirectory, clusters.workingDirectory);
87 mainProgram = new MainProgram(this, clusters);
// MainProgram runs for the life of the session on the "Core Main Program" thread.
88 executor.execute(mainProgram);
92 public Path getDbFolder() {
// Asynchronously attempts a snapshot on the saver thread: takes a write
// transaction to keep other writers out, then requires MainProgram to be idle
// before actually snapshotting. Best-effort: all failures are logged, not thrown.
96 public void tryMakeSnapshot() throws IOException {
101 saver.execute(new Runnable() {
105 Transaction tr = null;
107 // First take a write transaction
108 tr = askWriteTransaction(-1);
109 // Then make sure that MainProgram is idling
110 mainProgram.mutex.acquire();
112 synchronized(mainProgram) {
// Snapshot only while no operations are queued; otherwise skip this round.
113 if(mainProgram.operations.isEmpty()) {
116 // MainProgram is becoming busy again - delay snapshotting
121 mainProgram.mutex.release();
123 } catch (IOException e) {
124 Logger.defaultLogError(e);
125 } catch (ProCoreException e) {
126 Logger.defaultLogError(e);
// NOTE(review): InterruptedException is only logged — the interrupt flag is not
// restored (Thread.currentThread().interrupt()); verify this is intentional.
127 } catch (InterruptedException e) {
128 Logger.defaultLogError(e);
// NOTE(review): presumably inside a finally block (elided) so the write
// transaction is always ended — confirm against the full source.
132 endTransaction(tr.getTransactionId());
133 } catch (ProCoreException e) {
134 Logger.defaultLogError(e);
// Forces (or requests, depending on 'force') a snapshot via the cluster manager.
142 public void makeSnapshot(boolean force) throws IOException {
143 clusters.makeSnapshot(locator, force);
// Clones a cluster through the supplied creator (delegates to ClusterManager).
146 public <T> T clone(ClusterUID uid, ClusterCreator creator) throws DatabaseException {
147 return clusters.clone(uid, creator);
150 // private void save() throws IOException {
154 public void load() throws IOException {
158 // public void modiFileEx(ClusterUID uid, int resourceKey, long offset, long size, byte[] bytes, long pos, ClusterSupport support) {
159 // clusters.modiFileEx(uid, resourceKey, offset, size, bytes, pos, support);
163 public Database getDatabase() {
// close() guards: prevent re-entrant/double shutdown.
167 private boolean closed = false;
168 private boolean isClosing = false;
// Shuts the session down; waits briefly for the worker threads to terminate.
171 public void close() throws ProCoreException {
172 System.err.println("Closing " + this + " and mainProgram " + mainProgram);
173 if(!closed && !isClosing) {
// Bounded waits (500 ms each); the outcome is only reported, not enforced.
182 boolean executorTerminated = executor.awaitTermination(500, TimeUnit.MILLISECONDS);
183 boolean saverTerminated = saver.awaitTermination(500, TimeUnit.MILLISECONDS);
185 System.err.println("executorTerminated=" + executorTerminated + ", saverTerminated=" + saverTerminated);
// Translate checked shutdown failures into the session API's exception type.
192 } catch (IOException | InterruptedException e) {
193 throw new ProCoreException(e);
// Re-opening an existing session object is not supported in Acorn.
201 public void open() throws ProCoreException {
202 throw new UnsupportedOperationException();
206 public boolean isClosed() throws ProCoreException {
// Accepts a commit: advances the head change-set id, persists the change set
// with its metadata, records the transaction id, and notifies MainProgram.
211 public void acceptCommit(long transactionId, long changeSetId, byte[] metadata) throws ProCoreException {
213 clusters.state.headChangeSetId++;
// The committed id is changeSetId + 1, mirroring the head increment above.
215 long committedChangeSetId = changeSetId + 1;
217 clusters.commitChangeSet(committedChangeSetId, metadata);
219 clusters.state.transactionId = transactionId;
221 mainProgram.committed();
223 TimeLogger.log("Accepted commit");
// Experimental (see the warning printed below): cancels a commit by undoing the
// given change set, then advances and returns the head change-set id.
228 public long cancelCommit(long transactionId, long changeSetId,
229 byte[] metadata, OnChangeSetUpdate onChangeSetUpdate)
230 throws ProCoreException {
231 System.err.println("GraphClientImpl2.cancelCommit() called!! this is experimental and might cause havoc!");
233 undo(new long[] {changeSetId}, onChangeSetUpdate);
234 } catch (SDBException e) {
// Cause is preserved when translating to the API exception type.
236 throw new ProCoreException(e);
238 clusters.state.headChangeSetId++;
239 return clusters.state.headChangeSetId;
// Read transactions are admitted by the TransactionManager.
243 public Transaction askReadTransaction() throws ProCoreException {
244 return transactionManager.askReadTransaction();
// Transaction admission states. Constants are elided in this listing, but IDLE,
// READ and WRITE are all referenced by the TransactionManager below.
247 enum TransactionState {
// A queued transaction request: the requested state plus the semaphore the
// requester blocks on until dispatch() releases it.
251 class TransactionRequest {
252 public TransactionState state;
253 public Semaphore semaphore;
254 public TransactionRequest(TransactionState state, Semaphore semaphore) {
256 this.semaphore = semaphore;
// Admits transactions FIFO. Requesters queue a TransactionRequest with a
// zero-permit semaphore and block until dispatch() releases it; a finished
// WRITE returns the manager to IDLE.
260 class TransactionManager {
262 private TransactionState currentTransactionState = TransactionState.IDLE;
// Count of currently admitted readers.
264 private int reads = 0;
// FIFO queue of waiting requests, plus a map from transaction id to request so
// endTransaction(id) can locate the matching request.
266 LinkedList<TransactionRequest> requests = new LinkedList<TransactionRequest>();
268 TLongObjectHashMap<TransactionRequest> requestMap = new TLongObjectHashMap<TransactionRequest>();
// Builds the Transaction handle for an admitted request; the ids are snapshots
// of the current head change set and the next transaction id.
270 private synchronized Transaction makeTransaction(TransactionRequest req) {
272 final int csId = clusters.state.headChangeSetId;
273 final long trId = clusters.state.transactionId+1;
274 requestMap.put(trId, req);
275 return new Transaction() {
278 public long getTransactionId() {
283 public long getHeadChangeSetId() {
290 * This method cannot be synchronized since it waits and must support multiple entries
291 * by query thread(s) and internal transactions such as snapshot saver
293 public Transaction askReadTransaction() throws ProCoreException {
295 Semaphore semaphore = new Semaphore(0);
297 TransactionRequest req = queue(TransactionState.READ, semaphore);
// (semaphore acquisition elided) — blocks here until dispatch() admits the request.
301 } catch (InterruptedException e) {
302 throw new ProCoreException(e);
305 return makeTransaction(req);
// Admits the head of the queue: bumps the reader count for READ, then unblocks it.
309 private synchronized void dispatch() {
310 TransactionRequest r = requests.removeFirst();
311 if(r.state == TransactionState.READ) reads++;
312 r.semaphore.release();
// Decides whether the queued head request may run under the current state.
315 private synchronized void processRequests() {
319 if(requests.isEmpty()) return;
320 TransactionRequest req = requests.peek();
322 if(currentTransactionState == TransactionState.IDLE) {
324 // Accept anything while IDLE
325 currentTransactionState = req.state;
// While READ, a further READ may be admitted; other cases are elided in this listing.
328 } else if (currentTransactionState == TransactionState.READ) {
330 if(req.state == currentTransactionState) {
342 } else if (currentTransactionState == TransactionState.WRITE) {
// Appends a request to the FIFO queue (return of 'req' elided in this listing).
353 private synchronized TransactionRequest queue(TransactionState state, Semaphore semaphore) {
354 TransactionRequest req = new TransactionRequest(state, semaphore);
355 requests.addLast(req);
361 * This method cannot be synchronized since it waits and must support multiple entries
362 * by query thread(s) and internal transactions such as snapshot saver
364 public Transaction askWriteTransaction()
365 throws ProCoreException {
367 Semaphore semaphore = new Semaphore(0);
369 TransactionRequest req = queue(TransactionState.WRITE, semaphore);
373 } catch (InterruptedException e) {
374 throw new ProCoreException(e);
// Tell MainProgram the write transaction targets the next change-set id.
377 mainProgram.startTransaction(clusters.state.headChangeSetId+1);
379 return makeTransaction(req);
// Ends a transaction; a finished WRITE resets the state machine to IDLE.
383 public synchronized long endTransaction(long transactionId) throws ProCoreException {
385 TransactionRequest req = requestMap.remove(transactionId);
386 if(req.state == TransactionState.WRITE) {
387 currentTransactionState = TransactionState.IDLE;
// NOTE(review): the reader bookkeeping between these lines is elided; state also
// returns to IDLE here — presumably when the last reader ends. Confirm.
392 currentTransactionState = TransactionState.IDLE;
396 return clusters.state.transactionId;
// Database.Session entry points: delegate transaction handling to the manager.
// Note the 'transactionId' argument is not forwarded (elided lines permitting).
402 public Transaction askWriteTransaction(final long transactionId)
403 throws ProCoreException {
404 return transactionManager.askWriteTransaction();
408 public long endTransaction(long transactionId) throws ProCoreException {
409 return transactionManager.endTransaction(transactionId);
413 public String execute(String command) throws ProCoreException {
414 // This is called only by WriteGraphImpl.commitAccessorChanges
415 // We can ignore this in Acorn
// Change-set metadata is read directly from the cluster store.
420 public byte[] getChangeSetMetadata(long changeSetId)
421 throws ProCoreException {
422 return clusters.getMetadata(changeSetId);
// NOTE(review): debug stack trace only; stray double semicolon at the end of the
// printStackTrace() line below (harmless empty statement, but should be removed).
426 public ChangeSetData getChangeSetData(long minChangeSetId,
427 long maxChangeSetId, OnChangeSetUpdate onChangeSetupate)
428 throws ProCoreException {
430 new Exception("GetChangeSetDataFunction " + minChangeSetId + " " + maxChangeSetId).printStackTrace();;
// Parts of the Database.Session API that Acorn does not implement.
436 public ChangeSetIds getChangeSetIds() throws ProCoreException {
437 throw new UnsupportedOperationException();
441 public Cluster getCluster(byte[] clusterId) throws ProCoreException {
442 throw new UnsupportedOperationException();
446 public ClusterChanges getClusterChanges(long changeSetId, byte[] clusterId)
447 throws ProCoreException {
448 throw new UnsupportedOperationException();
// Cluster id listing IS supported, via the cluster manager.
452 public ClusterIds getClusterIds() throws ProCoreException {
453 return clusters.getClusterIds();
// Server/protocol/database identification (returned values elided in this listing).
457 public Information getInformation() throws ProCoreException {
458 return new Information() {
461 public String getServerId() {
466 public String getProtocolId() {
471 public String getDatabaseId() {
476 public long getFirstChangeSetId() {
// Refresh view: the current head change-set id plus a snapshot of the cluster
// id lists taken once when getRefresh() is called.
484 public Refresh getRefresh(long changeSetId) throws ProCoreException {
486 final ClusterIds ids = getClusterIds();
488 return new Refresh() {
491 public long getHeadChangeSetId() {
492 return clusters.state.headChangeSetId;
496 public long[] getFirst() {
497 return ids.getFirst();
501 public long[] getSecond() {
502 return ids.getSecond();
// Resource blobs and segments are served directly from the cluster store.
509 public byte[] getResourceFile(final byte[] clusterUID, final int resourceIndex) throws ProCoreException {
510 return clusters.getResourceFile(clusterUID, resourceIndex);
514 public ResourceSegment getResourceSegment(final byte[] clusterUID,
515 final int resourceIndex, final long segmentOffset, short segmentSize) throws ProCoreException {
517 return clusters.getResourceSegment(clusterUID, resourceIndex, segmentOffset, segmentSize);
// NOTE(review): the visible statement ignores 'count' and advances the reserved-id
// counter by exactly one — unless elided lines consume 'count', reserving N ids
// appears unimplemented; verify against the full source and callers.
522 public long reserveIds(int count) throws ProCoreException {
523 return clusters.state.reservedIds++;
// Applies a serialized cluster update: parses the operation, gets/creates the
// target cluster entry in the LRU, then schedules the update on MainProgram.
527 public void updateCluster(byte[] operations) throws ProCoreException {
529 ClusterUpdateOperation operation = new ClusterUpdateOperation(clusters, operations);
530 ClusterInfo info = clusters.clusterLRU.getOrCreate(operation.uid, true);
531 if(info == null) throw new IllegalStateException();
534 info.scheduleUpdate();
535 mainProgram.schedule(operation);
// Anything thrown is wrapped (cause preserved) into an unchecked failure.
536 } catch (Throwable t) {
537 throw new IllegalStateException(t);
// Resolves an undo processor for a cluster change-set id of the form
// "<chunkKey>.<offset>", holding the owning stream chunk's mutex while building it.
544 private UndoClusterUpdateProcessor getUndoCSS(String ccsId) throws DatabaseException {
546 String[] ss = ccsId.split("\\.");
547 String chunkKey = ss[0];
548 int chunkOffset = Integer.parseInt(ss[1]);
549 ClusterStreamChunk chunk = clusters.streamLRU.getWithoutMutex(chunkKey);
550 if(chunk == null) throw new IllegalStateException("Cluster Stream Chunk " + chunkKey + " was not found.");
551 chunk.acquireMutex();
553 return chunk.getUndoProcessor(clusters, chunkOffset, ccsId);
// Failures are wrapped unchecked with the cause preserved.
554 } catch (Throwable t) {
555 throw new IllegalStateException(t);
// NOTE(review): presumably inside a finally block (elided) so the chunk mutex is
// always released — confirm against the full source.
557 chunk.releaseMutex();
// Undoes one cluster change set: replays its entries in reverse order through a
// ClusterChange that accumulates the resulting bytes into 'clusterChanges'.
562 private void performUndo(String ccsId, ArrayList<Pair<ClusterUID, byte[]>> clusterChanges, UndoClusterSupport support) throws ProCoreException, DatabaseException {
564 UndoClusterUpdateProcessor proc = getUndoCSS(ccsId);
566 int clusterKey = clusters.getClusterKeyByClusterUIDOrMakeWithoutMutex(proc.getClusterUID());
568 clusters.clusterLRU.acquireMutex();
571 ClusterChange cs = new ClusterChange(clusterChanges, proc.getClusterUID());
572 for(int i=0;i<proc.entries.size();i++) {
// Reverse iteration: newest entry is processed first.
574 Entry e = proc.entries.get(proc.entries.size() - 1 - i);
575 e.process(clusters, cs, clusterKey);
// NOTE(review): presumably inside a finally block (elided) so the LRU mutex is
// always released — confirm against the full source.
582 clusters.clusterLRU.releaseMutex();
// Undoes the given change sets (processed newest-first), then reports each
// accumulated per-cluster change to the listener as a ChangeSetUpdate.
588 public boolean undo(long[] changeSetIds, OnChangeSetUpdate onChangeSetUpdate) throws SDBException {
590 final ArrayList<Pair<ClusterUID, byte[]>> clusterChanges = new ArrayList<Pair<ClusterUID, byte[]>>();
592 UndoClusterSupport support = new UndoClusterSupport(clusters);
594 final int changeSetId = clusters.state.headChangeSetId;
596 if(ClusterUpdateProcessorBase.DEBUG)
597 System.err.println(" === BEGIN UNDO ===");
// Change sets are undone in reverse order; within each, the individual cluster
// change sets are also walked in reverse.
599 for(int i=0;i<changeSetIds.length;i++) {
600 final long id = changeSetIds[changeSetIds.length-1-i];
601 ArrayList<String> ccss = clusters.getChanges(id);
602 for(int j=0;j<ccss.size();j++) {
604 if(ClusterUpdateProcessorBase.DEBUG)
605 System.err.println("performUndo " + ccss.get(ccss.size()-j-1));
606 performUndo(ccss.get(ccss.size()-j-1), clusterChanges, support);
// (DatabaseException handling body elided in this listing)
607 } catch (DatabaseException e) {
613 if(ClusterUpdateProcessorBase.DEBUG)
614 System.err.println(" === END UNDO ===");
// Notify the listener once per accumulated cluster change.
616 for(int i=0;i<clusterChanges.size();i++) {
618 final int changeSetIndex = i;
620 final Pair<ClusterUID, byte[]> pair = clusterChanges.get(i);
622 final ClusterUID cuid = pair.first;
623 final byte[] data = pair.second;
625 onChangeSetUpdate.onChangeSetUpdate(new ChangeSetUpdate() {
628 public long getChangeSetId() {
633 public int getChangeSetIndex() {
638 public int getNumberOfClusterChangeSets() {
639 return clusterChanges.size();
643 public int getIndexOfClusterChangeSet() {
644 return changeSetIndex;
648 public byte[] getClusterId() {
649 return cuid.asBytes();
653 public boolean getNewCluster() {
658 public byte[] getData() {
// Trailing accessors (bodies elided in this listing); getInstance presumably
// returns the static INSTANCE field declared above — confirm.
671 public static GraphClientImpl2 getInstance() {
675 public ServiceLocator getServiceLocator() {
680 public boolean refreshEnabled() {
694 ////////////////////////