import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.Semaphore;
+import java.util.function.Consumer;
import org.simantics.databoard.Bindings;
import org.simantics.db.ClusterCreator;
import org.simantics.db.Database;
+import org.simantics.db.Database.Session.ClusterChanges;
import org.simantics.db.DevelopmentKeys;
import org.simantics.db.SessionVariables;
-import org.simantics.db.Database.Session.ClusterChanges;
import org.simantics.db.common.utils.Logger;
import org.simantics.db.exception.ClusterDoesNotExistException;
import org.simantics.db.exception.DatabaseException;
import org.simantics.db.service.ClusterCollectorPolicy.CollectorCluster;
import org.simantics.db.service.ClusterUID;
import org.simantics.utils.Development;
-import org.simantics.utils.datastructures.Callback;
import fi.vtt.simantics.procore.DebugPolicy;
import fi.vtt.simantics.procore.internal.ClusterControlImpl.ClusterStateImpl;
public final class ClusterTable implements IClusterTable {
// NOTE(review): '+'/'-' at line starts throughout this file are patch markers,
// not Java; comments added here carry no marker.
// Debug switch for validateSize(): when true, every size-affecting operation
// re-derives the cached byte total and cross-checks the bookkeeping. Off by
// default because the full scan is expensive.
+ private static final boolean VALIDATE_SIZE = false;
+
// Upper bound for cached cluster bytes (128 MiB).
int maximumBytes = 128 * 1024 * 1024;
// 80% of maximumBytes — presumably the threshold at which the collector
// policy starts evicting clusters; confirm against collector usage.
int limit = (int)(0.8*(double)maximumBytes);
importanceMap.put(cluster.getImportance(), new ImportanceEntry((ClusterImpl)cluster));
if(collectorPolicy != null) collectorPolicy.added((ClusterImpl)cluster);
- if (!dirtySizeInBytes && !existing.isLoaded()) {
- sizeInBytes += cluster.getCachedSize();
+ if (!dirtySizeInBytes) {
+ if (existing != cluster) {
+ adjustCachedSize(-existing.getCachedSize(), existing);
+ }
+ // This will update sizeInBytes through adjustCachedSize
+ cluster.getCachedSize();
}
-
+ validateSize();
}
// Intentional no-op: releasing via the CollectorCluster handle does nothing
// here; the overload taking a raw clusterId performs the actual release work.
synchronized void release(CollectorCluster cluster) {
}
// Releases the cached cluster identified by clusterId: flushes any pending
// write-state stream for it, then deducts its cached size from the table's
// byte accounting (skipped while the total is marked dirty).
// NOTE(review): '+'/'-' prefixes below are patch markers, not Java.
synchronized void release(long clusterId) {
+ //System.out.println("ClusterTable.release(" + clusterId + "): " + sizeInBytes);
+ //validateSize();
ClusterImpl clusterImpl = clusters.getClusterByClusterId(clusterId);
// Unknown id — nothing cached, nothing to do.
if (null == clusterImpl)
return;
// Flush pending writes for this cluster before dropping it from the cache.
if (sessionImpl.writeState != null)
sessionImpl.clusterStream.flush(clusterImpl.clusterUID);
if (!dirtySizeInBytes) {
- sizeInBytes -= clusterImpl.getCachedSize();
// Patch routes the subtraction through adjustCachedSize for centralized
// (and debuggable) size bookkeeping.
+ adjustCachedSize(-clusterImpl.getCachedSize(), clusterImpl);
}
}
// Returns the cluster that should receive newly created resources, allocating
// a fresh cluster id from the graph session when none exists yet or when the
// current one has reached CLUSTER_FILL_SIZE resources.
// NOTE(review): '+'/'-' prefixes below are patch markers; the patch replaces
// the early returns with a single exit through ensureLoaded(result), so the
// returned cluster is guaranteed loaded — confirm ensureLoaded's contract.
ClusterImpl getNewResourceCluster(ClusterSupport cs, GraphSession graphSession, boolean writeOnly)
throws DatabaseException {
+ ClusterImpl result = null;
if (Constants.NewClusterId == newResourceClusterId) {
// First allocation: no current new-resource cluster yet.
newResourceClusterId = graphSession.newClusterId();
- return getClusterByClusterIdOrMake(newResourceClusterId, writeOnly);
+ result = getClusterByClusterIdOrMake(newResourceClusterId, writeOnly);
} else {
ClusterImpl cluster = getClusterByClusterIdOrThrow(newResourceClusterId);
// Current cluster is full — roll over to a newly allocated one.
if (cluster.getNumberOfResources(cs) >= CLUSTER_FILL_SIZE) {
newResourceClusterId = graphSession.newClusterId();
cluster = getClusterByClusterIdOrMake(newResourceClusterId, writeOnly);
}
- return cluster;
+ result = cluster;
}
+ return ensureLoaded(result);
}
void flushCluster(GraphSession graphSession) {
TLongIntHashMap clusterLoadHistogram = new TLongIntHashMap();
int clusterLoadCounter = 0;
- @SuppressWarnings("unchecked")
- public final <T extends ClusterI> T getClusterByResourceKey(final int resourceKey) {
- int clusterKey = ClusterTraitsBase.getClusterKeyFromResourceKeyNoThrow(resourceKey);
- if (ClusterTraitsBase.isVirtualClusterKey(clusterKey))
- throw new RuntimeException("Tried to get a persistent cluster for a virtual resource.");
- ClusterI c = clusterArray[clusterKey];
- if (c == null)
- return null;
- if (c.isLoaded()) {
- if ((counter++ & 4095) == 0)
- refreshImportance((ClusterImpl) c);
- return (T) c;
- }
- if (!(c instanceof ClusterSmall)) {
- Logger.defaultLogError("Proxy must be instance of ClusterSmall");
- return null;
- }
// Loads the given cluster proxy if necessary and returns the loaded instance.
// Factored out of getClusterByResourceKey by this patch so that other callers
// (e.g. getNewResourceCluster) can share the load path.
// NOTE(review): this view elides the method's interior — lines between the
// timing start and the catch handler below are missing from the chunk, so the
// actual load call and the declaration of 'e' are not visible here.
+ private <T extends ClusterI> T ensureLoaded(T c) {
ClusterI cluster;
// Patch widens the cast from ClusterSmall to ClusterImpl, since callers may
// now pass any non-virtual cluster — TODO confirm all callers guarantee this.
- ClusterSmall cs = (ClusterSmall) c;
+ ClusterImpl cs = (ClusterImpl) c;
try {
if(DebugPolicy.REPORT_CLUSTER_LOADING) {
long start = System.nanoTime();
Logger.defaultLogError(e);
if (DebugPolicy.REPORT_CLUSTER_EVENTS)
e.printStackTrace();
// resourceKey is no longer in scope after the refactoring, so the patch
// drops it from the failure message (kept as trailing commented-out code).
- String msg = "Failed to load cluster " + cs.getClusterUID() + " for resource key " + resourceKey
- + " resourceId=" + (((cs.getClusterId() << 16 + (resourceKey & 65535))));
+ String msg = "Failed to load cluster " + cs.getClusterUID();// + " resourceId=" + (((cs.getClusterId() << 16 + (resourceKey & 65535))));
// TODO: this jams the system => needs refactoring.
throw new RuntimeDatabaseException(msg, e);
}
return (T) cluster;
}
+
// Resolves the persistent cluster owning the given resource key, or null when
// no cluster is cached (or the proxy is of an unexpected type). Rejects
// virtual-cluster keys outright. Periodically (every 4096th hit on a loaded
// cluster) refreshes that cluster's importance for the collector policy.
// This '+' block is the patched replacement for the removed original above;
// the load path now goes through ensureLoaded rather than inline code.
+
+ @SuppressWarnings("unchecked")
+ public final <T extends ClusterI> T getClusterByResourceKey(final int resourceKey) {
+ int clusterKey = ClusterTraitsBase.getClusterKeyFromResourceKeyNoThrow(resourceKey);
+ if (ClusterTraitsBase.isVirtualClusterKey(clusterKey))
+ throw new RuntimeException("Tried to get a persistent cluster for a virtual resource.");
+ ClusterI c = clusterArray[clusterKey];
+ if (c == null)
+ return null;
+ if (c.isLoaded()) {
+ if ((counter++ & 4095) == 0)
+ refreshImportance((ClusterImpl) c);
+ return (T) c;
+ }
+ if (!(c instanceof ClusterSmall)) {
+ Logger.defaultLogError("Proxy must be instance of ClusterSmall");
+ return null;
+ }
+ return ensureLoaded((T)c);
+ }
// Synchronous wrapper around the asynchronous load2: blocks on a semaphore
// until the load callback fires, capturing any DatabaseException in ex[0].
// NOTE(review): this view elides the method's interior — the derivation of
// clusterId/clusterKey and the code between acquire() and the closing brace
// are not visible in this chunk.
@SuppressWarnings("unchecked")
final <T extends ClusterI> T checkedGetClusterByResourceKey(final int resourceKey) {
final Semaphore s = new Semaphore(0);
final DatabaseException[] ex = new DatabaseException[1];
// Patch replaces the deprecated Callback anonymous class with a lambda,
// matching load2's new Consumer<DatabaseException> parameter.
- load2(clusterId, clusterKey, new Callback<DatabaseException>() {
- @Override
- public void run(DatabaseException e) {
- ex[0] = e;
- s.release();
- }
+ load2(clusterId, clusterKey, e -> {
+ ex[0] = e;
+ s.release();
});
try {
s.acquire();
}
}
// Loads the cluster identified by clusterId/clusterKey and reports completion
// through the callback: accept(e) on failure, accept(null) on success after
// the cluster has been installed via replaceCluster and the session notified.
// Patch migrates the callback type from the project's Callback<T> to the
// standard java.util.function.Consumer<T> (run -> accept).
// NOTE(review): the method interior is elided in this view — the try/catch
// producing 't' and the declaration/assignment of 'cluster' are not visible.
- public synchronized void load2(long clusterId, int clusterKey, final Callback<DatabaseException> runnable) {
+ public synchronized void load2(long clusterId, int clusterKey, final Consumer<DatabaseException> runnable) {
assert (Constants.ReservedClusterId != clusterId);
e = new DatabaseException("Load cluster failed.", t);
}
if (null == cluster) {
// Load failed: hand the captured exception to the caller.
- runnable.run(e);
+ runnable.accept(e);
return;
}
// Can not be called with null argument.
replaceCluster(cluster);
sessionImpl.onClusterLoaded(clusterId);
// Success: null signals "no error" to the callback.
- runnable.run(null);
+ runnable.accept(null);
}
if(exist != null) return exist;
ClusterI cluster = getClusterByResourceKey(id);
- boolean result = cluster == null ? false : cluster.getImmutable();
-
- markImmutable(cluster, result);
- return result;
-
+ if(cluster == null) {
+ return false;
+ } else {
+ boolean result = cluster.getImmutable();
+ markImmutable(cluster, result);
+ return result;
+ }
+
}
public void markImmutable(ClusterI cluster, boolean value) {
return getClusterKeyByClusterUIDOrMakeProxy(ClusterUID.make(first, second));
}
// Central bookkeeping hook (new in this patch): applies a signed delta 'l' to
// the table-wide cached-size total. The cluster argument is only used by the
// commented-out debug trace; zero deltas are ignored.
+ public void adjustCachedSize(long l, ClusterI cluster) {
+ if (l != 0) {
+ //System.out.println("ClusterTable: adjusting cluster table cached size by " + l + ": " + sizeInBytes + " -> "
+ // + (sizeInBytes + l) + ", for cluster " + cluster.getClusterId() + " (" + cluster + ")");
+ sizeInBytes += l;
+ }
+ }
+
// Debug-only consistency check (new in this patch), compiled out unless
// VALIDATE_SIZE is set: (1) recomputes the total cached size by summing
// every non-null cluster in clusterArray and compares it to the incremental
// sizeInBytes counter; (2) compares importanceMap's entry count against the
// number of loaded, non-write-only, non-empty clusters in the hash map.
// Mismatches are reported to stdout only — no state is corrected here.
+ private void validateSize() {
+ if (!VALIDATE_SIZE)
+ return;
+
+// System.out.println("validating cached cluster sizes: " + sizeInBytes + ", hashMap.size="
+// + clusters.hashMap.size() + ", importanceMap.size=" + importanceMap.size());
+
+ int i = clusterArray.length;
+ long size = 0;
+ for (int j = 0; j < i; ++j) {
+ ClusterI c = clusterArray[j];
+ if (c == null)
+ continue;
+ size += c.getCachedSize();
+ }
+ if (sizeInBytes != size) {
// Only a hard bug when the counter is not marked dirty; a dirty counter
// is expected to drift until recomputed.
+ if (!dirtySizeInBytes)
+ System.out.println("BUG: CACHED CLUSTER SIZE DIFFERS FROM CALCULATED: " + sizeInBytes + " != " + size + ", delta = " + (sizeInBytes - size));
+ //else System.out.println("\"BUG?\": SIZES DIFFER: " + sizeInBytes + " != " + size + ", delta = " + (sizeInBytes - size));
+ }
+
+ int ims = importanceMap.size();
// Single-element array works around the effectively-final requirement for
// the mutation inside the forEachEntry lambda.
+ int[] hms = {0};
+ clusters.hashMap.forEachEntry((cid, c) -> {
+ if (c != null && !c.isWriteOnly() && !c.isEmpty() && c.isLoaded()) {
+ //System.out.println(cid + ": " + c);
+ hms[0]++;
+ }
+ return true;
+ });
+ if (Math.abs(ims-hms[0]) > 0) {
+ System.out.println("BUG2: hashmap and importanceMap sizes differ: " + hms[0] + " != " + ims + ", delta=" + (hms[0] - ims));
+ }
+ }
+
}