From: Tuukka Lehtonen
Date: Sun, 5 Nov 2017 11:52:32 +0000 (+0200)
Subject: Some fixes/cleanup for cluster table size caching logic.
X-Git-Tag: v1.31.0~58
X-Git-Url: https://gerrit.simantics.org/r/gitweb?a=commitdiff_plain;h=0da8a106b00aead3a4bcdbaf1e911b4eb1845a24;p=simantics%2Fplatform.git

Some fixes/cleanup for cluster table size caching logic.

The previous code did not keep the cached size properly up to date,
which led the DB client to believe that the consumed cluster memory is
larger than it actually is. The client eventually winds up in a state
where the LRU importanceMap is almost constantly empty and clusters are
thrown away whenever the next cluster is loaded, because of this
misconception of used cluster memory.

The code is still not perfect - I was still able to reproduce the
thrashing with A6 model imports, but not as heavily as before. With
these changes I was able to import models with far more initial
conditions stored than was previously possible.

refs #7598

Change-Id: I039fa2062908c05a61efb28e695daec01afd1725
---
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java
index 57dfe9f41..f20b38456 100644
--- a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java
+++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java
@@ -245,11 +245,9 @@ public class ClusterInfo extends LRUObject implements P
     }
 
     public ClusterImpl getForUpdate() throws SDBException {
+        acquireMutex();
         try {
-            acquireMutex();
-
             assert(updateState != null);
-
             makeResident();
             setDirty(true);
             updateState.beginUpdate();
diff --git a/bundles/org.simantics.db.layer0/src/org/simantics/db/layer0/util/SessionGarbageCollection.java b/bundles/org.simantics.db.layer0/src/org/simantics/db/layer0/util/SessionGarbageCollection.java
index 26e049db6..c168e2286 100644
--- a/bundles/org.simantics.db.layer0/src/org/simantics/db/layer0/util/SessionGarbageCollection.java
+++ b/bundles/org.simantics.db.layer0/src/org/simantics/db/layer0/util/SessionGarbageCollection.java
@@ -25,7 +25,6 @@ import org.simantics.db.service.ClusterControl;
 import org.simantics.db.service.LifecycleSupport;
 import org.simantics.db.service.QueryControl;
 import org.simantics.utils.DataContainer;
-import org.simantics.utils.datastructures.Callback;
 
 /**
  * @author Tuukka Lehtonen
@@ -132,15 +131,12 @@ public class SessionGarbageCollection {
                 Logger.defaultLogError(e);
             }
         } else {
-            session.asyncRequest(request, new Callback<DatabaseException>() {
-                @Override
-                public void run(DatabaseException e) {
-                    if (e != null) {
-                        if (errorCallback != null)
-                            errorCallback.accept(e);
-                        else
-                            Logger.defaultLogError(e);
-                    }
+            session.asyncRequest(request, e -> {
+                if (e != null) {
+                    if (errorCallback != null)
+                        errorCallback.accept(e);
+                    else
+                        Logger.defaultLogError(e);
                 }
             });
         }
diff --git a/bundles/org.simantics.db.procore/src/fi/vtt/simantics/procore/internal/ClusterTable.java b/bundles/org.simantics.db.procore/src/fi/vtt/simantics/procore/internal/ClusterTable.java
index 123f346a4..99aa9004f 100644
--- a/bundles/org.simantics.db.procore/src/fi/vtt/simantics/procore/internal/ClusterTable.java
+++ b/bundles/org.simantics.db.procore/src/fi/vtt/simantics/procore/internal/ClusterTable.java
@@ -18,6 +18,7 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.Semaphore;
+import java.util.function.Consumer;
 
 import org.simantics.databoard.Bindings;
 import org.simantics.db.ClusterCreator;
@@ -46,7 +47,6 @@ import org.simantics.db.service.ClusterCollectorPolicy;
 import org.simantics.db.service.ClusterCollectorPolicy.CollectorCluster;
 import org.simantics.db.service.ClusterUID;
 import org.simantics.utils.Development;
-import org.simantics.utils.datastructures.Callback;
 
 import fi.vtt.simantics.procore.DebugPolicy;
 import fi.vtt.simantics.procore.internal.ClusterControlImpl.ClusterStateImpl;
@@ -60,6 +60,8 @@ import gnu.trove.set.hash.TIntHashSet;
 
 public final class ClusterTable implements IClusterTable {
 
+    private static final boolean VALIDATE_SIZE = false;
+
     int maximumBytes = 128 * 1024 * 1024;
     int limit = (int)(0.8*(double)maximumBytes);
 
@@ -297,10 +299,14 @@ public final class ClusterTable implements IClusterTable {
         importanceMap.put(cluster.getImportance(), new ImportanceEntry((ClusterImpl)cluster));
         if(collectorPolicy != null)
             collectorPolicy.added((ClusterImpl)cluster);
-        if (!dirtySizeInBytes && !existing.isLoaded()) {
-            sizeInBytes += cluster.getCachedSize();
+        if (!dirtySizeInBytes) {
+            if (existing != cluster) {
+                adjustCachedSize(-existing.getCachedSize(), existing);
+            }
+            // This will update sizeInBytes through adjustCachedSize
+            cluster.getCachedSize();
         }
-
+        validateSize();
     }
 
     synchronized void release(CollectorCluster cluster) {
@@ -309,6 +315,8 @@ public final class ClusterTable implements IClusterTable {
     }
 
     synchronized void release(long clusterId) {
+        //System.out.println("ClusterTable.release(" + clusterId + "): " + sizeInBytes);
+        //validateSize();
         ClusterImpl clusterImpl = clusters.getClusterByClusterId(clusterId);
         if (null == clusterImpl)
             return;
@@ -321,7 +329,7 @@ public final class ClusterTable implements IClusterTable {
         if (sessionImpl.writeState != null)
             sessionImpl.clusterStream.flush(clusterImpl.clusterUID);
         if (!dirtySizeInBytes) {
-            sizeInBytes -= clusterImpl.getCachedSize();
+            adjustCachedSize(-clusterImpl.getCachedSize(), clusterImpl);
         }
     }
 
@@ -941,12 +949,9 @@ public final class ClusterTable implements IClusterTable {
         final Semaphore s = new Semaphore(0);
         final DatabaseException[] ex = new DatabaseException[1];
 
-        load2(clusterId, clusterKey, new Callback<DatabaseException>() {
-            @Override
-            public void run(DatabaseException e) {
-                ex[0] = e;
-                s.release();
-            }
+        load2(clusterId, clusterKey, e -> {
+            ex[0] = e;
+            s.release();
         });
         try {
             s.acquire();
@@ -1019,7 +1024,7 @@ public final class ClusterTable implements IClusterTable {
         }
     }
 
-    public synchronized void load2(long clusterId, int clusterKey, final Callback<DatabaseException> runnable) {
+    public synchronized void load2(long clusterId, int clusterKey, final Consumer<DatabaseException> runnable) {
 
         assert (Constants.ReservedClusterId != clusterId);
 
@@ -1061,7 +1066,7 @@ public final class ClusterTable implements IClusterTable {
                 e = new DatabaseException("Load cluster failed.", t);
             }
             if (null == cluster) {
-                runnable.run(e);
+                runnable.accept(e);
                 return;
             }
 
@@ -1070,7 +1075,7 @@ public final class ClusterTable implements IClusterTable {
             // Can not be called with null argument.
             replaceCluster(cluster);
             sessionImpl.onClusterLoaded(clusterId);
-            runnable.run(null);
+            runnable.accept(null);
 
         }
 
@@ -1162,4 +1167,47 @@ public final class ClusterTable implements IClusterTable {
         return getClusterKeyByClusterUIDOrMakeProxy(ClusterUID.make(first, second));
     }
 
+    public void adjustCachedSize(long l, ClusterI cluster) {
+        if (l != 0) {
+            //System.out.println("ClusterTable: adjusting cluster table cached size by " + l + ": " + sizeInBytes + " -> "
+            //        + (sizeInBytes + l) + ", for cluster " + cluster.getClusterId() + " (" + cluster + ")");
+            sizeInBytes += l;
+        }
+    }
+
+    private void validateSize() {
+        if (!VALIDATE_SIZE)
+            return;
+
+//        System.out.println("validating cached cluster sizes: " + sizeInBytes + ", hashMap.size="
+//                + clusters.hashMap.size() + ", importanceMap.size=" + importanceMap.size());
+
+        int i = clusterArray.length;
+        long size = 0;
+        for (int j = 0; j < i; ++j) {
+            ClusterI c = clusterArray[j];
+            if (c == null)
+                continue;
+            size += c.getCachedSize();
+        }
+        if (sizeInBytes != size) {
+            if (!dirtySizeInBytes)
+                System.out.println("BUG: CACHED CLUSTER SIZE DIFFERS FROM CALCULATED: " + sizeInBytes + " != " + size + ", delta = " + (sizeInBytes - size));
+            //else System.out.println("\"BUG?\": SIZES DIFFER: " + sizeInBytes + " != " + size + ", delta = " + (sizeInBytes - size));
+        }
+
+        int ims = importanceMap.size();
+        int[] hms = {0};
+        clusters.hashMap.forEachEntry((cid, c) -> {
+            if (c != null && !c.isWriteOnly() && !c.isEmpty() && c.isLoaded()) {
+                //System.out.println(cid + ": " + c);
+                hms[0]++;
+            }
+            return true;
+        });
+        if (Math.abs(ims-hms[0]) > 0) {
+            System.out.println("BUG2: hashmap and importanceMap sizes differ: " + hms[0] + " != " + ims + ", delta=" + (hms[0] - ims));
+        }
+    }
+
 }
diff --git a/bundles/org.simantics.db.procore/src/org/simantics/db/procore/cluster/ClusterImpl.java b/bundles/org.simantics.db.procore/src/org/simantics/db/procore/cluster/ClusterImpl.java
index 1baef88ac..4c8e19853 100644
--- a/bundles/org.simantics.db.procore/src/org/simantics/db/procore/cluster/ClusterImpl.java
+++ b/bundles/org.simantics.db.procore/src/org/simantics/db/procore/cluster/ClusterImpl.java
@@ -118,12 +118,17 @@ public abstract class ClusterImpl extends ClusterBase implements Modifier, Colle
 
     public long getCachedSize() {
         if(dirtySizeInBytes) {
+            long oldSize = sizeInBytes;
+            if (oldSize > 0)
+                clusterTable.adjustCachedSize(-oldSize, this);
             try {
                 sizeInBytes = getUsedSpace();
-                //System.err.println("recomputed size of cluster " + getClusterId() + " => " + sizeInBytes);
+                //System.out.println("recomputed size of cluster " + getClusterId() + ": " + oldSize + " => " + sizeInBytes);
            } catch (DatabaseException e) {
                 Logger.defaultLogError(e);
             }
+            if (sizeInBytes != 0)
+                clusterTable.adjustCachedSize(sizeInBytes, this);
             dirtySizeInBytes = false;
         }
         return sizeInBytes;
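
The core of the fix is a delta-based accounting discipline: whenever a cluster's cached size changes, the previously accounted value is first subtracted from the table-wide total and the recomputed value is then added back, so sizeInBytes cannot drift upward. The following is a minimal, hypothetical Java sketch of that discipline only; the class and method names (SizeTrackingTable, TrackedCluster, adjust, resize) are invented stand-ins for ClusterTable.adjustCachedSize and ClusterImpl.getCachedSize, not code from this patch.

import java.util.ArrayList;
import java.util.List;

class SizeTrackingTable {
    private long sizeInBytes;                 // cached total, kept in sync via adjust()
    private final List<TrackedCluster> clusters = new ArrayList<>();

    void adjust(long delta) {
        sizeInBytes += delta;                 // every size change goes through here
    }

    long totalSize() {
        return sizeInBytes;
    }

    TrackedCluster add(long initialSize) {
        TrackedCluster c = new TrackedCluster(this, initialSize);
        clusters.add(c);
        adjust(initialSize);
        return c;
    }
}

class TrackedCluster {
    private final SizeTrackingTable table;
    private long size;

    TrackedCluster(SizeTrackingTable table, long size) {
        this.table = table;
        this.size = size;
    }

    // Retract the old size, store the recomputed one, add it back.
    void resize(long newSize) {
        table.adjust(-size);
        size = newSize;
        table.adjust(size);
    }
}

class SizeTrackingDemo {
    public static void main(String[] args) {
        SizeTrackingTable table = new SizeTrackingTable();
        TrackedCluster c = table.add(100);
        c.resize(250);                        // total goes 100 -> 250, not 100 -> 350
        System.out.println(table.totalSize()); // prints 250
    }
}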