The problem was that when the database is doing work, MainProgram.run
runs in pretty much a busy-loop mode, and previously it was always
allocating a new TreeMap on each round and also calling Map.entrySet()
to construct iterators for empty TreeMaps, which eventually
adds up to a somewhat significant amount of allocated memory.
Possibly an even more efficient way would be to have the Map be a
closed-hashing hash map instead and then sort the data for iteration
separately.
gitlab #548
Change-Id: Ib2208dc35b270c9d682362d45f24f1fe01bb8969
this.updateSchedules = new ArrayList[CLUSTER_THREADS];
for(int i=0;i<clusterUpdateThreads.length;i++) {
clusterUpdateThreads[i] = Executors.newSingleThreadExecutor(new ClusterThreadFactory("Cluster Updater " + (i+1), false));
this.updateSchedules = new ArrayList[CLUSTER_THREADS];
for(int i=0;i<clusterUpdateThreads.length;i++) {
clusterUpdateThreads[i] = Executors.newSingleThreadExecutor(new ClusterThreadFactory("Cluster Updater " + (i+1), false));
- updateSchedules[i] = new ArrayList<ClusterUpdateOperation>();
+ updateSchedules[i] = new ArrayList<>();
+ TreeMap<ClusterUID, List<ClusterUpdateOperation>> updates = new TreeMap<>(clusterComparator);
+
- TreeMap<ClusterUID, List<ClusterUpdateOperation>> updates = new TreeMap<ClusterUID, List<ClusterUpdateOperation>>(clusterComparator);
-
+ if (!updates.isEmpty())
+ updates.clear();
operationQueue.pumpUpdates(updates);
if(updates.isEmpty()) {
operationQueue.pumpUpdates(updates);
if(updates.isEmpty()) {
for(int i=0;i<CLUSTER_THREADS;i++)
updateSchedules[i].clear();
for(int i=0;i<CLUSTER_THREADS;i++)
updateSchedules[i].clear();
+ if (updates.isEmpty())
+ return;
+
final Semaphore s = new Semaphore(0);
for(Map.Entry<ClusterUID, List<ClusterUpdateOperation>> entry : updates.entrySet()) {
final Semaphore s = new Semaphore(0);
for(Map.Entry<ClusterUID, List<ClusterUpdateOperation>> entry : updates.entrySet()) {