X-Git-Url: https://gerrit.simantics.org/r/gitweb?p=simantics%2Fplatform.git;a=blobdiff_plain;f=bundles%2Forg.simantics.acorn%2Fsrc%2Forg%2Fsimantics%2Facorn%2Flru%2FClusterInfo.java;fp=bundles%2Forg.simantics.acorn%2Fsrc%2Forg%2Fsimantics%2Facorn%2Flru%2FClusterInfo.java;h=1cd5822674ab4b14518707bc071ee85a2056d3d9;hp=0000000000000000000000000000000000000000;hb=a0687ce02bac73aad9e0d7ddc85625016604f0db;hpb=be5eb160a811a04ecd6364ebb58f24e8218d3f9c

diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java
new file mode 100644
index 000000000..1cd582267
--- /dev/null
+++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java
@@ -0,0 +1,346 @@
+package org.simantics.acorn.lru;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
+
+import org.simantics.acorn.ClusterManager;
+import org.simantics.acorn.Persistable;
+import org.simantics.acorn.cluster.ClusterImpl;
+import org.simantics.acorn.cluster.ClusterSmall;
+import org.simantics.acorn.cluster.ClusterImpl.ClusterTables;
+import org.simantics.acorn.internal.ClusterSupport2;
+import org.simantics.compressions.CompressionCodec;
+import org.simantics.compressions.Compressions;
+import org.simantics.db.ClusterCreator;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.service.Bytes;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.datastructures.Pair;
+
+public class ClusterInfo extends LRUObject<ClusterUID, ClusterInfo> implements Persistable {
+
+	final private ClusterManager manager;
+	private ClusterImpl cluster;
+	public int changeSetId;
+	private ClusterUpdateState updateState;
+
+	public static final String COMPRESSION = "LZ4";
+
+	// Stub
+	public ClusterInfo(ClusterManager manager, LRU<ClusterUID, ClusterInfo> LRU, Path readDirectory, ClusterUID uid, int offset, int length) {
+		super(LRU, uid, readDirectory, uid.toString() + ".cluster", offset, length, false, false);
+		this.manager = manager;
+		this.cluster = null;
+		LRU.map(this);
+	}
+
+	// New
+	public ClusterInfo(ClusterManager manager, LRU<ClusterUID, ClusterInfo> LRU, ClusterImpl cluster) {
+		super(LRU, cluster.getClusterUID(), LRU.getDirectory(), cluster.getClusterUID().toString() + ".cluster", true, true);
+		this.manager = manager;
+		this.cluster = cluster;
+		LRU.insert(this, accessTime);
+		LRU.swap(getKey());
+	}
+
+	public <T> T clone(ClusterUID uid, ClusterCreator creator) throws DatabaseException {
+
+		// Updates have been ensured at this point
+
+		acquireMutex();
+
+		try {
+			if(isResident()) {
+				ClusterTables tables = cluster.store();
+				return creator.create(uid, tables.bytes, tables.ints, tables.longs);
+			}
+		} catch (IOException e) {
+			throw new DatabaseException(e);
+		} catch (Throwable t) {
+			throw new IllegalStateException(t);
+		} finally {
+			releaseMutex();
+		}
+
+		// Ensure pending updates here - this may take some time
+		LRU.waitPending(this, false);
+
+		acquireMutex();
+		try {
+
+			if(isResident()) {
+				ClusterTables tables = cluster.store();
+				return creator.create(uid, tables.bytes, tables.ints, tables.longs);
+			} else {
+				byte[] data = readFile();
+				ClusterTables tables = new ClusterTables();
+				loadCluster(getKey(), manager.support, data, tables);
+				return creator.create(uid, tables.bytes, tables.ints, tables.longs);
+			}
+
+		} catch (IOException e) {
+			throw new DatabaseException(e);
+		} catch (Throwable t) {
+			throw new IllegalStateException(t);
+		} finally {
+			releaseMutex();
+		}
+
+	}
+
+	static class ClusterDecompressor {
+
+		byte[] decompressBuffer = new byte[1024*1024];
+
+		public synchronized ClusterTables readCluster(ClusterUID uid, byte[] compressed) throws IOException {
+
+			int deflatedSize = Bytes.readLE4(compressed, compressed.length-4);
+
+			if(decompressBuffer.length < deflatedSize)
+				decompressBuffer = new byte[Math.max(3*decompressBuffer.length / 2, deflatedSize)];
+
+			CompressionCodec codec = Compressions.get(Compressions.LZ4);
+
+			ByteBuffer input = ByteBuffer.wrap(compressed);
+			ByteBuffer output = ByteBuffer.wrap(decompressBuffer);
+
+			int decompressedSize = codec.decompressBuffer(input, 0, compressed.length-4, output, 0, decompressBuffer.length);
+			assert(decompressedSize <= decompressBuffer.length);
+
+			int byteLength = Bytes.readLE4(decompressBuffer, 0);
+			int intLength = Bytes.readLE4(decompressBuffer, 4);
+			int longLength = Bytes.readLE4(decompressBuffer, 8);
+
+			byte[] bytes = new byte[byteLength];
+			int[] ints = new int[intLength];
+			long[] longs = new long[longLength];
+
+			System.arraycopy(decompressBuffer, 12, bytes, 0, byteLength);
+
+			int offset = 12+byteLength;
+			for(int i=0;i<intLength;i++,offset+=4)
+				ints[i] = Bytes.readLE4(decompressBuffer, offset);
+			for(int i=0;i<longLength;i++,offset+=8)
+				longs[i] = Bytes.readLE8(decompressBuffer, offset);
+
+			ClusterTables result = new ClusterTables();
+			result.bytes = bytes;
+			result.ints = ints;
+			result.longs = longs;
+			return result;
+
+		}
+
+	}
+
+	private static ClusterDecompressor decompressor = new ClusterDecompressor();
+
+	public void loadCluster(ClusterUID uid, ClusterSupport2 support, byte[] data, ClusterTables tables) throws IOException {
+
+		ClusterTables ts = decompressor.readCluster(uid, data);
+		tables.bytes = ts.bytes;
+		tables.ints = ts.ints;
+		tables.longs = ts.longs;
+
+	}
+
+	@Override
+	protected Pair<byte[], Integer> toBytes() {
+
+		try {
+
+			byte[] raw = null;
+
+			if(cluster instanceof ClusterSmall) {
+				raw = cluster.storeBytes();
+			} else {
+
+				ClusterTables tables = cluster.store();
+
+				raw = new byte[12 + tables.bytes.length + (tables.ints.length<<2) + (tables.longs.length<<3)];
+
+				Bytes.writeLE(raw, 0, tables.bytes.length);
+				Bytes.writeLE(raw, 4, tables.ints.length);
+				Bytes.writeLE(raw, 8, tables.longs.length);
+
+				System.arraycopy(tables.bytes, 0, raw, 12, tables.bytes.length);
+				int offset = 12+tables.bytes.length;
+				for(int i=0;i<tables.ints.length;i++,offset+=4)
+					Bytes.writeLE(raw, offset, tables.ints[i]);
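
For orientation: ClusterDecompressor.readCluster() and the toBytes() serializer in the hunk above share a single block layout. The uncompressed block starts with a 12-byte little-endian header giving the byte-, int- and long-table lengths, followed by the byte table, the int table (4 bytes per entry) and the long table (8 bytes per entry). On disk the block is LZ4-compressed, with the decompressed size appended as a trailing little-endian int32, which readCluster() reads from compressed.length-4 to size its reusable buffer. Below is a minimal round-trip sketch of the uncompressed layout only; it uses plain java.nio in place of the Simantics Bytes helpers, omits the LZ4 step, and its class and method names are illustrative, not part of the patch.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Arrays;

// Illustrative sketch, not part of the patch: round-trips the
// uncompressed table layout used by toBytes()/readCluster().
public class ClusterTableLayoutSketch {

    // [byteLen][intLen][longLen] header (3 x LE int32), then the three tables.
    static byte[] encode(byte[] bytes, int[] ints, long[] longs) {
        ByteBuffer buf = ByteBuffer
                .allocate(12 + bytes.length + (ints.length << 2) + (longs.length << 3))
                .order(ByteOrder.LITTLE_ENDIAN);
        buf.putInt(bytes.length).putInt(ints.length).putInt(longs.length);
        buf.put(bytes);
        for (int v : ints) buf.putInt(v);
        for (long v : longs) buf.putLong(v);
        return buf.array(); // the real code LZ4-compresses this and appends the raw size
    }

    // Mirror of readCluster() after decompression: parse header, then the tables.
    static Object[] decode(byte[] raw) {
        ByteBuffer buf = ByteBuffer.wrap(raw).order(ByteOrder.LITTLE_ENDIAN);
        byte[] bytes = new byte[buf.getInt()];
        int[] ints = new int[buf.getInt()];
        long[] longs = new long[buf.getInt()];
        buf.get(bytes);
        for (int i = 0; i < ints.length; i++) ints[i] = buf.getInt();
        for (int i = 0; i < longs.length; i++) longs[i] = buf.getLong();
        return new Object[] { bytes, ints, longs };
    }

    public static void main(String[] args) {
        byte[] raw = encode(new byte[] {1, 2, 3}, new int[] {42}, new long[] {7L, 8L});
        Object[] t = decode(raw);
        System.out.println(Arrays.toString((byte[]) t[0])); // [1, 2, 3]
        System.out.println(Arrays.toString((int[]) t[1]));  // [42]
        System.out.println(Arrays.toString((long[]) t[2])); // [7, 8]
    }
}

Note that readCluster() grows its shared decompressBuffer geometrically (to at least 1.5x its current size, or to the decompressed size if larger) and reuses it across calls; that shared state is why the method is synchronized on the single static ClusterDecompressor instance.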