package org.simantics.acorn.lru;

import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;

import org.simantics.db.service.Bytes;
import org.simantics.utils.datastructures.Pair;

import gnu.trove.list.array.TByteArrayList;

/**
 * LRU-cached per-revision change set information: the raw metadata bytes of
 * the change set plus the ids of the cluster change sets belonging to it.
 *
 * <p>On-disk layout, written by {@link #toBytes()} and read back by
 * {@link #fromFile(byte[])}, all integers little-endian:
 * <pre>
 *   [4: metadata length][metadata bytes]
 *   [4: id count] then per id: [4: id byte length][id bytes]
 * </pre>
 */
public class ChangeSetInfo extends LRUObject<Long, ChangeSetInfo> {

    // Null while the object is not resident; repopulated by fromFile/makeResident.
    private byte[] metadataBytes;
    // Ids of the cluster change sets of this revision; null while not resident.
    private ArrayList<String> clusterChangeSetIds;

    /**
     * Stub constructor: registers an entry whose payload still lives on disk
     * at the given offset/length inside {@code readDir}. Data is loaded lazily
     * on first access via makeResident().
     */
    public ChangeSetInfo(LRU<Long, ChangeSetInfo> LRU, Path readDir, Long revision, int offset, int length) {
        super(LRU, revision, readDir, "clusterStream", offset, length, false, false);
        LRU.map(this);
    }

    /**
     * New-data constructor: creates a resident, dirty entry from in-memory
     * state and inserts it into the LRU.
     */
    public ChangeSetInfo(LRU<Long, ChangeSetInfo> LRU, Long revision, byte[] bytes, ArrayList<String> clusterChangeSetIds) {
        super(LRU, revision, LRU.getDirectory(), "clusterStream", true, true);
        // Fix: the original assigned metadataBytes twice; a single assignment suffices.
        this.metadataBytes = bytes;
        this.clusterChangeSetIds = clusterChangeSetIds;
        LRU.insert(this, accessTime);
    }

    /**
     * Returns the ids of the cluster change sets of this revision.
     * Fix: makeResident() added to match getMetadataBytes() — release() nulls
     * this field, so without reloading the accessor could return null after
     * the entry had been evicted to disk.
     */
    public ArrayList<String> getCSSIds() {
        if (VERIFY) verifyAccess();
        makeResident();
        return clusterChangeSetIds;
    }

    /** Returns the raw metadata bytes, reloading them from disk if evicted. */
    public byte[] getMetadataBytes() {
        if (VERIFY) verifyAccess();
        makeResident();
        return metadataBytes;
    }

    /** Appends {@code value} to {@code bytes} as 4 bytes, little-endian. */
    private static void writeLE(TByteArrayList bytes, int value) {
        bytes.add((byte) (value & 0xFF));
        bytes.add((byte) ((value >>> 8) & 0xFF));
        bytes.add((byte) ((value >>> 16) & 0xFF));
        bytes.add((byte) ((value >>> 24) & 0xFF));
    }

    /**
     * Serializes this entry to the layout documented on the class and
     * releases the in-memory state (the entry becomes non-resident).
     *
     * @return the serialized bytes paired with their length
     */
    @Override
    protected Pair<byte[], Integer> toBytes() {
        TByteArrayList result = new TByteArrayList();
        writeLE(result, metadataBytes.length);
        result.add(metadataBytes);
        writeLE(result, clusterChangeSetIds.size());
        for (String id : clusterChangeSetIds) {
            // NOTE(review): getBytes() uses the platform default charset; fromFile
            // mirrors this, but both should ideally pin UTF-8 — confirm before changing,
            // as existing on-disk data was written with the default charset.
            byte[] bb = id.getBytes();
            writeLE(result, bb.length);
            result.add(bb);
        }
        release();
        byte[] ret = result.toArray();
        return Pair.make(ret, ret.length);
    }

    /** Drops the in-memory payload; it can be reloaded via makeResident(). */
    @Override
    void release() {
        clusterChangeSetIds = null;
        metadataBytes = null;
    }

    /**
     * Deserializes an entry previously written by {@link #toBytes()}; exact
     * inverse of that layout ([metaLen][meta][count]{[idLen][idBytes]}*).
     */
    @Override
    public void fromFile(byte[] data) {
        clusterChangeSetIds = new ArrayList<>();
        int metadataLength = Bytes.readLE4(data, 0);
        metadataBytes = Arrays.copyOfRange(data, 4, 4 + metadataLength);
        int offset = 4 + metadataLength;
        int numberOfChangeSets = Bytes.readLE4(data, offset);
        offset += 4;
        for (int i = 0; i < numberOfChangeSets; i++) {
            int length = Bytes.readLE4(data, offset);
            offset += 4;
            // Platform default charset, matching id.getBytes() in toBytes().
            clusterChangeSetIds.add(new String(Arrays.copyOfRange(data, offset, offset + length)));
            offset += length;
        }
    }

}