Fixed all line endings of the repository

diff --git a/bundles/org.simantics.db.procore/src/org/simantics/db/procore/cluster/FastLZ.java b/bundles/org.simantics.db.procore/src/org/simantics/db/procore/cluster/FastLZ.java
index 460365f229643868667fc8be1bce3e7c8fe1298b..5888db74fcd82b399671178a06850ab1e5ae3360 100644
--- a/bundles/org.simantics.db.procore/src/org/simantics/db/procore/cluster/FastLZ.java
+++ b/bundles/org.simantics.db.procore/src/org/simantics/db/procore/cluster/FastLZ.java
@@ -1,133 +1,133 @@
-/*******************************************************************************\r
- * Copyright (c) 2007, 2010 Association for Decentralized Information Management\r
- * in Industry THTH ry.\r
- * All rights reserved. This program and the accompanying materials\r
- * are made available under the terms of the Eclipse Public License v1.0\r
- * which accompanies this distribution, and is available at\r
- * http://www.eclipse.org/legal/epl-v10.html\r
- *\r
- * Contributors:\r
- *     VTT Technical Research Centre of Finland - initial API and implementation\r
- *******************************************************************************/\r
-package org.simantics.db.procore.cluster;\r
-\r
-import java.nio.ByteBuffer;\r
-import java.nio.ByteOrder;\r
-\r
-import org.simantics.db.exception.InternalException;\r
-\r
-public class FastLZ {\r
-    static private final int InLength = 1<<20;\r
-    \r
-       public static class DecompressStruct {\r
-               public long[] longs;\r
-               public int[] ints;\r
-               public byte[] bytes;\r
-       }\r
-       static class SourceData {\r
-               SourceData(byte[] data) {\r
-                       this.data = data;\r
-                       this.offset = 0;\r
-               }\r
-               int left() {\r
-                       return data.length - offset;\r
-               }\r
-               int readInt() {\r
-                   int inLength = 4; // sizeof(int)\r
-                   assert(left() >= inLength); // must have data for input\r
-                       ByteBuffer buffer = ByteBuffer.wrap(data, offset, inLength);\r
-                       buffer.order(ByteOrder.LITTLE_ENDIAN); // argh!\r
-                       int t = buffer.getInt();\r
-                       offset += inLength;\r
-                       return t;\r
-               }\r
-               void readBytes(byte[] bytes, int aOffset, int inLength) {\r
-                   assert(left() >= inLength); // must have data for input\r
-                       System.arraycopy(data, offset, bytes, aOffset, inLength);\r
-                       offset += inLength;\r
-               }\r
-               byte[] data;\r
-               int offset;\r
-       }\r
-       public static DecompressStruct decompress(byte[] data) throws InternalException {\r
-               assert(data.length > 12); // 3*(table size)\r
-               SourceData sourceData = new SourceData(data);\r
-               DecompressStruct struct = new DecompressStruct();\r
-               int longSize = sourceData.readInt();\r
-               int intSize = sourceData.readInt();\r
-               int byteSize = sourceData.readInt();\r
-               struct.longs = new long[longSize]; \r
-               try {\r
-                       decompress(sourceData, struct.longs);\r
-                       struct.ints = new int[intSize]; \r
-                       decompress(sourceData, struct.ints);\r
-                       struct.bytes = new byte[byteSize];\r
-                       decompress(sourceData, struct.bytes);\r
-               } catch (CompressionException e) {\r
-                       throw new InternalException("Failed to decompress.", e);\r
-               }\r
-               return struct;\r
-       }\r
-       private static void decompress(SourceData sourceData, long[] longs) throws CompressionException {\r
-               int length = longs.length * 8;\r
-               ByteBuffer bytes = ByteBuffer.allocate(length);\r
-               int size = decompressRaw(sourceData, bytes.array());\r
-               assert(size == length);\r
-               bytes.order(ByteOrder.LITTLE_ENDIAN); // argh\r
-               for (int i=0; i<longs.length; ++i)\r
-                       longs[i] = bytes.getLong();\r
-       }\r
-       private static void decompress(SourceData sourceData, int[] ints) throws CompressionException {\r
-               int length = ints.length * 4;\r
-               ByteBuffer bytes = ByteBuffer.allocate(length);\r
-               int size = decompressRaw(sourceData, bytes.array());\r
-               assert(size == length);\r
-               bytes.order(ByteOrder.LITTLE_ENDIAN); // argh\r
-               for (int i=0; i<ints.length; ++i)\r
-                       ints[i] = bytes.getInt();\r
-       }\r
-       private static void decompress(SourceData sourceData, byte[] bytes) throws CompressionException {\r
-               int size = decompressRaw(sourceData, bytes);\r
-               assert(size == bytes.length);\r
-       }\r
-       private static int decompressRaw(SourceData sourceData, byte[] bytes) throws CompressionException {\r
-               int aDstSize = bytes.length;\r
-               if (aDstSize < 1)\r
-                       return aDstSize;\r
-               int dstOffset = 0;\r
-               for (int dstSize = 0;;) {\r
-                       int dataLength = sourceData.readInt(); \r
-                       assert(dataLength <= InLength); // data is written in blocks\r
-                       if (0 == dataLength) // EOF\r
-                               return dstSize;\r
-                       dstSize += dataLength;\r
-               int inLength = sourceData.readInt();\r
-\r
-                       assert(aDstSize >= dstSize); // must have space for data\r
-                       assert(sourceData.left() >= inLength); // must have data for input\r
-                       assert(inLength > 0); // no data no block\r
-                       assert(inLength <= InLength); // input size is block size or less\r
-                       assert(inLength <= dataLength); // input size is never bigger than data size\r
-\r
-                       if (inLength < dataLength) {// block was actually compressed\r
-                               decompress(sourceData.data, sourceData.offset, inLength, bytes, dstOffset, dataLength);\r
-                               sourceData.offset += inLength;\r
-                               dstOffset += dataLength;\r
-               } else {\r
-                       sourceData.readBytes(bytes, dstOffset, inLength);\r
-                       dstOffset += inLength;\r
-               }\r
-           }\r
-       }\r
-       private static void decompress(byte[] in, int inOffset, int inLength, byte[] out, int outOffset, int outLength) throws CompressionException {\r
-           ByteBuffer inBuffer = ByteBuffer.allocateDirect(inLength);\r
-               inBuffer.put(in, inOffset, inLength);\r
-               inBuffer.flip();\r
-               ByteBuffer outBuffer = ByteBuffer.allocateDirect(outLength);\r
-               int inflateSize = org.simantics.fastlz.FastLZ.decompressBuffer(inBuffer, 0, inLength, outBuffer, 0, outLength);\r
-               if (inflateSize != outLength)\r
-                       throw new RuntimeException("Decompression error.");\r
-               //outBuffer.flip();\r
-               outBuffer.get(out, outOffset, outLength);\r
-       }\r
-}\r
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.db.procore.cluster;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import org.simantics.db.exception.InternalException;
+
+public class FastLZ {
+    static private final int InLength = 1<<20;
+    
+       public static class DecompressStruct {
+               public long[] longs;
+               public int[] ints;
+               public byte[] bytes;
+       }
+       static class SourceData {
+               SourceData(byte[] data) {
+                       this.data = data;
+                       this.offset = 0;
+               }
+               int left() {
+                       return data.length - offset;
+               }
+               int readInt() {
+                   int inLength = 4; // sizeof(int)
+                   assert(left() >= inLength); // must have data for input
+                       ByteBuffer buffer = ByteBuffer.wrap(data, offset, inLength);
+                       buffer.order(ByteOrder.LITTLE_ENDIAN); // argh!
+                       int t = buffer.getInt();
+                       offset += inLength;
+                       return t;
+               }
+               void readBytes(byte[] bytes, int aOffset, int inLength) {
+                   assert(left() >= inLength); // must have data for input
+                       System.arraycopy(data, offset, bytes, aOffset, inLength);
+                       offset += inLength;
+               }
+               byte[] data;
+               int offset;
+       }
+       public static DecompressStruct decompress(byte[] data) throws InternalException {
+               assert(data.length > 12); // 3*(table size)
+               SourceData sourceData = new SourceData(data);
+               DecompressStruct struct = new DecompressStruct();
+               int longSize = sourceData.readInt();
+               int intSize = sourceData.readInt();
+               int byteSize = sourceData.readInt();
+               struct.longs = new long[longSize]; 
+               try {
+                       decompress(sourceData, struct.longs);
+                       struct.ints = new int[intSize]; 
+                       decompress(sourceData, struct.ints);
+                       struct.bytes = new byte[byteSize];
+                       decompress(sourceData, struct.bytes);
+               } catch (CompressionException e) {
+                       throw new InternalException("Failed to decompress.", e);
+               }
+               return struct;
+       }
+       private static void decompress(SourceData sourceData, long[] longs) throws CompressionException {
+               int length = longs.length * 8;
+               ByteBuffer bytes = ByteBuffer.allocate(length);
+               int size = decompressRaw(sourceData, bytes.array());
+               assert(size == length);
+               bytes.order(ByteOrder.LITTLE_ENDIAN); // argh
+               for (int i=0; i<longs.length; ++i)
+                       longs[i] = bytes.getLong();
+       }
+       private static void decompress(SourceData sourceData, int[] ints) throws CompressionException {
+               int length = ints.length * 4;
+               ByteBuffer bytes = ByteBuffer.allocate(length);
+               int size = decompressRaw(sourceData, bytes.array());
+               assert(size == length);
+               bytes.order(ByteOrder.LITTLE_ENDIAN); // argh
+               for (int i=0; i<ints.length; ++i)
+                       ints[i] = bytes.getInt();
+       }
+       private static void decompress(SourceData sourceData, byte[] bytes) throws CompressionException {
+               int size = decompressRaw(sourceData, bytes);
+               assert(size == bytes.length);
+       }
+       private static int decompressRaw(SourceData sourceData, byte[] bytes) throws CompressionException {
+               int aDstSize = bytes.length;
+               if (aDstSize < 1)
+                       return aDstSize;
+               int dstOffset = 0;
+               for (int dstSize = 0;;) {
+                       int dataLength = sourceData.readInt(); 
+                       assert(dataLength <= InLength); // data is written in blocks
+                       if (0 == dataLength) // EOF
+                               return dstSize;
+                       dstSize += dataLength;
+               int inLength = sourceData.readInt();
+
+                       assert(aDstSize >= dstSize); // must have space for data
+                       assert(sourceData.left() >= inLength); // must have data for input
+                       assert(inLength > 0); // no data no block
+                       assert(inLength <= InLength); // input size is block size or less
+                       assert(inLength <= dataLength); // input size is never bigger than data size
+
+                       if (inLength < dataLength) {// block was actually compressed
+                               decompress(sourceData.data, sourceData.offset, inLength, bytes, dstOffset, dataLength);
+                               sourceData.offset += inLength;
+                               dstOffset += dataLength;
+               } else {
+                       sourceData.readBytes(bytes, dstOffset, inLength);
+                       dstOffset += inLength;
+               }
+           }
+       }
+       private static void decompress(byte[] in, int inOffset, int inLength, byte[] out, int outOffset, int outLength) throws CompressionException {
+           ByteBuffer inBuffer = ByteBuffer.allocateDirect(inLength);
+               inBuffer.put(in, inOffset, inLength);
+               inBuffer.flip();
+               ByteBuffer outBuffer = ByteBuffer.allocateDirect(outLength);
+               int inflateSize = org.simantics.fastlz.FastLZ.decompressBuffer(inBuffer, 0, inLength, outBuffer, 0, outLength);
+               if (inflateSize != outLength)
+                       throw new RuntimeException("Decompression error.");
+               //outBuffer.flip();
+               outBuffer.get(out, outOffset, outLength);
+       }
+}
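
For context, a minimal caller sketch for the class above (not part of this change): it assumes the bundles providing org.simantics.db.exception.InternalException and org.simantics.fastlz are on the classpath, and compressedValue is a hypothetical input produced by the matching compressor, which is not shown in this file.

import org.simantics.db.exception.InternalException;
import org.simantics.db.procore.cluster.FastLZ;
import org.simantics.db.procore.cluster.FastLZ.DecompressStruct;

public class FastLZUsageSketch {
    // Prints the sizes of the three decompressed tables of a cluster value.
    public static void printSizes(byte[] compressedValue) {
        try {
            // decompress() reads three little-endian ints (long/int/byte counts)
            // and then three block-compressed streams, in that order.
            DecompressStruct struct = FastLZ.decompress(compressedValue);
            System.out.println("longs: " + struct.longs.length);
            System.out.println("ints:  " + struct.ints.length);
            System.out.println("bytes: " + struct.bytes.length);
        } catch (InternalException e) {
            // decompress() wraps any CompressionException into InternalException.
            e.printStackTrace();
        }
    }
}

As decompressRaw() shows, each stream is a sequence of blocks of the form [uncompressed length][stored length][payload], both lengths little-endian ints capped at 1 MiB, with a zero uncompressed length terminating the stream; a block whose stored length equals its uncompressed length is copied verbatim instead of being run through the FastLZ decompressor.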