1 package net.jpountz.lz4;
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 import java.io.FilterOutputStream;
18 import java.io.IOException;
19 import java.io.OutputStream;
20 import java.util.zip.Checksum;
22 import net.jpountz.util.SafeUtils;
23 import net.jpountz.xxhash.StreamingXXHash32;
24 import net.jpountz.xxhash.XXHashFactory;
/**
 * Streaming LZ4 (not compatible with the LZ4 Frame format).
 * This class compresses data into fixed-size blocks of compressed data.
 *
 * @see LZ4BlockInputStream
 */
32 public final class LZ4BlockOutputStream extends FilterOutputStream {
34 static final byte[] MAGIC = new byte[] { 'L', 'Z', '4', 'B', 'l', 'o', 'c', 'k' };
35 static final int MAGIC_LENGTH = MAGIC.length;
37 static final int HEADER_LENGTH =
38 MAGIC_LENGTH // magic bytes
40 + 4 // compressed length
41 + 4 // decompressed length
44 static final int COMPRESSION_LEVEL_BASE = 10;
45 static final int MIN_BLOCK_SIZE = 64;
46 static final int MAX_BLOCK_SIZE = 1 << (COMPRESSION_LEVEL_BASE + 0x0F);
48 static final int COMPRESSION_METHOD_RAW = 0x10;
49 static final int COMPRESSION_METHOD_LZ4 = 0x20;
51 static final int DEFAULT_SEED = 0x9747b28c;
53 private static int compressionLevel(int blockSize) {
54 if (blockSize < MIN_BLOCK_SIZE) {
55 throw new IllegalArgumentException("blockSize must be >= " + MIN_BLOCK_SIZE + ", got " + blockSize);
56 } else if (blockSize > MAX_BLOCK_SIZE) {
57 throw new IllegalArgumentException("blockSize must be <= " + MAX_BLOCK_SIZE + ", got " + blockSize);
59 int compressionLevel = 32 - Integer.numberOfLeadingZeros(blockSize - 1); // ceil of log2
60 assert (1 << compressionLevel) >= blockSize;
61 assert blockSize * 2 > (1 << compressionLevel);
62 compressionLevel = Math.max(0, compressionLevel - COMPRESSION_LEVEL_BASE);
63 assert compressionLevel >= 0 && compressionLevel <= 0x0F;
64 return compressionLevel;
67 private final int blockSize;
68 private final int compressionLevel;
69 private final LZ4Compressor compressor;
70 private final Checksum checksum;
71 private final byte[] buffer;
72 private final byte[] compressedBuffer;
73 private final boolean syncFlush;
74 private boolean finished;
78 * Create a new {@link OutputStream} with configurable block size. Large
79 * blocks require more memory at compression and decompression time but
80 * should improve the compression ratio.
82 * @param out the {@link OutputStream} to feed
83 * @param blockSize the maximum number of bytes to try to compress at once,
84 * must be >= 64 and <= 32 M
85 * @param compressor the {@link LZ4Compressor} instance to use to compress
87 * @param checksum the {@link Checksum} instance to use to check data for
89 * @param syncFlush true if pending data should also be flushed on {@link #flush()}
91 public LZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor compressor, Checksum checksum, boolean syncFlush) {
93 this.blockSize = blockSize;
94 this.compressor = compressor;
95 this.checksum = checksum;
96 this.compressionLevel = compressionLevel(blockSize);
97 this.buffer = new byte[blockSize];
98 final int compressedBlockSize = HEADER_LENGTH + compressor.maxCompressedLength(blockSize);
99 this.compressedBuffer = new byte[compressedBlockSize];
100 this.syncFlush = syncFlush;
103 System.arraycopy(MAGIC, 0, compressedBuffer, 0, MAGIC_LENGTH);
107 * Create a new instance which checks stream integrity using
108 * {@link StreamingXXHash32} and doesn't sync flush.
109 * @see #LZ4BlockOutputStream(OutputStream, int, LZ4Compressor, Checksum, boolean)
110 * @see StreamingXXHash32#asChecksum()
112 public LZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor compressor) {
113 this(out, blockSize, compressor, XXHashFactory.fastestInstance().newStreamingHash32(DEFAULT_SEED).asChecksum(), false);
117 * Create a new instance which compresses with the standard LZ4 compression
119 * @see #LZ4BlockOutputStream(OutputStream, int, LZ4Compressor)
120 * @see LZ4Factory#fastCompressor()
122 public LZ4BlockOutputStream(OutputStream out, int blockSize) {
123 this(out, blockSize, LZ4Factory.fastestInstance().fastCompressor());
127 * Create a new instance which compresses into blocks of 64 KB.
128 * @see #LZ4BlockOutputStream(OutputStream, int)
130 public LZ4BlockOutputStream(OutputStream out) {
134 private void ensureNotFinished() {
136 throw new IllegalStateException("This stream is already closed");
141 public void write(int b) throws IOException {
143 if (o == blockSize) {
146 buffer[o++] = (byte) b;
150 public void write(byte[] b, int off, int len) throws IOException {
151 SafeUtils.checkRange(b, off, len);
154 while (o + len > blockSize) {
155 final int l = blockSize - o;
156 System.arraycopy(b, off, buffer, o, blockSize - o);
162 System.arraycopy(b, off, buffer, o, len);
167 public void write(byte[] b) throws IOException {
169 write(b, 0, b.length);
173 public void close() throws IOException {
183 private void flushBufferedData() throws IOException {
188 checksum.update(buffer, 0, o);
189 final int check = (int) checksum.getValue();
190 int compressedLength = compressor.compress(buffer, 0, o, compressedBuffer, HEADER_LENGTH);
191 final int compressMethod;
192 if (compressedLength >= o) {
193 compressMethod = COMPRESSION_METHOD_RAW;
194 compressedLength = o;
195 System.arraycopy(buffer, 0, compressedBuffer, HEADER_LENGTH, o);
197 compressMethod = COMPRESSION_METHOD_LZ4;
200 compressedBuffer[MAGIC_LENGTH] = (byte) (compressMethod | compressionLevel);
201 writeIntLE(compressedLength, compressedBuffer, MAGIC_LENGTH + 1);
202 writeIntLE(o, compressedBuffer, MAGIC_LENGTH + 5);
203 writeIntLE(check, compressedBuffer, MAGIC_LENGTH + 9);
204 assert MAGIC_LENGTH + 13 == HEADER_LENGTH;
205 out.write(compressedBuffer, 0, HEADER_LENGTH + compressedLength);
210 * Flush this compressed {@link OutputStream}.
212 * If the stream has been created with <code>syncFlush=true</code>, pending
213 * data will be compressed and appended to the underlying {@link OutputStream}
214 * before calling {@link OutputStream#flush()} on the underlying stream.
215 * Otherwise, this method just flushes the underlying stream, so pending
216 * data might not be available for reading until {@link #finish()} or
217 * {@link #close()} is called.
220 public void flush() throws IOException {
230 * Same as {@link #close()} except that it doesn't close the underlying stream.
231 * This can be useful if you want to keep on using the underlying stream.
233 public void finish() throws IOException {
236 compressedBuffer[MAGIC_LENGTH] = (byte) (COMPRESSION_METHOD_RAW | compressionLevel);
237 writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 1);
238 writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 5);
239 writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 9);
240 assert MAGIC_LENGTH + 13 == HEADER_LENGTH;
241 out.write(compressedBuffer, 0, HEADER_LENGTH);
246 private static void writeIntLE(int i, byte[] buf, int off) {
247 buf[off++] = (byte) i;
248 buf[off++] = (byte) (i >>> 8);
249 buf[off++] = (byte) (i >>> 16);
250 buf[off++] = (byte) (i >>> 24);
254 public String toString() {
255 return getClass().getSimpleName() + "(out=" + out + ", blockSize=" + blockSize
256 + ", compressor=" + compressor + ", checksum=" + checksum + ")";