/*
   LZ4 - Fast LZ compression algorithm
   Copyright (C) 2011-2012, Yann Collet.
   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
   - LZ4 source repository : http://code.google.com/p/lz4/
*/
//**************************************
// Tuning parameters
//**************************************
// COMPRESSIONLEVEL :
// Increasing this value improves compression ratio
// Lowering this value reduces memory usage
// Reduced memory usage typically improves speed, due to cache effect (ex : L1 32KB for Intel, L1 64KB for AMD)
// Memory usage formula : N->2^(N+2) Bytes (examples : 12 -> 16KB ; 17 -> 512KB)
#define COMPRESSIONLEVEL 12
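// (Worked example, added for clarity : with COMPRESSIONLEVEL 12 the hash table holds
//  2^12 = 4096 entries of 4 bytes each (a U32 offset on 64-bit builds, a pointer on
//  32-bit builds), i.e. 4096 * 4 = 16384 bytes = 16KB, matching the 2^(N+2) formula.)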
// NOTCOMPRESSIBLE_CONFIRMATION :
// Decreasing this value will make the algorithm skip faster over data segments considered "incompressible"
// This may decrease compression ratio dramatically, but will be faster on incompressible data
// Increasing this value will make the algorithm search longer before declaring a segment "incompressible"
// This could improve compression a bit, but will be slower on incompressible data
// The default value (6) is recommended
#define NOTCOMPRESSIBLE_CONFIRMATION 6

// LZ4_COMPRESSMIN :
// The compression function will *fail* if it does not manage to shrink the input by at least LZ4_COMPRESSMIN bytes
// Because the compression function then stops early, this results in a speed gain
// The output, however, is unusable : the compression function's result will be zero
// Default : 0 = disabled
#define LZ4_COMPRESSMIN 0
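// (Caller-side sketch, illustrative only : with LZ4_COMPRESSMIN above 0, a return value of 0
//  means the data could not be shrunk by at least LZ4_COMPRESSMIN bytes, and the caller is
//  expected to store the block uncompressed instead. Variable names below are hypothetical.)
//
//      int cSize = LZ4_compress(src, dst, srcSize);
//      if (cSize == 0) { memcpy(dst, src, srcSize); cSize = srcSize; }   // store raw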
// BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
// This will provide a performance boost on big-endian CPUs, but the resulting compressed stream will be incompatible with little-endian CPUs.
// You can set this option to 1 in situations where data will stay within a closed environment
// This option is useless on little-endian CPUs (such as x86)
//#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1
//**************************************
// CPU Feature Detection
//**************************************
// 32 or 64 bits ?
#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || defined(__LP64__) || defined(_LP64) )   // Detects 64-bit mode
# define LZ4_ARCH64 1
#else
# define LZ4_ARCH64 0
#endif

// Little Endian or Big Endian ?
// Note : overwrite the below #define if you know your architecture endianness
#if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || defined(__PPC) || defined(PPC) || defined(__powerpc__) || defined(__powerpc) || defined(powerpc) || ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))) )
# define LZ4_BIG_ENDIAN 1
#else
// Little Endian assumed. PDP Endian and other very rare endian formats are unsupported.
#endif

// Unaligned memory access is automatically enabled for "common" CPUs, such as x86.
// For other CPUs, the compiler will be more cautious, and insert extra code to ensure aligned access is respected.
// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance.
#if defined(__ARM_FEATURE_UNALIGNED)
# define LZ4_FORCE_UNALIGNED_ACCESS 1
#endif

// Define this parameter if your target system or compiler does not support hardware bit count
#if defined(_MSC_VER) && defined(_WIN32_WCE)   // Visual Studio for Windows CE does not support hardware bit count
# define LZ4_FORCE_SW_BITCOUNT
#endif
//**************************************
// Compiler Options
//**************************************
#if __STDC_VERSION__ >= 199901L   // C99
  /* "restrict" is a known keyword */
#else
# define restrict  // Disable restrict
#endif

#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#ifdef _MSC_VER   // Visual Studio
# define inline __forceinline   // Visual is not C99, but supports some kind of inline
# if LZ4_ARCH64   // 64-bit
#  pragma intrinsic(_BitScanForward64)   // For Visual 2005
#  pragma intrinsic(_BitScanReverse64)   // For Visual 2005
# else
#  pragma intrinsic(_BitScanForward)     // For Visual 2005
#  pragma intrinsic(_BitScanReverse)     // For Visual 2005
# endif
#endif

#ifdef _MSC_VER
# define lz4_bswap16(x) _byteswap_ushort(x)
#else
# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
#endif

#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
# define expect(expr,value)    (__builtin_expect ((expr),(value)) )
#else
# define expect(expr,value)    (expr)
#endif

#define likely(expr)     expect((expr) != 0, 1)
#define unlikely(expr)   expect((expr) != 0, 0)
//**************************************
// Includes
//**************************************
#include <stdlib.h>   // for malloc
#include <string.h>   // for memset
//**************************************
// Basic Types
//**************************************
#if defined(_MSC_VER)   // Visual Studio does not support 'stdint' natively
# define BYTE unsigned __int8
# define U16  unsigned __int16
# define U32  unsigned __int32
# define S32  __int32
# define U64  unsigned __int64
#else
# include <stdint.h>
# define BYTE uint8_t
# define U16  uint16_t
# define U32  uint32_t
# define S32  int32_t
# define U64  uint64_t
#endif

#ifndef LZ4_FORCE_UNALIGNED_ACCESS
# pragma pack(push, 1)
#endif

typedef struct _U16_S { U16 v; } U16_S;
typedef struct _U32_S { U32 v; } U32_S;
typedef struct _U64_S { U64 v; } U64_S;

#ifndef LZ4_FORCE_UNALIGNED_ACCESS
# pragma pack(pop)
#endif

#define A64(x) (((U64_S *)(x))->v)
#define A32(x) (((U32_S *)(x))->v)
#define A16(x) (((U16_S *)(x))->v)
//**************************************
// Constants
//**************************************
#define MINMATCH 4

#define HASH_LOG COMPRESSIONLEVEL
#define HASHTABLESIZE (1 << HASH_LOG)
#define HASH_MASK (HASHTABLESIZE - 1)

#define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION>2?NOTCOMPRESSIBLE_CONFIRMATION:2)
#define STACKLIMIT 13
#define HEAPMODE (HASH_LOG>STACKLIMIT)   // Defines whether memory is allocated on the stack (local variable) or on the heap (malloc()).
#define COPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (COPYLENGTH+MINMATCH)
#define MINLENGTH (MFLIMIT+1)

#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)

#define ML_BITS 4
#define ML_MASK ((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)
//**************************************
// Architecture-specific macros
//**************************************
#if LZ4_ARCH64   // 64-bit
# define STEPSIZE 8
# define UARCH U64
# define AARCH A64
# define LZ4_COPYSTEP(s,d)       A64(d) = A64(s); d+=8; s+=8;
# define LZ4_COPYPACKET(s,d)     LZ4_COPYSTEP(s,d)
# define LZ4_SECURECOPY(s,d,e)   if (d<e) LZ4_WILDCOPY(s,d,e)
# define HTYPE U32
# define INITBASE(base)          const BYTE* const base = ip
#else   // 32-bit
# define STEPSIZE 4
# define UARCH U32
# define AARCH A32
# define LZ4_COPYSTEP(s,d)       A32(d) = A32(s); d+=4; s+=4;
# define LZ4_COPYPACKET(s,d)     LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);
# define LZ4_SECURECOPY          LZ4_WILDCOPY
# define HTYPE const BYTE*
# define INITBASE(base)          const int base = 0
#endif

#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
# define LZ4_WRITE_LITTLEENDIAN_16(p,i)  { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
#else   // Little Endian
# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
# define LZ4_WRITE_LITTLEENDIAN_16(p,v)  { A16(p) = v; p+=2; }
#endif
//**************************************
// Local structures
//**************************************
struct refTables
{
    HTYPE hashTable[HASHTABLESIZE];
};
//**************************************
// Macros
//**************************************
#define LZ4_HASH_FUNCTION(i)   (((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG))
#define LZ4_HASH_VALUE(p)      LZ4_HASH_FUNCTION(A32(p))
#define LZ4_WILDCOPY(s,d,e)    do { LZ4_COPYPACKET(s,d) } while (d<e);
#define LZ4_BLINDCOPY(s,d,l)   { BYTE* e=(d)+l; LZ4_WILDCOPY(s,d,e); d=e; }
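// (Worked example, added for clarity : LZ4_HASH_VALUE reads the 4 bytes at p as a U32,
//  multiplies by the Knuth multiplicative constant 2654435761, and keeps the top HASH_LOG
//  bits of the 32-bit product, i.e. an index in [0, HASHTABLESIZE-1].)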
//****************************
// Private functions
//****************************
#if LZ4_ARCH64

inline static int LZ4_NbCommonBytes (register U64 val)
{
#if defined(LZ4_BIG_ENDIAN)
    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r = 0;
    _BitScanReverse64( &r, val );
    return (int)(r>>3);
    #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_clzll(val) >> 3);
    #else
    int r;
    if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
    if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
    r += (!val);
    return r;
    #endif
#else
    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r = 0;
    _BitScanForward64( &r, val );
    return (int)(r>>3);
    #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_ctzll(val) >> 3);
    #else
    static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
    return DeBruijnBytePos[((U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58];
    #endif
#endif
}

#else
inline static int LZ4_NbCommonBytes (register U32 val)
{
#if defined(LZ4_BIG_ENDIAN)
    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r = 0;
    _BitScanReverse( &r, val );
    return (int)(r>>3);
    #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_clz(val) >> 3);
    #else
    int r;
    if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
    r += (!val);
    return r;
    #endif
#else
    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r = 0;
    _BitScanForward( &r, val );
    return (int)(r>>3);
    #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_ctz(val) >> 3);
    #else
    static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
    return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
    #endif
#endif
}

#endif
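// (Worked example, added for clarity : on a little-endian machine, if ref and ip agree on
//  their first 3 bytes and differ on the 4th, the XOR 'diff' has its lowest set bit in
//  byte 3, so LZ4_NbCommonBytes(diff) returns 3 : the number of matching leading bytes.)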
//****************************
// Public functions
//****************************

int LZ4_compressBound(int isize)
{
    return (isize + (isize/255) + 16);
}
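// Usage sketch (illustrative only, not part of the original sources) : LZ4_compressBound()
// returns a worst-case output size, so a destination buffer of that size can never overflow.
// Variable names below are hypothetical.
//
//      const char* src     = "some input data";
//      int         srcSize = 15;
//      char*       dst     = (char*) malloc(LZ4_compressBound(srcSize));
//      int         cSize   = LZ4_compress(src, dst, srcSize);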
//******************************
// Compression functions
//******************************

int LZ4_compressCtx(void** ctx,
                 const char* source,
                 char* dest,
                 int isize)
{
#if HEAPMODE
    struct refTables *srt = (struct refTables *) (*ctx);
    HTYPE* HashTable;
#else
    HTYPE HashTable[HASHTABLESIZE] = {0};
#endif

    const BYTE* ip = (BYTE*) source;
    INITBASE(base);
    const BYTE* anchor = ip;
    const BYTE* const iend = ip + isize;
    const BYTE* const mflimit = iend - MFLIMIT;
#define matchlimit (iend - LASTLITERALS)

    BYTE* op = (BYTE*) dest;

    int len, length;
    const int skipStrength = SKIPSTRENGTH;
    U32 forwardH;

    // Init
    if (isize<MINLENGTH) goto _last_literals;
#if HEAPMODE
    if (*ctx == NULL)
    {
        srt = (struct refTables *) malloc ( sizeof(struct refTables) );
        *ctx = (void*) srt;
    }
    HashTable = (HTYPE*)(srt->hashTable);
    memset((void*)HashTable, 0, sizeof(srt->hashTable));
#endif

    // First Byte
    HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
    ip++; forwardH = LZ4_HASH_VALUE(ip);

    // Main Loop
    for ( ; ; )
    {
        int findMatchAttempts = (1U << skipStrength) + 3;
        const BYTE* forwardIp = ip;
        const BYTE* ref;
        BYTE* token;

        // Find a match
        do {
            U32 h = forwardH;
            int step = findMatchAttempts++ >> skipStrength;
            ip = forwardIp;
            forwardIp = ip + step;

            if unlikely(forwardIp > mflimit) { goto _last_literals; }

            forwardH = LZ4_HASH_VALUE(forwardIp);
            ref = base + HashTable[h];
            HashTable[h] = ip - base;

        } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));

        // Catch up
        while ((ip>anchor) && (ref>(BYTE*)source) && unlikely(ip[-1]==ref[-1])) { ip--; ref--; }

        // Encode Literal length
        length = ip - anchor;
        token = op++;
        if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; }
        else *token = (length<<ML_BITS);
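        // (Worked example, added for clarity : with RUN_MASK == 15, a literal run of 300 bytes
        //  is encoded as a token high-nibble of 15 followed by the extra bytes 255 and 30,
        //  since 300 = 15 + 255 + 30. The match length below uses the same 255-based extension.)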
        // Copy Literals
        LZ4_BLINDCOPY(anchor, op, length);

_next_match:
        // Encode Offset
        LZ4_WRITE_LITTLEENDIAN_16(op,ip-ref);

        // Start Counting
        ip+=MINMATCH; ref+=MINMATCH;   // MinMatch verified
        anchor = ip;
        while likely(ip<matchlimit-(STEPSIZE-1))
        {
            UARCH diff = AARCH(ref) ^ AARCH(ip);
            if (!diff) { ip+=STEPSIZE; ref+=STEPSIZE; continue; }
            ip += LZ4_NbCommonBytes(diff);
            goto _endCount;
        }
        if (LZ4_ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }
        if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
        if ((ip<matchlimit) && (*ref == *ip)) ip++;
_endCount:

        // Encode MatchLength
        len = (ip - anchor);
        if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; }
        else *token += len;

        // Test end of chunk
        if (ip > mflimit) { anchor = ip; break; }

        // Fill table
        HashTable[LZ4_HASH_VALUE(ip-2)] = ip - 2 - base;

        // Test next position
        ref = base + HashTable[LZ4_HASH_VALUE(ip)];
        HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
        if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; }

        // Prepare next loop
        anchor = ip++;
        forwardH = LZ4_HASH_VALUE(ip);
    }

_last_literals:
    // Encode Last Literals
    {
        int lastRun = iend - anchor;
        if ((LZ4_COMPRESSMIN>0) && (((op - (BYTE*)dest) + lastRun + 1 + ((lastRun-15)/255)) > isize - LZ4_COMPRESSMIN)) return 0;
        if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
        else *op++ = (lastRun<<ML_BITS);
        memcpy(op, anchor, iend - anchor);
        op += iend-anchor;
    }

    // End
    return (int) (((char*)op)-dest);
}
// Note : this function is valid only if isize < LZ4_64KLIMIT
#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT-1))
#define HASHLOG64K (HASH_LOG+1)
#define HASH64KTABLESIZE (1U<<HASHLOG64K)
#define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASHLOG64K))
#define LZ4_HASH64K_VALUE(p)    LZ4_HASH64K_FUNCTION(A32(p))
int LZ4_compress64kCtx(void** ctx,
                 const char* source,
                 char* dest,
                 int isize)
{
#if HEAPMODE
    struct refTables *srt = (struct refTables *) (*ctx);
    U16* HashTable;
#else
    U16 HashTable[HASH64KTABLESIZE] = {0};
#endif

    const BYTE* ip = (BYTE*) source;
    const BYTE* anchor = ip;
    const BYTE* const base = ip;
    const BYTE* const iend = ip + isize;
    const BYTE* const mflimit = iend - MFLIMIT;
#define matchlimit (iend - LASTLITERALS)

    BYTE* op = (BYTE*) dest;

    int len, length;
    const int skipStrength = SKIPSTRENGTH;
    U32 forwardH;

    // Init
    if (isize<MINLENGTH) goto _last_literals;
#if HEAPMODE
    if (*ctx == NULL)
    {
        srt = (struct refTables *) malloc ( sizeof(struct refTables) );
        *ctx = (void*) srt;
    }
    HashTable = (U16*)(srt->hashTable);
    memset((void*)HashTable, 0, sizeof(srt->hashTable));
#endif

    // First Byte
    ip++; forwardH = LZ4_HASH64K_VALUE(ip);

    // Main Loop
    for ( ; ; )
    {
        int findMatchAttempts = (1U << skipStrength) + 3;
        const BYTE* forwardIp = ip;
        const BYTE* ref;
        BYTE* token;

        // Find a match
        do {
            U32 h = forwardH;
            int step = findMatchAttempts++ >> skipStrength;
            ip = forwardIp;
            forwardIp = ip + step;

            if (forwardIp > mflimit) { goto _last_literals; }

            forwardH = LZ4_HASH64K_VALUE(forwardIp);
            ref = base + HashTable[h];
            HashTable[h] = ip - base;

        } while (A32(ref) != A32(ip));

        // Catch up
        while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; }

        // Encode Literal length
        length = ip - anchor;
        token = op++;
        if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; }
        else *token = (length<<ML_BITS);

        // Copy Literals
        LZ4_BLINDCOPY(anchor, op, length);

_next_match:
        // Encode Offset
        LZ4_WRITE_LITTLEENDIAN_16(op,ip-ref);

        // Start Counting
        ip+=MINMATCH; ref+=MINMATCH;   // MinMatch verified
        anchor = ip;
        while (ip<matchlimit-(STEPSIZE-1))
        {
            UARCH diff = AARCH(ref) ^ AARCH(ip);
            if (!diff) { ip+=STEPSIZE; ref+=STEPSIZE; continue; }
            ip += LZ4_NbCommonBytes(diff);
            goto _endCount;
        }
        if (LZ4_ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }
        if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
        if ((ip<matchlimit) && (*ref == *ip)) ip++;
_endCount:

        // Encode MatchLength
        len = (ip - anchor);
        if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; }
        else *token += len;

        // Test end of chunk
        if (ip > mflimit) { anchor = ip; break; }

        // Fill table
        HashTable[LZ4_HASH64K_VALUE(ip-2)] = ip - 2 - base;

        // Test next position
        ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
        HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;
        if (A32(ref) == A32(ip)) { token = op++; *token=0; goto _next_match; }

        // Prepare next loop
        anchor = ip++;
        forwardH = LZ4_HASH64K_VALUE(ip);
    }

_last_literals:
    // Encode Last Literals
    {
        int lastRun = iend - anchor;
        if ((LZ4_COMPRESSMIN>0) && (((op - (BYTE*)dest) + lastRun + 1 + ((lastRun-15)/255)) > isize - LZ4_COMPRESSMIN)) return 0;
        if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
        else *op++ = (lastRun<<ML_BITS);
        memcpy(op, anchor, iend - anchor);
        op += iend-anchor;
    }

    // End
    return (int) (((char*)op)-dest);
}
int LZ4_compress(const char* source,
                 char* dest,
                 int isize)
{
#if HEAPMODE
    void* ctx = malloc(sizeof(struct refTables));
    int result;
    if (isize < LZ4_64KLIMIT)
        result = LZ4_compress64kCtx(&ctx, source, dest, isize);
    else result = LZ4_compressCtx(&ctx, source, dest, isize);
    free(ctx);
    return result;
#else
    if (isize < (int)LZ4_64KLIMIT) return LZ4_compress64kCtx(NULL, source, dest, isize);
    return LZ4_compressCtx(NULL, source, dest, isize);
#endif
}
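// (Note, added for clarity : LZ4_64KLIMIT = (1<<16) + (MFLIMIT-1) = 65536 + 11 = 65547 bytes,
//  since MFLIMIT = COPYLENGTH + MINMATCH = 8 + 4 = 12. Inputs strictly smaller than this take
//  the LZ4_compress64kCtx path, whose U16 hash table entries are sufficient because every
//  position stored there fits in 16 bits.)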
//****************************
// Decompression functions
//****************************

// Note : The decoding functions LZ4_uncompress() and LZ4_uncompress_unknownOutputSize()
//        are safe against "buffer overflow" attacks.
//        They will never write nor read outside of the provided output buffers.
//        LZ4_uncompress_unknownOutputSize() also ensures that it will never read outside of the input buffer.
//        A corrupted input will produce an error result : a negative int, indicating the position of the error within the input stream.
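// Usage sketch (illustrative only, not part of the original sources) : LZ4_uncompress() needs
// the exact size of the original (decompressed) data. Variable names below are hypothetical.
//
//      int readBytes = LZ4_uncompress(compressed, decoded, originalSize);
//      if (readBytes < 0) { /* corrupted input, error near input byte -readBytes */ }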
int LZ4_uncompress(const char* source,
                 char* dest,
                 int osize)
{
    // Local Variables
    const BYTE* restrict ip = (const BYTE*) source;
    const BYTE* restrict ref;

    BYTE* restrict op = (BYTE*) dest;
    BYTE* const oend = op + osize;
    BYTE* cpy;

    BYTE token;
    int len, length;
    size_t dec[] ={0, 3, 2, 3, 0, 0, 0, 0};

    // Main Loop
    while (1)
    {
        // get runlength
        token = *ip++;
        if ((length=(token>>ML_BITS)) == RUN_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; }

        // copy literals
        cpy = op+length;
        if unlikely(cpy>oend-COPYLENGTH)
        {
            if (cpy > oend) goto _output_error;          // Error : request to write beyond destination buffer
            memcpy(op, ip, length);
            ip += length;
            break;    // Necessarily EOF
        }
        LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;

        // get offset
        LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
        if (ref < (BYTE* const)dest) goto _output_error;   // Error : offset creates reference outside destination buffer

        // get matchlength
        if ((length=(token&ML_MASK)) == ML_MASK) { for (;*ip==255;length+=255) {ip++;} length += *ip++; }

        // copy repeated sequence
        if unlikely(op-ref<STEPSIZE)
        {
#if LZ4_ARCH64
            size_t dec2table[]={0, 0, 0, -1, 0, 1, 2, 3};
            size_t dec2 = dec2table[op-ref];
#else
            const int dec2 = 0;
#endif
            *op++ = *ref++;
            *op++ = *ref++;
            *op++ = *ref++;
            *op++ = *ref++;
            ref -= dec[op-ref];
            A32(op)=A32(ref); op += STEPSIZE-4;
            ref -= dec2;
        } else { LZ4_COPYSTEP(ref,op); }
        cpy = op + length - (STEPSIZE-4);
        if (cpy>oend-COPYLENGTH)
        {
            if (cpy > oend) goto _output_error;             // Error : request to write beyond destination buffer
            LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));
            while(op<cpy) *op++=*ref++;
            op=cpy;
            if (op == oend) break;    // Check EOF (should never happen, since last 5 bytes are supposed to be literals)
            continue;
        }
        LZ4_SECURECOPY(ref, op, cpy);
        op=cpy;   // correction
    }

    // end of decoding
    return (int) (((char*)ip)-source);

    // write overflow error detected
_output_error:
    return (int) (-(((char*)ip)-source));
}
int LZ4_uncompress_unknownOutputSize(
                const char* source,
                char* dest,
                int isize,
                int maxOutputSize)
{
    // Local Variables
    const BYTE* restrict ip = (const BYTE*) source;
    const BYTE* const iend = ip + isize;
    const BYTE* restrict ref;

    BYTE* restrict op = (BYTE*) dest;
    BYTE* const oend = op + maxOutputSize;
    BYTE* cpy;

    BYTE token;
    int length;
    size_t dec[] ={0, 3, 2, 3, 0, 0, 0, 0};

    // Main Loop
    while (ip<iend)
    {
        // get runlength
        token = *ip++;
        if ((length=(token>>ML_BITS)) == RUN_MASK) { int s=255; while ((ip<iend) && (s==255)) { s=*ip++; length += s; } }

        // copy literals
        cpy = op+length;
        if ((cpy>oend-COPYLENGTH) || (ip+length>iend-COPYLENGTH))
        {
            if (cpy > oend) goto _output_error;          // Error : request to write beyond destination buffer
            if (ip+length > iend) goto _output_error;    // Error : request to read beyond source buffer
            memcpy(op, ip, length);
            op += length;
            ip += length;
            if (ip<iend) goto _output_error;             // Error : LZ4 format violation
            break;    // Necessarily EOF, due to parsing restrictions
        }
        LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;

        // get offset
        LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
        if (ref < (BYTE* const)dest) goto _output_error;   // Error : offset creates reference outside of destination buffer

        // get matchlength
        if ((length=(token&ML_MASK)) == ML_MASK) { while (ip<iend) { int s = *ip++; length +=s; if (s==255) continue; break; } }

        // copy repeated sequence
        if unlikely(op-ref<STEPSIZE)
        {
#if LZ4_ARCH64
            size_t dec2table[]={0, 0, 0, -1, 0, 1, 2, 3};
            size_t dec2 = dec2table[op-ref];
#else
            const int dec2 = 0;
#endif
            *op++ = *ref++;
            *op++ = *ref++;
            *op++ = *ref++;
            *op++ = *ref++;
            ref -= dec[op-ref];
            A32(op)=A32(ref); op += STEPSIZE-4;
            ref -= dec2;
        } else { LZ4_COPYSTEP(ref,op); }
        cpy = op + length - (STEPSIZE-4);
        if (cpy>oend-COPYLENGTH)
        {
            if (cpy > oend) goto _output_error;             // Error : request to write outside of destination buffer
            LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));
            while(op<cpy) *op++=*ref++;
            op=cpy;
            if (op == oend) break;    // Check EOF (should never happen, since last 5 bytes are supposed to be literals)
            continue;
        }
        LZ4_SECURECOPY(ref, op, cpy);
        op=cpy;   // correction
    }

    // end of decoding
    return (int) (((char*)op)-dest);

    // write overflow error detected
_output_error:
    return (int) (-(((char*)ip)-source));
}
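// Usage sketch (illustrative only, not part of the original sources) : when the decompressed
// size is not known in advance, pass the compressed size and the destination capacity; the
// return value is the number of bytes written into dest (negative on error).
// Variable names below are hypothetical.
//
//      int decodedSize = LZ4_uncompress_unknownOutputSize(compressed, decoded, cSize, capacity);
//      if (decodedSize < 0) { /* corrupted input */ }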