/*
   LZ4 HC - High Compression Mode of LZ4
   Copyright (C) 2011-2012, Yann Collet.
   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
   - LZ4 source repository : http://code.google.com/p/lz4/
*/
//**************************************
// CPU Feature Detection
//**************************************
// Detects 64-bit mode (restored #else/#endif lost in extraction)
#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || defined(__LP64__) || defined(_LP64) )
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
#endif
\r
// Little Endian or Big Endian ?
#if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || defined(__PPC) || defined(PPC) || defined(__powerpc__) || defined(__powerpc) || defined(powerpc) || ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))) )
#define LZ4_BIG_ENDIAN 1
#else
// Little Endian assumed. PDP Endian and other very rare endian format are unsupported.
#endif
\r
// Unaligned memory access is automatically enabled for "common" CPU, such as x86.
// For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected
// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance
#if defined(__ARM_FEATURE_UNALIGNED)
#define LZ4_FORCE_UNALIGNED_ACCESS 1
#endif
\r
//**************************************
// Compiler Options
//**************************************
#if __STDC_VERSION__ >= 199901L   // C99
  /* "restrict" is a known keyword */
#else
#define restrict    // Disable restrict
#endif

#ifdef _MSC_VER
#define inline __forceinline    // Visual is not C99, but supports some kind of inline
#endif
\r
// 16-bit byte swap, used to emit/read little-endian offsets on big-endian targets
#ifdef _MSC_VER    // Visual Studio
#define bswap16(x) _byteswap_ushort(x)
#else
#define bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
#endif
\r
//**************************************
// Includes
//**************************************
#include <stdlib.h>   // calloc, free
#include <string.h>   // memset, memcpy

// Memory primitives are funnelled through macros so an embedder can
// substitute its own allocator in one place.
#define ALLOCATOR(s) calloc(1,s)
#define FREEMEM free
#define MEM_INIT memset
\r
//**************************************
// Basic Types
//**************************************
#if defined(_MSC_VER)    // Visual Studio does not support 'stdint' natively
#define BYTE unsigned __int8
#define U16  unsigned __int16
#define U32  unsigned __int32
#define S32  __int32
#define U64  unsigned __int64
#else
#include <stdint.h>
#define BYTE uint8_t
#define U16  uint16_t
#define U32  uint32_t
#define S32  int32_t
#define U64  uint64_t
#endif
\r
110 #ifndef LZ4_FORCE_UNALIGNED_ACCESS
\r
111 #pragma pack(push, 1)
\r
114 typedef struct _U16_S { U16 v; } U16_S;
\r
115 typedef struct _U32_S { U32 v; } U32_S;
\r
116 typedef struct _U64_S { U64 v; } U64_S;
\r
118 #ifndef LZ4_FORCE_UNALIGNED_ACCESS
\r
122 #define A64(x) (((U64_S *)(x))->v)
\r
123 #define A32(x) (((U32_S *)(x))->v)
\r
124 #define A16(x) (((U16_S *)(x))->v)
\r
//**************************************
// Constants
//**************************************
// Minimum match length in the LZ4 format
#define MINMATCH 4

#define DICTIONARY_LOGSIZE 16
#define MAXD (1<<DICTIONARY_LOGSIZE)
#define MAXD_MASK ((U32)(MAXD - 1))
#define MAX_DISTANCE (MAXD - 1)

#define HASH_LOG (DICTIONARY_LOGSIZE-1)
#define HASHTABLESIZE (1 << HASH_LOG)
#define HASH_MASK (HASHTABLESIZE - 1)

// Number of chain entries probed per position before giving up
#define MAX_NB_ATTEMPTS 256

// Token layout : 4 bits of match length, 4 bits of literal run length
#define ML_BITS 4
#define ML_MASK (size_t)((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)

#define COPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (COPYLENGTH+MINMATCH)
#define MINLENGTH (MFLIMIT+1)
#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
\r
//**************************************
// Architecture-specific macros
//**************************************
#if LZ4_ARCH64   // 64-bit
#define STEPSIZE 8
#define LZ4_COPYSTEP(s,d)   A64(d) = A64(s); d+=8; s+=8;
#define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d)
#define UARCH U64
#define AARCH A64
#define HTYPE               U32
#define INITBASE(b,s)       const BYTE* const b = s
#else            // 32-bit
#define STEPSIZE 4
#define LZ4_COPYSTEP(s,d)   A32(d) = A32(s); d+=4; s+=4;
#define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);
#define UARCH U32
#define AARCH A32
#define HTYPE               const BYTE*
#define INITBASE(b,s)       const int b = 0
#endif

// Offsets are stored little-endian in the compressed stream
#if defined(LZ4_BIG_ENDIAN)
#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = bswap16(v); d = (s) - v; }
#define LZ4_WRITE_LITTLEENDIAN_16(p,i)  { U16 v = (U16)(i); v = bswap16(v); A16(p) = v; p+=2; }
#else   // Little Endian
#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
#define LZ4_WRITE_LITTLEENDIAN_16(p,v)  { A16(p) = v; p+=2; }
#endif
\r
185 //************************************************************
\r
187 //************************************************************
\r
191 HTYPE hashTable[HASHTABLESIZE];
\r
192 U16 chainTable[MAXD];
\r
193 const BYTE* nextToUpdate;
\r
194 } LZ4HC_Data_Structure;
\r
//**************************************
// Macros
//**************************************
// Wildcopy may overwrite up to STEPSIZE-1 bytes past 'e'; callers keep margin
#define LZ4_WILDCOPY(s,d,e)  do { LZ4_COPYPACKET(s,d) } while (d<e);
#define LZ4_BLINDCOPY(s,d,l) { BYTE* e=d+l; LZ4_WILDCOPY(s,d,e); d=e; }
// Fibonacci-hash the first MINMATCH bytes down to HASH_LOG bits
#define HASH_FUNCTION(i)     (((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG))
#define HASH_VALUE(p)        HASH_FUNCTION(*(U32*)(p))
#define HASH_POINTER(p)      (HashTable[HASH_VALUE(p)] + base)
#define DELTANEXT(p)         chainTable[(size_t)(p) & MAXD_MASK]
#define GETNEXT(p)           ((p) - (size_t)DELTANEXT(p))
// Chain deltas saturate at MAX_DISTANCE; positions further back are lost
#define ADD_HASH(p)          { size_t delta = (p) - HASH_POINTER(p); if (delta>MAX_DISTANCE) delta = MAX_DISTANCE; DELTANEXT(p) = (U16)delta; HashTable[HASH_VALUE(p)] = (p) - base; }
\r
//**************************************
// Private functions
//**************************************
#if LZ4_ARCH64

// Number of common leading (big-endian) / trailing (little-endian) whole
// bytes encoded in a 64-bit XOR difference. 'val' must be non-zero.
inline static int LZ4_NbCommonBytes (register U64 val)
{
#if defined(LZ4_BIG_ENDIAN)
    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r = 0;
    _BitScanReverse64( &r, val );
    return (int)(r>>3);
    #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_clzll(val) >> 3);
    #else
    // Software fallback: binary search for the highest non-zero byte
    int r;
    if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
    if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
    r += (!val);
    return r;
    #endif
#else
    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r = 0;
    _BitScanForward64( &r, val );
    return (int)(r>>3);
    #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_ctzll(val) >> 3);
    #else
    // De Bruijn multiply-and-lookup isolates the lowest set bit's byte index
    static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
    return DeBruijnBytePos[((U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58];
    #endif
#endif
}

#endif // LZ4_ARCH64
\r
247 inline static int LZ4_NbCommonBytes (register U32 val)
\r
249 #if defined(LZ4_BIG_ENDIAN)
\r
250 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
\r
251 unsigned long r = 0;
\r
252 _BitScanReverse( &r, val );
\r
253 return (int)(r>>3);
\r
254 #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
\r
255 return (__builtin_clz(val) >> 3);
\r
258 if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
\r
263 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
\r
264 unsigned long r = 0;
\r
265 _BitScanForward( &r, val );
\r
266 return (int)(r>>3);
\r
267 #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
\r
268 return (__builtin_ctz(val) >> 3);
\r
270 static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
\r
271 return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
\r
279 inline static int LZ4HC_Init (LZ4HC_Data_Structure* hc4, const BYTE* base)
\r
281 MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable));
\r
282 MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
\r
283 hc4->nextToUpdate = base + LZ4_ARCH64;
\r
289 inline static void* LZ4HC_Create (const BYTE* base)
\r
291 void* hc4 = ALLOCATOR(sizeof(LZ4HC_Data_Structure));
\r
293 LZ4HC_Init (hc4, base);
\r
298 inline static int LZ4HC_Free (void** LZ4HC_Data)
\r
300 FREEMEM(*LZ4HC_Data);
\r
301 *LZ4HC_Data = NULL;
\r
306 inline static void LZ4HC_Insert (LZ4HC_Data_Structure* hc4, const BYTE* ip)
\r
308 U16* chainTable = hc4->chainTable;
\r
309 HTYPE* HashTable = hc4->hashTable;
\r
310 INITBASE(base,hc4->base);
\r
312 while(hc4->nextToUpdate < ip)
\r
314 ADD_HASH(hc4->nextToUpdate);
\r
315 hc4->nextToUpdate++;
\r
320 inline static int LZ4HC_InsertAndFindBestMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* const matchlimit, const BYTE** matchpos)
\r
322 U16* const chainTable = hc4->chainTable;
\r
323 HTYPE* const HashTable = hc4->hashTable;
\r
325 INITBASE(base,hc4->base);
\r
326 int nbAttempts=MAX_NB_ATTEMPTS;
\r
329 // HC4 match finder
\r
330 LZ4HC_Insert(hc4, ip);
\r
331 ref = HASH_POINTER(ip);
\r
332 while ((ref > (ip-MAX_DISTANCE)) && (nbAttempts))
\r
335 if (*(ref+ml) == *(ip+ml))
\r
336 if (*(U32*)ref == *(U32*)ip)
\r
338 const BYTE* reft = ref+MINMATCH;
\r
339 const BYTE* ipt = ip+MINMATCH;
\r
341 while (ipt<matchlimit-(STEPSIZE-1))
\r
343 UARCH diff = AARCH(reft) ^ AARCH(ipt);
\r
344 if (!diff) { ipt+=STEPSIZE; reft+=STEPSIZE; continue; }
\r
345 ipt += LZ4_NbCommonBytes(diff);
\r
348 if (LZ4_ARCH64) if ((ipt<(matchlimit-3)) && (A32(reft) == A32(ipt))) { ipt+=4; reft+=4; }
\r
349 if ((ipt<(matchlimit-1)) && (A16(reft) == A16(ipt))) { ipt+=2; reft+=2; }
\r
350 if ((ipt<matchlimit) && (*reft == *ipt)) ipt++;
\r
353 if (ipt-ip > ml) { ml = ipt-ip; *matchpos = ref; }
\r
355 ref = GETNEXT(ref);
\r
362 inline static int LZ4HC_InsertAndGetWiderMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* startLimit, const BYTE* matchlimit, int longest, const BYTE** matchpos, const BYTE** startpos)
\r
364 U16* const chainTable = hc4->chainTable;
\r
365 HTYPE* const HashTable = hc4->hashTable;
\r
366 INITBASE(base,hc4->base);
\r
368 int nbAttempts = MAX_NB_ATTEMPTS;
\r
369 int delta = ip-startLimit;
\r
372 LZ4HC_Insert(hc4, ip);
\r
373 ref = HASH_POINTER(ip);
\r
375 while ((ref > ip-MAX_DISTANCE) && (ref >= hc4->base) && (nbAttempts))
\r
378 if (*(startLimit + longest) == *(ref - delta + longest))
\r
379 if (*(U32*)ref == *(U32*)ip)
\r
381 const BYTE* reft = ref+MINMATCH;
\r
382 const BYTE* ipt = ip+MINMATCH;
\r
383 const BYTE* startt = ip;
\r
385 while (ipt<matchlimit-(STEPSIZE-1))
\r
387 UARCH diff = AARCH(reft) ^ AARCH(ipt);
\r
388 if (!diff) { ipt+=STEPSIZE; reft+=STEPSIZE; continue; }
\r
389 ipt += LZ4_NbCommonBytes(diff);
\r
392 if (LZ4_ARCH64) if ((ipt<(matchlimit-3)) && (A32(reft) == A32(ipt))) { ipt+=4; reft+=4; }
\r
393 if ((ipt<(matchlimit-1)) && (A16(reft) == A16(ipt))) { ipt+=2; reft+=2; }
\r
394 if ((ipt<matchlimit) && (*reft == *ipt)) ipt++;
\r
398 while ((startt>startLimit) && (reft > hc4->base) && (startt[-1] == reft[-1])) {startt--; reft--;}
\r
400 if ((ipt-startt) > longest)
\r
402 longest = ipt-startt;
\r
404 *startpos = startt;
\r
407 ref = GETNEXT(ref);
\r
414 inline static int LZ4_encodeSequence(const BYTE** ip, BYTE** op, const BYTE** anchor, int ml, const BYTE* ref)
\r
419 // Encode Literal length
\r
420 length = *ip - *anchor;
\r
422 if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *(*op)++ = 255; *(*op)++ = (BYTE)len; }
\r
423 else *token = (length<<ML_BITS);
\r
426 LZ4_BLINDCOPY(*anchor, *op, length);
\r
429 LZ4_WRITE_LITTLEENDIAN_16(*op,*ip-ref);
\r
431 // Encode MatchLength
\r
432 len = (int)(ml-MINMATCH);
\r
433 if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *(*op)++ = 255; *(*op)++ = 255; } if (len > 254) { len-=255; *(*op)++ = 255; } *(*op)++ = (BYTE)len; }
\r
434 else *token += len;
\r
436 // Prepare next loop
\r
444 //****************************
\r
445 // Compression CODE
\r
446 //****************************
\r
448 int LZ4_compressHCCtx(LZ4HC_Data_Structure* ctx,
\r
449 const char* source,
\r
453 const BYTE* ip = (const BYTE*) source;
\r
454 const BYTE* anchor = ip;
\r
455 const BYTE* const iend = ip + isize;
\r
456 const BYTE* const mflimit = iend - MFLIMIT;
\r
457 const BYTE* const matchlimit = (iend - LASTLITERALS);
\r
459 BYTE* op = (BYTE*) dest;
\r
461 int ml, ml2, ml3, ml0;
\r
462 const BYTE* ref=NULL;
\r
463 const BYTE* start2=NULL;
\r
464 const BYTE* ref2=NULL;
\r
465 const BYTE* start3=NULL;
\r
466 const BYTE* ref3=NULL;
\r
467 const BYTE* start0;
\r
473 while (ip < mflimit)
\r
475 ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, (&ref));
\r
476 if (!ml) { ip++; continue; }
\r
478 // saved, in case we would skip too much
\r
484 if (ip+ml < mflimit)
\r
485 ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, ip + ml - 2, ip + 1, matchlimit, ml, &ref2, &start2);
\r
488 if (ml2 == ml) // No better match
\r
490 LZ4_encodeSequence(&ip, &op, &anchor, ml, ref);
\r
496 if (start2 < ip + ml0) // empirical
\r
504 // Here, start0==ip
\r
505 if ((start2 - ip) < 3) // First Match too small : removed
\r
514 // Currently we have :
\r
516 // ip1+3 <= ip2 (usually < ip1+ml1)
\r
517 if ((start2 - ip) < OPTIMAL_ML)
\r
521 if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
\r
522 if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = start2 - ip + ml2 - MINMATCH;
\r
523 correction = new_ml - (start2 - ip);
\r
524 if (correction > 0)
\r
526 start2 += correction;
\r
527 ref2 += correction;
\r
531 // Now, we have start2 = ip+new_ml, with new_ml=min(ml, OPTIMAL_ML=18)
\r
533 if (start2 + ml2 < mflimit)
\r
534 ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3);
\r
537 if (ml3 == ml2) // No better match : 2 sequences to encode
\r
539 // ip & ref are known; Now for ml
\r
540 if (start2 < ip+ml)
\r
542 if ((start2 - ip) < OPTIMAL_ML)
\r
545 if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
\r
546 if (ip+ml > start2 + ml2 - MINMATCH) ml = start2 - ip + ml2 - MINMATCH;
\r
547 correction = ml - (start2 - ip);
\r
548 if (correction > 0)
\r
550 start2 += correction;
\r
551 ref2 += correction;
\r
560 // Now, encode 2 sequences
\r
561 LZ4_encodeSequence(&ip, &op, &anchor, ml, ref);
\r
563 LZ4_encodeSequence(&ip, &op, &anchor, ml2, ref2);
\r
567 if (start3 < ip+ml+3) // Not enough space for match 2 : remove it
\r
569 if (start3 >= (ip+ml)) // can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1
\r
571 if (start2 < ip+ml)
\r
573 int correction = (ip+ml) - start2;
\r
574 start2 += correction;
\r
575 ref2 += correction;
\r
577 if (ml2 < MINMATCH)
\r
585 LZ4_encodeSequence(&ip, &op, &anchor, ml, ref);
\r
602 // OK, now we have 3 ascending matches; let's write at least the first one
\r
603 // ip & ref are known; Now for ml
\r
604 if (start2 < ip+ml)
\r
606 if ((start2 - ip) < (int)ML_MASK)
\r
609 if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
\r
610 if (ip + ml > start2 + ml2 - MINMATCH) ml = start2 - ip + ml2 - MINMATCH;
\r
611 correction = ml - (start2 - ip);
\r
612 if (correction > 0)
\r
614 start2 += correction;
\r
615 ref2 += correction;
\r
624 LZ4_encodeSequence(&ip, &op, &anchor, ml, ref);
\r
638 // Encode Last Literals
\r
640 int lastRun = iend - anchor;
\r
641 if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
\r
642 else *op++ = (lastRun<<ML_BITS);
\r
643 memcpy(op, anchor, iend - anchor);
\r
648 return (int) (((char*)op)-dest);
\r
652 int LZ4_compressHC(const char* source,
\r
656 void* ctx = LZ4HC_Create((const BYTE*)source);
\r
657 int result = LZ4_compressHCCtx(ctx, source, dest, isize);
\r