Update to latest LZ4.

Update a couple of comments.
This commit is contained in:
Moinak Ghosh 2012-12-11 11:38:42 +05:30
parent 224fb529e9
commit 03840b31c5
5 changed files with 564 additions and 565 deletions

239
lz4/lz4.c
View file

@@ -34,31 +34,24 @@
//**************************************
// Tuning parameters
//**************************************
// COMPRESSIONLEVEL :
// Increasing this value improves compression ratio
// Lowering this value reduces memory usage
// Reduced memory usage typically improves speed, due to cache effect (ex : L1 32KB for Intel, L1 64KB for AMD)
// Memory usage formula : N->2^(N+2) Bytes (examples : 12 -> 16KB ; 17 -> 512KB)
#define COMPRESSIONLEVEL 12
// MEMORY_USAGE :
// Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
// Increasing memory usage improves compression ratio
// Reduced memory usage can improve speed, due to cache effect
// Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
#define MEMORY_USAGE 14
// NOTCOMPRESSIBLE_CONFIRMATION :
// NOTCOMPRESSIBLE_DETECTIONLEVEL :
// Decreasing this value will make the algorithm skip faster data segments considered "incompressible"
// This may decrease compression ratio dramatically, but will be faster on incompressible data
// Increasing this value will make the algorithm search more before declaring a segment "incompressible"
// This could improve compression a bit, but will be slower on incompressible data
// The default value (6) is recommended
#define NOTCOMPRESSIBLE_CONFIRMATION 6
// LZ4_COMPRESSMIN :
// Compression function will *fail* if it is not successful at compressing input by at least LZ4_COMPRESSMIN bytes
// Since the compression function stops working prematurely, it results in a speed gain
// The output however is unusable. Compression function result will be zero.
// Default : 0 = disabled
#define LZ4_COMPRESSMIN 0
#define NOTCOMPRESSIBLE_DETECTIONLEVEL 6
// BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
// This will provide a boost to performance for big endian cpu, but the resulting compressed stream will be incompatible with little-endian CPU.
// You can set this option to 1 in situations where data will stay within closed environment
// This will provide a small boost to performance for big endian cpu, but the resulting compressed stream will be incompatible with little-endian CPU.
// You can set this option to 1 in situations where data will remain within closed environment
// This option is useless on Little_Endian CPU (such as x86)
//#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1
@@ -107,7 +100,7 @@
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#ifdef _MSC_VER // Visual Studio
# define inline __forceinline // Visual is not C99, but supports some kind of inline
# include <intrin.h> // For Visual 2005
# if LZ4_ARCH64 // 64-bit
# pragma intrinsic(_BitScanForward64) // For Visual 2005
# pragma intrinsic(_BitScanReverse64) // For Visual 2005
@@ -181,11 +174,11 @@ typedef struct _U64_S { U64 v; } U64_S;
//**************************************
#define MINMATCH 4
#define HASH_LOG COMPRESSIONLEVEL
#define HASH_LOG (MEMORY_USAGE-2)
#define HASHTABLESIZE (1 << HASH_LOG)
#define HASH_MASK (HASHTABLESIZE - 1)
#define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION>2?NOTCOMPRESSIBLE_CONFIRMATION:2)
#define SKIPSTRENGTH (NOTCOMPRESSIBLE_DETECTIONLEVEL>2?NOTCOMPRESSIBLE_DETECTIONLEVEL:2)
#define STACKLIMIT 13
#define HEAPMODE (HASH_LOG>STACKLIMIT) // Defines if memory is allocated into the stack (local variable), or into the heap (malloc()).
#define COPYLENGTH 8
@@ -257,7 +250,7 @@ struct refTables
//****************************
#if LZ4_ARCH64
inline static int LZ4_NbCommonBytes (register U64 val)
static inline int LZ4_NbCommonBytes (register U64 val)
{
#if defined(LZ4_BIG_ENDIAN)
#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
@@ -289,7 +282,7 @@ inline static int LZ4_NbCommonBytes (register U64 val)
#else
inline static int LZ4_NbCommonBytes (register U32 val)
static inline int LZ4_NbCommonBytes (register U32 val)
{
#if defined(LZ4_BIG_ENDIAN)
#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
@@ -321,25 +314,22 @@ inline static int LZ4_NbCommonBytes (register U32 val)
#endif
//****************************
// Public functions
//****************************
int LZ4_compressBound(int isize)
{
return (isize + (isize/255) + 16);
}
//******************************
// Compression functions
//******************************
int LZ4_compressCtx(void** ctx,
// LZ4_compressCtx :
// -----------------
// Compress 'isize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
// If it cannot achieve it, compression will stop, and result of the function will be zero.
// return : the number of bytes written in buffer 'dest', or 0 if the compression fails
static inline int LZ4_compressCtx(void** ctx,
const char* source,
char* dest,
int isize)
int isize,
int maxOutputSize)
{
#if HEAPMODE
struct refTables *srt = (struct refTables *) (*ctx);
@@ -356,6 +346,7 @@ int LZ4_compressCtx(void** ctx,
#define matchlimit (iend - LASTLITERALS)
BYTE* op = (BYTE*) dest;
BYTE* const oend = op + maxOutputSize;
int len, length;
const int skipStrength = SKIPSTRENGTH;
@@ -408,17 +399,37 @@ int LZ4_compressCtx(void** ctx,
while ((ip>anchor) && (ref>(BYTE*)source) && unlikely(ip[-1]==ref[-1])) { ip--; ref--; }
// Encode Literal length
length = ip - anchor;
length = (int)(ip - anchor);
token = op++;
if unlikely(op + length + (2 + 1 + LASTLITERALS) + (length>>8) > oend) return 0; // Check output limit
#ifdef _MSC_VER
if (length>=(int)RUN_MASK)
{
int len = length-RUN_MASK;
*token=(RUN_MASK<<ML_BITS);
if (len>254)
{
do { *op++ = 255; len -= 255; } while (len>254);
*op++ = (BYTE)len;
memcpy(op, anchor, length);
op += length;
goto _next_match;
}
else
*op++ = (BYTE)len;
}
else *token = (length<<ML_BITS);
#else
if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; }
else *token = (length<<ML_BITS);
#endif
// Copy Literals
LZ4_BLINDCOPY(anchor, op, length);
_next_match:
// Encode Offset
LZ4_WRITE_LITTLEENDIAN_16(op,ip-ref);
LZ4_WRITE_LITTLEENDIAN_16(op,(U16)(ip-ref));
// Start Counting
ip+=MINMATCH; ref+=MINMATCH; // MinMatch verified
@@ -436,7 +447,8 @@ _next_match:
_endCount:
// Encode MatchLength
len = (ip - anchor);
len = (int)(ip - anchor);
if unlikely(op + (1 + LASTLITERALS) + (len>>8) > oend) return 0; // Check output limit
if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; }
else *token += len;
@@ -459,8 +471,8 @@ _endCount:
_last_literals:
// Encode Last Literals
{
int lastRun = iend - anchor;
if ((LZ4_COMPRESSMIN>0) && (((op - (BYTE*)dest) + lastRun + 1 + ((lastRun-15)/255)) > isize - LZ4_COMPRESSMIN)) return 0;
int lastRun = (int)(iend - anchor);
if (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) return 0;
if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
else *op++ = (lastRun<<ML_BITS);
memcpy(op, anchor, iend - anchor);
@@ -479,10 +491,11 @@ _last_literals:
#define HASH64KTABLESIZE (1U<<HASHLOG64K)
#define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASHLOG64K))
#define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p))
int LZ4_compress64kCtx(void** ctx,
static inline int LZ4_compress64kCtx(void** ctx,
const char* source,
char* dest,
int isize)
int isize,
int maxOutputSize)
{
#if HEAPMODE
struct refTables *srt = (struct refTables *) (*ctx);
@@ -499,6 +512,7 @@ int LZ4_compress64kCtx(void** ctx,
#define matchlimit (iend - LASTLITERALS)
BYTE* op = (BYTE*) dest;
BYTE* const oend = op + maxOutputSize;
int len, length;
const int skipStrength = SKIPSTRENGTH;
@@ -542,7 +556,7 @@ int LZ4_compress64kCtx(void** ctx,
forwardH = LZ4_HASH64K_VALUE(forwardIp);
ref = base + HashTable[h];
HashTable[h] = ip - base;
HashTable[h] = (U16)(ip - base);
} while (A32(ref) != A32(ip));
@@ -550,17 +564,37 @@ int LZ4_compress64kCtx(void** ctx,
while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; }
// Encode Literal length
length = ip - anchor;
length = (int)(ip - anchor);
token = op++;
if unlikely(op + length + (2 + 1 + LASTLITERALS) + (length>>8) > oend) return 0; // Check output limit
#ifdef _MSC_VER
if (length>=(int)RUN_MASK)
{
int len = length-RUN_MASK;
*token=(RUN_MASK<<ML_BITS);
if (len>254)
{
do { *op++ = 255; len -= 255; } while (len>254);
*op++ = (BYTE)len;
memcpy(op, anchor, length);
op += length;
goto _next_match;
}
else
*op++ = (BYTE)len;
}
else *token = (length<<ML_BITS);
#else
if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; }
else *token = (length<<ML_BITS);
#endif
// Copy Literals
LZ4_BLINDCOPY(anchor, op, length);
_next_match:
// Encode Offset
LZ4_WRITE_LITTLEENDIAN_16(op,ip-ref);
LZ4_WRITE_LITTLEENDIAN_16(op,(U16)(ip-ref));
// Start Counting
ip+=MINMATCH; ref+=MINMATCH; // MinMatch verified
@@ -578,7 +612,8 @@ _next_match:
_endCount:
// Encode MatchLength
len = (ip - anchor);
len = (int)(ip - anchor);
if unlikely(op + (1 + LASTLITERALS) + (len>>8) > oend) return 0; // Check output limit
if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; }
else *token += len;
@@ -586,11 +621,11 @@ _endCount:
if (ip > mflimit) { anchor = ip; break; }
// Fill table
HashTable[LZ4_HASH64K_VALUE(ip-2)] = ip - 2 - base;
HashTable[LZ4_HASH64K_VALUE(ip-2)] = (U16)(ip - 2 - base);
// Test next position
ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;
HashTable[LZ4_HASH64K_VALUE(ip)] = (U16)(ip - base);
if (A32(ref) == A32(ip)) { token = op++; *token=0; goto _next_match; }
// Prepare next loop
@@ -601,8 +636,8 @@ _endCount:
_last_literals:
// Encode Last Literals
{
int lastRun = iend - anchor;
if ((LZ4_COMPRESSMIN>0) && (((op - (BYTE*)dest) + lastRun + 1 + ((lastRun-15)/255)) > isize - LZ4_COMPRESSMIN)) return 0;
int lastRun = (int)(iend - anchor);
if (op + lastRun + 1 + (lastRun-RUN_MASK+255)/255 > oend) return 0;
if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
else *op++ = (lastRun<<ML_BITS);
memcpy(op, anchor, iend - anchor);
@@ -614,26 +649,34 @@ _last_literals:
}
int LZ4_compress(const char* source,
int LZ4_compress_limitedOutput(const char* source,
char* dest,
int isize)
int isize,
int maxOutputSize)
{
#if HEAPMODE
void* ctx = malloc(sizeof(struct refTables));
int result;
if (isize < LZ4_64KLIMIT)
result = LZ4_compress64kCtx(&ctx, source, dest, isize);
else result = LZ4_compressCtx(&ctx, source, dest, isize);
result = LZ4_compress64kCtx(&ctx, source, dest, isize, maxOutputSize);
else result = LZ4_compressCtx(&ctx, source, dest, isize, maxOutputSize);
free(ctx);
return result;
#else
if (isize < (int)LZ4_64KLIMIT) return LZ4_compress64kCtx(NULL, source, dest, isize);
return LZ4_compressCtx(NULL, source, dest, isize);
if (isize < (int)LZ4_64KLIMIT) return LZ4_compress64kCtx(NULL, source, dest, isize, maxOutputSize);
return LZ4_compressCtx(NULL, source, dest, isize, maxOutputSize);
#endif
}
int LZ4_compress(const char* source,
char* dest,
int isize)
{
return LZ4_compress_limitedOutput(source, dest, isize, LZ4_compressBound(isize));
}
//****************************
@@ -652,16 +695,19 @@ int LZ4_uncompress(const char* source,
{
// Local Variables
const BYTE* restrict ip = (const BYTE*) source;
const BYTE* restrict ref;
const BYTE* ref;
BYTE* restrict op = (BYTE*) dest;
BYTE* op = (BYTE*) dest;
BYTE* const oend = op + osize;
BYTE* cpy;
BYTE token;
unsigned token;
int len, length;
size_t dec[] ={0, 3, 2, 3, 0, 0, 0, 0};
size_t length;
size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
#if LZ4_ARCH64
size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
#endif
// Main Loop
@@ -669,42 +715,41 @@ int LZ4_uncompress(const char* source,
{
// get runlength
token = *ip++;
if ((length=(token>>ML_BITS)) == RUN_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; }
if ((length=(token>>ML_BITS)) == RUN_MASK) { size_t len; for (;(len=*ip++)==255;length+=255){} length += len; }
// copy literals
cpy = op+length;
if unlikely(cpy>oend-COPYLENGTH)
{
if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer
if (cpy != oend) goto _output_error; // Error : we must necessarily stand at EOF
memcpy(op, ip, length);
ip += length;
break; // Necessarily EOF
break; // EOF
}
LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
// get offset
LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
if (ref < (BYTE* const)dest) goto _output_error; // Error : offset create reference outside destination buffer
if unlikely(ref < (BYTE* const)dest) goto _output_error; // Error : offset create reference outside destination buffer
// get matchlength
if ((length=(token&ML_MASK)) == ML_MASK) { for (;*ip==255;length+=255) {ip++;} length += *ip++; }
// copy repeated sequence
if unlikely(op-ref<STEPSIZE)
if unlikely((op-ref)<STEPSIZE)
{
#if LZ4_ARCH64
size_t dec2table[]={0, 0, 0, -1, 0, 1, 2, 3};
size_t dec2 = dec2table[op-ref];
size_t dec64 = dec64table[op-ref];
#else
const int dec2 = 0;
const int dec64 = 0;
#endif
*op++ = *ref++;
*op++ = *ref++;
*op++ = *ref++;
*op++ = *ref++;
ref -= dec[op-ref];
A32(op)=A32(ref); op += STEPSIZE-4;
ref -= dec2;
op[0] = ref[0];
op[1] = ref[1];
op[2] = ref[2];
op[3] = ref[3];
op += 4, ref += 4; ref -= dec32table[op-ref];
A32(op) = A32(ref);
op += STEPSIZE-4; ref -= dec64;
} else { LZ4_COPYSTEP(ref,op); }
cpy = op + length - (STEPSIZE-4);
if (cpy>oend-COPYLENGTH)
@@ -713,7 +758,7 @@ int LZ4_uncompress(const char* source,
LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));
while(op<cpy) *op++=*ref++;
op=cpy;
if (op == oend) break; // Check EOF (should never happen, since last 5 bytes are supposed to be literals)
if (op == oend) goto _output_error; // Check EOF (should never happen, since last 5 bytes are supposed to be literals)
continue;
}
LZ4_SECURECOPY(ref, op, cpy);
@@ -738,20 +783,23 @@ int LZ4_uncompress_unknownOutputSize(
// Local Variables
const BYTE* restrict ip = (const BYTE*) source;
const BYTE* const iend = ip + isize;
const BYTE* restrict ref;
const BYTE* ref;
BYTE* restrict op = (BYTE*) dest;
BYTE* op = (BYTE*) dest;
BYTE* const oend = op + maxOutputSize;
BYTE* cpy;
size_t dec[] ={0, 3, 2, 3, 0, 0, 0, 0};
size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
#if LZ4_ARCH64
size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
#endif
// Main Loop
while (ip<iend)
{
BYTE token;
int length;
unsigned token;
size_t length;
// get runlength
token = *ip++;
@@ -761,12 +809,10 @@ int LZ4_uncompress_unknownOutputSize(
cpy = op+length;
if ((cpy>oend-COPYLENGTH) || (ip+length>iend-COPYLENGTH))
{
if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer
if (ip+length > iend) goto _output_error; // Error : request to read beyond source buffer
if (cpy > oend) goto _output_error; // Error : writes beyond output buffer
if (ip+length != iend) goto _output_error; // Error : LZ4 format requires to consume all input at this stage
memcpy(op, ip, length);
op += length;
ip += length;
if (ip<iend) goto _output_error; // Error : LZ4 format violation
break; // Necessarily EOF, due to parsing restrictions
}
LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
@@ -782,18 +828,17 @@ int LZ4_uncompress_unknownOutputSize(
if unlikely(op-ref<STEPSIZE)
{
#if LZ4_ARCH64
size_t dec2table[]={0, 0, 0, -1, 0, 1, 2, 3};
size_t dec2 = dec2table[op-ref];
size_t dec64 = dec64table[op-ref];
#else
const int dec2 = 0;
const int dec64 = 0;
#endif
*op++ = *ref++;
*op++ = *ref++;
*op++ = *ref++;
*op++ = *ref++;
ref -= dec[op-ref];
A32(op)=A32(ref); op += STEPSIZE-4;
ref -= dec2;
op[0] = ref[0];
op[1] = ref[1];
op[2] = ref[2];
op[3] = ref[3];
op += 4, ref += 4; ref -= dec32table[op-ref];
A32(op) = A32(ref);
op += STEPSIZE-4; ref -= dec64;
} else { LZ4_COPYSTEP(ref,op); }
cpy = op + length - (STEPSIZE-4);
if (cpy>oend-COPYLENGTH)
@@ -802,7 +847,7 @@ int LZ4_uncompress_unknownOutputSize(
LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));
while(op<cpy) *op++=*ref++;
op=cpy;
if (op == oend) break; // Check EOF (should never happen, since last 5 bytes are supposed to be literals)
if (op == oend) goto _output_error; // Check EOF (should never happen, since last 5 bytes are supposed to be literals)
continue;
}
LZ4_SECURECOPY(ref, op, cpy);

View file

@@ -31,28 +31,6 @@
- LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
- LZ4 source repository : http://code.google.com/p/lz4/
*/
/*
* This file is a part of Pcompress, a chunked parallel multi-
* algorithm lossless compression and decompression program.
*
* Copyright (C) 2012 Moinak Ghosh. All rights reserved.
* Use is subject to license terms.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* moinakg@belenix.org, http://moinakg.wordpress.com/
*
*/
#pragma once
#if defined (__cplusplus)
@@ -60,6 +38,14 @@ extern "C" {
#endif
//**************************************
// Compiler Options
//**************************************
#ifdef _MSC_VER // Visual Studio
# define inline __inline // Visual is not C99, but supports some kind of inline
#endif
//****************************
// Simple Functions
//****************************
@@ -69,19 +55,22 @@ int LZ4_uncompress (const char* source, char* dest, int osize);
/*
LZ4_compress() :
Compresses 'isize' bytes from 'source' into 'dest'.
Destination buffer must be already allocated,
and must be sized to handle worst cases situations (input data not compressible)
Worst case size evaluation is provided by function LZ4_compressBound()
isize : is the input size. Max supported value is ~1.9GB
return : the number of bytes written in buffer dest
or 0 if the compression fails (if LZ4_COMPRESSMIN is set)
note : destination buffer must be already allocated.
destination buffer must be sized to handle worst cases situations (input data not compressible)
worst case size evaluation is provided by function LZ4_compressBound()
LZ4_uncompress() :
osize : is the output size, therefore the original size
return : the number of bytes read in the source buffer
If the source stream is malformed, the function will stop decoding and return a negative result, indicating the byte position of the faulty instruction
This function never writes beyond dest + osize, and is therefore protected against malicious data packets
note : destination buffer must be already allocated
This function never writes outside of provided buffers, and never modifies input buffer.
note : destination buffer must be already allocated.
its size must be a minimum of 'osize' bytes.
*/
@@ -89,12 +78,15 @@ LZ4_uncompress() :
// Advanced Functions
//****************************
int LZ4_compressBound(int isize);
static inline int LZ4_compressBound(int isize) { return ((isize) + ((isize)/255) + 16); }
#define LZ4_COMPRESSBOUND( isize) ((isize) + ((isize)/255) + 16)
/*
LZ4_compressBound() :
Provides the maximum size that LZ4 may output in a "worst case" scenario (input data not compressible)
primarily useful for memory allocation of output buffer.
inline function is recommended for the general case,
but macro is also provided when results need to be evaluated at compile time (such as table size allocation).
isize : is the input size. Max supported value is ~1.9GB
return : maximum output size in a "worst case" scenario
@@ -102,6 +94,21 @@ LZ4_compressBound() :
*/
int LZ4_compress_limitedOutput (const char* source, char* dest, int isize, int maxOutputSize);
/*
LZ4_compress_limitedOutput() :
Compress 'isize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
If it cannot achieve it, compression will stop, and result of the function will be zero.
This function never writes outside of provided output buffer.
isize : is the input size. Max supported value is ~1.9GB
maxOutputSize : is the size of the destination buffer (which must be already allocated)
return : the number of bytes written in buffer 'dest'
or 0 if the compression fails
*/
int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
/*
@@ -116,27 +123,6 @@ LZ4_uncompress_unknownOutputSize() :
*/
int LZ4_compressCtx(void** ctx, const char* source, char* dest, int isize);
int LZ4_compress64kCtx(void** ctx, const char* source, char* dest, int isize);
/*
LZ4_compressCtx() :
This function explicitly handles the CTX memory structure.
It avoids allocating/deallocating memory between each call, improving performance when malloc is heavily invoked.
This function is only useful when memory is allocated into the heap (HASH_LOG value beyond STACK_LIMIT)
Performance difference will be noticeable only when repetitively calling the compression function over many small segments.
Note : by default, memory is allocated into the stack, therefore "malloc" is not invoked.
LZ4_compress64kCtx() :
Same as LZ4_compressCtx(), but specific to small inputs (<64KB).
isize *Must* be <64KB, otherwise the output will be corrupted.
On first call : provide a *ctx=NULL; It will be automatically allocated.
On next calls : reuse the same ctx pointer.
Use different pointers for different threads when doing multi-threading.
*/
#if defined (__cplusplus)
}
#endif

View file

@@ -31,26 +31,6 @@
- LZ4 source repository : http://code.google.com/p/lz4/
*/
/*
* This file is a part of Pcompress, a chunked parallel multi-
* algorithm lossless compression and decompression program.
*
* Copyright (C) 2012 Moinak Ghosh. All rights reserved.
* Use is subject to license terms.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* moinakg@belenix.org, http://moinakg.wordpress.com/
*
*/
//**************************************
// CPU Feature Detection
@@ -88,12 +68,20 @@
#ifdef _MSC_VER
#define inline __forceinline // Visual is not C99, but supports some kind of inline
#include <intrin.h> // For Visual 2005
# if LZ4_ARCH64 // 64-bit
# pragma intrinsic(_BitScanForward64) // For Visual 2005
# pragma intrinsic(_BitScanReverse64) // For Visual 2005
# else
# pragma intrinsic(_BitScanForward) // For Visual 2005
# pragma intrinsic(_BitScanReverse) // For Visual 2005
# endif
#endif
#ifdef _MSC_VER // Visual Studio
#define bswap16(x) _byteswap_ushort(x)
#define lz4_bswap16(x) _byteswap_ushort(x)
#else
#define bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
#define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
#endif
@@ -197,8 +185,8 @@ typedef struct _U64_S { U64 v; } U64_S;
#endif
#if defined(LZ4_BIG_ENDIAN)
#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = bswap16(v); d = (s) - v; }
#define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = bswap16(v); A16(p) = v; p+=2; }
#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
#define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
#else // Little Endian
#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
#define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
@@ -352,7 +340,7 @@ inline static int LZ4HC_InsertAndFindBestMatch (LZ4HC_Data_Structure* hc4, const
// HC4 match finder
LZ4HC_Insert(hc4, ip);
ref = HASH_POINTER(ip);
while ((ref > (ip-MAX_DISTANCE)) && (nbAttempts))
while ((ref >= (ip-MAX_DISTANCE)) && (nbAttempts))
{
nbAttempts--;
if (*(ref+ml) == *(ip+ml))
@@ -384,7 +372,7 @@ inline static int LZ4HC_InsertAndFindBestMatch (LZ4HC_Data_Structure* hc4, const
if ((ipt<matchlimit) && (*reft == *ipt)) ipt++;
_endCount:
if (ipt-ip > ml) { ml = ipt-ip; *matchpos = ref; }
if (ipt-ip > ml) { ml = (int)(ipt-ip); *matchpos = ref; }
}
ref = GETNEXT(ref);
}
@@ -400,13 +388,13 @@ inline static int LZ4HC_InsertAndGetWiderMatch (LZ4HC_Data_Structure* hc4, const
INITBASE(base,hc4->base);
const BYTE* ref;
int nbAttempts = MAX_NB_ATTEMPTS;
int delta = ip-startLimit;
int delta = (int)(ip-startLimit);
// First Match
LZ4HC_Insert(hc4, ip);
ref = HASH_POINTER(ip);
while ((ref > ip-MAX_DISTANCE) && (ref >= hc4->base) && (nbAttempts))
while ((ref >= ip-MAX_DISTANCE) && (ref >= hc4->base) && (nbAttempts))
{
nbAttempts--;
if (*(startLimit + longest) == *(ref - delta + longest))
@@ -444,7 +432,7 @@ _endCount:
if ((ipt-startt) > longest)
{
longest = ipt-startt;
longest = (int)(ipt-startt);
*matchpos = reft;
*startpos = startt;
}
@@ -462,7 +450,7 @@ inline static int LZ4_encodeSequence(const BYTE** ip, BYTE** op, const BYTE** an
BYTE* token;
// Encode Literal length
length = *ip - *anchor;
length = (int)(*ip - *anchor);
token = (*op)++;
if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *(*op)++ = 255; *(*op)++ = (BYTE)len; }
else *token = (length<<ML_BITS);
@@ -471,7 +459,7 @@ inline static int LZ4_encodeSequence(const BYTE** ip, BYTE** op, const BYTE** an
LZ4_BLINDCOPY(*anchor, *op, length);
// Encode Offset
LZ4_WRITE_LITTLEENDIAN_16(*op,*ip-ref);
LZ4_WRITE_LITTLEENDIAN_16(*op,(U16)(*ip-ref));
// Encode MatchLength
len = (int)(ml-MINMATCH);
@@ -564,8 +552,8 @@ _Search3:
int correction;
int new_ml = ml;
if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = start2 - ip + ml2 - MINMATCH;
correction = new_ml - (start2 - ip);
if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
correction = new_ml - (int)(start2 - ip);
if (correction > 0)
{
start2 += correction;
@@ -588,8 +576,8 @@ _Search3:
{
int correction;
if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
if (ip+ml > start2 + ml2 - MINMATCH) ml = start2 - ip + ml2 - MINMATCH;
correction = ml - (start2 - ip);
if (ip+ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH;
correction = ml - (int)(start2 - ip);
if (correction > 0)
{
start2 += correction;
@@ -599,7 +587,7 @@ _Search3:
}
else
{
ml = start2 - ip;
ml = (int)(start2 - ip);
}
}
// Now, encode 2 sequences
@@ -615,7 +603,7 @@ _Search3:
{
if (start2 < ip+ml)
{
int correction = (ip+ml) - start2;
int correction = (int)(ip+ml - start2);
start2 += correction;
ref2 += correction;
ml2 -= correction;
@@ -652,8 +640,8 @@ _Search3:
{
int correction;
if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
if (ip + ml > start2 + ml2 - MINMATCH) ml = start2 - ip + ml2 - MINMATCH;
correction = ml - (start2 - ip);
if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH;
correction = ml - (int)(start2 - ip);
if (correction > 0)
{
start2 += correction;
@@ -663,7 +651,7 @@ _Search3:
}
else
{
ml = start2 - ip;
ml = (int)(start2 - ip);
}
}
LZ4_encodeSequence(&ip, &op, &anchor, ml, ref);
@@ -682,7 +670,7 @@ _Search3:
// Encode Last Literals
{
int lastRun = iend - anchor;
int lastRun = (int)(iend - anchor);
if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
else *op++ = (lastRun<<ML_BITS);
memcpy(op, anchor, iend - anchor);

View file

@@ -31,28 +31,6 @@
- LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
- LZ4 source repository : http://code.google.com/p/lz4/
*/
/*
* This file is a part of Pcompress, a chunked parallel multi-
* algorithm lossless compression and decompression program.
*
* Copyright (C) 2012 Moinak Ghosh. All rights reserved.
* Use is subject to license terms.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* moinakg@belenix.org, http://moinakg.wordpress.com/
*
*/
#pragma once

12
main.c
View file

@@ -208,6 +208,7 @@ preproc_compress(compress_func_ptr cmp_func, void *src, uint64_t srclen, void *d
} else {
/*
* Execution won't come here but just in case ...
* Even Delta2 encoding below enables LZP.
*/
fprintf(stderr, "Invalid preprocessing mode\n");
return (-1);
@@ -541,11 +542,12 @@ cont:
*
* Chunk Flags, 8 bits:
* I I I I I I I I
* | | | | |
* | '-----' | `- 0 - Uncompressed
* | | | 1 - Compressed
* | | |
* | | `---- 1 - Chunk was Deduped
* | | | | | |
* | '-----' | | `- 0 - Uncompressed
* | | | | 1 - Compressed
* | | | |
* | | | `---- 1 - Chunk was Deduped
* | | `------- 1 - Chunk was pre-compressed
* | |
* | | 1 - Bzip2 (Adaptive Mode)
* | `---------------- 2 - Lzma (Adaptive Mode)