From owner-svn-src-all@freebsd.org Thu Aug 8 15:30:51 2019
-zstd 1.4.0 Manual
+zstd 1.4.1 Manual
Contents
Introduction
@@ -78,12 +68,8 @@
unsigned ZSTD_versionNumber(void); /**< to check runtime library version */
-Default constant
+Simple API
-Constants
-
-Simple API
-
size_t ZSTD_compress( void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel);
@@ -152,12 +138,17 @@ const char* ZSTD_getErrorName(size_t code); /*
int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed */
int ZSTD_maxCLevel(void); /*!< maximum compression level available */
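For reference alongside the simple API above, a minimal one-shot sketch (not part of the upstream diff; the helper name compress_once is illustrative). It sizes the destination with ZSTD_compressBound() and requests the default level via ZSTD_CLEVEL_DEFAULT:

    #include <stdlib.h>
    #include <string.h>
    #include <zstd.h>

    static size_t compress_once(const char* msg, void** outPtr)
    {
        size_t const srcSize = strlen(msg) + 1;
        size_t const dstCap  = ZSTD_compressBound(srcSize);   /* worst-case compressed size */
        void*  const dst     = malloc(dstCap);
        size_t cSize;
        *outPtr = NULL;
        if (dst == NULL) return 0;
        cSize = ZSTD_compress(dst, dstCap, msg, srcSize, ZSTD_CLEVEL_DEFAULT);
        if (ZSTD_isError(cSize)) { free(dst); return 0; }
        *outPtr = dst;    /* caller frees */
        return cSize;
    }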
-Explicit context
+Explicit context
Compression context
When compressing many times,
- it is recommended to allocate a context just once, and re-use it for each successive compression operation.
+ it is recommended to allocate a context just once,
+ and re-use it for each successive compression operation.
This will make workload friendlier for system's memory.
- Use one context per thread for parallel execution in multi-threaded environments.
+ Note : re-using context is just a speed / resource optimization.
+ It doesn't change the compression ratio, which remains identical.
+ Note 2 : In multi-threaded environments,
+ use one different context per thread for parallel execution.
+
typedef struct ZSTD_CCtx_s ZSTD_CCtx;
ZSTD_CCtx* ZSTD_createCCtx(void);
size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx);
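To illustrate the context re-use note above, a small sketch (illustrative only, not from the upstream diff): one ZSTD_CCtx is allocated and then re-used for several independent compressions with ZSTD_compressCCtx(); the helper name compress_many and its output-packing scheme are assumptions:

    #include <stdlib.h>
    #include <zstd.h>

    static size_t compress_many(const void* const* srcs, const size_t* srcSizes,
                                void* dst, size_t dstCapacity, int n, int level)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();   /* allocate once */
        size_t written = 0;
        if (cctx == NULL) return 0;
        for (int i = 0; i < n; i++) {
            size_t const r = ZSTD_compressCCtx(cctx,
                                (char*)dst + written, dstCapacity - written,
                                srcs[i], srcSizes[i], level);
            if (ZSTD_isError(r)) { written = 0; break; }
            written += r;    /* same context, re-used for each frame */
        }
        ZSTD_freeCCtx(cctx);
        return written;
    }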
@@ -189,7 +180,7 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
typedef enum { ZSTD_fast=1, ZSTD_dfast=2,
@@ -332,6 +323,7 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
  * ZSTD_c_forceMaxWindow
  * ZSTD_c_forceAttachDict
  * ZSTD_c_literalCompressionMode
+ * ZSTD_c_targetCBlockSize
  * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
  * note : never ever use experimentalParam? names directly;
  * also, the enums values themselves are unstable and can still change.
@@ -341,6 +333,7 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
     ZSTD_c_experimentalParam3=1000,
     ZSTD_c_experimentalParam4=1001,
     ZSTD_c_experimentalParam5=1002,
+    ZSTD_c_experimentalParam6=1003,
 } ZSTD_cParameter;
typedef struct {
@@ -424,7 +417,7 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
typedef enum {
@@ -472,7 +465,7 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
typedef struct ZSTD_inBuffer_s {
  const void* src;    /**< start of input buffer */
@@ -486,7 +479,7 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
  size_t pos;         /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
} ZSTD_outBuffer;
+Streaming compression - HowTo
A ZSTD_CStream object is required to track streaming operation.
Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
@@ -592,31 +585,28 @@ size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
-size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block in all circumstances. */
+size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block. */
-This is a legacy streaming API, and can be replaced by ZSTD_CCtx_reset() and
ZSTD_compressStream2(). It is redundant, but is still fully supported.
- Advanced parameters and dictionary compression can only be used through the
- new API.
-
-
-Equivalent to:
+size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
+/*!
+ * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
+ * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
+ * the next read size (if non-zero and not an error). ZSTD_compressStream2()
+ * returns the minimum nb of bytes left to flush (if non-zero and not an error).
+ */
+size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
+size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
+size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+
+ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
 ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
 ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
-
-Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
NOTE: The return value is different. ZSTD_compressStream() returns a hint for
- the next read size (if non-zero and not an error). ZSTD_compressStream2()
- returns the number of bytes left to flush (if non-zero and not an error).
-
-
-
-Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush).
-
-Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end).
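For orientation, a hedged sketch of the new-API streaming flow this section describes (not part of the upstream diff; compress_file and its error handling are illustrative): feed chunks with ZSTD_e_continue and finish the frame with ZSTD_e_end, looping until the return value reports nothing left to flush.

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    static int compress_file(FILE* fin, FILE* fout, int level)
    {
        size_t const inSize  = ZSTD_CStreamInSize();    /* recommended buffer sizes */
        size_t const outSize = ZSTD_CStreamOutSize();
        void* const inBuf  = malloc(inSize);
        void* const outBuf = malloc(outSize);
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        int ret = -1;
        if (!inBuf || !outBuf || !cctx) goto cleanup;
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);

        for (;;) {
            size_t const readSize = fread(inBuf, 1, inSize, fin);
            int const lastChunk = (readSize < inSize);
            ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            int finished;
            do {
                ZSTD_outBuffer output = { outBuf, outSize, 0 };
                size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
                if (ZSTD_isError(remaining)) goto cleanup;
                fwrite(outBuf, 1, output.pos, fout);
                /* ZSTD_e_end: loop until the frame is fully flushed (remaining == 0);
                 * ZSTD_e_continue: loop until all input has been consumed. */
                finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
            } while (!finished);
            if (lastChunk) break;
        }
        ret = 0;
    cleanup:
        ZSTD_freeCCtx(cctx);
        free(inBuf); free(outBuf);
        return ret;
    }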
-
-Streaming decompression - HowTo
+/**< Always emit uncompressed literals. */
 } ZSTD_literalCompressionMode_e;
+Streaming decompression - HowTo
A ZSTD_DStream object is required to track streaming operations.
Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
ZSTD_DStream objects can be re-used multiple times.
@@ -647,14 +637,12 @@ size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
ZSTD_DStream management functions
ZSTD_DStream* ZSTD_createDStream(void);
size_t ZSTD_freeDStream(ZSTD_DStream* zds);
-Streaming decompression functions
size_t ZSTD_initDStream(ZSTD_DStream* zds);
-size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
-
+Streaming decompression functions
size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */
size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */
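A matching decompression-side sketch (illustrative, not from the upstream diff; decompress_file is a hypothetical helper): read chunks into the recommended input buffer and call ZSTD_decompressStream() until each chunk is consumed, treating a non-zero final return as truncated input.

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    static int decompress_file(FILE* fin, FILE* fout)
    {
        size_t const inSize  = ZSTD_DStreamInSize();
        size_t const outSize = ZSTD_DStreamOutSize();
        void* const inBuf  = malloc(inSize);
        void* const outBuf = malloc(outSize);
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();   /* ZSTD_DStream is the same object */
        size_t readSize, lastRet = 0;
        int ret = -1;
        if (!inBuf || !outBuf || !dctx) goto cleanup;

        while ((readSize = fread(inBuf, 1, inSize, fin)) > 0) {
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            while (input.pos < input.size) {
                ZSTD_outBuffer output = { outBuf, outSize, 0 };
                /* returns 0 when a frame is complete, else a hint for the next read */
                size_t const r = ZSTD_decompressStream(dctx, &output, &input);
                if (ZSTD_isError(r)) goto cleanup;
                fwrite(outBuf, 1, output.pos, fout);
                lastRet = r;
            }
        }
        ret = (lastRet == 0) ? 0 : -1;   /* non-zero means the last frame was truncated */
    cleanup:
        ZSTD_freeDCtx(dctx);
        free(inBuf); free(outBuf);
        return ret;
    }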
-Simple dictionary API
+Simple dictionary API
size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
                               void* dst, size_t dstCapacity,
@@ -680,7 +668,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_o
Note : When `dict == NULL || dictSize < 8` no dictionary is used.
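As a hedged illustration of the simple dictionary API (not part of the diff; dict_roundtrip is a made-up helper, and the dictionary buffer is assumed to come from `zstd --train` or ZDICT_trainFromBuffer()):

    #include <zstd.h>

    /* Round-trip a buffer through the simple dictionary API.
     * Returns the decompressed size, or 0 on any error. */
    static size_t dict_roundtrip(const void* src, size_t srcSize,
                                 const void* dict, size_t dictSize,
                                 void* scratch, size_t scratchCap,
                                 void* out, size_t outCap, int level)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();
        size_t dSize = 0;
        if (cctx && dctx) {
            size_t const cSize = ZSTD_compress_usingDict(cctx, scratch, scratchCap,
                                                         src, srcSize,
                                                         dict, dictSize, level);
            if (!ZSTD_isError(cSize)) {
                size_t const r = ZSTD_decompress_usingDict(dctx, out, outCap,
                                                           scratch, cSize,
                                                           dict, dictSize);
                if (!ZSTD_isError(r)) dSize = r;
            }
        }
        ZSTD_freeCCtx(cctx); ZSTD_freeDCtx(dctx);
        return dSize;
    }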
-Bulk processing dictionary API
+Bulk processing dictionary API
ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
                             int compressionLevel);
@@ -723,7 +711,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_o
Recommended when same dictionary is used multiple times.
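A sketch of the bulk-processing pattern (illustrative only): digest the dictionary once into a ZSTD_CDict, then re-use it for every input; compress_batch_with_cdict and its destination-packing scheme are assumptions.

    #include <zstd.h>

    static size_t compress_batch_with_cdict(const void* dict, size_t dictSize, int level,
                                            const void* const* srcs, const size_t* srcSizes,
                                            void* dst, size_t dstCap, int n)
    {
        ZSTD_CDict* const cdict = ZSTD_createCDict(dict, dictSize, level);  /* digest once */
        ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
        size_t total = 0;
        if (cdict && cctx) {
            for (int i = 0; i < n; i++) {
                size_t const r = ZSTD_compress_usingCDict(cctx,
                                    (char*)dst + total, dstCap - total,
                                    srcs[i], srcSizes[i], cdict);
                if (ZSTD_isError(r)) { total = 0; break; }
                total += r;
            }
        }
        ZSTD_freeCCtx(cctx);
        ZSTD_freeCDict(cdict);
        return total;
    }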
-Dictionary helper functions
+Dictionary helper functions
unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
Provides the dictID stored within dictionary.
@@ -749,7 +737,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_o
When identifying the exact failure cause, it's possible to use
ZSTD_getFrameHeader(), which will provide a more precise error code.
-Advanced dictionary and prefix API
+Advanced dictionary and prefix API
This API allows dictionaries to be used with ZSTD_compress2(),
ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
are only reset when the context is reset with ZSTD_reset_parameters or
@@ -867,15 +855,7 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
Note that object memory usage can evolve (increase or decrease) over time.
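A short sketch of the sticky-dictionary behaviour described above (illustrative; compress2_with_dict is a hypothetical helper). The dictionary stays attached across calls until it is cleared, here by referencing a NULL CDict as the reset snippet earlier in this diff does.

    #include <zstd.h>

    static size_t compress2_with_dict(ZSTD_CCtx* cctx,
                                      const void* dict, size_t dictSize,
                                      void* dst, size_t dstCap,
                                      const void* src, size_t srcSize)
    {
        size_t r = ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);  /* sticky until reset */
        if (ZSTD_isError(r)) return r;
        r = ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
        ZSTD_CCtx_refCDict(cctx, NULL);   /* drop the dictionary for later sessions */
        return r;
    }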
-ADVANCED AND EXPERIMENTAL FUNCTIONS
- The definitions in the following section are considered experimental.
- They are provided for advanced scenarios.
- They should never be used with a dynamic library, as prototypes may change in the future.
- Use them only in association with static linking.
-
-
-experimental API (static linking only)
+experimental API (static linking only)
The following symbols and constants are not planned to join
"stable API" status in the near future. They can still change in future versions.
@@ -973,7 +953,7 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
    ZSTD_lcm_uncompressed = 2,
-Frame size functions
+Frame size functions
unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
`src` should point to the start of a series of ZSTD encoded and/or skippable frames
@@ -998,7 +978,8 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
however it does mean that all frame data must be present and valid.
-ZSTD_decompressBound() :
`src` should point to the start of a series of ZSTD encoded and/or skippable frames
+unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
+
+`src` should point to the start of a series of ZSTD encoded and/or skippable frames
`srcSize` must be the _exact_ size of this series
     (i.e. there should be a frame boundary at `src + srcSize`)
@return : - upper-bound for the decompressed size of all data in all successive frames
@@ -1010,7 +991,7 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
         upper-bound = # blocks * min(128 KB, Window_Size)
-
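A hedged usage sketch (experimental API, so ZSTD_STATIC_LINKING_ONLY is assumed; decompress_with_bound is illustrative): allocate the destination from the worst-case bound when the frame header does not carry a content size.

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdlib.h>
    #include <zstd.h>

    static void* decompress_with_bound(const void* src, size_t srcSize, size_t* dstSizePtr)
    {
        unsigned long long const bound = ZSTD_decompressBound(src, srcSize);
        void* dst;
        if (bound == ZSTD_CONTENTSIZE_ERROR) return NULL;   /* invalid frame(s) */
        dst = malloc((size_t)bound);    /* assumes the bound fits in size_t */
        if (dst == NULL) return NULL;
        {   size_t const dSize = ZSTD_decompress(dst, (size_t)bound, src, srcSize);
            if (ZSTD_isError(dSize)) { free(dst); return NULL; }
            *dstSizePtr = dSize;
        }
        return dst;
    }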
size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
@@ -1018,7 +999,7 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
or an error code (if srcSize is too small)
-Memory management
+Memory management
size_t ZSTD_estimateCCtxSize(int compressionLevel);
size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
@@ -1098,7 +1079,7 @@ static ZSTD_customMem const ZSTD_defaultCMem = { NULL,
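One possible use of these estimators, sketched under the assumption of static linking (make_static_cctx is a made-up helper): size a caller-owned workspace with ZSTD_estimateCCtxSize() and place the context in it with ZSTD_initStaticCCtx().

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdlib.h>
    #include <zstd.h>

    static ZSTD_CCtx* make_static_cctx(int level, void** workspaceOut)
    {
        size_t const need = ZSTD_estimateCCtxSize(level);
        void* const workspace = malloc(need);          /* could be a static arena instead */
        ZSTD_CCtx* const cctx = workspace ? ZSTD_initStaticCCtx(workspace, need) : NULL;
        if (cctx == NULL) { free(workspace); return NULL; }
        *workspaceOut = workspace;   /* caller frees the workspace; don't ZSTD_freeCCtx() it */
        return cctx;
    }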
-Advanced compression functions
+Advanced compression functions
ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
Create a digested dictionary for compression
@@ -1243,7 +1224,7 @@ size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);
-Advanced decompression functions
+Advanced decompression functions
unsigned ZSTD_isFrame(const void* buffer, size_t size);
Tells if the content of `buffer` starts with a valid Frame Identifier.
@@ -1305,7 +1286,7 @@ size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);
-Advanced streaming functions
Warning : most of these functions are now redundant with the Advanced API.
+Advanced streaming functions
Warning : most of these functions are now redundant with the Advanced API.
Once Advanced API reaches "stable" status,
redundant functions will be deprecated, and then at some point removed.
@@ -1407,18 +1388,41 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStre
-Advanced Streaming decompression functions
size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);  /**< note: no dictionary will be used if dict == NULL or dictSize < 8 */
-size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);  /**< note : ddict is referenced, it must outlive decompression session */
-size_t ZSTD_resetDStream(ZSTD_DStream* zds);  /**< re-use decompression parameters from previous init; saves dictionary loading */
+Advanced Streaming decompression functions
+/**
+ * This function is deprecated, and is equivalent to:
+ *
+ *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *     ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
+ *
+ * note: no dictionary will be used if dict == NULL or dictSize < 8
+ */
+size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+/**
+ * This function is deprecated, and is equivalent to:
+ *
+ *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *     ZSTD_DCtx_refDDict(zds, ddict);
+ *
+ * note : ddict is referenced, it must outlive decompression session
+ */
+size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
+/**
+ * This function is deprecated, and is equivalent to:
+ *
+ *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *
+ * re-use decompression parameters from previous init; saves dictionary loading
+ */
+size_t ZSTD_resetDStream(ZSTD_DStream* zds);
-Buffer-less and synchronous inner streaming functions
+Buffer-less and synchronous inner streaming functions
This is an advanced API, giving full control over buffer management, for users which need direct control over memory.
But it's also a complex one, with several restrictions, documented below.
Prefer normal streaming API for an easier experience.
-Buffer-less streaming compression (synchronous mode)
+Buffer-less streaming compression (synchronous mode)
A ZSTD_CCtx object is required to track streaming operations.
Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
ZSTD_CCtx object can be re-used multiple times within successive compression operations.
@@ -1454,7 +1458,7 @@ size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx,
size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize);   /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize);  /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
-Buffer-less streaming decompression (synchronous mode)
+Buffer-less streaming decompression (synchronous mode)
A ZSTD_DCtx object is required to track streaming operations.
Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
A ZSTD_DCtx object can be re-used multiple times.
@@ -1536,23 +1540,21 @@ typedef struct {
    unsigned checksumFlag;
} ZSTD_frameHeader;
-ZSTD_getFrameHeader() :
decode Frame Header, or requires larger `srcSize`.
+size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
+/*! ZSTD_getFrameHeader_advanced() :
+ *  same as ZSTD_getFrameHeader(),
+ *  with added capability to select a format (like ZSTD_f_zstd1_magicless) */
+size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
+size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
+
-
-decode Frame Header, or requires larger `srcSize`.
@return : 0, `zfhPtr` is correctly filled,
         >0, `srcSize` is too small, value is wanted `srcSize` amount,
         or an error code, which can be tested using ZSTD_isError()
-
size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
-
-size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
-size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
-same as ZSTD_getFrameHeader(),
- with added capability to select a format (like ZSTD_f_zstd1_magicless)
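A small sketch of header probing (experimental API, static linking assumed; print_frame_info is illustrative): ZSTD_getFrameHeader() fills a ZSTD_frameHeader without consuming input, returning 0 on success or the number of bytes it still needs.

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdio.h>
    #include <zstd.h>

    static void print_frame_info(const void* src, size_t srcSize)
    {
        ZSTD_frameHeader zfh;
        size_t const r = ZSTD_getFrameHeader(&zfh, src, srcSize);
        if (ZSTD_isError(r)) { printf("not a zstd frame\n"); return; }
        if (r > 0) { printf("need at least %zu bytes\n", r); return; }
        /* frameContentSize may be ZSTD_CONTENTSIZE_UNKNOWN if the field is absent */
        printf("windowSize=%llu contentSize=%llu dictID=%u checksum=%u\n",
               zfh.windowSize, zfh.frameContentSize, zfh.dictID, zfh.checksumFlag);
    }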
typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
-Block level API
+Block level API
Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes). User will have to take in charge required information to regenerate data, such as compressed and content sizes. Modified: vendor/zstd/dist/examples/Makefile ============================================================================== --- vendor/zstd/dist/examples/Makefile Thu Aug 8 15:11:37 2019 (r350751) +++ vendor/zstd/dist/examples/Makefile Thu Aug 8 15:30:49 2019 (r350752) @@ -77,7 +77,6 @@ test: all @echo -- Edge cases detection ! ./streaming_decompression tmp # invalid input, must fail ! ./simple_decompression tmp # invalid input, must fail - ! ./simple_decompression tmp.zst # unknown input size, must fail touch tmpNull # create 0-size file ./simple_compression tmpNull ./simple_decompression tmpNull.zst # 0-size frame : must work Modified: vendor/zstd/dist/lib/Makefile ============================================================================== --- vendor/zstd/dist/lib/Makefile Thu Aug 8 15:11:37 2019 (r350751) +++ vendor/zstd/dist/lib/Makefile Thu Aug 8 15:30:49 2019 (r350752) @@ -17,6 +17,7 @@ LIBVER_MINOR := $(shell echo $(LIBVER_MINOR_SCRIPT)) LIBVER_PATCH := $(shell echo $(LIBVER_PATCH_SCRIPT)) LIBVER := $(shell echo $(LIBVER_SCRIPT)) VERSION?= $(LIBVER) +CCVER := $(shell $(CC) --version) CPPFLAGS+= -I. -I./common -DXXH_NAMESPACE=ZSTD_ ifeq ($(OS),Windows_NT) # MinGW assumed @@ -44,6 +45,10 @@ ZSTDDECOMP_FILES := $(sort $(wildcard decompress/*.c)) ZDICT_FILES := $(sort $(wildcard dictBuilder/*.c)) ZDEPR_FILES := $(sort $(wildcard deprecated/*.c)) ZSTD_FILES := $(ZSTDCOMMON_FILES) + +ifeq ($(findstring GCC,$(CCVER)),GCC) +decompress/zstd_decompress_block.o : CFLAGS+=-fno-tree-vectorize +endif ZSTD_LEGACY_SUPPORT ?= 5 ZSTD_LIB_COMPRESSION ?= 1 Modified: vendor/zstd/dist/lib/common/compiler.h ============================================================================== --- vendor/zstd/dist/lib/common/compiler.h Thu Aug 8 15:11:37 2019 (r350751) +++ vendor/zstd/dist/lib/common/compiler.h Thu Aug 8 15:30:49 2019 (r350752) @@ -127,6 +127,13 @@ } \ } +/* vectorization */ +#if !defined(__clang__) && defined(__GNUC__) +# define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize"))) +#else +# define DONT_VECTORIZE +#endif + /* disable warnings */ #ifdef _MSC_VER /* Visual Studio */ # include
/* For Visual 2005 */ Modified: vendor/zstd/dist/lib/common/zstd_internal.h ============================================================================== --- vendor/zstd/dist/lib/common/zstd_internal.h Thu Aug 8 15:11:37 2019 (r350751) +++ vendor/zstd/dist/lib/common/zstd_internal.h Thu Aug 8 15:30:49 2019 (r350752) @@ -34,7 +34,6 @@ #endif #include "xxhash.h" /* XXH_reset, update, digest */ - #if defined (__cplusplus) extern "C" { #endif @@ -193,19 +192,72 @@ static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG * Shared functions to include for inlining *********************************************/ static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); } + #define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; } +static void ZSTD_copy16(void* dst, const void* src) { memcpy(dst, src, 16); } +#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; } +#define WILDCOPY_OVERLENGTH 8 +#define VECLEN 16 + +typedef enum { + ZSTD_no_overlap, + ZSTD_overlap_src_before_dst, + /* ZSTD_overlap_dst_before_src, */ +} ZSTD_overlap_e; + /*! ZSTD_wildcopy() : * custom version of memcpy(), can overwrite up to WILDCOPY_OVERLENGTH bytes (if length==0) */ -#define WILDCOPY_OVERLENGTH 8 -MEM_STATIC void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length) +MEM_STATIC FORCE_INLINE_ATTR DONT_VECTORIZE +void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e ovtype) { + ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src; const BYTE* ip = (const BYTE*)src; BYTE* op = (BYTE*)dst; BYTE* const oend = op + length; - do - COPY8(op, ip) - while (op < oend); + + assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff < -8)); + if (length < VECLEN || (ovtype == ZSTD_overlap_src_before_dst && diff < VECLEN)) { + do + COPY8(op, ip) + while (op < oend); + } + else { + if ((length & 8) == 0) + COPY8(op, ip); + do { + COPY16(op, ip); + } + while (op < oend); + } +} + +/*! 
ZSTD_wildcopy_16min() : + * same semantics as ZSTD_wilcopy() except guaranteed to be able to copy 16 bytes at the start */ +MEM_STATIC FORCE_INLINE_ATTR DONT_VECTORIZE +void ZSTD_wildcopy_16min(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e ovtype) +{ + ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src; + const BYTE* ip = (const BYTE*)src; + BYTE* op = (BYTE*)dst; + BYTE* const oend = op + length; + + assert(length >= 8); + assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff < -8)); + + if (ovtype == ZSTD_overlap_src_before_dst && diff < VECLEN) { + do + COPY8(op, ip) + while (op < oend); + } + else { + if ((length & 8) == 0) + COPY8(op, ip); + do { + COPY16(op, ip); + } + while (op < oend); + } } MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd) /* should be faster for decoding, but strangely, not verified on all platform */ Modified: vendor/zstd/dist/lib/compress/zstd_compress.c ============================================================================== --- vendor/zstd/dist/lib/compress/zstd_compress.c Thu Aug 8 15:11:37 2019 (r350751) +++ vendor/zstd/dist/lib/compress/zstd_compress.c Thu Aug 8 15:30:49 2019 (r350752) @@ -385,6 +385,11 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter para bounds.upperBound = ZSTD_lcm_uncompressed; return bounds; + case ZSTD_c_targetCBlockSize: + bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN; + bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX; + return bounds; + default: { ZSTD_bounds const boundError = { ERROR(parameter_unsupported), 0, 0 }; return boundError; @@ -452,6 +457,7 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter par case ZSTD_c_ldmHashRateLog: case ZSTD_c_forceAttachDict: case ZSTD_c_literalCompressionMode: + case ZSTD_c_targetCBlockSize: default: return 0; } @@ -497,6 +503,7 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cP case ZSTD_c_ldmHashLog: case ZSTD_c_ldmMinMatch: case ZSTD_c_ldmBucketSizeLog: + case ZSTD_c_targetCBlockSize: break; default: RETURN_ERROR(parameter_unsupported); @@ -671,6 +678,12 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams->ldmParams.hashRateLog = value; return CCtxParams->ldmParams.hashRateLog; + case ZSTD_c_targetCBlockSize : + if (value!=0) /* 0 ==> default */ + BOUNDCHECK(ZSTD_c_targetCBlockSize, value); + CCtxParams->targetCBlockSize = value; + return CCtxParams->targetCBlockSize; + default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } } @@ -692,13 +705,13 @@ size_t ZSTD_CCtxParams_getParameter( *value = CCtxParams->compressionLevel; break; case ZSTD_c_windowLog : - *value = CCtxParams->cParams.windowLog; + *value = (int)CCtxParams->cParams.windowLog; break; case ZSTD_c_hashLog : - *value = CCtxParams->cParams.hashLog; + *value = (int)CCtxParams->cParams.hashLog; break; case ZSTD_c_chainLog : - *value = CCtxParams->cParams.chainLog; + *value = (int)CCtxParams->cParams.chainLog; break; case ZSTD_c_searchLog : *value = CCtxParams->cParams.searchLog; @@ -773,6 +786,9 @@ size_t ZSTD_CCtxParams_getParameter( case ZSTD_c_ldmHashRateLog : *value = CCtxParams->ldmParams.hashRateLog; break; + case ZSTD_c_targetCBlockSize : + *value = (int)CCtxParams->targetCBlockSize; + break; default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } return 0; @@ -930,12 +946,12 @@ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDire @return : 0, or an error code if one value is beyond authorized range */ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) { - BOUNDCHECK(ZSTD_c_windowLog, cParams.windowLog); - 
BOUNDCHECK(ZSTD_c_chainLog, cParams.chainLog); - BOUNDCHECK(ZSTD_c_hashLog, cParams.hashLog); - BOUNDCHECK(ZSTD_c_searchLog, cParams.searchLog); - BOUNDCHECK(ZSTD_c_minMatch, cParams.minMatch); - BOUNDCHECK(ZSTD_c_targetLength,cParams.targetLength); + BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog); + BOUNDCHECK(ZSTD_c_chainLog, (int)cParams.chainLog); + BOUNDCHECK(ZSTD_c_hashLog, (int)cParams.hashLog); + BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog); + BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch); + BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength); BOUNDCHECK(ZSTD_c_strategy, cParams.strategy); return 0; } @@ -951,7 +967,7 @@ ZSTD_clampCParams(ZSTD_compressionParameters cParams) if ((int)val bounds.upperBound) val=(type)bounds.upperBound; \ } -# define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, int) +# define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned) CLAMP(ZSTD_c_windowLog, cParams.windowLog); CLAMP(ZSTD_c_chainLog, cParams.chainLog); CLAMP(ZSTD_c_hashLog, cParams.hashLog); @@ -1282,15 +1298,14 @@ static void ZSTD_reset_compressedBlockState(ZSTD_compr } /*! ZSTD_invalidateMatchState() - * Invalidate all the matches in the match finder tables. - * Requires nextSrc and base to be set (can be NULL). + * Invalidate all the matches in the match finder tables. + * Requires nextSrc and base to be set (can be NULL). */ static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms) { ZSTD_window_clear(&ms->window); ms->nextToUpdate = ms->window.dictLimit; - ms->nextToUpdate3 = ms->window.dictLimit; ms->loadedDictEnd = 0; ms->opt.litLengthSum = 0; /* force reset of btopt stats */ ms->dictMatchState = NULL; @@ -1327,15 +1342,17 @@ static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_ typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e; +typedef enum { ZSTD_resetTarget_CDict, ZSTD_resetTarget_CCtx } ZSTD_resetTarget_e; + static void* ZSTD_reset_matchState(ZSTD_matchState_t* ms, void* ptr, const ZSTD_compressionParameters* cParams, - ZSTD_compResetPolicy_e const crp, U32 const forCCtx) + ZSTD_compResetPolicy_e const crp, ZSTD_resetTarget_e const forWho) { size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog); size_t const hSize = ((size_t)1) << cParams->hashLog; - U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0; + U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? 
MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0; size_t const h3Size = ((size_t)1) << hashLog3; size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); @@ -1349,7 +1366,7 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms, ZSTD_invalidateMatchState(ms); /* opt parser space */ - if (forCCtx && (cParams->strategy >= ZSTD_btopt)) { + if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) { DEBUGLOG(4, "reserving optimal parser space"); ms->opt.litFreq = (unsigned*)ptr; ms->opt.litLengthFreq = ms->opt.litFreq + (1< (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN); +} + #define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */ #define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128 /* when workspace is continuously too large * during at least this number of times, @@ -1388,7 +1418,7 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms, note : `params` are assumed fully validated at this stage */ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, ZSTD_CCtx_params params, - U64 pledgedSrcSize, + U64 const pledgedSrcSize, ZSTD_compResetPolicy_e const crp, ZSTD_buffered_policy_e const zbuff) { @@ -1400,13 +1430,21 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, if (ZSTD_equivalentParams(zc->appliedParams, params, zc->inBuffSize, zc->seqStore.maxNbSeq, zc->seqStore.maxNbLit, - zbuff, pledgedSrcSize)) { - DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%zu)", - zc->appliedParams.cParams.windowLog, zc->blockSize); + zbuff, pledgedSrcSize) ) { + DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> consider continue mode"); zc->workSpaceOversizedDuration += (zc->workSpaceOversizedDuration > 0); /* if it was too large, it still is */ - if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION) + if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION) { + DEBUGLOG(4, "continue mode confirmed (wLog1=%u, blockSize1=%zu)", + zc->appliedParams.cParams.windowLog, zc->blockSize); + if (ZSTD_indexTooCloseToMax(zc->blockState.matchState.window)) { + /* prefer a reset, faster than a rescale */ + ZSTD_reset_matchState(&zc->blockState.matchState, + zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32, + ¶ms.cParams, + crp, ZSTD_resetTarget_CCtx); + } return ZSTD_continueCCtx(zc, params, pledgedSrcSize); - } } + } } } DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx"); if (params.ldmParams.enableLdm) { @@ -1449,7 +1487,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize); if (workSpaceTooSmall || workSpaceWasteful) { - DEBUGLOG(4, "Need to resize workSpaceSize from %zuKB to %zuKB", + DEBUGLOG(4, "Resize workSpaceSize from %zuKB to %zuKB", zc->workSpaceSize >> 10, neededSpace >> 10); @@ -1491,7 +1529,10 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock); - ptr = zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32; + ptr = ZSTD_reset_matchState(&zc->blockState.matchState, + zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32, + ¶ms.cParams, + crp, ZSTD_resetTarget_CCtx); /* ldm hash table */ /* initialize bucketOffsets table later for pointer alignment */ @@ -1509,8 +1550,6 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, } assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */ - ptr = ZSTD_reset_matchState(&zc->blockState.matchState, ptr, ¶ms.cParams, crp, /* forCCtx */ 1); - /* sequences storage */ zc->seqStore.maxNbSeq = maxNbSeq; 
zc->seqStore.sequencesStart = (seqDef*)ptr; @@ -1587,15 +1626,14 @@ static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdi * handled in _enforceMaxDist */ } -static size_t ZSTD_resetCCtx_byAttachingCDict( - ZSTD_CCtx* cctx, - const ZSTD_CDict* cdict, - ZSTD_CCtx_params params, - U64 pledgedSrcSize, - ZSTD_buffered_policy_e zbuff) +static size_t +ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx, + const ZSTD_CDict* cdict, + ZSTD_CCtx_params params, + U64 pledgedSrcSize, + ZSTD_buffered_policy_e zbuff) { - { - const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams; + { const ZSTD_compressionParameters* const cdict_cParams = &cdict->matchState.cParams; unsigned const windowLog = params.cParams.windowLog; assert(windowLog != 0); /* Resize working context table params for input only, since the dict @@ -1607,8 +1645,7 @@ static size_t ZSTD_resetCCtx_byAttachingCDict( assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy); } - { - const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc + { const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc - cdict->matchState.window.base); const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit; if (cdictLen == 0) { @@ -1625,9 +1662,9 @@ static size_t ZSTD_resetCCtx_byAttachingCDict( cctx->blockState.matchState.window.base + cdictEnd; ZSTD_window_clear(&cctx->blockState.matchState.window); } + /* loadedDictEnd is expressed within the referential of the active context */ cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit; - } - } + } } cctx->dictID = cdict->dictID; @@ -1681,7 +1718,6 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState; dstMatchState->window = srcMatchState->window; dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; - dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3; dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; } @@ -1761,7 +1797,6 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCt ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState; dstMatchState->window = srcMatchState->window; dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; - dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3; dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; } dstCCtx->dictID = srcCCtx->dictID; @@ -1831,16 +1866,15 @@ static void ZSTD_reduceTable_btlazy2(U32* const table, /*! 
ZSTD_reduceIndex() : * rescale all indexes to avoid future overflow (indexes are U32) */ -static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue) +static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue) { - ZSTD_matchState_t* const ms = &zc->blockState.matchState; - { U32 const hSize = (U32)1 << zc->appliedParams.cParams.hashLog; + { U32 const hSize = (U32)1 << params->cParams.hashLog; ZSTD_reduceTable(ms->hashTable, hSize, reducerValue); } - if (zc->appliedParams.cParams.strategy != ZSTD_fast) { - U32 const chainSize = (U32)1 << zc->appliedParams.cParams.chainLog; - if (zc->appliedParams.cParams.strategy == ZSTD_btlazy2) + if (params->cParams.strategy != ZSTD_fast) { + U32 const chainSize = (U32)1 << params->cParams.chainLog; + if (params->cParams.strategy == ZSTD_btlazy2) ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue); else ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue); @@ -2524,6 +2558,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePt op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2; else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3; + assert(op <= oend); if (nbSeq==0) { /* Copy the old tables over as if we repeated them */ memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse)); @@ -2532,6 +2567,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePt /* seqHead : flags for FSE encoding type */ seqHead = op++; + assert(op <= oend); /* convert length/distances into codes */ ZSTD_seqToCodes(seqStorePtr); @@ -2555,6 +2591,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePt if (LLtype == set_compressed) lastNCount = op; op += countSize; + assert(op <= oend); } } /* build CTable for Offsets */ { unsigned max = MaxOff; @@ -2577,6 +2614,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePt if (Offtype == set_compressed) lastNCount = op; op += countSize; + assert(op <= oend); } } /* build CTable for MatchLengths */ { unsigned max = MaxML; @@ -2597,6 +2635,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePt if (MLtype == set_compressed) lastNCount = op; op += countSize; + assert(op <= oend); } } *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); @@ -2610,6 +2649,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePt longOffsets, bmi2); FORWARD_IF_ERROR(bitstreamSize); op += bitstreamSize; + assert(op <= oend); /* zstd versions <= 1.3.4 mistakenly report corruption when * FSE_readNCount() receives a buffer < 4 bytes. * Fixed by https://github.com/facebook/zstd/pull/1146. @@ -2721,30 +2761,24 @@ void ZSTD_resetSeqStore(seqStore_t* ssPtr) ssPtr->longLengthID = 0; } -static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) +typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e; + +static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) { ZSTD_matchState_t* const ms = &zc->blockState.matchState; - size_t cSize; - DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", - (unsigned)dstCapacity, (unsigned)ms->window.dictLimit, (unsigned)ms->nextToUpdate); + DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize); assert(srcSize <= ZSTD_BLOCKSIZE_MAX); *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***