/*! If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function
 *  will store the DDict references in a table, and the DDict used for decompression
 *  will be determined at decompression time, as per the dict ID in the frame.
 *  The memory for the table is allocated on the first call to refDDict, and can be
 *  freed with ZSTD_freeDCtx().
 */
/*! ZSTD_writeSkippableFrame() :
 *  Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.
 *
 *  Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
 *  ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
 *  As such, the parameter magicVariant controls the exact skippable frame magic number variant used:
 *  the magic number written will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
 *
 *  Returns an error if the destination buffer is not large enough, if the source size is not representable
 *  with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
 *
 * @return : number of bytes written, or a ZSTD error code.
 */
ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
                                            const void* src, size_t srcSize, unsigned magicVariant);
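/* A minimal usage sketch (illustrative, not part of the library): writing one skippable
 * frame with magic variant 3. Assumes zstd built with the experimental API visible
 * (ZSTD_STATIC_LINKING_ONLY); payload and buffer sizes are arbitrary examples. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char payload[] = "application metadata, ignored by zstd decoders";
    char frame[128];
    /* magicVariant 3 => magic number ZSTD_MAGIC_SKIPPABLE_START + 3 */
    size_t const written = ZSTD_writeSkippableFrame(frame, sizeof(frame),
                                                    payload, strlen(payload), 3);
    if (ZSTD_isError(written)) {
        fprintf(stderr, "error: %s\n", ZSTD_getErrorName(written));
        return 1;
    }
    printf("wrote %zu bytes (8-byte header + %zu-byte payload)\n",
           written, strlen(payload));
    return 0;
}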
/* ZSTD_d_refMultipleDDicts
 * Experimental parameter.
 * Default is 0 == disabled. Set to 1 to enable.
 *
 * If enabled and dctx is allocated on the heap, then additional memory will be allocated
 * to store references to multiple ZSTD_DDict. That is, multiple calls of ZSTD_DCtx_refDDict()
 * using a given ZSTD_DCtx, rather than overwriting the previous DDict reference, will instead
 * store all references. At decompression time, the appropriate DDict is selected
 * from the set based on the dictID in the frame.
 *
 * Usage is simply calling ZSTD_DCtx_refDDict() on multiple dict buffers.
 *
 * Param has values of type ZSTD_refMultipleDDicts_e
 *
 * WARNING: Enabling this parameter and calling ZSTD_DCtx_refDDict() will trigger memory
 * allocation for the hash table. ZSTD_freeDCtx() also frees this memory.
 * Memory is allocated as per ZSTD_DCtx::customMem.
 *
 * Although this function allocates memory for the table, the user is still responsible for
 * memory management of the underlying ZSTD_DDict* themselves.
 */
#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4
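/* A hedged usage sketch for this parameter (not library code): reference several
 * DDicts on one heap-allocated dctx, then let decompression pick the right one from
 * the frame's dictID. dict1/dict2 stand for ZSTD_DDict* previously created with
 * ZSTD_createDDict(); error handling is elided for brevity. */
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_d_refMultipleDDicts is experimental */
#include <zstd.h>

size_t decompress_with_ddict_set(void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
                                 const ZSTD_DDict* dict1, const ZSTD_DDict* dict2)
{
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();   /* must be heap-allocated */
    size_t result;
    ZSTD_DCtx_setParameter(dctx, ZSTD_d_refMultipleDDicts, ZSTD_rmd_refMultipleDDicts);
    ZSTD_DCtx_refDDict(dctx, dict1);             /* stored in the set, not overwritten */
    ZSTD_DCtx_refDDict(dctx, dict2);             /* both references are kept */
    result = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeDCtx(dctx);                         /* also frees the DDict reference table */
    return result;
}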
/*************************************
 *  Multiple DDicts Hashset internals
 *************************************/

#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4
#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3  /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float.
                                                    * Currently, that means a 0.75 load factor.
                                                    * So, if count * COUNT_MULT / (size * SIZE_MULT) != 0, then we've exceeded
                                                    * the load factor of the ddict hash set.
                                                    */

#define DDICT_HASHSET_TABLE_BASE_SIZE 64
#define DDICT_HASHSET_RESIZE_FACTOR 2

/* Hash function to determine starting position of dict insertion within the table
 * Returns an index in [0, hashSet->ddictPtrTableSize)
 */
static size_t ZSTD_DDictHashSet_getIndex(const ZSTD_DDictHashSet* hashSet, U32 dictID) {
    const U64 hash = XXH64(&dictID, sizeof(U32), 0);
    /* DDict ptr table size is a power of 2, use size - 1 as mask to get an index in [0, hashSet->ddictPtrTableSize) */
    return hash & (hashSet->ddictPtrTableSize - 1);
}

/* Adds DDict to a hashset without resizing it.
 * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set.
 * Returns 0 if successful, or a zstd error code if something went wrong.
 */
static size_t ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict) {
    const U32 dictID = ZSTD_getDictID_fromDDict(ddict);
    size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
    const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
    RETURN_ERROR_IF(hashSet->ddictPtrCount == hashSet->ddictPtrTableSize, GENERIC, "Hash set is full!");
    DEBUGLOG(4, "Hashed index for dictID %u is %zu", dictID, idx);
    while (hashSet->ddictPtrTable[idx] != NULL) {
        /* Replace existing ddict if inserting ddict with same dictID */
        if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) {
            DEBUGLOG(4, "DictID already exists, replacing rather than adding");
            hashSet->ddictPtrTable[idx] = ddict;
            return 0;
        }
        idx = (idx + 1) & idxRangeMask;  /* linear probing; mask after the increment so the index wraps to the start of the table */
    }
    DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
    hashSet->ddictPtrTable[idx] = ddict;
    hashSet->ddictPtrCount++;
    return 0;
}

/* Expands hash table by a factor of DDICT_HASHSET_RESIZE_FACTOR and
 * rehashes all values, allocates new table, frees old table.
 * Returns 0 on success, otherwise a zstd error code.
 */
static size_t ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
    size_t newTableSize = hashSet->ddictPtrTableSize * DDICT_HASHSET_RESIZE_FACTOR;
    const ZSTD_DDict** newTable = (const ZSTD_DDict**)ZSTD_customCalloc(sizeof(ZSTD_DDict*) * newTableSize, customMem);
    const ZSTD_DDict** oldTable = hashSet->ddictPtrTable;
    size_t oldTableSize = hashSet->ddictPtrTableSize;
    size_t i;

    DEBUGLOG(4, "Expanding DDict hash table! Old size: %zu new size: %zu", oldTableSize, newTableSize);
    RETURN_ERROR_IF(!newTable, memory_allocation, "Expanded hashset allocation failed!");
    hashSet->ddictPtrTable = newTable;
    hashSet->ddictPtrTableSize = newTableSize;
    hashSet->ddictPtrCount = 0;
    for (i = 0; i < oldTableSize; ++i) {
        if (oldTable[i] != NULL) {
            FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]), "");
        }
    }
    ZSTD_customFree((void*)oldTable, customMem);
    DEBUGLOG(4, "Finished re-hash");
    return 0;
}

/* Fetches a DDict with the given dictID.
 * Returns the ZSTD_DDict* with the requested dictID.
 * If it doesn't exist, then returns NULL.
 */
static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, U32 dictID) {
    size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
    const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
    DEBUGLOG(4, "Hashed index for dictID %u is %zu", dictID, idx);
    for (;;) {
        size_t currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]);
        if (currDictID == dictID || currDictID == 0) {
            /* currDictID == 0 implies a NULL ddict entry */
            break;
        } else {
            idx = (idx + 1) & idxRangeMask;  /* wraps to the start of the table when we reach the end */
        }
    }
    DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
    return hashSet->ddictPtrTable[idx];
}

/* Allocates space for and returns a ddict hash set
 * The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with.
 * Returns NULL if allocation failed.
 */
static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) {
    ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem);
    DEBUGLOG(4, "Allocating new hash set");
    if (!ret) return NULL;  /* check the struct allocation before touching its fields */
    ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem);
    ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE;
    ret->ddictPtrCount = 0;
    if (!ret->ddictPtrTable) {
        ZSTD_customFree(ret, customMem);  /* avoid leaking the struct if the table allocation failed */
        return NULL;
    }
    return ret;
}

/* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself.
 * Note: The ZSTD_DDict* within the table are NOT freed.
 */
static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
    DEBUGLOG(4, "Freeing ddict hash set");
    if (hashSet && hashSet->ddictPtrTable) {
        ZSTD_customFree((void*)hashSet->ddictPtrTable, customMem);
    }
    if (hashSet) {
        ZSTD_customFree(hashSet, customMem);
    }
}
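/* The index computation in ZSTD_DDictHashSet_getIndex above leans on the usual
 * power-of-two identity: for a table size n = 2^k, (hash & (n - 1)) == (hash % n).
 * A tiny standalone check of that identity (illustrative only, not zstd code): */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t const hash = 0x9E3779B97F4A7C15ull;   /* arbitrary 64-bit hash value */
    size_t n;
    for (n = 64; n <= 1024; n *= 2) {              /* power-of-two table sizes */
        assert((hash & (n - 1)) == (hash % n));    /* mask == modulo for n = 2^k */
    }
    return 0;
}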
/* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set.
 * Returns 0 on success, or a ZSTD error.
 */
static size_t ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict, ZSTD_customMem customMem) {
    DEBUGLOG(4, "Adding dict ID: %u to hashset with - Count: %zu Tablesize: %zu", ZSTD_getDictID_fromDDict(ddict), hashSet->ddictPtrCount, hashSet->ddictPtrTableSize);
    /* Parenthesized so the division really compares count/size against SIZE_MULT/COUNT_MULT (0.75) */
    if (hashSet->ddictPtrCount * DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT
        / (hashSet->ddictPtrTableSize * DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT) != 0) {
        FORWARD_IF_ERROR(ZSTD_DDictHashSet_expand(hashSet, customMem), "");
    }
    FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict), "");
    return 0;
}
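/* The resize trigger above is pure integer arithmetic: with COUNT_MULT = 4 and
 * SIZE_MULT = 3, count * 4 / (size * 3) becomes nonzero exactly when
 * count / size >= 3/4. A standalone check of the threshold (illustrative values): */
#include <stdio.h>

int main(void)
{
    size_t const size = 64;   /* DDICT_HASHSET_TABLE_BASE_SIZE */
    size_t count;
    for (count = 46; count <= 49; count++) {
        /* nonzero iff count * 4 >= size * 3, i.e. load factor >= 0.75 */
        int const mustExpand = (count * 4 / (size * 3)) != 0;
        printf("count=%zu load=%.3f expand=%d\n", count, (double)count / size, mustExpand);
    }
    return 0;   /* first expands at count == 48 == 64 * 3/4 */
}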
/* Given a dctx with digested frame params, re-selects the correct ZSTD_DDict based on
 * the requested dict ID from the frame. If there exists a reference to the correct ZSTD_DDict, then
 * accordingly sets the ddict to be used to decompress the frame.
 *
 * If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is.
 *
 * ZSTD_d_refMultipleDDicts must be enabled for this function to be called.
 */
static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx* dctx) {
    assert(dctx->refMultipleDDicts && dctx->ddictSet);
    DEBUGLOG(4, "Adjusting DDict based on requested dict ID from frame");
    if (dctx->ddict) {
        const ZSTD_DDict* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID);
        if (frameDDict) {
            DEBUGLOG(4, "DDict found!");
            ZSTD_clearDict(dctx);
            dctx->dictID = dctx->fParams.dictID;
            dctx->ddict = frameDDict;
            dctx->dictUses = ZSTD_use_indefinitely;
        }
    }
}
static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
{
#if ZSTD_TRACE
    if (dctx->traceCtx) {
        ZSTD_Trace trace;
        ZSTD_memset(&trace, 0, sizeof(trace));
        trace.version = ZSTD_VERSION_NUMBER;
        trace.streaming = streaming;
        if (dctx->ddict) {
            trace.dictionaryID = ZSTD_getDictID_fromDDict(dctx->ddict);
            trace.dictionarySize = ZSTD_DDict_dictSize(dctx->ddict);
            trace.dictionaryIsCold = dctx->ddictIsCold;
        }
        trace.uncompressedSize = (size_t)uncompressedSize;
        trace.compressedSize = (size_t)compressedSize;
        trace.dctx = dctx;
        ZSTD_trace_decompress_end(dctx->traceCtx, &trace);
    }
#else
    (void)dctx;
    (void)uncompressedSize;
    (void)compressedSize;
    (void)streaming;
#endif
}
    if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts) {
        if (dctx->ddictSet == NULL) {
            dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem);
            if (!dctx->ddictSet) {
                RETURN_ERROR(memory_allocation, "Failed to allocate memory for hash set!");
            }
        }
        assert(!dctx->staticSize);  /* Impossible: ddictSet cannot have been allocated if static dctx */
        FORWARD_IF_ERROR(ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem), "");
    }
        case ZSTD_d_refMultipleDDicts:
            CHECK_DBOUNDS(ZSTD_d_refMultipleDDicts, value);
            if (dctx->staticSize != 0) {
                RETURN_ERROR(parameter_unsupported, "Static dctx does not support multiple DDicts!");
            }
            dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value;
            return 0;
typedef struct {
    U64 rolling;
    U64 stopMask;
} ldmRollingHashState_t;

/** ZSTD_ldm_gear_init():
 *
 * Initializes the rolling hash state such that it will honor the
 * settings in params. */
static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
{
    unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
    unsigned hashRateLog = params->hashRateLog;

    state->rolling = ~(U32)0;

    /* The choice of the splitting criterion is subject to two conditions:
     *   1. it has to trigger on average every 2^(hashRateLog) bytes;
     *   2. ideally, it has to depend on a window of minMatchLength bytes.
     *
     * In the gear hash algorithm, bit n depends on the last n bytes;
     * so in order to obtain a good quality splitting criterion it is
     * preferable to use bits with high weight.
     *
     * To match condition 1 we use a mask with hashRateLog bits set
     * and, because of the previous remark, we make sure these bits
     * have the highest possible weight while still respecting
     * condition 2.
     */
    if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
        state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
    } else {
        /* In this degenerate case we simply honor the hash rate. */
        state->stopMask = ((U64)1 << hashRateLog) - 1;
    }
}

/** ZSTD_ldm_gear_feed():
 *
 * Registers in the splits array all the split points found in the first
 * size bytes following the data pointer. This function terminates when
 * either all the data has been processed or LDM_BATCH_SIZE splits are
 * present in the splits array.
 *
 * Precondition: The splits array must not be full.
 * Returns: The number of bytes processed. */
static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
                                 BYTE const* data, size_t size,
                                 size_t* splits, unsigned* numSplits)
{
    size_t n;
    U64 hash, mask;

    hash = state->rolling;
    mask = state->stopMask;
    n = 0;

#define GEAR_ITER_ONCE() do { \
        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
        n += 1; \
        if (UNLIKELY((hash & mask) == 0)) { \
            splits[*numSplits] = n; \
            *numSplits += 1; \
            if (*numSplits == LDM_BATCH_SIZE) \
                goto done; \
        } \
    } while (0)

    while (n + 3 < size) {
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
    }
    while (n < size) {
        GEAR_ITER_ONCE();
    }

#undef GEAR_ITER_ONCE

done:
    state->rolling = hash;
    return n;
}
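/* An illustrative look at the stop-mask construction above (a sketch, not zstd code):
 * the hashRateLog set bits are packed as high as condition 2 allows, so, assuming the
 * masked hash bits behave uniformly, (hash & stopMask) == 0 triggers a split with
 * probability about 2^-hashRateLog per byte. Parameter values below are made up. */
#include <stdint.h>
#include <stdio.h>

static unsigned popcount64(uint64_t x)   /* portable popcount for the sketch */
{
    unsigned n = 0;
    for (; x; x &= x - 1) n++;
    return n;
}

int main(void)
{
    unsigned const minMatchLength = 64;   /* illustrative LDM minimum match length */
    unsigned const hashRateLog = 7;       /* aim: one split every ~2^7 = 128 bytes */
    unsigned const maxBitsInMask = minMatchLength < 64 ? minMatchLength : 64;
    uint64_t const stopMask =
        (((uint64_t)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);

    printf("stopMask = 0x%016llx (%u bits set)\n",
           (unsigned long long)stopMask, popcount64(stopMask));
    return 0;   /* prints stopMask = 0xfe00000000000000 (7 bits set) */
}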
#define LDM_HASH_CHAR_OFFSET 10
/** ZSTD_ldm_getSmallHash() :
 *  numBits should be <= 32
 *  If numBits==0, returns 0.
 *  @return : the most significant numBits of value. */
static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits)
{
    assert(numBits <= 32);
    return numBits == 0 ? 0 : (U32)(value >> (64 - numBits));
}

/** ZSTD_ldm_getChecksum() :
 *  numBitsToDiscard should be <= 32
 *  @return : the next most significant 32 bits after numBitsToDiscard */
static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard)
{
    assert(numBitsToDiscard <= 32);
    return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF;
}

/** ZSTD_ldm_getTag() :
 *  Given the hash, returns the most significant numTagBits bits
 *  after (32 + hbits) bits.
 *
 *  If there are not enough bits remaining, return the last
 *  numTagBits bits. */
static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits)
{
    assert(numTagBits < 32 && hbits <= 32);
    if (32 - hbits < numTagBits) {
        return hash & (((U32)1 << numTagBits) - 1);
    } else {
        return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1);
    }
}
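/* The three helpers above slice one 64-bit rolling hash from the top down: hBits of
 * small hash first, then a 32-bit checksum, then the tag. A standalone sketch of the
 * same slicing on a literal value (hBits and numTagBits are illustrative): */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t const hash = 0x0123456789ABCDEFull;   /* arbitrary rolling hash */
    uint32_t const hBits = 20, numTagBits = 4;

    uint32_t const smallHash = (uint32_t)(hash >> (64 - hBits));       /* top 20 bits */
    uint32_t const checksum  = (uint32_t)(hash >> (64 - 32 - hBits));  /* next 32 bits */
    uint32_t const tag       = (uint32_t)(hash >> (32 - hBits - numTagBits))
                               & (((uint32_t)1 << numTagBits) - 1);    /* next 4 bits */

    printf("smallHash=0x%05x checksum=0x%08x tag=0x%x\n", smallHash, checksum, tag);
    return 0;   /* prints smallHash=0x01234 checksum=0x56789abc tag=0xd */
}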
    BYTE* const pOffset = ldmState->bucketOffsets + hash;
    unsigned const offset = *pOffset;

    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
    *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
    BYTE* const bucketOffsets = ldmState->bucketOffsets;
    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry;
    bucketOffsets[hash]++;
    bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1;
}
/** ZSTD_ldm_makeEntryAndInsertByTag() :
 *
 *  Gets the small hash, checksum, and tag from the rollingHash.
 *
 *  If the tag matches (1 << ldmParams.hashRateLog)-1, then
 *  creates an ldmEntry from the offset, and inserts it into the hash table.
 *
 *  hBits is the length of the small hash, which is the most significant hBits
 *  of rollingHash. The checksum is the next 32 most significant bits, followed
 *  by ldmParams.hashRateLog bits that make up the tag. */
static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
                                             U64 const rollingHash,
                                             U32 const hBits,
                                             U32 const offset,
                                             ldmParams_t const ldmParams)
{
    U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashRateLog);
    U32 const tagMask = ((U32)1 << ldmParams.hashRateLog) - 1;
    if (tag == tagMask) {
        U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
        U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
        ldmEntry_t entry;
        entry.offset = offset;
        entry.checksum = checksum;
        ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams);
    }
}

/** ZSTD_ldm_fillLdmHashTable() :
 *
 *  Fills hashTable from (lastHashed + 1) to iend (non-inclusive).
 *  lastHash is the rolling hash that corresponds to lastHashed.
 *
 *  Returns the rolling hash corresponding to position iend-1. */
static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
                                     U64 lastHash, const BYTE* lastHashed,
                                     const BYTE* iend, const BYTE* base,
                                     U32 hBits, ldmParams_t const ldmParams)
{
    U64 rollingHash = lastHash;
    const BYTE* cur = lastHashed + 1;

    while (cur < iend) {
        rollingHash = ZSTD_rollingHash_rotate(rollingHash, cur[-1],
                                              cur[ldmParams.minMatchLength-1],
                                              state->hashPower);
        ZSTD_ldm_makeEntryAndInsertByTag(state,
                                         rollingHash, hBits,
                                         (U32)(cur - base), ldmParams);
        ++cur;
    }
    return rollingHash;
}
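/* A sketch of the rolling-hash invariant that ZSTD_rollingHash_rotate() relies on,
 * using a toy polynomial hash (the multiplier below is illustrative, not zstd's
 * constant): rotating out the oldest byte and in the newest one must equal a full
 * recompute over the shifted window. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define TOY_PRIME 2654435761u

static uint64_t toy_hash_compute(const unsigned char* p, size_t len)
{
    uint64_t h = 0;
    size_t i;
    for (i = 0; i < len; i++) h = h * TOY_PRIME + p[i];
    return h;
}

/* power = TOY_PRIME^(len-1); removes the oldest byte, appends the newest one */
static uint64_t toy_hash_rotate(uint64_t h, unsigned char toRemove, unsigned char toAdd, uint64_t power)
{
    return (h - toRemove * power) * TOY_PRIME + toAdd;
}

int main(void)
{
    const unsigned char data[] = "abcdefghijklmnop";
    size_t const len = 8;   /* window size, stand-in for minMatchLength */
    uint64_t power = 1, h;
    size_t i;
    for (i = 0; i < len - 1; i++) power *= TOY_PRIME;
    h = toy_hash_compute(data, len);
    for (i = 1; i + len <= strlen((const char*)data); i++) {
        h = toy_hash_rotate(h, data[i - 1], data[i + len - 1], power);
        assert(h == toy_hash_compute(data + i, len));   /* rotate == full recompute */
    }
    return 0;
}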
    U32 const minMatchLength = params->minMatchLength;
    U32 const hBits = params->hashLog - params->bucketSizeLog;
    BYTE const* const base = ldmState->window.base;
    BYTE const* const istart = ip;
    ldmRollingHashState_t hashState;
    size_t* const splits = ldmState->splitIndices;
    unsigned numSplits;
    ZSTD_ldm_gear_init(&hashState, params);
    while (ip < iend) {
        size_t hashed;
        unsigned n;

        numSplits = 0;
        hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);

        for (n = 0; n < numSplits; n++) {
            if (ip + splits[n] >= istart + minMatchLength) {
                BYTE const* const split = ip + splits[n] - minMatchLength;
                U64 const xxhash = XXH64(split, minMatchLength, 0);
                U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
                ldmEntry_t entry;

                entry.offset = (U32)(split - base);
                entry.checksum = (U32)(xxhash >> 32);
                ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
            }
        }

        ip += hashed;
    }
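/* In this gear-based path each split point is hashed once with XXH64: the low hBits
 * bits index the bucket and the top 32 bits serve as a cheap checksum. A hedged
 * standalone sketch of that derivation (assumes xxhash is available; hBits and the
 * window contents are illustrative): */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "xxhash.h"

int main(void)
{
    unsigned char window[64];
    unsigned const minMatchLength = 32;   /* illustrative */
    unsigned const hBits = 20;            /* illustrative hash-table log */
    memset(window, 7, sizeof(window));    /* stand-in for real window bytes */

    {
        uint64_t const xxhash = XXH64(window, minMatchLength, 0);
        uint32_t const hash = (uint32_t)(xxhash & (((uint32_t)1 << hBits) - 1)); /* bucket index */
        uint32_t const checksum = (uint32_t)(xxhash >> 32);                      /* verifier */
        printf("bucket=%u checksum=0x%08x\n", hash, checksum);
    }
    return 0;
}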
    if ((size_t)(iend - ip) >= params->minMatchLength) {
        U64 startingHash = ZSTD_rollingHash_compute(ip, params->minMatchLength);
        ZSTD_ldm_fillLdmHashTable(
            state, startingHash, ip, iend - params->minMatchLength, state->window.base,
            params->hashLog - params->bucketSizeLog,
            *params);
    /* Rolling hash state */
    ldmRollingHashState_t hashState;
    /* Arrays for staged processing */
    size_t* const splits = ldmState->splitIndices;
    ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
    unsigned numSplits;

    if (srcSize < minMatchLength)
        return iend - anchor;

    /* Initialize the rolling hash state with the first minMatchLength bytes */
    ZSTD_ldm_gear_init(&hashState, params);
    {   size_t n = 0;
    /* Rolling hash */
    BYTE const* lastHashed = NULL;
    U64 rollingHash = 0;
        while (n < minMatchLength) {
            numSplits = 0;
            n += ZSTD_ldm_gear_feed(&hashState, ip + n, minMatchLength - n,
                                    splits, &numSplits);
    while (ip <= ilimit) {
        size_t mLength;
        U32 const curr = (U32)(ip - base);
        size_t forwardMatchLength = 0, backwardMatchLength = 0;
        ldmEntry_t* bestEntry = NULL;
        if (ip != istart) {
            rollingHash = ZSTD_rollingHash_rotate(rollingHash, lastHashed[0],
                                                  lastHashed[minMatchLength],
                                                  hashPower);
        } else {
            rollingHash = ZSTD_rollingHash_compute(ip, minMatchLength);
        for (n = 0; n < numSplits; n++) {
            BYTE const* const split = ip + splits[n] - minMatchLength;
            U64 const xxhash = XXH64(split, minMatchLength, 0);
            U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));

            candidates[n].split = split;
            candidates[n].hash = hash;
            candidates[n].checksum = (U32)(xxhash >> 32);
            candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
            PREFETCH_L1(candidates[n].bucket);
        /* Do not insert and do not look for a match */
        if (ZSTD_ldm_getTag(rollingHash, hBits, hashRateLog) != ldmTagMask) {
            ip++;
            continue;
        for (n = 0; n < numSplits; n++) {
            size_t forwardMatchLength = 0, backwardMatchLength = 0,
                   bestMatchLength = 0, mLength;
            BYTE const* const split = candidates[n].split;
            U32 const checksum = candidates[n].checksum;
            U32 const hash = candidates[n].hash;
            ldmEntry_t* const bucket = candidates[n].bucket;
            ldmEntry_t const* cur;
            ldmEntry_t const* bestEntry = NULL;
            ldmEntry_t newEntry;
            newEntry.offset = (U32)(split - base);
            newEntry.checksum = checksum;

            /* If a split point would generate a sequence overlapping with
             * the previous one, we merely register it in the hash table and
             * move on */
            if (split < anchor) {
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
                continue;
            }
        /* Get the best entry and compute the match lengths */
        {
            ldmEntry_t* const bucket =
                ZSTD_ldm_getBucket(ldmState,
                                   ZSTD_ldm_getSmallHash(rollingHash, hBits),
                                   *params);
            ldmEntry_t* cur;
            size_t bestMatchLength = 0;
            U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
                    curBackwardMatchLength =
                        ZSTD_ldm_countBackwardsMatch_2segments(ip, anchor,
                                                               pMatch, lowMatchPtr,
                                                               dictStart, dictEnd);
                    curTotalMatchLength = curForwardMatchLength +
                                          curBackwardMatchLength;
            /* Match found */
            mLength = forwardMatchLength + backwardMatchLength;
            {
                U32 const offset = (U32)(split - base) - bestEntry->offset;
                rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
        /* No match found -- continue searching */
        if (bestEntry == NULL) {
            ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash,
                                             hBits, curr,
                                             *params);
            ip++;
            continue;
        }
                /* Out of sequence storage */
                if (rawSeqStore->size == rawSeqStore->capacity)
                    return ERROR(dstSize_tooSmall);
                seq->litLength = (U32)(split - backwardMatchLength - anchor);
                seq->matchLength = (U32)mLength;
                seq->offset = offset;
                rawSeqStore->size++;
            }
        /* Match found */
        mLength = forwardMatchLength + backwardMatchLength;
        ip -= backwardMatchLength;
            /* Insert the current entry into the hash table --- it must be
             * done after the previous block to avoid clobbering bestEntry */
            ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
        {
            /* Store the sequence:
             * ip = curr - backwardMatchLength
             * The match is at (bestEntry->offset - backwardMatchLength)
             */
            U32 const matchIndex = bestEntry->offset;
            U32 const offset = curr - matchIndex;
            rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
anchor = split + forwardMatchLength;
            /* Out of sequence storage */
            if (rawSeqStore->size == rawSeqStore->capacity)
                return ERROR(dstSize_tooSmall);
            seq->litLength = (U32)(ip - anchor);
            seq->matchLength = (U32)mLength;
            seq->offset = offset;
            rawSeqStore->size++;
        /* Fill the hash table from lastHashed+1 to ip+mLength */
        /* Heuristic: don't need to fill the entire table at end of block */
        if (ip + mLength <= ilimit) {
            rollingHash = ZSTD_ldm_fillLdmHashTable(ldmState, rollingHash, lastHashed,
                                                    ip + mLength, base, hBits, *params);
            lastHashed = ip + mLength - 1;
        }
        ip += mLength;
        anchor = ip;
    }
}

#define ZSTD_NO_CLEVEL 0

/**
 * Initializes the cctxParams from params and compressionLevel.
 * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
 */
static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel)
{
    assert(!ZSTD_checkCParams(params->cParams));
    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
    cctxParams->cParams = params->cParams;
    cctxParams->fParams = params->fParams;
    /* Should not matter, as all cParams are presumed properly defined.
     * But, set it for tracing anyway.
     */
    cctxParams->compressionLevel = compressionLevel;
}
    ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
    assert(!ZSTD_checkCParams(params.cParams));
    cctxParams->cParams = params.cParams;
    cctxParams->fParams = params.fParams;
    cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT;  /* should not matter, as all cParams are presumed properly defined */
/**
 * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
 * @param params Validated zstd parameters.
 */
static void ZSTD_CCtxParams_setZstdParams(ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
/* ZSTD_assignParamsToCCtxParams() :
 * params is presumed valid at this stage */
static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
        const ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
    cctxParams->cParams = params->cParams;
    cctxParams->fParams = params->fParams;
    /* Should not matter, as all cParams are presumed properly defined.
     * But, set it for tracing anyway.
     */
    cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
    ret.cParams = params->cParams;
    ret.fParams = params->fParams;
    ret.compressionLevel = ZSTD_CLEVEL_DEFAULT;  /* should not matter, as all cParams are presumed properly defined */
    return ret;
        zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
        ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
        zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, ldmBucketSize);
        ZSTD_memset(zc->ldmState.bucketOffsets, 0, ldmBucketSize);
/* ZSTD_writeSkippableFrame() :
 * Writes out a skippable frame with the specified magic number variant (16 are supported),
 * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
 *
 * Returns the total number of bytes written, or a ZSTD error code.
 */
size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize, unsigned magicVariant) {
    BYTE* op = (BYTE*)dst;
    RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
                    dstSize_tooSmall, "Not enough room for skippable frame");
    RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
    RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");

    MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
    MEM_writeLE32(op+4, (U32)srcSize);
    ZSTD_memcpy(op+8, src, srcSize);
    return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
}
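/* The frame layout written above is: 4-byte little-endian magic
 * (ZSTD_MAGIC_SKIPPABLE_START + variant), 4-byte little-endian payload size, then the
 * payload. A hedged sketch of a matching reader (hand-rolled, not the library's API;
 * assumes a little-endian host for the memcpy-based reads): */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define SKIPPABLE_MAGIC_START 0x184D2A50u   /* value of ZSTD_MAGIC_SKIPPABLE_START */

/* Returns the payload size and sets *payload, or (size_t)-1 if src does not
 * start with a complete skippable frame. */
static size_t read_skippable_frame(const void* src, size_t srcSize, const void** payload)
{
    uint32_t magic, contentSize;
    if (srcSize < 8) return (size_t)-1;
    memcpy(&magic, src, 4);
    memcpy(&contentSize, (const char*)src + 4, 4);
    if ((magic & 0xFFFFFFF0u) != SKIPPABLE_MAGIC_START) return (size_t)-1;  /* covers all 16 variants */
    if (srcSize - 8 < contentSize) return (size_t)-1;
    *payload = (const char*)src + 8;
    return contentSize;
}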
    ZSTD_CCtx_params cctxParams;
    {
        ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
        ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
    }
    ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
    ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, &params);
}

void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
{
#if ZSTD_TRACE
    if (cctx->traceCtx) {
        int const streaming = cctx->inBuffSize > 0 || cctx->outBuffSize > 0 || cctx->appliedParams.nbWorkers > 0;
        ZSTD_Trace trace;
        ZSTD_memset(&trace, 0, sizeof(trace));
        trace.version = ZSTD_VERSION_NUMBER;
        trace.streaming = streaming;
        trace.dictionaryID = cctx->dictID;
        trace.dictionarySize = cctx->dictContentSize;
        trace.uncompressedSize = cctx->consumedSrcSize;
        trace.compressedSize = cctx->producedCSize + extraCSize;
        trace.params = &cctx->appliedParams;
        trace.cctx = cctx;
        ZSTD_trace_compress_end(cctx->traceCtx, &trace);
    }
    cctx->traceCtx = 0;
#else
    (void)cctx;
    (void)extraCSize;
#endif
}
static size_t ZSTD_compress_internal(ZSTD_CCtx* cctx,
                                     void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize,
                                     const void* dict, size_t dictSize,
                                     const ZSTD_parameters* params)
{
    ZSTD_CCtx_params const cctxParams =
        ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, params);
    DEBUGLOG(4, "ZSTD_compress_internal");
    return ZSTD_compress_advanced_internal(cctx,
                                           dst, dstCapacity,
                                           src, srcSize,
                                           dict, dictSize,
                                           &cctxParams);
}
    ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
    return ZSTD_compress_advanced_internal(cctx,
                                           dst, dstCapacity,
                                           src, srcSize,
                                           dict, dictSize,
                                           &cctxParams);
    return ZSTD_compress_internal(cctx,
                                  dst, dstCapacity,
                                  src, srcSize,
                                  dict, dictSize,
                                  &params);
    ZSTD_CCtx_params cctxParams;
    {
        ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
        assert(params.fParams.contentSizeFlag == 1);
        ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
    }
    ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
    ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, &params);
    ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
    /* Increase window log to fit the entire dictionary and source if the
     * source size is known. Limit the increase to 19, which is the
     * window log for compression level 1 with the largest source size.
     */
    if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
        U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
        U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
        params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog);
    }
    params.fParams = fParams;
    return ZSTD_compressBegin_internal(cctx,
                                       NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                       cdict,
                                       &params, pledgedSrcSize,
                                       ZSTDb_not_buffered);
    /* Increase window log to fit the entire dictionary and source if the
     * source size is known. Limit the increase to 19, which is the
     * window log for compression level 1 with the largest source size.
     */
    if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
        U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
        U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
        cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog);
    }
    return ZSTD_compressBegin_internal(cctx,
                                       NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                       cdict,
                                       &cctxParams, pledgedSrcSize,
                                       ZSTDb_not_buffered);
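/* The ZSTD_highbit32(x - 1) + 1 idiom above computes ceil(log2(x)) for x > 1, which is
 * what caps the window log at 19 here. A standalone check using a portable stand-in
 * for the internal ZSTD_highbit32 (illustrative only): */
#include <assert.h>
#include <stdint.h>

static unsigned highbit32(uint32_t v)   /* index of the highest set bit */
{
    unsigned r = 0;
    while (v >>= 1) r++;
    return r;
}

int main(void)
{
    assert(highbit32(2 - 1) + 1 == 1);            /* 2 bytes -> 1-bit window */
    assert(highbit32(4096 - 1) + 1 == 12);        /* exactly 4 KiB -> 12 */
    assert(highbit32(4097 - 1) + 1 == 13);        /* just over 4 KiB -> 13 */
    assert(highbit32((1u << 19) - 1) + 1 == 19);  /* the 512 KiB cap -> 19 */
    return 0;
}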
printf("update with");for(unsigned long i = prevOutPos; i < outTmp.pos; i++) {BYTE* b = (BYTE*) outTmp.dst;printf(" %d", b[i]);}printf("\n%d\n", outTmp.pos - prevOutPos);XXH64_update(&zs->xxhState, (BYTE*)outTmp.dst + prevOutPos,1); // outTmp.pos - prevOutPos);
    XXH64_update(&zs->xxhState, (BYTE*)outTmp.dst + prevOutPos, outTmp.pos - prevOutPos);
}

#[test]
fn pijul_change() {
    let change = include_bytes!("../.pijul/changes/IH/334Q5ACWE4TNQYYOOF6GWV6CRXOEM6542NVNPA6HRIZ3CBFKEAC.change");
    use serde_derive::*;
    #[derive(Deserialize)]
    pub struct Offsets {
        pub version: u64,
        pub hashed_len: u64,
        pub unhashed_off: u64,
        pub unhashed_len: u64,
        pub contents_off: u64,
        pub contents_len: u64,
        pub total: u64,
    }
    let off0 = std::mem::size_of::<Offsets>();
    let offsets: Offsets = bincode::deserialize(&change[..off0]).unwrap();
    let mut s = Seekable::init_buf(&change[off0..offsets.unhashed_off as usize]).unwrap();
    let mut buf_ = Vec::new();
    buf_.resize(offsets.hashed_len as usize, 0);
    s.decompress(&mut buf_[..], 0).unwrap();
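/* For reference, the Offsets header the Rust test deserializes is seven u64 fields,
 * which bincode's default configuration lays out as fixed 8-byte little-endian
 * integers (56 bytes total, matching size_of::<Offsets>() above). A hedged C sketch
 * of the same parse (assumes a little-endian host; field names mirror the Rust
 * struct): */
#include <stdint.h>
#include <string.h>

typedef struct {
    uint64_t version;
    uint64_t hashed_len;
    uint64_t unhashed_off;
    uint64_t unhashed_len;
    uint64_t contents_off;
    uint64_t contents_len;
    uint64_t total;
} Offsets;   /* 7 * 8 = 56 bytes */

/* Returns 0 on success, -1 if the buffer is too short. */
static int parse_offsets(const unsigned char* change, size_t len, Offsets* out)
{
    if (len < sizeof(Offsets)) return -1;
    memcpy(out, change, sizeof(Offsets));   /* fixed-width little-endian fields */
    return 0;
}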