ReactOS 0.4.15-dev-7924-g5949c20
zstd_decompress_block.c File Reference
#include <string.h>
#include "compiler.h"
#include "cpu.h"
#include "mem.h"
#include "fse.h"
#include "huf.h"
#include "zstd_internal.h"
#include "zstd_decompress_internal.h"
#include "zstd_ddict.h"
#include "zstd_decompress_block.h"
Include dependency graph for zstd_decompress_block.c:

Go to the source code of this file.

Classes

struct  seq_t
 
struct  ZSTD_fseState
 
struct  seqState_t
 

Macros

#define FSE_STATIC_LINKING_ONLY
 
#define HUF_STATIC_LINKING_ONLY
 
#define LONG_OFFSETS_MAX_EXTRA_BITS_32
 
#define STORED_SEQS   4
 
#define STORED_SEQS_MASK   (STORED_SEQS-1)
 
#define ADVANCED_SEQS   4
 

Typedefs

typedef size_t(* ZSTD_decompressSequences_t) (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame)
 

Enumerations

enum  ZSTD_longOffset_e { ZSTD_lo_isRegularOffset , ZSTD_lo_isLongOffset =1 }
 
enum  ZSTD_prefetch_e { ZSTD_p_noPrefetch =0 , ZSTD_p_prefetch =1 }
 

Functions

static void ZSTD_copy4 (void *dst, const void *src)
 
size_t ZSTD_getcBlockSize (const void *src, size_t srcSize, blockProperties_t *bpPtr)
 
size_t ZSTD_decodeLiteralsBlock (ZSTD_DCtx *dctx, const void *src, size_t srcSize)
 
static void ZSTD_buildSeqTable_rle (ZSTD_seqSymbol *dt, U32 baseValue, U32 nbAddBits)
 
void ZSTD_buildFSETable (ZSTD_seqSymbol *dt, const short *normalizedCounter, unsigned maxSymbolValue, const U32 *baseValue, const U32 *nbAdditionalBits, unsigned tableLog)
 
static size_t ZSTD_buildSeqTable (ZSTD_seqSymbol *DTableSpace, const ZSTD_seqSymbol **DTablePtr, symbolEncodingType_e type, unsigned max, U32 maxLog, const void *src, size_t srcSize, const U32 *baseValue, const U32 *nbAdditionalBits, const ZSTD_seqSymbol *defaultTable, U32 flagRepeatTable, int ddictIsCold, int nbSeq)
 
size_t ZSTD_decodeSeqHeaders (ZSTD_DCtx *dctx, int *nbSeqPtr, const void *src, size_t srcSize)
 
HINT_INLINE void ZSTD_overlapCopy8 (BYTE **op, BYTE const **ip, size_t offset)
 
static void ZSTD_safecopy (BYTE *op, BYTE *const oend_w, BYTE const *ip, ptrdiff_t length, ZSTD_overlap_e ovtype)
 
FORCE_NOINLINE size_t ZSTD_execSequenceEnd (BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const prefixStart, const BYTE *const virtualStart, const BYTE *const dictEnd)
 
HINT_INLINE size_t ZSTD_execSequence (BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const prefixStart, const BYTE *const virtualStart, const BYTE *const dictEnd)
 
static void ZSTD_initFseState (ZSTD_fseState *DStatePtr, BIT_DStream_t *bitD, const ZSTD_seqSymbol *dt)
 
FORCE_INLINE_TEMPLATE void ZSTD_updateFseState (ZSTD_fseState *DStatePtr, BIT_DStream_t *bitD)
 
FORCE_INLINE_TEMPLATE void ZSTD_updateFseStateWithDInfo (ZSTD_fseState *DStatePtr, BIT_DStream_t *bitD, ZSTD_seqSymbol const DInfo)
 
FORCE_INLINE_TEMPLATE seq_t ZSTD_decodeSequence (seqState_t *seqState, const ZSTD_longOffset_e longOffsets, const ZSTD_prefetch_e prefetch)
 
FORCE_INLINE_TEMPLATE size_t DONT_VECTORIZE ZSTD_decompressSequences_body (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame)
 
static size_t ZSTD_decompressSequences_default (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame)
 
FORCE_INLINE_TEMPLATE size_t ZSTD_decompressSequencesLong_body (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame)
 
static size_t ZSTD_decompressSequencesLong_default (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame)
 
static size_t ZSTD_decompressSequences (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame)
 
static size_t ZSTD_decompressSequencesLong (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame)
 
static unsigned ZSTD_getLongOffsetsShare (const ZSTD_seqSymbol *offTable)
 
size_t ZSTD_decompressBlock_internal (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const int frame)
 
void ZSTD_checkContinuity (ZSTD_DCtx *dctx, const void *dst)
 
size_t ZSTD_decompressBlock (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
 

Variables

static const ZSTD_seqSymbol LL_defaultDTable [(1<< LL_DEFAULTNORMLOG)+1]
 
static const ZSTD_seqSymbol OF_defaultDTable [(1<< OF_DEFAULTNORMLOG)+1]
 
static const ZSTD_seqSymbol ML_defaultDTable [(1<< ML_DEFAULTNORMLOG)+1]
 

Macro Definition Documentation

◆ ADVANCED_SEQS

#define ADVANCED_SEQS   4

◆ FSE_STATIC_LINKING_ONLY

#define FSE_STATIC_LINKING_ONLY

Definition at line 21 of file zstd_decompress_block.c.

◆ HUF_STATIC_LINKING_ONLY

#define HUF_STATIC_LINKING_ONLY

Definition at line 23 of file zstd_decompress_block.c.

◆ LONG_OFFSETS_MAX_EXTRA_BITS_32

#define LONG_OFFSETS_MAX_EXTRA_BITS_32
Value:
(ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \
? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \
: 0)

Definition at line 829 of file zstd_decompress_block.c.
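With the values these headers normally use (ZSTD_WINDOWLOG_MAX_32 = 30 in zstd.h and STREAM_ACCUMULATOR_MIN_32 = 25 in bitstream.h; worth re-checking against this tree), the macro evaluates to 30 - 25 = 5: on 32-bit targets at most 5 offset extra bits can remain to be read after a bitstream reload, which is what the corresponding assert in ZSTD_decodeSequence() relies on.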

◆ STORED_SEQS

#define STORED_SEQS   4

◆ STORED_SEQS_MASK

#define STORED_SEQS_MASK   (STORED_SEQS-1)

Typedef Documentation

◆ ZSTD_decompressSequences_t

typedef size_t(* ZSTD_decompressSequences_t) (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame)

Definition at line 1262 of file zstd_decompress_block.c.

Enumeration Type Documentation

◆ ZSTD_longOffset_e

Enumerator
ZSTD_lo_isRegularOffset 
ZSTD_lo_isLongOffset 

Definition at line 834 of file zstd_decompress_block.c.

◆ ZSTD_prefetch_e

Enumerator
ZSTD_p_noPrefetch 
ZSTD_p_prefetch 

Definition at line 835 of file zstd_decompress_block.c.

Function Documentation

◆ ZSTD_buildFSETable()

void ZSTD_buildFSETable ( ZSTD_seqSymbol *  dt,
const short *  normalizedCounter,
unsigned  maxSymbolValue,
const U32 *  baseValue,
const U32 *  nbAdditionalBits,
unsigned  tableLog 
)

Definition at line 368 of file zstd_decompress_block.c.

372{
373 ZSTD_seqSymbol* const tableDecode = dt+1;
374 U16 symbolNext[MaxSeq+1];
375
376 U32 const maxSV1 = maxSymbolValue + 1;
377 U32 const tableSize = 1 << tableLog;
378 U32 highThreshold = tableSize-1;
379
380 /* Sanity Checks */
381 assert(maxSymbolValue <= MaxSeq);
382 assert(tableLog <= MaxFSELog);
383
384 /* Init, lay down lowprob symbols */
385 { ZSTD_seqSymbol_header DTableH;
386 DTableH.tableLog = tableLog;
387 DTableH.fastMode = 1;
388 { S16 const largeLimit= (S16)(1 << (tableLog-1));
389 U32 s;
390 for (s=0; s<maxSV1; s++) {
391 if (normalizedCounter[s]==-1) {
392 tableDecode[highThreshold--].baseValue = s;
393 symbolNext[s] = 1;
394 } else {
395 if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
396 assert(normalizedCounter[s]>=0);
397 symbolNext[s] = (U16)normalizedCounter[s];
398 } } }
399 memcpy(dt, &DTableH, sizeof(DTableH));
400 }
401
402 /* Spread symbols */
403 { U32 const tableMask = tableSize-1;
404 U32 const step = FSE_TABLESTEP(tableSize);
405 U32 s, position = 0;
406 for (s=0; s<maxSV1; s++) {
407 int i;
408 for (i=0; i<normalizedCounter[s]; i++) {
409 tableDecode[position].baseValue = s;
410 position = (position + step) & tableMask;
411 while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
412 } }
413 assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
414 }
415
416 /* Build Decoding table */
417 { U32 u;
418 for (u=0; u<tableSize; u++) {
419 U32 const symbol = tableDecode[u].baseValue;
420 U32 const nextState = symbolNext[symbol]++;
421 tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
422 tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
423 assert(nbAdditionalBits[symbol] < 255);
424 tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
425 tableDecode[u].baseValue = baseValue[symbol];
426 } }
427}

Referenced by ZSTD_loadDEntropy().
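The table built above is later walked by ZSTD_initFseState() / ZSTD_updateFseState(). The following is a minimal illustrative sketch (not the library's own code) of how one decode step uses the three fields written per cell; the struct and function names are invented here, while BIT_DStream_t / BIT_readBits are the bitstream helpers this file already includes.

/* Illustrative sketch only: one FSE decode step over a table produced by
 * ZSTD_buildFSETable(). The real decoder splits this across ZSTD_decodeSequence()
 * (value) and ZSTD_updateFseState() (state update). */
typedef struct { U16 nextState; BYTE nbAdditionalBits; BYTE nbBits; U32 baseValue; } FSECell_sketch;

static U32 FSE_decodeStep_sketch(const FSECell_sketch* table, size_t* statePtr, BIT_DStream_t* bitD)
{
    FSECell_sketch const cell = table[*statePtr];
    /* decoded value = baseline + nbAdditionalBits raw bits from the stream */
    U32 const value = cell.baseValue
                    + (cell.nbAdditionalBits ? (U32)BIT_readBits(bitD, cell.nbAdditionalBits) : 0);
    /* advance the ANS state: nbBits fresh bits select the next table cell */
    *statePtr = cell.nextState + BIT_readBits(bitD, cell.nbBits);
    return value;
}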

◆ ZSTD_buildSeqTable()

static size_t ZSTD_buildSeqTable ( ZSTD_seqSymbol *  DTableSpace,
const ZSTD_seqSymbol **  DTablePtr,
symbolEncodingType_e  type,
unsigned  max,
U32  maxLog,
const void *  src,
size_t  srcSize,
const U32 *  baseValue,
const U32 *  nbAdditionalBits,
const ZSTD_seqSymbol *  defaultTable,
U32  flagRepeatTable,
int  ddictIsCold,
int  nbSeq 
)
static

ZSTD_buildSeqTable() :

Returns
: number of bytes read from src, or an error code if it fails

Definition at line 433 of file zstd_decompress_block.c.

439{
440 switch(type)
441 {
442 case set_rle :
443 RETURN_ERROR_IF(!srcSize, srcSize_wrong, "");
444 RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected, "");
445 { U32 const symbol = *(const BYTE*)src;
446 U32 const baseline = baseValue[symbol];
447 U32 const nbBits = nbAdditionalBits[symbol];
448 ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
449 }
450 *DTablePtr = DTableSpace;
451 return 1;
452 case set_basic :
453 *DTablePtr = defaultTable;
454 return 0;
455 case set_repeat:
456 RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, "");
457 /* prefetch FSE table if used */
458 if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
459 const void* const pStart = *DTablePtr;
460 size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
461 PREFETCH_AREA(pStart, pSize);
462 }
463 return 0;
464 case set_compressed :
465 { unsigned tableLog;
466 S16 norm[MaxSeq+1];
467 size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
468 RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, "");
469 RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, "");
470 ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
471 *DTablePtr = DTableSpace;
472 return headerSize;
473 }
474 default :
475 assert(0);
476 RETURN_ERROR(GENERIC, "impossible");
477 }
478}

◆ ZSTD_buildSeqTable_rle()

static void ZSTD_buildSeqTable_rle ( ZSTD_seqSymbol *  dt,
U32  baseValue,
U32  nbAddBits 
)
static

Definition at line 346 of file zstd_decompress_block.c.

347{
348 void* ptr = dt;
349 ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
350 ZSTD_seqSymbol* const cell = dt + 1;
351
352 DTableH->tableLog = 0;
353 DTableH->fastMode = 0;
354
355 cell->nbBits = 0;
356 cell->nextState = 0;
357 assert(nbAddBits < 255);
358 cell->nbAdditionalBits = (BYTE)nbAddBits;
359 cell->baseValue = baseValue;
360}

◆ ZSTD_checkContinuity()

void ZSTD_checkContinuity ( ZSTD_DCtx *  dctx,
const void *  dst 
)

ZSTD_checkContinuity() : checks whether the next dst follows the previous position where decompression ended. If so, do nothing (continue on the current segment). If not, classify the previous segment as an "external dictionary" and start a new segment. This function cannot fail.

Definition at line 1412 of file zstd_decompress_block.c.

1413{
1414 if (dst != dctx->previousDstEnd) { /* not contiguous */
1415 dctx->dictEnd = dctx->previousDstEnd;
1416 dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
1417 dctx->prefixStart = dst;
1418 dctx->previousDstEnd = dst;
1419 }
1420}

Referenced by ZSTD_decompressContinue(), ZSTD_decompressMultiFrame(), and ZSTD_insertBlock().

◆ ZSTD_copy4()

static void ZSTD_copy4 ( void *  dst,
const void *  src 
)
static

Definition at line 47 of file zstd_decompress_block.c.

47{ memcpy(dst, src, 4); }

◆ ZSTD_decodeLiteralsBlock()

size_t ZSTD_decodeLiteralsBlock ( ZSTD_DCtx *  dctx,
const void *  src,
size_t  srcSize 
)

ZSTD_decodeLiteralsBlock() :

Returns
: number of bytes read from src (< srcSize). Note: this symbol is not declared in a header, but is exposed for fullbench.

Definition at line 79 of file zstd_decompress_block.c.

81{
82 DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
83 RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
84
85 { const BYTE* const istart = (const BYTE*) src;
86 symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
87
88 switch(litEncType)
89 {
90 case set_repeat:
91 DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
92 RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, "");
93 /* fall-through */
94
95 case set_compressed:
96 RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
97 { size_t lhSize, litSize, litCSize;
98 U32 singleStream=0;
99 U32 const lhlCode = (istart[0] >> 2) & 3;
100 U32 const lhc = MEM_readLE32(istart);
101 size_t hufSuccess;
102 switch(lhlCode)
103 {
104 case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */
105 /* 2 - 2 - 10 - 10 */
106 singleStream = !lhlCode;
107 lhSize = 3;
108 litSize = (lhc >> 4) & 0x3FF;
109 litCSize = (lhc >> 14) & 0x3FF;
110 break;
111 case 2:
112 /* 2 - 2 - 14 - 14 */
113 lhSize = 4;
114 litSize = (lhc >> 4) & 0x3FFF;
115 litCSize = lhc >> 18;
116 break;
117 case 3:
118 /* 2 - 2 - 18 - 18 */
119 lhSize = 5;
120 litSize = (lhc >> 4) & 0x3FFFF;
121 litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
122 break;
123 }
124 RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
125 RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, "");
126
127 /* prefetch huffman table if cold */
128 if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
129 PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
130 }
131
132 if (litEncType==set_repeat) {
133 if (singleStream) {
134 hufSuccess = HUF_decompress1X_usingDTable_bmi2(
135 dctx->litBuffer, litSize, istart+lhSize, litCSize,
136 dctx->HUFptr, dctx->bmi2);
137 } else {
138 hufSuccess = HUF_decompress4X_usingDTable_bmi2(
139 dctx->litBuffer, litSize, istart+lhSize, litCSize,
140 dctx->HUFptr, dctx->bmi2);
141 }
142 } else {
143 if (singleStream) {
144#if defined(HUF_FORCE_DECOMPRESS_X2)
145 hufSuccess = HUF_decompress1X_DCtx_wksp(
146 dctx->entropy.hufTable, dctx->litBuffer, litSize,
147 istart+lhSize, litCSize, dctx->workspace,
148 sizeof(dctx->workspace));
149#else
150 hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(
151 dctx->entropy.hufTable, dctx->litBuffer, litSize,
152 istart+lhSize, litCSize, dctx->workspace,
153 sizeof(dctx->workspace), dctx->bmi2);
154#endif
155 } else {
156 hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(
157 dctx->entropy.hufTable, dctx->litBuffer, litSize,
158 istart+lhSize, litCSize, dctx->workspace,
159 sizeof(dctx->workspace), dctx->bmi2);
160 }
161 }
162
163 RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, "");
164
165 dctx->litPtr = dctx->litBuffer;
166 dctx->litSize = litSize;
167 dctx->litEntropy = 1;
168 if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
169 memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
170 return litCSize + lhSize;
171 }
172
173 case set_basic:
174 { size_t litSize, lhSize;
175 U32 const lhlCode = ((istart[0]) >> 2) & 3;
176 switch(lhlCode)
177 {
178 case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */
179 lhSize = 1;
180 litSize = istart[0] >> 3;
181 break;
182 case 1:
183 lhSize = 2;
184 litSize = MEM_readLE16(istart) >> 4;
185 break;
186 case 3:
187 lhSize = 3;
188 litSize = MEM_readLE24(istart) >> 4;
189 break;
190 }
191
192 if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
193 RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, "");
194 memcpy(dctx->litBuffer, istart+lhSize, litSize);
195 dctx->litPtr = dctx->litBuffer;
196 dctx->litSize = litSize;
197 memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
198 return lhSize+litSize;
199 }
200 /* direct reference into compressed stream */
201 dctx->litPtr = istart+lhSize;
202 dctx->litSize = litSize;
203 return lhSize+litSize;
204 }
205
206 case set_rle:
207 { U32 const lhlCode = ((istart[0]) >> 2) & 3;
208 size_t litSize, lhSize;
209 switch(lhlCode)
210 {
211 case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */
212 lhSize = 1;
213 litSize = istart[0] >> 3;
214 break;
215 case 1:
216 lhSize = 2;
217 litSize = MEM_readLE16(istart) >> 4;
218 break;
219 case 3:
220 lhSize = 3;
221 litSize = MEM_readLE24(istart) >> 4;
222 RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
223 break;
224 }
225 RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
226 memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
227 dctx->litPtr = dctx->litBuffer;
228 dctx->litSize = litSize;
229 return lhSize+1;
230 }
231 default:
232 RETURN_ERROR(corruption_detected, "impossible");
233 }
234 }
235}
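As a worked example of the set_compressed size formats above (case lhlCode == 0, the 2-2-10-10 layout), consider the hypothetical 3-byte header { 0x82, 0x0C, 0x19 }; the byte values are invented for illustration.

/* Hypothetical literals-section header, bytes { 0x82, 0x0C, 0x19 }, read little-endian. */
U32 const lhc = 0x82 | (0x0C << 8) | (0x19 << 16);        /* = 0x190C82 */
/* litEncType = lhc & 3              = 2   -> set_compressed                      */
/* lhlCode    = (istart[0] >> 2) & 3 = 0   -> single Huffman stream, lhSize = 3   */
/* litSize    = (lhc >> 4)  & 0x3FF  = 200 -> regenerated (decompressed) size     */
/* litCSize   = (lhc >> 14) & 0x3FF  = 100 -> compressed size following the header */
/* so this literals section consumes lhSize + litCSize = 103 bytes of src.         */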

◆ ZSTD_decodeSeqHeaders()

size_t ZSTD_decodeSeqHeaders ( ZSTD_DCtx *  dctx,
int *  nbSeqPtr,
const void *  src,
size_t  srcSize 
)

ZSTD_decodeSeqHeaders() : decodes the sequence section header from src

Definition at line 480 of file zstd_decompress_block.c.

482{
483 const BYTE* const istart = (const BYTE* const)src;
484 const BYTE* const iend = istart + srcSize;
485 const BYTE* ip = istart;
486 int nbSeq;
487 DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
488
489 /* check */
490 RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, "");
491
492 /* SeqHead */
493 nbSeq = *ip++;
494 if (!nbSeq) {
495 *nbSeqPtr=0;
496 RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, "");
497 return 1;
498 }
499 if (nbSeq > 0x7F) {
500 if (nbSeq == 0xFF) {
501 RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
502 nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
503 } else {
504 RETURN_ERROR_IF(ip >= iend, srcSize_wrong, "");
505 nbSeq = ((nbSeq-0x80)<<8) + *ip++;
506 }
507 }
508 *nbSeqPtr = nbSeq;
509
510 /* FSE table descriptors */
511 RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */
512 { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
513 symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
514 symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
515 ip++;
516
517 /* Build DTables */
518 { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
519 LLtype, MaxLL, LLFSELog,
520 ip, iend-ip,
521 LL_base, LL_bits,
522 LL_defaultDTable, dctx->fseEntropy,
523 dctx->ddictIsCold, nbSeq);
524 RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed");
525 ip += llhSize;
526 }
527
528 { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
529 OFtype, MaxOff, OffFSELog,
530 ip, iend-ip,
531 OF_base, OF_bits,
532 OF_defaultDTable, dctx->fseEntropy,
533 dctx->ddictIsCold, nbSeq);
534 RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed");
535 ip += ofhSize;
536 }
537
538 { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
539 MLtype, MaxML, MLFSELog,
540 ip, iend-ip,
541 ML_base, ML_bits,
542 ML_defaultDTable, dctx->fseEntropy,
543 dctx->ddictIsCold, nbSeq);
544 RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed");
545 ip += mlhSize;
546 }
547 }
548
549 return ip-istart;
550}
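The single descriptor byte consumed before the three ZSTD_buildSeqTable() calls packs the table modes two bits each, high to low: literal lengths, offsets, match lengths, then two reserved bits. A worked example with a hypothetical descriptor byte 0xA4:

/* Hypothetical FSE table descriptor byte: 0xA4 = binary 1010 0100 */
BYTE const desc = 0xA4;
/* LLtype = desc >> 6       = 2 -> set_compressed */
/* OFtype = (desc >> 4) & 3 = 2 -> set_compressed */
/* MLtype = (desc >> 2) & 3 = 1 -> set_rle        */
/* bits 1-0 are reserved and must be zero          */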

◆ ZSTD_decodeSequence()

FORCE_INLINE_TEMPLATE seq_t ZSTD_decodeSequence ( seqState_t *  seqState,
const ZSTD_longOffset_e  longOffsets,
const ZSTD_prefetch_e  prefetch 
)

Definition at line 838 of file zstd_decompress_block.c.

839{
840 seq_t seq;
841 ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state];
842 ZSTD_seqSymbol const mlDInfo = seqState->stateML.table[seqState->stateML.state];
843 ZSTD_seqSymbol const ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state];
844 U32 const llBase = llDInfo.baseValue;
845 U32 const mlBase = mlDInfo.baseValue;
846 U32 const ofBase = ofDInfo.baseValue;
847 BYTE const llBits = llDInfo.nbAdditionalBits;
848 BYTE const mlBits = mlDInfo.nbAdditionalBits;
849 BYTE const ofBits = ofDInfo.nbAdditionalBits;
850 BYTE const totalBits = llBits+mlBits+ofBits;
851
852 /* sequence */
853 { size_t offset;
854 if (ofBits > 1) {
855 ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
856 ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
857 assert(ofBits <= MaxOff);
858 if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
859 U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
860 offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
861 BIT_reloadDStream(&seqState->DStream);
862 if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
863 assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */
864 } else {
865 offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
866 if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
867 }
868 seqState->prevOffset[2] = seqState->prevOffset[1];
869 seqState->prevOffset[1] = seqState->prevOffset[0];
870 seqState->prevOffset[0] = offset;
871 } else {
872 U32 const ll0 = (llBase == 0);
873 if (LIKELY((ofBits == 0))) {
874 if (LIKELY(!ll0))
875 offset = seqState->prevOffset[0];
876 else {
877 offset = seqState->prevOffset[1];
878 seqState->prevOffset[1] = seqState->prevOffset[0];
879 seqState->prevOffset[0] = offset;
880 }
881 } else {
882 offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
883 { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
884 temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
885 if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
886 seqState->prevOffset[1] = seqState->prevOffset[0];
887 seqState->prevOffset[0] = offset = temp;
888 } } }
889 seq.offset = offset;
890 }
891
892 seq.matchLength = mlBase;
893 if (mlBits > 0)
894 seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);
895
896 if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
897 BIT_reloadDStream(&seqState->DStream);
898 if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
899 BIT_reloadDStream(&seqState->DStream);
900 /* Ensure there are enough bits to read the rest of data in 64-bit mode. */
901 ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
902
903 seq.litLength = llBase;
904 if (llBits > 0)
905 seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);
906
907 if (MEM_32bits())
908 BIT_reloadDStream(&seqState->DStream);
909
910 DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
911 (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
912
913 if (prefetch == ZSTD_p_prefetch) {
914 size_t const pos = seqState->pos + seq.litLength;
915 const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
916 seq.match = matchBase + pos - seq.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
917 * No consequence though : no memory access will occur, offset is only used for prefetching */
918 seqState->pos = pos + seq.matchLength;
919 }
920
921 /* ANS state update
922 * gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo().
923 * clang-9.2.0 does 7% worse with ZSTD_updateFseState().
924 * Naturally it seems like ZSTD_updateFseStateWithDInfo() should be the
925 * better option, so it is the default for other compilers. But, if you
926 * measure that it is worse, please put up a pull request.
927 */
928 {
929#if defined(__GNUC__) && !defined(__clang__)
930 const int kUseUpdateFseState = 1;
931#else
932 const int kUseUpdateFseState = 0;
933#endif
934 if (kUseUpdateFseState) {
935 ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
936 ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
937 if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
938 ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
939 } else {
940 ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo); /* <= 9 bits */
941 ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo); /* <= 9 bits */
942 if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
943 ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo); /* <= 8 bits */
944 }
945 }
946
947 return seq;
948}
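For the ofBits > 1 path above, the OF baseline table already folds in the format's "actual offset = Offset_Value - 3" rule (OF_base[N] = (1<<N) - 3 for N >= 2). A worked example with invented bit values:

/* Hypothetical decode of offset code 5: OF_base[5] = 0x1D = 29 = (1<<5) - 3, OF_bits[5] = 5. */
U32    const ofBase = 29;                 /* baseValue from the offset table            */
U32    const extra  = 10;                 /* the 5 extra bits read: binary 01010        */
size_t const offset = ofBase + extra;     /* = 39                                       */
/* The raw frame value would be (1<<5) + 10 = 42; since 42 > 3 the actual offset is     */
/* 42 - 3 = 39, which the baked-in baseline delivers without an explicit subtraction.   */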

◆ ZSTD_decompressBlock()

size_t ZSTD_decompressBlock ( ZSTD_DCtx *  dctx,
void *  dst,
size_t  dstCapacity,
const void *  src,
size_t  srcSize 
)

Definition at line 1423 of file zstd_decompress_block.c.

1426{
1427 size_t dSize;
1428 ZSTD_checkContinuity(dctx, dst);
1429 dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
1430 dctx->previousDstEnd = (char*)dst + dSize;
1431 return dSize;
1432}
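This is the entry point exposed through zstd's experimental block-level API (ZSTD_STATIC_LINKING_ONLY). A minimal usage sketch, error handling mostly elided and assuming every input chunk is a compressed block (uncompressed blocks would go through ZSTD_insertBlock() instead); the function name is made up for illustration:

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Decompress consecutive compressed blocks into one contiguous buffer, so that
 * ZSTD_checkContinuity() keeps treating the output as a single segment. */
size_t decompressBlocks_sketch(void* dst, size_t dstCapacity,
                               const void* const* blocks, const size_t* blockSizes, size_t nbBlocks)
{
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    size_t written = 0;
    size_t b;
    ZSTD_decompressBegin(dctx);                       /* reset block-level state */
    for (b = 0; b < nbBlocks; b++) {
        size_t const r = ZSTD_decompressBlock(dctx,
                                              (char*)dst + written, dstCapacity - written,
                                              blocks[b], blockSizes[b]);
        if (ZSTD_isError(r)) { written = r; break; }  /* propagate the error code */
        written += r;
    }
    ZSTD_freeDCtx(dctx);
    return written;
}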

◆ ZSTD_decompressBlock_internal()

size_t ZSTD_decompressBlock_internal ( ZSTD_DCtx *  dctx,
void *  dst,
size_t  dstCapacity,
const void *  src,
size_t  srcSize,
const int  frame 
)

Definition at line 1341 of file zstd_decompress_block.c.

1344{ /* blockType == blockCompressed */
1345 const BYTE* ip = (const BYTE*)src;
1346 /* isLongOffset must be true if there are long offsets.
1347 * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
1348 * We don't expect that to be the case in 64-bit mode.
1349 * In block mode, window size is not known, so we have to be conservative.
1350 * (note: but it could be evaluated from current-lowLimit)
1351 */
1352 ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
1353 DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
1354
1355 RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, "");
1356
1357 /* Decode literals section */
1358 { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
1359 DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
1360 if (ZSTD_isError(litCSize)) return litCSize;
1361 ip += litCSize;
1362 srcSize -= litCSize;
1363 }
1364
1365 /* Build Decoding Tables */
1366 {
1367 /* These macros control at build-time which decompressor implementation
1368 * we use. If neither is defined, we do some inspection and dispatch at
1369 * runtime.
1370 */
1371#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
1372 !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
1373 int usePrefetchDecoder = dctx->ddictIsCold;
1374#endif
1375 int nbSeq;
1376 size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
1377 if (ZSTD_isError(seqHSize)) return seqHSize;
1378 ip += seqHSize;
1379 srcSize -= seqHSize;
1380
1381 RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled");
1382
1383#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
1384 !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
1385 if ( !usePrefetchDecoder
1386 && (!frame || (dctx->fParams.windowSize > (1<<24)))
1387 && (nbSeq>ADVANCED_SEQS) ) { /* could probably use a larger nbSeq limit */
1388 U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
1389 U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
1390 usePrefetchDecoder = (shareLongOffsets >= minShare);
1391 }
1392#endif
1393
1394 dctx->ddictIsCold = 0;
1395
1396#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
1397 !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
1398 if (usePrefetchDecoder)
1399#endif
1400#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
1401 return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
1402#endif
1403
1404#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
1405 /* else */
1406 return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
1407#endif
1408 }
1409}

Referenced by ZSTD_decompressContinue(), and ZSTD_decompressFrame().

◆ ZSTD_decompressSequences()

static size_t ZSTD_decompressSequences ( ZSTD_DCtx *  dctx,
void *  dst,
size_t  maxDstSize,
const void *  seqStart,
size_t  seqSize,
int  nbSeq,
const ZSTD_longOffset_e  isLongOffset,
const int  frame 
)
static

Definition at line 1271 of file zstd_decompress_block.c.

1275{
1276 DEBUGLOG(5, "ZSTD_decompressSequences");
1277#if DYNAMIC_BMI2
1278 if (dctx->bmi2) {
1279 return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
1280 }
1281#endif
1282 return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
1283}

◆ ZSTD_decompressSequences_body()

FORCE_INLINE_TEMPLATE size_t DONT_VECTORIZE ZSTD_decompressSequences_body ( ZSTD_DCtx *  dctx,
void *  dst,
size_t  maxDstSize,
const void *  seqStart,
size_t  seqSize,
int  nbSeq,
const ZSTD_longOffset_e  isLongOffset,
const int  frame 
)

Definition at line 995 of file zstd_decompress_block.c.

1000{
1001 const BYTE* ip = (const BYTE*)seqStart;
1002 const BYTE* const iend = ip + seqSize;
1003 BYTE* const ostart = (BYTE* const)dst;
1004 BYTE* const oend = ostart + maxDstSize;
1005 BYTE* op = ostart;
1006 const BYTE* litPtr = dctx->litPtr;
1007 const BYTE* const litEnd = litPtr + dctx->litSize;
1008 const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
1009 const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
1010 const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
1011 DEBUGLOG(5, "ZSTD_decompressSequences_body");
1012 (void)frame;
1013
1014 /* Regen sequences */
1015 if (nbSeq) {
1016 seqState_t seqState;
1017 size_t error = 0;
1018 dctx->fseEntropy = 1;
1019 { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
1020 RETURN_ERROR_IF(
1021 ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
1022 corruption_detected, "");
1023 ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
1024 ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
1025 ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
1026 assert(dst != NULL);
1027
1032
1033#if defined(__GNUC__) && defined(__x86_64__)
1034 /* Align the decompression loop to 32 + 16 bytes.
1035 *
1036 * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
1037 * speed swings based on the alignment of the decompression loop. This
1038 * performance swing is caused by parts of the decompression loop falling
1039 * out of the DSB. The entire decompression loop should fit in the DSB,
1040 * when it can't we get much worse performance. You can measure if you've
1041 * hit the good case or the bad case with this perf command for some
1042 * compressed file test.zst:
1043 *
1044 * perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \
1045 * -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst
1046 *
1047 * If you see most cycles served out of the MITE you've hit the bad case.
1048 * If you see most cycles served out of the DSB you've hit the good case.
1049 * If it is pretty even then you may be in an okay case.
1050 *
1051 * I've been able to reproduce this issue on the following CPUs:
1052 * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9
1053 * Use Instruments->Counters to get DSB/MITE cycles.
1054 * I never got performance swings, but I was able to
1055 * go from the good case of mostly DSB to half of the
1056 * cycles served from MITE.
1057 * - Coffeelake: Intel i9-9900k
1058 *
1059 * I haven't been able to reproduce the instability or DSB misses on any
1060 * of the following CPUS:
1061 * - Haswell
1062 * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GH
1063 * - Skylake
1064 *
1065 * If you are seeing performance stability this script can help test.
1066 * It tests on 4 commits in zstd where I saw performance change.
1067 *
1068 * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4
1069 */
1070 __asm__(".p2align 5");
1071 __asm__("nop");
1072 __asm__(".p2align 4");
1073#endif
1074 for ( ; ; ) {
1075 seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_noPrefetch);
1076 size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
1077#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
1078 assert(!ZSTD_isError(oneSeqSize));
1079 if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
1080#endif
1081 DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
1082 BIT_reloadDStream(&(seqState.DStream));
1083 /* gcc and clang both don't like early returns in this loop.
1084 * gcc doesn't like early breaks either.
1085 * Instead save an error and report it at the end.
1086 * When there is an error, don't increment op, so we don't
1087 * overwrite.
1088 */
1089 if (UNLIKELY(ZSTD_isError(oneSeqSize))) error = oneSeqSize;
1090 else op += oneSeqSize;
1091 if (UNLIKELY(!--nbSeq)) break;
1092 }
1093
1094 /* check if reached exact end */
1095 DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
1096 if (ZSTD_isError(error)) return error;
1097 RETURN_ERROR_IF(nbSeq, corruption_detected, "");
1098 RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
1099 /* save reps for next block */
1100 { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
1101 }
1102
1103 /* last literal segment */
1104 { size_t const lastLLSize = litEnd - litPtr;
1105 RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
1106 if (op != NULL) {
1107 memcpy(op, litPtr, lastLLSize);
1108 op += lastLLSize;
1109 }
1110 }
1111
1112 return op-ostart;
1113}

◆ ZSTD_decompressSequences_default()

static size_t ZSTD_decompressSequences_default ( ZSTD_DCtx *  dctx,
void *  dst,
size_t  maxDstSize,
const void *  seqStart,
size_t  seqSize,
int  nbSeq,
const ZSTD_longOffset_e  isLongOffset,
const int  frame 
)
static

Definition at line 1116 of file zstd_decompress_block.c.

1121{
1122 return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
1123}

◆ ZSTD_decompressSequencesLong()

static size_t ZSTD_decompressSequencesLong ( ZSTD_DCtx *  dctx,
void *  dst,
size_t  maxDstSize,
const void *  seqStart,
size_t  seqSize,
int  nbSeq,
const ZSTD_longOffset_e  isLongOffset,
const int  frame 
)
static

Definition at line 1294 of file zstd_decompress_block.c.

1299{
1300 DEBUGLOG(5, "ZSTD_decompressSequencesLong");
1301#if DYNAMIC_BMI2
1302 if (dctx->bmi2) {
1303 return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
1304 }
1305#endif
1306 return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
1307}

◆ ZSTD_decompressSequencesLong_body()

FORCE_INLINE_TEMPLATE size_t ZSTD_decompressSequencesLong_body ( ZSTD_DCtx *  dctx,
void *  dst,
size_t  maxDstSize,
const void *  seqStart,
size_t  seqSize,
int  nbSeq,
const ZSTD_longOffset_e  isLongOffset,
const int  frame 
)

Definition at line 1128 of file zstd_decompress_block.c.

1134{
1135 const BYTE* ip = (const BYTE*)seqStart;
1136 const BYTE* const iend = ip + seqSize;
1137 BYTE* const ostart = (BYTE* const)dst;
1138 BYTE* const oend = ostart + maxDstSize;
1139 BYTE* op = ostart;
1140 const BYTE* litPtr = dctx->litPtr;
1141 const BYTE* const litEnd = litPtr + dctx->litSize;
1142 const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
1143 const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
1144 const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
1145 (void)frame;
1146
1147 /* Regen sequences */
1148 if (nbSeq) {
1149#define STORED_SEQS 4
1150#define STORED_SEQS_MASK (STORED_SEQS-1)
1151#define ADVANCED_SEQS 4
1152 seq_t sequences[STORED_SEQS];
1153 int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
1154 seqState_t seqState;
1155 int seqNb;
1156 dctx->fseEntropy = 1;
1157 { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
1158 seqState.prefixStart = prefixStart;
1159 seqState.pos = (size_t)(op-prefixStart);
1160 seqState.dictEnd = dictEnd;
1161 assert(dst != NULL);
1162 assert(iend >= ip);
1163 RETURN_ERROR_IF(
1164 ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
1165 corruption_detected, "");
1166 ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
1167 ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
1168 ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
1169
1170 /* prepare in advance */
1171 for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
1172 sequences[seqNb] = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
1173 PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
1174 }
1175 RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, "");
1176
1177 /* decode and decompress */
1178 for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
1179 seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
1180 size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
1181#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
1182 assert(!ZSTD_isError(oneSeqSize));
1183 if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
1184#endif
1185 if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
1186 PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
1187 sequences[seqNb & STORED_SEQS_MASK] = sequence;
1188 op += oneSeqSize;
1189 }
1190 RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected, "");
1191
1192 /* finish queue */
1193 seqNb -= seqAdvance;
1194 for ( ; seqNb<nbSeq ; seqNb++) {
1195 size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
1196#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
1197 assert(!ZSTD_isError(oneSeqSize));
1198 if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
1199#endif
1200 if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
1201 op += oneSeqSize;
1202 }
1203
1204 /* save reps for next block */
1205 { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
1206 }
1207
1208 /* last literal segment */
1209 { size_t const lastLLSize = litEnd - litPtr;
1210 RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
1211 if (op != NULL) {
1212 memcpy(op, litPtr, lastLLSize);
1213 op += lastLLSize;
1214 }
1215 }
1216
1217 return op-ostart;
1218}

◆ ZSTD_decompressSequencesLong_default()

static size_t ZSTD_decompressSequencesLong_default ( ZSTD_DCtx *  dctx,
void *  dst,
size_t  maxDstSize,
const void *  seqStart,
size_t  seqSize,
int  nbSeq,
const ZSTD_longOffset_e  isLongOffset,
const int  frame 
)
static

Definition at line 1221 of file zstd_decompress_block.c.

1226{
1227 return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
1228}

◆ ZSTD_execSequence()

HINT_INLINE size_t ZSTD_execSequence ( BYTE *  op,
BYTE *const  oend,
seq_t  sequence,
const BYTE **  litPtr,
const BYTE *const  litLimit,
const BYTE *const  prefixStart,
const BYTE *const  virtualStart,
const BYTE *const  dictEnd 
)

Definition at line 704 of file zstd_decompress_block.c.

708{
709 BYTE* const oLitEnd = op + sequence.litLength;
710 size_t const sequenceLength = sequence.litLength + sequence.matchLength;
711 BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
712 BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; /* risk : address space underflow on oend=NULL */
713 const BYTE* const iLitEnd = *litPtr + sequence.litLength;
714 const BYTE* match = oLitEnd - sequence.offset;
715
716 assert(op != NULL /* Precondition */);
717 assert(oend_w < oend /* No underflow */);
718 /* Handle edge cases in a slow path:
719 * - Read beyond end of literals
720 * - Match end is within WILDCOPY_OVERLIMIT of oend
721 * - 32-bit mode and the match length overflows
722 */
723 if (UNLIKELY(
724 iLitEnd > litLimit ||
725 oMatchEnd > oend_w ||
726 (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
727 return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
728
729 /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
730 assert(op <= oLitEnd /* No overflow */);
731 assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
732 assert(oMatchEnd <= oend /* No underflow */);
733 assert(iLitEnd <= litLimit /* Literal length is in bounds */);
734 assert(oLitEnd <= oend_w /* Can wildcopy literals */);
735 assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
736
737 /* Copy Literals:
738 * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
739 * We likely don't need the full 32-byte wildcopy.
740 */
741 assert(WILDCOPY_OVERLENGTH >= 16);
742 ZSTD_copy16(op, (*litPtr));
743 if (UNLIKELY(sequence.litLength > 16)) {
744 ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
745 }
746 op = oLitEnd;
747 *litPtr = iLitEnd; /* update for next sequence */
748
749 /* Copy Match */
750 if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
751 /* offset beyond prefix -> go into extDict */
752 RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
753 match = dictEnd + (match - prefixStart);
754 if (match + sequence.matchLength <= dictEnd) {
755 memmove(oLitEnd, match, sequence.matchLength);
756 return sequenceLength;
757 }
758 /* span extDict & currentPrefixSegment */
759 { size_t const length1 = dictEnd - match;
760 memmove(oLitEnd, match, length1);
761 op = oLitEnd + length1;
762 sequence.matchLength -= length1;
763 match = prefixStart;
764 } }
765 /* Match within prefix of 1 or more bytes */
766 assert(op <= oMatchEnd);
767 assert(oMatchEnd <= oend_w);
768 assert(match >= prefixStart);
769 assert(sequence.matchLength >= 1);
770
771 /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
772 * without overlap checking.
773 */
774 if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
775 /* We bet on a full wildcopy for matches, since we expect matches to be
776 * longer than literals (in general). In silesia, ~10% of matches are longer
777 * than 16 bytes.
778 */
779 ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_overlap_src_before_dst);
780 return sequenceLength;
781 }
782 assert(sequence.offset < WILDCOPY_VECLEN);
783
784 /* Copy 8 bytes and spread the offset to be >= 8. */
785 ZSTD_overlapCopy8(&op, &match, sequence.offset);
786
787 /* If the match length is > 8 bytes, then continue with the wildcopy. */
788 if (sequence.matchLength > 8) {
789 assert(op < oMatchEnd);
790 ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
791 }
792 return sequenceLength;
793}
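Each sequence expands to "litLength literal bytes, then matchLength bytes copied from offset bytes back in the output". A tiny illustrative trace with invented values:

/* Hypothetical sequence: litLength = 3, matchLength = 4, offset = 2, literals = "abc".
 * 1) copy literals : output becomes "abc",  op advances by 3
 * 2) copy match    : source is op - 2 ("bc"); the overlapping copy emits "bcbc"
 * final output     : "abcbcbc"  (sequenceLength = 3 + 4 = 7 bytes written)          */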

◆ ZSTD_execSequenceEnd()

FORCE_NOINLINE size_t ZSTD_execSequenceEnd ( BYTE *  op,
BYTE *const  oend,
seq_t  sequence,
const BYTE **  litPtr,
const BYTE *const  litLimit,
const BYTE *const  prefixStart,
const BYTE *const  virtualStart,
const BYTE *const  dictEnd 
)

Definition at line 661 of file zstd_decompress_block.c.

665{
666 BYTE* const oLitEnd = op + sequence.litLength;
667 size_t const sequenceLength = sequence.litLength + sequence.matchLength;
668 const BYTE* const iLitEnd = *litPtr + sequence.litLength;
669 const BYTE* match = oLitEnd - sequence.offset;
670 BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
671
672 /* bounds checks : careful of address space overflow in 32-bit mode */
673 RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
674 RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
675 assert(op < op + sequenceLength);
676 assert(oLitEnd < op + sequenceLength);
677
678 /* copy literals */
679 ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
680 op = oLitEnd;
681 *litPtr = iLitEnd;
682
683 /* copy Match */
684 if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
685 /* offset beyond prefix */
686 RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
687 match = dictEnd - (prefixStart-match);
688 if (match + sequence.matchLength <= dictEnd) {
689 memmove(oLitEnd, match, sequence.matchLength);
690 return sequenceLength;
691 }
692 /* span extDict & currentPrefixSegment */
693 { size_t const length1 = dictEnd - match;
694 memmove(oLitEnd, match, length1);
695 op = oLitEnd + length1;
696 sequence.matchLength -= length1;
697 match = prefixStart;
698 } }
699 ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
700 return sequenceLength;
701}
static void ZSTD_safecopy(BYTE *op, BYTE *const oend_w, BYTE const *ip, ptrdiff_t length, ZSTD_overlap_e ovtype)
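As the ZSTD_safecopy() notes further below point out, this slower routine only runs when a sequence lands near the end of the block. Below is a minimal, illustrative sketch of the kind of guard a fast decode path would use before falling back to it; the helper name and the local BYTE typedef are inventions for this standalone example, not the file's actual code.

/* Illustrative sketch only: divert to ZSTD_execSequenceEnd() whenever the
 * literal read or the match write could cross the wildcopy-safe limits. */
#include <stddef.h>
typedef unsigned char BYTE;   /* stand-in for the BYTE typedef from mem.h */

static int needsEndPath(const BYTE* op, const BYTE* oend_w,
                        const BYTE* iLitEnd, const BYTE* litLimit,
                        size_t litLength, size_t matchLength)
{
    const BYTE* const oMatchEnd = op + litLength + matchLength;
    /* Either the literals would be read past the literal buffer, or the match
     * would be written past the region where 16-byte overcopies are safe. */
    return (iLitEnd > litLimit) || (oMatchEnd > oend_w);
}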

◆ ZSTD_getcBlockSize()

size_t ZSTD_getcBlockSize ( const void *  src,
size_t  srcSize,
blockProperties_t *  bpPtr 
)

ZSTD_getcBlockSize() : Provides the size of the compressed block read from the block header at src.

Definition at line 56 of file zstd_decompress_block.c.

58{
59 RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, "");
60
61 { U32 const cBlockHeader = MEM_readLE24(src);
62 U32 const cSize = cBlockHeader >> 3;
63 bpPtr->lastBlock = cBlockHeader & 1;
64 bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
65 bpPtr->origSize = cSize; /* only useful for RLE */
66 if (bpPtr->blockType == bt_rle) return 1;
67 RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, "");
68 return cSize;
69 }
70}
MEM_STATIC U32 MEM_readLE24(const void *memPtr)
Definition: mem.h:335
blockType_e blockType
static const size_t ZSTD_blockHeaderSize
blockType_e
@ bt_rle
@ bt_reserved

Referenced by ZSTD_decompressContinue(), ZSTD_decompressFrame(), and ZSTD_findFrameSizeInfo().
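For reference, the 24-bit header decoded above packs three fields: bit 0 is the last-block flag, bits 1-2 are the block type (blockType_e: raw, RLE, compressed, reserved), and bits 3-23 hold the size. The standalone sketch below repeats the same bit arithmetic; the header bytes are invented for the demonstration.

#include <stdio.h>

int main(void)
{
    /* Example 3-byte little-endian block header (values invented for the demo). */
    const unsigned char hdr[3] = { 0x25, 0x01, 0x00 };
    unsigned const cBlockHeader = hdr[0] | (hdr[1] << 8) | ((unsigned)hdr[2] << 16);

    unsigned const lastBlock = cBlockHeader & 1;         /* bit 0   : last-block flag */
    unsigned const blockType = (cBlockHeader >> 1) & 3;  /* bits 1-2: blockType_e     */
    unsigned const cSize     = cBlockHeader >> 3;        /* bits 3-23: block size     */

    printf("lastBlock=%u blockType=%u cSize=%u\n", lastBlock, blockType, cSize);
    /* prints: lastBlock=1 blockType=2 cSize=36 */
    return 0;
}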

◆ ZSTD_getLongOffsetsShare()

static unsigned ZSTD_getLongOffsetsShare ( const ZSTD_seqSymbol *  offTable)
static

Definition at line 1319 of file zstd_decompress_block.c.

1320{
1321 const void* ptr = offTable;
1322 U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
1323 const ZSTD_seqSymbol* table = offTable + 1;
1324 U32 const max = 1 << tableLog;
1325 U32 u, total = 0;
1326 DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
1327
1328 assert(max <= (1 << OffFSELog)); /* max not too large */
1329 for (u=0; u<max; u++) {
1330 if (table[u].nbAdditionalBits > 22) total += 1;
1331 }
1332
1333 assert(tableLog <= OffFSELog);
1334 total <<= (OffFSELog - tableLog); /* scale to OffFSELog */
1335
1336 return total;
1337}
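The count is scaled so it is expressed out of (1 << OffFSELog) table cells, i.e. the return value approximates how often an offset symbol needs more than 22 extra bits. The hedged sketch below shows how a caller might turn that into a yes/no decision; the OffFSELog value of 8 and the 5% cut-off are assumptions made for this illustration, not values taken from this page.

/* Hedged sketch: interpret ZSTD_getLongOffsetsShare()'s result as a count out
 * of (1 << OffFSELog) cells. OffFSELog == 8 and the 5% threshold are assumed
 * here purely for illustration. */
#define OFF_FSE_LOG_ASSUMED 8

static int preferLongOffsetDecoder(unsigned share)
{
    unsigned const totalCells = 1u << OFF_FSE_LOG_ASSUMED;   /* 256 */
    /* e.g. share == 16 means roughly 16/256 = 6.25% of cells need long offsets */
    return (share * 100u) >= (5u * totalCells);
}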

◆ ZSTD_initFseState()

static void ZSTD_initFseState ( ZSTD_fseState *  DStatePtr,
BIT_DStream_t *  bitD,
const ZSTD_seqSymbol *  dt 
)
static

Definition at line 796 of file zstd_decompress_block.c.

797{
798 const void* ptr = dt;
799 const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
800 DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
801 DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
802 (U32)DStatePtr->state, DTableH->tableLog);
803 BIT_reloadDStream(bitD);
804 DStatePtr->table = dt + 1;
805}
MEM_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, unsigned nbBits)
Definition: bitstream.h:377
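For context, the decode loops in this file call this once per symbol stream, all against the same backward bit stream, before reading any sequence. The hedged sketch below shows that setup, assuming the seqState_t fields (DStream, stateLL, stateOffb, stateML) and the ZSTD_DCtx table pointers (LLTptr, OFTptr, MLTptr) keep the names used elsewhere in this file; the wrapper function itself is hypothetical.

/* Hypothetical wrapper, sketching how the three FSE states are primed before
 * the sequence-decoding loop starts. */
static size_t initSequenceStates(seqState_t* seqState, ZSTD_DCtx* dctx,
                                 const BYTE* ip, const BYTE* iend)
{
    RETURN_ERROR_IF(ERR_isError(BIT_initDStream(&seqState->DStream, ip, (size_t)(iend - ip))),
                    corruption_detected, "");
    ZSTD_initFseState(&seqState->stateLL,   &seqState->DStream, dctx->LLTptr);  /* literal lengths */
    ZSTD_initFseState(&seqState->stateOffb, &seqState->DStream, dctx->OFTptr);  /* offsets         */
    ZSTD_initFseState(&seqState->stateML,   &seqState->DStream, dctx->MLTptr);  /* match lengths   */
    return 0;
}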

◆ ZSTD_overlapCopy8()

HINT_INLINE void ZSTD_overlapCopy8 ( BYTE **  op,
BYTE const **  ip,
size_t  offset 
)

ZSTD_overlapCopy8() : Copies 8 bytes from ip to op and updates op and ip, where ip <= op. If the offset is < 8, the offset is spread to at least 8 bytes.

Precondition: *ip <= *op. Postcondition: *op - *ip >= 8.

Definition at line 583 of file zstd_decompress_block.c.

583 {
584 assert(*ip <= *op);
585 if (offset < 8) {
586 /* close range match, overlap */
587 static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
588 static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
589 int const sub2 = dec64table[offset];
590 (*op)[0] = (*ip)[0];
591 (*op)[1] = (*ip)[1];
592 (*op)[2] = (*ip)[2];
593 (*op)[3] = (*ip)[3];
594 *ip += dec32table[offset];
595 ZSTD_copy4(*op+4, *ip);
596 *ip -= sub2;
597 } else {
598 ZSTD_copy8(*op, *ip);
599 }
600 *ip += 8;
601 *op += 8;
602 assert(*op - *ip >= 8);
603}
static void ZSTD_copy4(void *dst, const void *src)
static void ZSTD_copy8(void *dst, const void *src)
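To make the dec32table/dec64table trick concrete, here is a standalone re-implementation of the same steps (for illustration only) applied to an offset-2 match: after one call the 2-byte pattern has been replicated across 8 output bytes and the op/ip distance has grown from 2 to 8, so later copies can proceed 8 bytes at a time.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* Re-implementation of the overlap-copy steps above, for illustration only. */
static void overlapCopy8_demo(unsigned char** op, const unsigned char** ip, size_t offset)
{
    static const unsigned dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
    static const int      dec64table[] = { 8, 8, 8, 7, 8, 9, 10, 11 };
    if (offset < 8) {
        int const sub2 = dec64table[offset];
        (*op)[0] = (*ip)[0]; (*op)[1] = (*ip)[1];
        (*op)[2] = (*ip)[2]; (*op)[3] = (*ip)[3];
        *ip += dec32table[offset];
        memcpy(*op + 4, *ip, 4);
        *ip -= sub2;
    } else {
        memcpy(*op, *ip, 8);
    }
    *ip += 8;
    *op += 8;
}

int main(void)
{
    unsigned char buf[32] = { 0 };
    memcpy(buf + 8, "ab", 2);                 /* the 2-byte repeating pattern */
    const unsigned char* ip = buf + 8;        /* match source                 */
    unsigned char*       op = buf + 10;       /* destination, offset == 2     */

    overlapCopy8_demo(&op, &ip, 2);

    printf("%.10s  offset now %td\n", buf + 8, op - ip);
    /* prints: ababababab  offset now 8 */
    return 0;
}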

◆ ZSTD_safecopy()

static void ZSTD_safecopy ( BYTE *  op,
BYTE *const  oend_w,
BYTE const *  ip,
ptrdiff_t  length,
ZSTD_overlap_e  ovtype 
)
static

ZSTD_safecopy() : Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer and write up to 16 bytes past oend_w (op >= oend_w is allowed). This function is only called in the uncommon case where the sequence is near the end of the block. It should be fast for a single long sequence, but can be slow for several short sequences.

Parameters
ovtype  controls the overlap detection
  • ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
  • ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart. The src buffer must be before the dst buffer.

Definition at line 616 of file zstd_decompress_block.c.

616 {
617 ptrdiff_t const diff = op - ip;
618 BYTE* const oend = op + length;
619
620 assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||
621 (ovtype == ZSTD_overlap_src_before_dst && diff >= 0));
622
623 if (length < 8) {
624 /* Handle short lengths. */
625 while (op < oend) *op++ = *ip++;
626 return;
627 }
628 if (ovtype == ZSTD_overlap_src_before_dst) {
629 /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
630 assert(length >= 8);
631 ZSTD_overlapCopy8(&op, &ip, diff);
632 assert(op - ip >= 8);
633 assert(op <= oend);
634 }
635
636 if (oend <= oend_w) {
637 /* No risk of overwrite. */
638 ZSTD_wildcopy(op, ip, length, ovtype);
639 return;
640 }
641 if (op <= oend_w) {
642 /* Wildcopy until we get close to the end. */
643 assert(oend > oend_w);
644 ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
645 ip += oend_w - op;
646 op = oend_w;
647 }
648 /* Handle the leftovers. */
649 while (op < oend) *op++ = *ip++;
650}
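The key caller-side convention is visible in the listings above (e.g. line 670): oend_w is derived as oend - WILDCOPY_OVERLENGTH, so any 16-byte overcopy issued while op <= oend_w still lands inside the real output buffer, and only the final stretch is copied byte by byte. A minimal sketch of that bound follows; the concrete WILDCOPY_OVERLENGTH value of 32 is an assumption taken from current zstd sources, not from this page.

/* Sketch of the wildcopy-safe bound ZSTD_safecopy() relies on. The concrete
 * WILDCOPY_OVERLENGTH value (32) is assumed here for illustration. */
#include <stddef.h>
typedef unsigned char BYTE;   /* stand-in for the BYTE typedef from mem.h */
#define WILDCOPY_OVERLENGTH_ASSUMED 32

static BYTE* wildcopySafeEnd(BYTE* dst, size_t dstCapacity)
{
    /* Mirrors "oend_w = oend - WILDCOPY_OVERLENGTH" at line 670: while the
     * write pointer stays at or before this address, a 16-byte overcopy
     * cannot run past dst + dstCapacity. */
    return dst + dstCapacity - WILDCOPY_OVERLENGTH_ASSUMED;
}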

◆ ZSTD_updateFseState()

FORCE_INLINE_TEMPLATE void ZSTD_updateFseState ( ZSTD_fseState *  DStatePtr,
BIT_DStream_t *  bitD 
)

Definition at line 808 of file zstd_decompress_block.c.

809{
810 ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
811 U32 const nbBits = DInfo.nbBits;
812 size_t const lowBits = BIT_readBits(bitD, nbBits);
813 DStatePtr->state = DInfo.nextState + lowBits;
814}
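The update is simply "read the cell's nbBits from the stream and add them to the cell's nextState". The small worked example below uses a cell from the OF_defaultDTable shown further on ({ 16, 7, 4, 125 }); the bit value read from the stream is invented for the illustration.

#include <stdio.h>

int main(void)
{
    /* Cell { nextState=16, nbAdditionalBits=7, nbBits=4, baseValue=125 }
     * taken from the OF_defaultDTable initializer shown below. */
    unsigned const nextState = 16;
    unsigned const nbBits    = 4;

    unsigned const lowBits   = 5;                 /* pretend BIT_readBits(bitD, 4) == 0b0101 */
    unsigned const newState  = nextState + lowBits;

    printf("consumed %u bits, next FSE state = %u\n", nbBits, newState);
    /* prints: consumed 4 bits, next FSE state = 21 */
    return 0;
}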

◆ ZSTD_updateFseStateWithDInfo()

FORCE_INLINE_TEMPLATE void ZSTD_updateFseStateWithDInfo ( ZSTD_fseState *  DStatePtr,
BIT_DStream_t *  bitD,
ZSTD_seqSymbol const  DInfo 
)

Definition at line 817 of file zstd_decompress_block.c.

818{
819 U32 const nbBits = DInfo.nbBits;
820 size_t const lowBits = BIT_readBits(bitD, nbBits);
821 DStatePtr->state = DInfo.nextState + lowBits;
822}

Variable Documentation

◆ LL_defaultDTable

const ZSTD_seqSymbol LL_defaultDTable[(1<< LL_DEFAULTNORMLOG)+1]
static

Definition at line 247 of file zstd_decompress_block.c.

◆ ML_defaultDTable

const ZSTD_seqSymbol ML_defaultDTable[(1<< ML_DEFAULTNORMLOG)+1]
static

Definition at line 308 of file zstd_decompress_block.c.

◆ OF_defaultDTable

const ZSTD_seqSymbol OF_defaultDTable[(1<< OF_DEFAULTNORMLOG)+1]
static
Initial value:
= {
{ 1, 1, 1, OF_DEFAULTNORMLOG},
{ 0, 0, 5, 0}, { 0, 6, 4, 61},
{ 0, 9, 5, 509}, { 0, 15, 5,32765},
{ 0, 21, 5,2097149}, { 0, 3, 5, 5},
{ 0, 7, 4, 125}, { 0, 12, 5, 4093},
{ 0, 18, 5,262141}, { 0, 23, 5,8388605},
{ 0, 5, 5, 29}, { 0, 8, 4, 253},
{ 0, 14, 5,16381}, { 0, 20, 5,1048573},
{ 0, 2, 5, 1}, { 16, 7, 4, 125},
{ 0, 11, 5, 2045}, { 0, 17, 5,131069},
{ 0, 22, 5,4194301}, { 0, 4, 5, 13},
{ 16, 8, 4, 253}, { 0, 13, 5, 8189},
{ 0, 19, 5,524285}, { 0, 1, 5, 1},
{ 16, 6, 4, 61}, { 0, 10, 5, 1021},
{ 0, 16, 5,65533}, { 0, 28, 5,268435453},
{ 0, 27, 5,134217725}, { 0, 26, 5,67108861},
{ 0, 25, 5,33554429}, { 0, 24, 5,16777213},
}
#define OF_DEFAULTNORMLOG

Definition at line 285 of file zstd_decompress_block.c.
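Each row after the first header entry is a ZSTD_seqSymbol in the order { nextState, nbAdditionalBits, nbBits, baseValue }; the field order follows how ZSTD_getLongOffsetsShare() above reads the table. For the offset table, decoding a symbol reads nbAdditionalBits extra bits and adds them to baseValue (repeat-offset handling happens separately in ZSTD_decodeSequence). Below is a small worked example for the row { 0, 9, 5, 509 }; the extra-bit value is invented for the illustration.

#include <stdio.h>

int main(void)
{
    /* Row { nextState=0, nbAdditionalBits=9, nbBits=5, baseValue=509 } from the
     * initializer above: base 509 == (1 << 9) - 3, plus 9 freshly read bits. */
    unsigned const nbAdditionalBits = 9;
    unsigned const baseValue        = 509;

    unsigned const extraBits   = 300;                    /* pretend BIT_readBits(bitD, 9) == 300 */
    unsigned const offsetValue = baseValue + extraBits;  /* raw offset value: 809 */

    printf("raw offset value = %u (before repeat-offset handling)\n", offsetValue);
    return 0;
}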