2 * Copyright (c) Meta Platforms, Inc. and affiliates.
5 * This source code is licensed under both the BSD-style license (found in the
6 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7 * in the COPYING file in the root directory of this source tree).
11 /* *************************************
13 ***************************************/
14 #include "util.h" /* Compiler options, UTIL_GetFileSize, UTIL_sleep */
15 #include <stdlib.h> /* malloc, free */
16 #include <string.h> /* memset */
17 #include <stdio.h> /* fprintf, fopen, ftello64 */
18 #include <time.h> /* clock_t, clock, CLOCKS_PER_SEC */
19 #include <ctype.h> /* toupper */
20 #include <errno.h> /* errno */
22 #include "timefn.h" /* UTIL_time_t, UTIL_getTime, UTIL_clockSpanMicro, UTIL_waitForNextTick */
24 #define ZSTD_STATIC_LINKING_ONLY
26 #include "datagen.h" /* RDG_genBuffer */
29 #include "../zstd_zlibwrapper.h"
33 /*-************************************
35 **************************************/
36 #ifndef ZSTDCLI_CLEVEL_DEFAULT
37 # define ZSTDCLI_CLEVEL_DEFAULT 3
41 /*-************************************
43 **************************************/
44 #define COMPRESSOR_NAME "Zstandard wrapper for zlib command line interface"
46 # define ZSTD_VERSION "v" ZSTD_VERSION_STRING
48 #define AUTHOR "Yann Collet"
49 #define WELCOME_MESSAGE "*** %s %i-bits %s, by %s ***\n", COMPRESSOR_NAME, (int)(sizeof(size_t)*8), ZSTD_VERSION, AUTHOR
51 #ifndef ZSTD_GIT_COMMIT
52 # define ZSTD_GIT_COMMIT_STRING ""
54 # define ZSTD_GIT_COMMIT_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_GIT_COMMIT)
58 #define TIMELOOP_MICROSEC 1*1000000ULL /* 1 second */
59 #define ACTIVEPERIOD_MICROSEC 70*1000000ULL /* 70 seconds */
60 #define COOLPERIOD_SEC 10
66 static const size_t maxMemory = (sizeof(size_t)==4) ? (2 GB - 64 MB) : (size_t)(1ULL << ((sizeof(size_t)*8)-31));
68 static U32 g_compressibilityDefault = 50;
71 /* *************************************
73 ***************************************/
74 #define DEFAULT_DISPLAY_LEVEL 2
75 #define DISPLAY(...) fprintf(displayOut, __VA_ARGS__)
76 #define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
77 static unsigned g_displayLevel = DEFAULT_DISPLAY_LEVEL; /* 0 : no display; 1: errors; 2 : + result + interaction + warnings; 3 : + progression; 4 : + information */
78 static FILE* displayOut;
80 #define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \
81 if ((clock() - g_time > refreshRate) || (g_displayLevel>=4)) \
82 { g_time = clock(); DISPLAY(__VA_ARGS__); \
83 if (g_displayLevel>=4) fflush(displayOut); } }
84 static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
85 static clock_t g_time = 0;
88 /* *************************************
90 ***************************************/
94 #define DEBUGOUTPUT(...) { if (DEBUG) DISPLAY(__VA_ARGS__); }
95 #define EXM_THROW(error, ...) \
97 DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \
98 DISPLAYLEVEL(1, "Error %i : ", error); \
99 DISPLAYLEVEL(1, __VA_ARGS__); \
100 DISPLAYLEVEL(1, "\n"); \
105 /* *************************************
106 * Benchmark Parameters
107 ***************************************/
108 static unsigned g_nbIterations = NBLOOPS;
109 static size_t g_blockSize = 0;
110 int g_additionalParam = 0;
112 static void BMK_setNotificationLevel(unsigned level) { g_displayLevel=level; }
114 static void BMK_setAdditionalParam(int additionalParam) { g_additionalParam=additionalParam; }
116 static void BMK_SetNbIterations(unsigned nbLoops)
118 g_nbIterations = nbLoops;
119 DISPLAYLEVEL(3, "- test >= %u seconds per compression / decompression -\n", g_nbIterations);
122 static void BMK_SetBlockSize(size_t blockSize)
124 g_blockSize = blockSize;
125 DISPLAYLEVEL(2, "using blocks of size %u KB \n", (unsigned)(blockSize>>10));
129 /* ********************************************************
131 **********************************************************/
134 #define MIN(a,b) ((a)<(b) ? (a) : (b))
135 #define MAX(a,b) ((a)>(b) ? (a) : (b))
139 z_const char* srcPtr;
148 typedef enum { BMK_ZSTD, BMK_ZSTD_STREAM, BMK_ZLIB, BMK_ZWRAP_ZLIB, BMK_ZWRAP_ZSTD, BMK_ZLIB_REUSE, BMK_ZWRAP_ZLIB_REUSE, BMK_ZWRAP_ZSTD_REUSE } BMK_compressor;
/* Benchmarks one compression backend (see BMK_compressor enum) over
 * `srcBuffer`, which is cut into blocks of `g_blockSize` (or one block per
 * file). Measures compression then decompression speed per timed loop, checks
 * round-trip integrity via XXH64, and prints progress/results via DISPLAY.
 * NOTE(review): this extract is elided — declarations (nbBlocks, markNb,
 * cSize, ratio, rSize, def/inf streams, do{ openers) and many closing braces
 * are missing from this view; code left byte-identical, comments only added. */
151 static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
152 const char* displayName, int cLevel,
153 const size_t* fileSizes, U32 nbFiles,
154 const void* dictBuffer, size_t dictBufferSize, BMK_compressor compressor)
156 size_t const blockSize = (g_blockSize>=32 ? g_blockSize : srcSize) + (!srcSize) /* avoid div by 0 */ ;
157 size_t const avgSize = MIN(g_blockSize, (srcSize / nbFiles));
158 U32 const maxNbBlocks = (U32) ((srcSize + (blockSize-1)) / blockSize) + nbFiles;
159 blockParam_t* const blockTable = (blockParam_t*) malloc(maxNbBlocks * sizeof(blockParam_t));
160 size_t const maxCompressedSize = ZSTD_compressBound(srcSize) + (maxNbBlocks * 1024); /* add some room for safety */
161 void* const compressedBuffer = malloc(maxCompressedSize);
162 void* const resultBuffer = malloc(srcSize);
163 ZSTD_CCtx* const ctx = ZSTD_createCCtx();
164 ZSTD_DCtx* const dctx = ZSTD_createDCtx();
/* abort if any allocation failed */
168 if (!compressedBuffer || !resultBuffer || !blockTable || !ctx || !dctx)
169 EXM_THROW(31, "allocation error : not enough memory");
172 if (strlen(displayName)>17) displayName += strlen(displayName)-17; /* can only display 17 characters */
174 /* Init blockTable data */
175 { z_const char* srcPtr = (z_const char*)srcBuffer;
176 char* cPtr = (char*)compressedBuffer;
177 char* resPtr = (char*)resultBuffer;
/* carve each file into blockSize-sized blocks; record src/dst/result slots */
179 for (nbBlocks=0, fileNb=0; fileNb<nbFiles; fileNb++) {
180 size_t remaining = fileSizes[fileNb];
181 U32 const nbBlocksforThisFile = (U32)((remaining + (blockSize-1)) / blockSize);
182 U32 const blockEnd = nbBlocks + nbBlocksforThisFile;
183 for ( ; nbBlocks<blockEnd; nbBlocks++) {
184 size_t const thisBlockSize = MIN(remaining, blockSize);
185 blockTable[nbBlocks].srcPtr = srcPtr;
186 blockTable[nbBlocks].cPtr = cPtr;
187 blockTable[nbBlocks].resPtr = resPtr;
188 blockTable[nbBlocks].srcSize = thisBlockSize;
189 blockTable[nbBlocks].cRoom = ZSTD_compressBound(thisBlockSize);
190 srcPtr += thisBlockSize;
191 cPtr += blockTable[nbBlocks].cRoom;
192 resPtr += thisBlockSize;
193 remaining -= thisBlockSize;
196 /* warming up memory */
197 RDG_genBuffer(compressedBuffer, maxCompressedSize, 0.10, 0.50, 1);
/* --- Bench loop: alternate compression and decompression passes until both
 *     have accumulated more than maxTime of measurements --- */
200 { U64 fastestC = (U64)(-1LL), fastestD = (U64)(-1LL);
201 U64 const crcOrig = XXH64(srcBuffer, srcSize, 0);
202 UTIL_time_t coolTime;
203 U64 const maxTime = (g_nbIterations * TIMELOOP_MICROSEC) + 100;
204 U64 totalCTime=0, totalDTime=0;
205 U32 cCompleted=0, dCompleted=0;
207 const char* const marks[NB_MARKS] = { " |", " /", " =", "\\" };
212 coolTime = UTIL_getTime();
213 DISPLAYLEVEL(2, "\r%79s\r", "");
214 while (!cCompleted | !dCompleted) {
215 UTIL_time_t clockStart;
216 U64 clockLoop = g_nbIterations ? TIMELOOP_MICROSEC : 1;
218 /* overheat protection */
219 if (UTIL_clockSpanMicro(coolTime) > ACTIVEPERIOD_MICROSEC) {
220 DISPLAYLEVEL(2, "\rcooling down ... \r");
221 UTIL_sleep(COOLPERIOD_SEC);
222 coolTime = UTIL_getTime();
226 DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (unsigned)srcSize);
227 if (!cCompleted) memset(compressedBuffer, 0xE5, maxCompressedSize); /* warm up and erase result buffer */
229 UTIL_sleepMilli(1); /* give processor time to other processes */
230 UTIL_waitForNextTick();
231 clockStart = UTIL_getTime();
/* --- Compression phase, dispatched on `compressor` --- */
233 if (!cCompleted) { /* still some time to do compression tests */
235 if (compressor == BMK_ZSTD) {
/* zstd block API, optionally with a pre-digested dictionary (CDict) */
236 ZSTD_parameters const zparams = ZSTD_getParams(cLevel, avgSize, dictBufferSize);
237 ZSTD_customMem const cmem = { NULL, NULL, NULL };
238 ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_auto, zparams.cParams, cmem);
239 if (cdict==NULL) EXM_THROW(1, "ZSTD_createCDict_advanced() allocation failure");
244 for (blockNb=0; blockNb<nbBlocks; blockNb++) {
245 if (dictBufferSize) {
246 rSize = ZSTD_compress_usingCDict(ctx,
247 blockTable[blockNb].cPtr, blockTable[blockNb].cRoom,
248 blockTable[blockNb].srcPtr,blockTable[blockNb].srcSize,
251 rSize = ZSTD_compressCCtx (ctx,
252 blockTable[blockNb].cPtr, blockTable[blockNb].cRoom,
253 blockTable[blockNb].srcPtr,blockTable[blockNb].srcSize, cLevel);
255 if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_compress_usingCDict() failed : %s", ZSTD_getErrorName(rSize));
256 blockTable[blockNb].cSize = rSize;
259 } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
260 ZSTD_freeCDict(cdict);
261 } else if (compressor == BMK_ZSTD_STREAM) {
/* zstd streaming API: one compressStream+endStream pair per block */
262 ZSTD_parameters const zparams = ZSTD_getParams(cLevel, avgSize, dictBufferSize);
263 ZSTD_inBuffer inBuffer;
264 ZSTD_outBuffer outBuffer;
265 ZSTD_CStream* zbc = ZSTD_createCStream();
267 ZSTD_CCtx_params* cctxParams = ZSTD_createCCtxParams();
269 if (!cctxParams) EXM_THROW(1, "ZSTD_createCCtxParams() allocation failure");
270 if (zbc == NULL) EXM_THROW(1, "ZSTD_createCStream() allocation failure");
/* accumulate init errors, then fail once if any step failed */
273 initErr |= ZSTD_isError(ZSTD_CCtx_reset(zbc, ZSTD_reset_session_only));
274 initErr |= ZSTD_isError(ZSTD_CCtxParams_init_advanced(cctxParams, zparams));
275 initErr |= ZSTD_isError(ZSTD_CCtx_setParametersUsingCCtxParams(zbc, cctxParams));
276 initErr |= ZSTD_isError(ZSTD_CCtx_setPledgedSrcSize(zbc, avgSize));
277 initErr |= ZSTD_isError(ZSTD_CCtx_loadDictionary(zbc, dictBuffer, dictBufferSize));
279 ZSTD_freeCCtxParams(cctxParams);
280 if (initErr) EXM_THROW(1, "CCtx init failed!");
285 for (blockNb=0; blockNb<nbBlocks; blockNb++) {
286 rSize = ZSTD_CCtx_reset(zbc, ZSTD_reset_session_only);
287 if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_CCtx_reset() failed : %s", ZSTD_getErrorName(rSize));
288 rSize = ZSTD_CCtx_setPledgedSrcSize(zbc, blockTable[blockNb].srcSize);
289 if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_CCtx_setPledgedSrcSize() failed : %s", ZSTD_getErrorName(rSize));
290 inBuffer.src = blockTable[blockNb].srcPtr;
291 inBuffer.size = blockTable[blockNb].srcSize;
293 outBuffer.dst = blockTable[blockNb].cPtr;
294 outBuffer.size = blockTable[blockNb].cRoom;
296 rSize = ZSTD_compressStream(zbc, &outBuffer, &inBuffer);
297 if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_compressStream() failed : %s", ZSTD_getErrorName(rSize));
298 rSize = ZSTD_endStream(zbc, &outBuffer);
299 if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_endStream() failed : %s", ZSTD_getErrorName(rSize));
300 blockTable[blockNb].cSize = outBuffer.pos;
303 } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
304 ZSTD_freeCStream(zbc);
305 } else if (compressor == BMK_ZWRAP_ZLIB_REUSE || compressor == BMK_ZWRAP_ZSTD_REUSE || compressor == BMK_ZLIB_REUSE) {
/* zlib / zlibWrapper API, reusing one deflate stream across blocks */
308 int useSetDict = (dictBuffer != NULL);
309 if (compressor == BMK_ZLIB_REUSE || compressor == BMK_ZWRAP_ZLIB_REUSE) ZWRAP_useZSTDcompression(0);
310 else ZWRAP_useZSTDcompression(1);
314 ret = deflateInit(&def, cLevel);
315 if (ret != Z_OK) EXM_THROW(1, "deflateInit failure");
316 /* if (ZWRAP_isUsingZSTDcompression()) {
317 ret = ZWRAP_setPledgedSrcSize(&def, avgSize);
318 if (ret != Z_OK) EXM_THROW(1, "ZWRAP_setPledgedSrcSize failure");
322 for (blockNb=0; blockNb<nbBlocks; blockNb++) {
323 if (ZWRAP_isUsingZSTDcompression())
324 ret = ZWRAP_deflateReset_keepDict(&def); /* reuse dictionary to make compression faster */
326 ret = deflateReset(&def);
327 if (ret != Z_OK) EXM_THROW(1, "deflateReset failure");
329 ret = deflateSetDictionary(&def, (const z_Bytef*)dictBuffer, dictBufferSize);
330 if (ret != Z_OK) EXM_THROW(1, "deflateSetDictionary failure");
331 if (ZWRAP_isUsingZSTDcompression()) useSetDict = 0; /* zstd doesn't require deflateSetDictionary after ZWRAP_deflateReset_keepDict */
333 def.next_in = (z_const z_Bytef*) blockTable[blockNb].srcPtr;
334 def.avail_in = (uInt)blockTable[blockNb].srcSize;
336 def.next_out = (z_Bytef*) blockTable[blockNb].cPtr;
337 def.avail_out = (uInt)blockTable[blockNb].cRoom;
339 ret = deflate(&def, Z_FINISH);
340 if (ret != Z_STREAM_END) EXM_THROW(1, "deflate failure ret=%d srcSize=%d" , ret, (int)blockTable[blockNb].srcSize);
341 blockTable[blockNb].cSize = def.total_out;
344 } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
345 ret = deflateEnd(&def);
346 if (ret != Z_OK) EXM_THROW(1, "deflateEnd failure");
/* zlib / zlibWrapper API, fresh deflate stream per block (no reuse) */
349 if (compressor == BMK_ZLIB || compressor == BMK_ZWRAP_ZLIB) ZWRAP_useZSTDcompression(0);
350 else ZWRAP_useZSTDcompression(1);
353 for (blockNb=0; blockNb<nbBlocks; blockNb++) {
358 ret = deflateInit(&def, cLevel);
359 if (ret != Z_OK) EXM_THROW(1, "deflateInit failure");
361 ret = deflateSetDictionary(&def, (const z_Bytef*)dictBuffer, dictBufferSize);
362 if (ret != Z_OK) EXM_THROW(1, "deflateSetDictionary failure");
364 def.next_in = (z_const z_Bytef*) blockTable[blockNb].srcPtr;
365 def.avail_in = (uInt)blockTable[blockNb].srcSize;
367 def.next_out = (z_Bytef*) blockTable[blockNb].cPtr;
368 def.avail_out = (uInt)blockTable[blockNb].cRoom;
370 ret = deflate(&def, Z_FINISH);
371 if (ret != Z_STREAM_END) EXM_THROW(1, "deflate failure");
372 ret = deflateEnd(&def);
373 if (ret != Z_OK) EXM_THROW(1, "deflateEnd failure");
374 blockTable[blockNb].cSize = def.total_out;
377 } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
/* record fastest loop time and total compression time */
379 { U64 const clockSpan = UTIL_clockSpanMicro(clockStart);
380 if (clockSpan < fastestC*nbLoops) fastestC = clockSpan / nbLoops;
381 totalCTime += clockSpan;
382 cCompleted = totalCTime>maxTime;
386 { U32 blockNb; for (blockNb=0; blockNb<nbBlocks; blockNb++) cSize += blockTable[blockNb].cSize; }
387 ratio = (double)srcSize / (double)cSize;
388 markNb = (markNb+1) % NB_MARKS;
389 DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.3f),%6.1f MB/s\r",
390 marks[markNb], displayName, (unsigned)srcSize, (unsigned)cSize, ratio,
391 (double)srcSize / (double)fastestC );
393 (void)fastestD; (void)crcOrig; /* unused when decompression disabled */
/* --- Decompression phase, mirroring the compression dispatch --- */
396 if (!dCompleted) memset(resultBuffer, 0xD6, srcSize); /* warm result buffer */
398 UTIL_sleepMilli(1); /* give processor time to other processes */
399 UTIL_waitForNextTick();
400 clockStart = UTIL_getTime();
404 if (compressor == BMK_ZSTD) {
405 ZSTD_DDict* ddict = ZSTD_createDDict(dictBuffer, dictBufferSize);
406 if (!ddict) EXM_THROW(2, "ZSTD_createDDict() allocation failure");
409 for (blockNb=0; blockNb<nbBlocks; blockNb++) {
410 size_t const regenSize = ZSTD_decompress_usingDDict(dctx,
411 blockTable[blockNb].resPtr, blockTable[blockNb].srcSize,
412 blockTable[blockNb].cPtr, blockTable[blockNb].cSize,
414 if (ZSTD_isError(regenSize)) {
415 DISPLAY("ZSTD_decompress_usingDDict() failed on block %u : %s \n",
416 blockNb, ZSTD_getErrorName(regenSize));
417 clockLoop = 0; /* force immediate test end */
420 blockTable[blockNb].resSize = regenSize;
423 } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
424 ZSTD_freeDDict(ddict);
425 } else if (compressor == BMK_ZSTD_STREAM) {
426 ZSTD_inBuffer inBuffer;
427 ZSTD_outBuffer outBuffer;
428 ZSTD_DStream* zbd = ZSTD_createDStream();
430 if (zbd == NULL) EXM_THROW(1, "ZSTD_createDStream() allocation failure");
431 rSize = ZSTD_DCtx_reset(zbd, ZSTD_reset_session_only);
432 if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_DCtx_reset() failed : %s", ZSTD_getErrorName(rSize));
433 rSize = ZSTD_DCtx_loadDictionary(zbd, dictBuffer, dictBufferSize);
434 if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_DCtx_loadDictionary() failed : %s", ZSTD_getErrorName(rSize));
437 for (blockNb=0; blockNb<nbBlocks; blockNb++) {
438 rSize = ZSTD_DCtx_reset(zbd, ZSTD_reset_session_only);
439 if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_DCtx_reset() failed : %s", ZSTD_getErrorName(rSize));
440 inBuffer.src = blockTable[blockNb].cPtr;
441 inBuffer.size = blockTable[blockNb].cSize;
443 outBuffer.dst = blockTable[blockNb].resPtr;
444 outBuffer.size = blockTable[blockNb].srcSize;
446 rSize = ZSTD_decompressStream(zbd, &outBuffer, &inBuffer);
447 if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_decompressStream() failed : %s", ZSTD_getErrorName(rSize));
448 blockTable[blockNb].resSize = outBuffer.pos;
451 } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
452 ZSTD_freeDStream(zbd);
453 } else if (compressor == BMK_ZWRAP_ZLIB_REUSE || compressor == BMK_ZWRAP_ZSTD_REUSE || compressor == BMK_ZLIB_REUSE) {
/* zlib / zlibWrapper, reusing one inflate stream across blocks */
456 if (compressor == BMK_ZLIB_REUSE) ZWRAP_setDecompressionType(ZWRAP_FORCE_ZLIB);
457 else ZWRAP_setDecompressionType(ZWRAP_AUTO);
461 ret = inflateInit(&inf);
462 if (ret != Z_OK) EXM_THROW(1, "inflateInit failure");
465 for (blockNb=0; blockNb<nbBlocks; blockNb++) {
466 if (ZWRAP_isUsingZSTDdecompression(&inf))
467 ret = ZWRAP_inflateReset_keepDict(&inf); /* reuse dictionary to make decompression faster; inflate will return Z_NEED_DICT only for the first time */
469 ret = inflateReset(&inf);
470 if (ret != Z_OK) EXM_THROW(1, "inflateReset failure");
471 inf.next_in = (z_const z_Bytef*) blockTable[blockNb].cPtr;
472 inf.avail_in = (uInt)blockTable[blockNb].cSize;
474 inf.next_out = (z_Bytef*) blockTable[blockNb].resPtr;
475 inf.avail_out = (uInt)blockTable[blockNb].srcSize;
477 ret = inflate(&inf, Z_FINISH);
478 if (ret == Z_NEED_DICT) {
479 ret = inflateSetDictionary(&inf, (const z_Bytef*)dictBuffer, dictBufferSize);
480 if (ret != Z_OK) EXM_THROW(1, "inflateSetDictionary failure");
481 ret = inflate(&inf, Z_FINISH);
483 if (ret != Z_STREAM_END) EXM_THROW(1, "inflate failure");
484 blockTable[blockNb].resSize = inf.total_out;
487 } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
488 ret = inflateEnd(&inf);
489 if (ret != Z_OK) EXM_THROW(1, "inflateEnd failure");
/* zlib / zlibWrapper, fresh inflate stream per block (no reuse) */
492 if (compressor == BMK_ZLIB) ZWRAP_setDecompressionType(ZWRAP_FORCE_ZLIB);
493 else ZWRAP_setDecompressionType(ZWRAP_AUTO);
496 for (blockNb=0; blockNb<nbBlocks; blockNb++) {
501 ret = inflateInit(&inf);
502 if (ret != Z_OK) EXM_THROW(1, "inflateInit failure");
503 inf.next_in = (z_const z_Bytef*) blockTable[blockNb].cPtr;
504 inf.avail_in = (uInt)blockTable[blockNb].cSize;
506 inf.next_out = (z_Bytef*) blockTable[blockNb].resPtr;
507 inf.avail_out = (uInt)blockTable[blockNb].srcSize;
509 ret = inflate(&inf, Z_FINISH);
510 if (ret == Z_NEED_DICT) {
511 ret = inflateSetDictionary(&inf, (const z_Bytef*) dictBuffer, dictBufferSize);
512 if (ret != Z_OK) EXM_THROW(1, "inflateSetDictionary failure");
513 ret = inflate(&inf, Z_FINISH);
515 if (ret != Z_STREAM_END) EXM_THROW(1, "inflate failure");
516 ret = inflateEnd(&inf);
517 if (ret != Z_OK) EXM_THROW(1, "inflateEnd failure");
518 blockTable[blockNb].resSize = inf.total_out;
521 } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
/* record fastest loop time and total decompression time */
523 { U64 const clockSpan = UTIL_clockSpanMicro(clockStart);
524 if (clockSpan < fastestD*nbLoops) fastestD = clockSpan / nbLoops;
525 totalDTime += clockSpan;
526 dCompleted = totalDTime>maxTime;
529 markNb = (markNb+1) % NB_MARKS;
530 DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.3f),%6.1f MB/s ,%6.1f MB/s\r",
531 marks[markNb], displayName, (unsigned)srcSize, (unsigned)cSize, ratio,
532 (double)srcSize / (double)fastestC,
533 (double)srcSize / (double)fastestD );
/* --- Round-trip integrity check: compare XXH64 of result vs source, and on
 *     mismatch locate the first differing byte and its block/sub-block --- */
536 { U64 const crcCheck = XXH64(resultBuffer, srcSize, 0);
537 if (crcOrig!=crcCheck) {
539 DISPLAY("!!! WARNING !!! %14s : Invalid Checksum : %x != %x \n", displayName, (unsigned)crcOrig, (unsigned)crcCheck);
540 for (u=0; u<srcSize; u++) {
541 if (((const BYTE*)srcBuffer)[u] != ((const BYTE*)resultBuffer)[u]) {
542 unsigned segNb, bNb, pos;
544 DISPLAY("Decoding error at pos %u ", (unsigned)u);
545 for (segNb = 0; segNb < nbBlocks; segNb++) {
546 if (bacc + blockTable[segNb].srcSize > u) break;
547 bacc += blockTable[segNb].srcSize;
549 pos = (U32)(u - bacc);
550 bNb = pos / (128 KB);
551 DISPLAY("(block %u, sub %u, pos %u) \n", segNb, bNb, pos);
554 if (u==srcSize-1) { /* should never happen */
555 DISPLAY("no difference detected\n");
558 } } /* CRC Checking */
560 } /* for (testNb = 1; testNb <= (g_nbIterations + !g_nbIterations); testNb++) */
/* Final one-line summary at display level 1 (script-friendly output) */
562 if (g_displayLevel == 1) {
563 double cSpeed = (double)srcSize / (double)fastestC;
564 double dSpeed = (double)srcSize / (double)fastestD;
565 if (g_additionalParam)
566 DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s (param=%d)\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName, g_additionalParam);
568 DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName);
570 DISPLAYLEVEL(2, "%2i#\n", cLevel);
/* cleanup (NOTE(review): frees for blockTable/resultBuffer/ctx/dctx are in
 * lines elided from this extract) */
575 free(compressedBuffer);
/* Probes malloc() downward from `requiredMem` (rounded up to the next 64 MB
 * boundary, capped at maxMemory) until an allocation succeeds; returns that
 * size + 1 (never zero). NOTE(review): elided extract — the do{ opener, the
 * step decrement, and the free(testmem) before return are missing here. */
583 static size_t BMK_findMaxMem(U64 requiredMem)
585 size_t const step = 64 MB;
586 BYTE* testmem = NULL;
588 requiredMem = (((requiredMem >> 26) + 1) << 26);
590 if (requiredMem > maxMemory) requiredMem = maxMemory;
593 testmem = (BYTE*)malloc((size_t)requiredMem);
595 } while (!testmem && requiredMem); /* do not allocate zero bytes */
598 return (size_t)(requiredMem+1); /* avoid zero */
/* Runs BMK_benchMem() over compression levels cLevel..cLevelLast for every
 * backend: zstd (streaming and CCtx), zstd-via-zlibWrapper (with and without
 * context reuse), then plain zlib variants (levels capped at
 * Z_BEST_COMPRESSION). NOTE(review): elided extract — the opening brace,
 * loop-variable declaration, and the middle argument lines of each
 * BMK_benchMem call (displayName/cLevel/fileSizes/nbFiles) are missing. */
601 static void BMK_benchCLevel(void* srcBuffer, size_t benchedSize,
602 const char* displayName, int cLevel, int cLevelLast,
603 const size_t* fileSizes, unsigned nbFiles,
604 const void* dictBuffer, size_t dictBufferSize)
/* strip any directory path so only the base filename is displayed */
608 const char* pch = strrchr(displayName, '\\'); /* Windows */
609 if (!pch) pch = strrchr(displayName, '/'); /* Linux */
610 if (pch) displayName = pch+1;
612 SET_REALTIME_PRIORITY;
614 if (g_displayLevel == 1 && !g_additionalParam)
615 DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n",
616 ZSTD_VERSION_STRING, ZSTD_GIT_COMMIT_STRING,
617 (unsigned)benchedSize, g_nbIterations, (unsigned)(g_blockSize>>10));
619 if (cLevelLast < cLevel) cLevelLast = cLevel;
621 DISPLAY("benchmarking zstd %s (using ZSTD_CStream)\n", ZSTD_VERSION_STRING);
622 for (l=cLevel; l <= cLevelLast; l++) {
623 BMK_benchMem(srcBuffer, benchedSize,
626 dictBuffer, dictBufferSize, BMK_ZSTD_STREAM);
629 DISPLAY("benchmarking zstd %s (using ZSTD_CCtx)\n", ZSTD_VERSION_STRING);
630 for (l=cLevel; l <= cLevelLast; l++) {
631 BMK_benchMem(srcBuffer, benchedSize,
634 dictBuffer, dictBufferSize, BMK_ZSTD);
637 DISPLAY("benchmarking zstd %s (using zlibWrapper)\n", ZSTD_VERSION_STRING);
638 for (l=cLevel; l <= cLevelLast; l++) {
639 BMK_benchMem(srcBuffer, benchedSize,
642 dictBuffer, dictBufferSize, BMK_ZWRAP_ZSTD_REUSE);
645 DISPLAY("benchmarking zstd %s (zlibWrapper not reusing a context)\n", ZSTD_VERSION_STRING);
646 for (l=cLevel; l <= cLevelLast; l++) {
647 BMK_benchMem(srcBuffer, benchedSize,
650 dictBuffer, dictBufferSize, BMK_ZWRAP_ZSTD);
/* zlib levels only go up to Z_BEST_COMPRESSION (9) */
654 if (cLevelLast > Z_BEST_COMPRESSION) cLevelLast = Z_BEST_COMPRESSION;
657 DISPLAY("benchmarking zlib %s\n", ZLIB_VERSION);
658 for (l=cLevel; l <= cLevelLast; l++) {
659 BMK_benchMem(srcBuffer, benchedSize,
662 dictBuffer, dictBufferSize, BMK_ZLIB_REUSE);
665 DISPLAY("benchmarking zlib %s (zlib not reusing a context)\n", ZLIB_VERSION);
666 for (l=cLevel; l <= cLevelLast; l++) {
667 BMK_benchMem(srcBuffer, benchedSize,
670 dictBuffer, dictBufferSize, BMK_ZLIB);
673 DISPLAY("benchmarking zlib %s (using zlibWrapper)\n", ZLIB_VERSION);
674 for (l=cLevel; l <= cLevelLast; l++) {
675 BMK_benchMem(srcBuffer, benchedSize,
678 dictBuffer, dictBufferSize, BMK_ZWRAP_ZLIB_REUSE);
681 DISPLAY("benchmarking zlib %s (zlibWrapper not reusing a context)\n", ZLIB_VERSION);
682 for (l=cLevel; l <= cLevelLast; l++) {
683 BMK_benchMem(srcBuffer, benchedSize,
686 dictBuffer, dictBufferSize, BMK_ZWRAP_ZLIB);
691 /*! BMK_loadFiles() :
692 Loads `buffer` with content of files listed within `fileNamesTable`.
693 At most, fills `buffer` entirely */
/* Skips directories and files of unknown size; records each loaded file's
 * size in `fileSizes`; throws (EXM_THROW) on open/read failure or if nothing
 * was loaded. NOTE(review): elided extract — the fileSizes parameter line,
 * `FILE* f` declaration, `continue` statements, and fclose(f) are missing. */
694 static void BMK_loadFiles(void* buffer, size_t bufferSize,
696 const char** fileNamesTable, unsigned nbFiles)
698 size_t pos = 0, totalSize = 0;
700 for (n=0; n<nbFiles; n++) {
702 U64 fileSize = UTIL_getFileSize(fileNamesTable[n]);
703 if (UTIL_isDirectory(fileNamesTable[n])) {
704 DISPLAYLEVEL(2, "Ignoring %s directory... \n", fileNamesTable[n]);
708 if (fileSize == UTIL_FILESIZE_UNKNOWN) {
709 DISPLAYLEVEL(2, "Cannot determine size of %s ... \n", fileNamesTable[n]);
713 f = fopen(fileNamesTable[n], "rb");
714 if (f==NULL) EXM_THROW(10, "impossible to open file %s", fileNamesTable[n]);
715 DISPLAYUPDATE(2, "Loading %s... \r", fileNamesTable[n]);
716 if (fileSize > bufferSize-pos) fileSize = bufferSize-pos, nbFiles=n; /* buffer too small - stop after this file */
717 { size_t const readSize = fread(((char*)buffer)+pos, 1, (size_t)fileSize, f);
718 if (readSize != (size_t)fileSize) EXM_THROW(11, "could not read %s", fileNamesTable[n]);
720 fileSizes[n] = (size_t)fileSize;
721 totalSize += (size_t)fileSize;
725 if (totalSize == 0) EXM_THROW(12, "no data to bench");
/* Loads an optional dictionary plus all input files into memory (shrinking to
 * available RAM via BMK_findMaxMem), then benchmarks the whole set through
 * BMK_benchCLevel(). NOTE(review): elided extract — the opening brace,
 * srcBuffer/benchedSize declarations, and the trailing free() calls are
 * missing from this view. */
728 static void BMK_benchFileTable(const char** fileNamesTable, unsigned nbFiles,
729 const char* dictFileName, int cLevel, int cLevelLast)
733 void* dictBuffer = NULL;
734 size_t dictBufferSize = 0;
735 size_t* fileSizes = (size_t*)malloc(nbFiles * sizeof(size_t));
736 U64 const totalSizeToLoad = UTIL_getTotalFileSize(fileNamesTable, nbFiles);
737 char mfName[20] = {0};
739 if (!fileSizes) EXM_THROW(12, "not enough memory for fileSizes");
741 /* Load dictionary */
742 if (dictFileName != NULL) {
743 U64 const dictFileSize = UTIL_getFileSize(dictFileName);
744 if (dictFileSize > 64 MB)
745 EXM_THROW(10, "dictionary file %s too large", dictFileName);
746 dictBufferSize = (size_t)dictFileSize;
747 dictBuffer = malloc(dictBufferSize);
748 if (dictBuffer==NULL)
749 EXM_THROW(11, "not enough memory for dictionary (%u bytes)", (unsigned)dictBufferSize);
750 BMK_loadFiles(dictBuffer, dictBufferSize, fileSizes, &dictFileName, 1);
753 /* Memory allocation & restrictions */
754 benchedSize = BMK_findMaxMem(totalSizeToLoad * 3) / 3;
755 if ((U64)benchedSize > totalSizeToLoad) benchedSize = (size_t)totalSizeToLoad;
756 if (benchedSize < totalSizeToLoad)
757 DISPLAY("Not enough memory; testing %u MB only...\n", (unsigned)(benchedSize >> 20));
758 srcBuffer = malloc(benchedSize + !benchedSize);
759 if (!srcBuffer) EXM_THROW(12, "not enough memory");
761 /* Load input buffer */
762 BMK_loadFiles(srcBuffer, benchedSize, fileSizes, fileNamesTable, nbFiles);
/* display name: single file name, or a synthesized "N files" label */
765 snprintf (mfName, sizeof(mfName), " %u files", nbFiles);
766 { const char* displayName = (nbFiles > 1) ? mfName : fileNamesTable[0];
767 BMK_benchCLevel(srcBuffer, benchedSize,
768 displayName, cLevel, cLevelLast,
770 dictBuffer, dictBufferSize);
/* Benchmarks a 10 MB synthetic buffer generated by RDG_genBuffer() at the
 * requested compressibility. NOTE(review): elided extract — the opening
 * brace, the `name` buffer declaration, and the final free(srcBuffer) are
 * missing from this view. */
780 static void BMK_syntheticTest(int cLevel, int cLevelLast, double compressibility)
783 size_t benchedSize = 10000000;
784 void* const srcBuffer = malloc(benchedSize);
786 /* Memory allocation */
787 if (!srcBuffer) EXM_THROW(21, "not enough memory");
789 /* Fill input buffer */
790 RDG_genBuffer(srcBuffer, benchedSize, compressibility, 0.0, 0);
/* label e.g. "Synthetic 50%" */
793 snprintf (name, sizeof(name), "Synthetic %2u%%", (unsigned)(compressibility*100));
794 BMK_benchCLevel(srcBuffer, benchedSize, name, cLevel, cLevelLast, &benchedSize, 1, NULL, 0);
/* Entry point for benchmarking: with no files, runs the synthetic test at the
 * default compressibility; otherwise benchmarks the given file table.
 * NOTE(review): elided extract — the opening brace, the nbFiles branch
 * structure, and the return statement are missing from this view. */
801 static int BMK_benchFiles(const char** fileNamesTable, unsigned nbFiles,
802 const char* dictFileName, int cLevel, int cLevelLast)
804 double const compressibility = (double)g_compressibilityDefault / 100;
807 BMK_syntheticTest(cLevel, cLevelLast, compressibility);
809 BMK_benchFileTable(fileNamesTable, nbFiles, dictFileName, cLevel, cLevelLast);
816 /*-************************************
818 **************************************/
/* Prints the welcome banner and command-line help to displayOut.
 * NOTE(review): elided extract — the opening brace, the #endif matching the
 * UTIL_HAS_CREATEFILELIST #ifdef, and the return statement are missing. */
819 static int usage(const char* programName)
821 DISPLAY(WELCOME_MESSAGE);
822 DISPLAY( "Usage :\n");
823 DISPLAY( " %s [args] [FILE(s)] [-o file]\n", programName);
825 DISPLAY( "FILE : a filename\n");
826 DISPLAY( " with no FILE, or when FILE is - , read standard input\n");
827 DISPLAY( "Arguments :\n");
828 DISPLAY( " -D file: use `file` as Dictionary \n");
829 DISPLAY( " -h/-H : display help/long help and exit\n");
830 DISPLAY( " -V : display Version number and exit\n");
831 DISPLAY( " -v : verbose mode; specify multiple times to increase log level (default:%d)\n", DEFAULT_DISPLAY_LEVEL);
832 DISPLAY( " -q : suppress warnings; specify twice to suppress errors too\n");
833 #ifdef UTIL_HAS_CREATEFILELIST
834 DISPLAY( " -r : operate recursively on directories\n");
837 DISPLAY( "Benchmark arguments :\n");
838 DISPLAY( " -b# : benchmark file(s), using # compression level (default : %d) \n", ZSTDCLI_CLEVEL_DEFAULT);
839 DISPLAY( " -e# : test all compression levels from -bX to # (default: %d)\n", ZSTDCLI_CLEVEL_DEFAULT);
840 DISPLAY( " -i# : minimum evaluation time in seconds (default : 3s)\n");
841 DISPLAY( " -B# : cut file into independent chunks of size # (default: no chunking)\n");
/* Reports bad parameters; shows full usage unless errors are suppressed.
 * NOTE(review): elided extract — opening brace and return value missing. */
845 static int badusage(const char* programName)
847 DISPLAYLEVEL(1, "Incorrect parameters\n");
848 if (g_displayLevel >= 1) usage(programName);
/* Prompts and blocks until the user presses enter (used by -p pause mode).
 * NOTE(review): elided extract — the brace lines and the blocking read
 * (presumably getchar(); verify against upstream) are missing. */
852 static void waitEnter(void)
855 DISPLAY("Press enter to continue...\n");
860 /*! readU32FromChar() :
861 @return : unsigned integer value reach from input in `char` format
862 Will also modify `*stringPtr`, advancing it to position where it stopped reading.
863 Note : this function can overflow if digit string > MAX_UINT */
/* NOTE(review): elided extract — opening brace, the `result` accumulator
 * declaration, and the return statement are missing from this view. */
864 static unsigned readU32FromChar(const char** stringPtr)
867 while ((**stringPtr >='0') && (**stringPtr <='9'))
868 result *= 10, result += (unsigned)(**stringPtr - '0'), (*stringPtr)++ ;
873 #define CLEAN_RETURN(i) { operationResult = (i); goto _end; }
/* CLI entry point: parses long options and aggregated short options, collects
 * filenames (optionally expanding directories with -r), then runs
 * BMK_benchFiles() and returns operationResult. NOTE(review): elided
 * extract — the opening brace, several local declarations (argNb, main_pause,
 * cLevelLast, operationResult), the `switch(argument[0])` opener, the `_end:`
 * label targeted by CLEAN_RETURN, and various closing braces are missing. */
875 int main(int argCount, char** argv)
879 nextEntryIsDictionary=0,
881 nextArgumentIsFile=0;
882 int cLevel = ZSTDCLI_CLEVEL_DEFAULT;
884 unsigned recursive = 0;
885 FileNamesTable* filenames = UTIL_allocateFileNamesTable((size_t)argCount);
886 const char* programName = argv[0];
887 const char* dictFileName = NULL;
888 char* dynNameSpace = NULL;
891 if (filenames==NULL) { DISPLAY("zstd: %s \n", strerror(errno)); exit(1); }
894 /* Pick out program name from path. Don't rely on stdlib because of conflicting behavior */
896 for (pos = strlen(programName); pos > 0; pos--) { if (programName[pos] == '/') { pos++; break; } }
900 /* command switches */
901 for(argNb=1; argNb<argCount; argNb++) {
902 const char* argument = argv[argNb];
903 if(!argument) continue; /* Protection if argument empty */
905 if (nextArgumentIsFile==0) {
907 /* long commands (--long-word) */
908 if (!strcmp(argument, "--")) { nextArgumentIsFile=1; continue; }
909 if (!strcmp(argument, "--version")) { displayOut=stdout; DISPLAY(WELCOME_MESSAGE); CLEAN_RETURN(0); }
910 if (!strcmp(argument, "--help")) { displayOut=stdout; CLEAN_RETURN(usage(programName)); }
911 if (!strcmp(argument, "--verbose")) { g_displayLevel++; continue; }
912 if (!strcmp(argument, "--quiet")) { g_displayLevel--; continue; }
914 /* Decode commands (note : aggregated commands are allowed) */
915 if (argument[0]=='-') {
918 while (argument[0]!=0) {
922 case 'V': displayOut=stdout; DISPLAY(WELCOME_MESSAGE); CLEAN_RETURN(0); /* Version Only */
924 case 'h': displayOut=stdout; CLEAN_RETURN(usage(programName));
926 /* Use file content as dictionary */
927 case 'D': nextEntryIsDictionary = 1; argument++; break;
930 case 'v': g_displayLevel++; argument++; break;
933 case 'q': g_displayLevel--; argument++; break;
935 #ifdef UTIL_HAS_CREATEFILELIST
937 case 'r': recursive=1; argument++; break;
942 /* first compression Level */
944 cLevel = (int)readU32FromChar(&argument);
947 /* range bench (benchmark only) */
949 /* last compression Level */
951 cLevelLast = (int)readU32FromChar(&argument);
954 /* Modify Nb Iterations (benchmark only) */
957 { U32 const iters = readU32FromChar(&argument);
958 BMK_setNotificationLevel(g_displayLevel);
959 BMK_SetNbIterations(iters);
963 /* cut input into blocks (benchmark only) */
966 { size_t bSize = readU32FromChar(&argument);
967 if (toupper(*argument)=='K') bSize<<=10, argument++; /* allows using KB notation */
968 if (toupper(*argument)=='M') bSize<<=20, argument++;
969 if (toupper(*argument)=='B') argument++;
970 BMK_setNotificationLevel(g_displayLevel);
971 BMK_SetBlockSize(bSize);
975 /* Pause at the end (-p) or set an additional param (-p#) (hidden option) */
976 case 'p': argument++;
977 if ((*argument>='0') && (*argument<='9')) {
978 BMK_setAdditionalParam((int)readU32FromChar(&argument));
982 /* unknown command */
983 default : CLEAN_RETURN(badusage(programName));
987 } /* if (argument[0]=='-') */
989 } /* if (nextArgumentIsAFile==0) */
991 if (nextEntryIsDictionary) {
992 nextEntryIsDictionary = 0;
993 dictFileName = argument;
997 /* add filename to list */
998 UTIL_refFilename(filenames, argument);
1001 /* Welcome message (if verbose) */
1002 DISPLAYLEVEL(3, WELCOME_MESSAGE);
1004 #ifdef UTIL_HAS_CREATEFILELIST
1006 UTIL_expandFNT(&filenames, 1);
1010 BMK_setNotificationLevel(g_displayLevel);
1011 BMK_benchFiles(filenames->fileNames, (unsigned)filenames->tableSize, dictFileName, cLevel, cLevelLast);
1014 if (main_pause) waitEnter();
1016 UTIL_freeFileNamesTable(filenames);
1017 return operationResult;