diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index ca153d8356a..00000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,168 +0,0 @@ -version: 2 - -jobs: - # the first half of the jobs are in this test - short-tests-0: - # TODO: Create a small custom docker image with all the dependencies we need - # preinstalled to reduce installation time. - docker: - - image: fbopensource/zstd-circleci-primary:0.0.1 - steps: - - checkout - - run: - name: Test - command: | - cc -v; CFLAGS="-O0 -Werror" make all && make clean - make c99build ; make clean - make c11build ; make clean - make aarch64build ; make clean - make -j regressiontest; make clean - make shortest ; make clean - make cxxtest ; make clean - # the second half of the jobs are in this test - short-tests-1: - docker: - - image: fbopensource/zstd-circleci-primary:0.0.1 - steps: - - checkout - - run: - name: Test - command: | - make gnu90build; make clean - make gnu99build; make clean - make ppc64build; make clean - make ppcbuild ; make clean - make armbuild ; make clean - make -C tests test-legacy test-longmatch test-symbols; make clean - make -C lib libzstd-nomt; make clean - # This step is only run on release tags. - # It publishes the source tarball as artifacts and if the GITHUB_TOKEN - # environment variable is set it will publish the source tarball to the - # tagged release. - publish-github-release: - docker: - - image: fbopensource/zstd-circleci-primary:0.0.1 - environment: - CIRCLE_ARTIFACTS: /tmp/circleci-artifacts - steps: - - checkout - - run: - name: Publish - command: | - export VERSION=$(echo $CIRCLE_TAG | tail -c +2) - export ZSTD_VERSION=zstd-$VERSION - git archive $CIRCLE_TAG --prefix $ZSTD_VERSION/ --format tar \ - -o $ZSTD_VERSION.tar - sha256sum $ZSTD_VERSION.tar > $ZSTD_VERSION.tar.sha256 - zstd -19 $ZSTD_VERSION.tar - sha256sum $ZSTD_VERSION.tar.zst > $ZSTD_VERSION.tar.zst.sha256 - gzip -k -9 $ZSTD_VERSION.tar - sha256sum $ZSTD_VERSION.tar.gz > $ZSTD_VERSION.tar.gz.sha256 - mkdir -p $CIRCLE_ARTIFACTS - cp $ZSTD_VERSION.tar* $CIRCLE_ARTIFACTS - - store_artifacts: - path: /tmp/circleci-artifacts - # This step should only be run in a cron job - regression-test: - docker: - - image: fbopensource/zstd-circleci-primary:0.0.1 - environment: - CIRCLE_ARTIFACTS: /tmp/circleci-artifacts - steps: - - checkout - # Restore the cached resources. - - restore_cache: - # We try our best to bust the cache when the data changes by hashing - # data.c. If that doesn't work, simply update the version number here - # and below. If we fail to bust the cache, the regression testing will - # still work, since it has its own stamp, but will need to redownload - # everything. - keys: - - regression-cache-{{ checksum "tests/regression/data.c" }}-v0 - - run: - name: Regression Test - command: | - make -C programs zstd - make -C tests/regression test - mkdir -p $CIRCLE_ARTIFACTS - ./tests/regression/test \ - --cache tests/regression/cache \ - --output $CIRCLE_ARTIFACTS/results.csv \ - --zstd programs/zstd - echo "NOTE: The new results.csv is uploaded as an artifact to this job" - echo " If this fails, go to the Artifacts pane in CircleCI, " - echo " download /tmp/circleci-artifacts/results.csv, and if they " - echo " are still good, copy it into the repo and commit it." 
- echo "> diff tests/regression/results.csv $CIRCLE_ARTIFACTS/results.csv" - diff tests/regression/results.csv $CIRCLE_ARTIFACTS/results.csv - # Only save the cache on success (default), since if the failure happened - # before we stamp the data cache, we will have a bad cache for this key. - - save_cache: - key: regression-cache-{{ checksum "tests/regression/data.c" }}-v0 - paths: - - tests/regression/cache - - store_artifacts: - path: /tmp/circleci-artifacts - - -workflows: - version: 2 - commit: - jobs: - # Run the tests in parallel - - short-tests-0: - filters: - tags: - only: /.*/ - - short-tests-1: - filters: - tags: - only: /.*/ - # Create a branch called regression and set it to dev to force a - # regression test run - - regression-test: - filters: - branches: - only: - - regression - # Only run on release tags. - - publish-github-release: - requires: - - short-tests-0 - - short-tests-1 - filters: - branches: - ignore: /.*/ - tags: - only: /^v\d+\.\d+\.\d+$/ - nightly: - triggers: - - schedule: - cron: "0 0 * * *" - filters: - branches: - only: - - master - - dev - jobs: - # Run daily long regression tests - - regression-test - - - - # Longer tests - #- make -C tests test-zstd-nolegacy && make clean - #- pyenv global 3.4.4; make -C tests versionsTest && make clean - #- make zlibwrapper && make clean - #- gcc -v; make -C tests test32 MOREFLAGS="-I/usr/include/x86_64-linux-gnu" && make clean - #- make uasan && make clean - #- make asan32 && make clean - #- make -C tests test32 CC=clang MOREFLAGS="-g -fsanitize=address -I/usr/include/x86_64-linux-gnu" - # Valgrind tests - #- CFLAGS="-O1 -g" make -C zlibWrapper valgrindTest && make clean - #- make -C tests valgrindTest && make clean - # ARM, AArch64, PowerPC, PowerPC64 tests - #- make ppctest && make clean - #- make ppc64test && make clean - #- make armtest && make clean - #- make aarch64test && make clean diff --git a/.circleci/images/primary/Dockerfile b/.circleci/images/primary/Dockerfile deleted file mode 100644 index dd800415252..00000000000 --- a/.circleci/images/primary/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM circleci/buildpack-deps:bionic - -RUN sudo dpkg --add-architecture i386 -RUN sudo apt-get -y -qq update -RUN sudo apt-get -y install \ - gcc-multilib-powerpc-linux-gnu gcc-arm-linux-gnueabi \ - libc6-dev-armel-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross \ - libc6-dev-ppc64-powerpc-cross zstd gzip coreutils \ - libcurl4-openssl-dev diff --git a/.cirrus.yml b/.cirrus.yml index 506647a009e..745024bc22f 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -1,16 +1,9 @@ -env: - CIRRUS_CLONE_DEPTH: 1 - ARCH: amd64 - task: + name: FreeBSD (make check) freebsd_instance: matrix: - image: freebsd-12-0-release-amd64 - image: freebsd-11-2-release-amd64 - install_script: - - sed -i.bak -e 's,pkg+http://pkg.FreeBSD.org/\${ABI}/quarterly,pkg+http://pkg.FreeBSD.org/\${ABI}/latest,' /etc/pkg/FreeBSD.conf - - pkg upgrade -y - - pkg install -y gmake coreutils + image_family: freebsd-14-2 + install_script: pkg install -y gmake coreutils script: | MOREFLAGS="-Werror" gmake -j all - gmake shortest + gmake check diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000000..755a46af145 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,35 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. 
+ +**To Reproduce** +Steps to reproduce the behavior: +1. Download data '...' +2. Run '...' with flags '...' +3. Scroll up on the log to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots and charts** +If applicable, add screenshots and charts to help explain your problem. + +**Desktop (please complete the following information):** + - OS: [e.g. Mac] + - Version [e.g. 22] + - Compiler [e.g. gcc] + - Flags [e.g. O2] + - Other relevant hardware specs [e.g. Dual-core] + - Build system [e.g. Makefile] + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000000..bbcbbe7d615 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: '' +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000000..5ace4600a1f --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,6 @@ +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" diff --git a/.github/workflows/android-ndk-build.yml b/.github/workflows/android-ndk-build.yml new file mode 100644 index 00000000000..99183049c01 --- /dev/null +++ b/.github/workflows/android-ndk-build.yml @@ -0,0 +1,50 @@ +name: Android NDK Build + +on: + pull_request: + branches: [ dev, release, actionsTest ] + push: + branches: [ actionsTest, '*ndk*' ] + +permissions: read-all + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + + - name: Set up JDK 17 + uses: actions/setup-java@dded0888837ed1f317902acf8a20df0ad188d165 # v5.0.0 + with: + java-version: '17' + distribution: 'temurin' + + - name: Setup Android SDK + uses: android-actions/setup-android@9fc6c4e9069bf8d3d10b2204b1fb8f6ef7065407 # v3.2.2 + + - name: Install Android NDK + run: | + sdkmanager --install "ndk;27.0.12077973" + echo "ANDROID_NDK_HOME=$ANDROID_SDK_ROOT/ndk/27.0.12077973" >> $GITHUB_ENV + + - name: Build with NDK + run: | + export PATH=$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/linux-x86_64/bin:$PATH + make CC=aarch64-linux-android21-clang \ + AR=llvm-ar \ + RANLIB=llvm-ranlib \ + STRIP=llvm-strip + + - name: Build with CMake and NDK + run: | + mkdir -p build-android + cd build-android + cmake --version + cmake ../build/cmake \ + -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK_HOME/build/cmake/android.toolchain.cmake \ + -DANDROID_ABI=arm64-v8a \ + -DANDROID_PLATFORM=android-21 \ + -DCMAKE_BUILD_TYPE=Release + cmake --build .
--parallel diff --git a/.github/workflows/cmake-tests.yml b/.github/workflows/cmake-tests.yml new file mode 100644 index 00000000000..5534cdf89c3 --- /dev/null +++ b/.github/workflows/cmake-tests.yml @@ -0,0 +1,168 @@ +name: cmake-tests +# CMake-specific build and test workflows +# This workflow validates zstd builds across different CMake configurations, +# platforms, and edge cases to ensure broad compatibility. + +concurrency: + group: cmake-${{ github.ref }} + cancel-in-progress: true + +on: + pull_request: + branches: [ dev, release, actionsTest ] + +permissions: read-all + +env: + # Centralized test timeouts for consistency + QUICK_TEST_TIME: "30s" + STANDARD_TEST_TIME: "1mn" + # Common CMake flags + COMMON_CMAKE_FLAGS: "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON -DZSTD_BUILD_TESTS=ON" + +jobs: + # Basic cmake build using the root CMakeLists.txt + # Provides a lightweight sanity check that the top-level project config builds + # with the default Unix Makefiles generator driven purely through cmake commands + cmake-root-basic: + name: "CMake Root Build" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: Configure (Root) + run: | + cmake -S . -B cmake-build -DCMAKE_BUILD_TYPE=Release ${{ env.COMMON_CMAKE_FLAGS }} + - name: Build (Root) + run: | + cmake --build cmake-build --config Release + + # Ubuntu-based cmake build using make wrapper + # This test uses the make-driven cmake build to ensure compatibility + # with the existing build system integration + cmake-ubuntu-basic: + name: "CMake build using make wrapper" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: Install dependencies + run: | + sudo apt install liblzma-dev # Required for compression algorithms + - name: CMake build and test via make + run: | + # Use make wrapper for cmake build with quick test timeouts + FUZZERTEST=-T${{ env.STANDARD_TEST_TIME }} ZSTREAM_TESTTIME=-T${{ env.STANDARD_TEST_TIME }} make cmakebuild V=1 + + # Cross-platform cmake build with edge case: source paths containing spaces + # This test ensures cmake handles filesystem paths with spaces correctly + # across different operating systems and build generators + cmake-cross-platform-spaces: + name: "CMake Cross-Platform (Spaces in Path)" + runs-on: ${{ matrix.os }} + strategy: + matrix: + include: + - os: ubuntu-latest + generator: "Unix Makefiles" + name: "Linux" + - os: windows-latest + generator: "NMake Makefiles" + name: "Windows NMake" + - os: macos-latest + generator: "Unix Makefiles" + name: "macOS" + env: + SRC_DIR: "source directory with spaces" + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + with: + path: "${{ env.SRC_DIR }}" + - uses: ilammy/msvc-dev-cmd@0b201ec74fa43914dc39ae48a89fd1d8cb592756 # v1.13.0 + if: ${{ matrix.generator == 'NMake Makefiles' }} + - name: "CMake build and install (${{ matrix.name }})" + run: | + # Test Release build with installation to verify packaging + cmake -S "${{ env.SRC_DIR }}/build/cmake" -B build -DBUILD_TESTING=ON -G "${{ matrix.generator }}" -DCMAKE_BUILD_TYPE=Release --install-prefix "${{ runner.temp }}/install" + cmake --build build --config Release + cmake --install build --config Release + + # Windows-specific cmake testing with Visual Studio 2022 + # Tests multiple generators and toolchains to ensure broad Windows compatibility + # including MSVC (x64, Win32, ARM64), MinGW, and Clang-CL with various architectures and 
optimizations + cmake-windows-comprehensive: + name: "CMake Windows VS2022 (${{ matrix.name }})" + runs-on: ${{ matrix.runner }} + strategy: + fail-fast: false + matrix: + include: + - generator: "Visual Studio 17 2022" + flags: "-A x64" + name: "MSVC x64" + runner: "windows-2022" + cmake_extra_flags: "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON -DZSTD_BUILD_TESTS=ON" + - generator: "Visual Studio 17 2022" + flags: "-A Win32" + name: "MSVC Win32" + runner: "windows-2022" + cmake_extra_flags: "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON -DZSTD_BUILD_TESTS=ON" + - generator: "Visual Studio 17 2022" + flags: "-A x64" + name: "MSVC x64 (No ZSTD_BUILD_TESTS)" + runner: "windows-2022" + # Intentionally omit ZSTD_BUILD_TESTS to reproduce the CXX language configuration bug + cmake_extra_flags: "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON" + - generator: "Visual Studio 17 2022" + flags: "-A ARM64" + name: "MSVC ARM64" + runner: "windows-11-arm" # GitHub runner for WOA (Windows on ARM) instance + - generator: "MinGW Makefiles" + flags: "" + name: "MinGW" + runner: "windows-2022" + cmake_extra_flags: "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON -DZSTD_BUILD_TESTS=ON" + - generator: "Visual Studio 17 2022" + flags: "-T ClangCL" + name: "Clang-CL" + runner: "windows-2022" + cmake_extra_flags: "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON -DZSTD_BUILD_TESTS=ON" + - generator: "Visual Studio 17 2022" + flags: "-T ClangCL -A x64 -DCMAKE_C_FLAGS=/arch:AVX2" + name: "Clang-CL AVX2" + runner: "windows-2022" + cmake_extra_flags: "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON -DZSTD_BUILD_TESTS=ON" + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: Add MSBuild to PATH + uses: microsoft/setup-msbuild@6fb02220983dee41ce7ae257b6f4d8f9bf5ed4ce # tag=v2.0.0 + - name: "Configure CMake (${{ matrix.name }})" + run: | + cd build\cmake + mkdir build + cd build + cmake.exe -G "${{matrix.generator}}" ${{matrix.flags}} -DCMAKE_BUILD_TYPE=Debug ${{ matrix.cmake_extra_flags }} -DZSTD_ZSTREAM_FLAGS=-T${{ env.QUICK_TEST_TIME }} -DZSTD_FUZZER_FLAGS=-T${{ env.QUICK_TEST_TIME }} -DZSTD_FULLBENCH_FLAGS=-i0 .. + - name: "Build (${{ matrix.name }})" + run: | + cd build\cmake\build + cmake.exe --build . + - name: "Test (${{ matrix.name }})" + run: | + cd build\cmake\build + ctest.exe -V -C Debug + + # macOS ARM64 (Apple Silicon) specific cmake testing + # Validates zstd builds and runs correctly on Apple Silicon architecture + # Uses native ARM64 hardware for optimal performance and compatibility testing + cmake-macos-arm64: + name: "CMake macOS ARM64 (Apple Silicon)" + runs-on: macos-14 # ARM64 runner + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: "CMake build and test (ARM64)" + run: | + # Configure and build with ARM64-specific optimizations + cd build/cmake + mkdir build + cd build + cmake -DCMAKE_BUILD_TYPE=Release ${{ env.COMMON_CMAKE_FLAGS }} -DZSTD_ZSTREAM_FLAGS=-T${{ env.QUICK_TEST_TIME }} -DZSTD_FUZZER_FLAGS=-T${{ env.QUICK_TEST_TIME }} -DZSTD_FULLBENCH_FLAGS=-i1 ..
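+ # sysctl -n hw.ncpu reports the number of logical cores on macOS, so the build runs one make job per core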
+ make -j$(sysctl -n hw.ncpu) + ctest -V diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml new file mode 100644 index 00000000000..872e3b73483 --- /dev/null +++ b/.github/workflows/commit.yml @@ -0,0 +1,104 @@ +name: facebook/zstd/commit +on: + push: + branches: + - dev + pull_request: + branches: + - dev + +permissions: read-all + +jobs: + short-tests-0: + runs-on: ubuntu-latest + services: + docker: + image: fbopensource/zstd-circleci-primary:0.0.1 + options: --entrypoint /bin/bash + steps: + - uses: actions/checkout@v5 + - name: Install Dependencies + run: | + sudo apt-get update + sudo apt-get install libcurl4-gnutls-dev + - name: Test + run: | + ./tests/test-license.py + cc -v + CFLAGS="-O0 -Werror -pedantic" make allmost; make clean + make c99build; make clean + make c11build; make clean + make -j regressiontest; make clean + make check; make clean + make cxxtest; make clean + + short-tests-1: + runs-on: ubuntu-latest + services: + docker: + image: fbopensource/zstd-circleci-primary:0.0.1 + options: --entrypoint /bin/bash + steps: + - uses: actions/checkout@v5 + - name: Install Dependencies + run: | + sudo apt-get update + sudo apt-get install gcc-powerpc-linux-gnu gcc-arm-linux-gnueabi gcc-aarch64-linux-gnu libc6-dev-ppc64-powerpc-cross libcurl4-gnutls-dev lib64gcc-13-dev-powerpc-cross + - name: gnu90 build + run: make gnu90build && make clean + - name: gnu99 build + run: make gnu99build && make clean + - name: ppc64 build + run: make ppc64build V=1 && make clean + - name: ppc build + run: make ppcbuild V=1 && make clean + - name: arm build + run: make armbuild V=1 && make clean + - name: aarch64 build + run: make aarch64build V=1 && make clean + - name: test-legacy + run: make -C tests test-legacy V=1 && make clean + - name: test-longmatch + run: make -C tests test-longmatch V=1 && make clean + - name: libzstd-nomt build + run: make -C lib libzstd-nomt V=1 && make clean + + regression-test: + runs-on: ubuntu-latest + services: + docker: + image: fbopensource/zstd-circleci-primary:0.0.1 + options: --entrypoint /bin/bash + env: + CIRCLE_ARTIFACTS: "/tmp/circleci-artifacts" + steps: + - uses: actions/checkout@v5 + - name: restore_cache + uses: actions/cache@v4 + with: + key: regression-cache-${{ hashFiles('tests/regression/data.c') }}-v0 + path: tests/regression/cache + restore-keys: regression-cache-${{ hashFiles('tests/regression/data.c') }}-v0 + - name: Install Dependencies + run: | + sudo apt-get update + sudo apt-get install libcurl4-gnutls-dev + - name: Regression Test + run: | + make -C programs zstd + make -C tests/regression test + mkdir -p $CIRCLE_ARTIFACTS + ./tests/regression/test \ + --cache tests/regression/cache \ + --output $CIRCLE_ARTIFACTS/results.csv \ + --zstd programs/zstd + echo "NOTE: The new results.csv is uploaded as an artifact to this job" + echo " If this fails, go to the Artifacts pane of this workflow run, " + echo " download /tmp/circleci-artifacts/results.csv, and if they " + echo " are still good, copy it into the repo and commit it."
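+ # echo the comparison command for easy local reproduction; diff exits non-zero and fails the job when results differ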
+ echo "> diff tests/regression/results.csv $CIRCLE_ARTIFACTS/results.csv" + diff tests/regression/results.csv $CIRCLE_ARTIFACTS/results.csv + - uses: actions/upload-artifact@v5 + with: + path: "/tmp/circleci-artifacts" diff --git a/.github/workflows/dev-long-tests.yml b/.github/workflows/dev-long-tests.yml new file mode 100644 index 00000000000..4b2d2b3f18f --- /dev/null +++ b/.github/workflows/dev-long-tests.yml @@ -0,0 +1,324 @@ +name: dev-long-tests +# Tests generally longer than 10mn + +concurrency: + group: long-${{ github.ref }} + cancel-in-progress: true + +on: + pull_request: + branches: [ dev, release, actionsTest ] + +permissions: read-all + +jobs: + # lasts ~7mn + make-all: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: make all + run: make all + + # lasts ~19mn + make-test: + runs-on: ubuntu-latest + env: + DEVNULLRIGHTS: 1 + READFROMBLOCKDEVICE: 1 + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: make test + run: | + make test + make -j zstd + ./tests/test_process_substitution.bash ./zstd + + # lasts ~16mn + make-test-macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: make test on macos + run: make test + + # lasts ~10mn + make-test-32bit: + runs-on: ubuntu-latest + env: + DEVNULLRIGHTS: 1 + READFROMBLOCKDEVICE: 1 + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: make test # note: `make -j test success` seems to require a clean state + run: | + sudo apt-get -qqq update + make libc6install + make clean + CFLAGS="-m32 -O2" make -j test V=1 + + # lasts ~7mn + test-largeDictionary: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: largeDictionary + run: | + CFLAGS="-Werror -O3" make -j -C tests test-largeDictionary + + # lasts ~9mn + no-intrinsics-fuzztest: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: no intrinsics fuzztest + run: MOREFLAGS="-DZSTD_NO_INTRINSICS" make -C tests fuzztest + + # lasts ~8mn + tsan-zstreamtest: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: thread sanitizer zstreamtest + run: CC=clang ZSTREAM_TESTTIME=-T3mn make tsan-test-zstream + + uasan-zstreamtest: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: ub + address sanitizer on zstreamtest + run: CC=clang make uasan-test-zstream + + # lasts ~11mn + tsan-fuzztest: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: thread sanitizer fuzztest + run: CC=clang make tsan-fuzztest + + big-tests-zstreamtest32: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: zstream tests in 32bit mode, with big tests + run: | + sudo apt-get -qqq update + make libc6install + CC=clang make -C tests test-zstream32 FUZZER_FLAGS="--big-tests" + + # lasts ~13mn + gcc-8-asan-ubsan-testzstd: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: gcc-8 + ASan + UBSan + Test Zstd + # See https://askubuntu.com/a/1428822 + run: | + echo "deb [arch=amd64] 
http://archive.ubuntu.com/ubuntu focal main universe" | sudo tee -a /etc/apt/sources.list + sudo apt-get -qqq update + make gcc8install + CC=gcc-8 make -j uasan-test-zstd cross.ini < + msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=${{matrix.toolset}} + /t:Clean,Build /p:Platform=${{matrix.platform}} /p:Configuration=${{matrix.configuration}} /warnaserror + - name: Build ${{matrix.name}} + working-directory: ${{env.GITHUB_WORKSPACE}} + # See https://docs.microsoft.com/visualstudio/msbuild/msbuild-command-line-reference + if: ${{ matrix.arch != '' }} + run: > + msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=${{matrix.toolset}} + /t:Clean,Build /p:Platform=${{matrix.platform}} /p:Configuration=${{matrix.configuration}} /warnaserror + /p:InstructionSet=${{matrix.arch}} + + # This tests that we don't accidentally grow the size too much. + # If the size grows intentionally, you can raise these numbers. + # But we do need to think about binary size, since it is a concern. + libzstd-size: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: libzstd size test + run: | + make clean && make -j -C lib libzstd && ./tests/check_size.py lib/libzstd.so 1100000 + make clean && make -j -C lib libzstd ZSTD_LIB_COMPRESSION=0 ZSTD_LIB_DICTBUILDER=0 && ./tests/check_size.py lib/libzstd.so 400000 + make clean && make -j -C lib libzstd ZSTD_LIB_MINIFY=1 && ./tests/check_size.py lib/libzstd.so 300000 + make clean && make -j -C lib libzstd ZSTD_LIB_MINIFY=1 ZSTD_LIB_COMPRESSION=0 ZSTD_LIB_DICTBUILDER=0 && ./tests/check_size.py lib/libzstd.so 80000 + + minimal-decompressor-macros: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: minimal decompressor macros + run: | + make clean && make -j all ZSTD_LIB_MINIFY=1 MOREFLAGS="-Werror" + make clean && make check ZSTD_LIB_MINIFY=1 MOREFLAGS="-Werror" + make clean && make -j all MOREFLAGS="-Werror -DHUF_FORCE_DECOMPRESS_X1 -DZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT" + make clean && make check MOREFLAGS="-Werror -DHUF_FORCE_DECOMPRESS_X1 -DZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT" + make clean && make -j all MOREFLAGS="-Werror -DHUF_FORCE_DECOMPRESS_X2 -DZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG" + make clean && make check MOREFLAGS="-Werror -DHUF_FORCE_DECOMPRESS_X2 -DZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG" + make clean && make -j all MOREFLAGS="-Werror -DZSTD_NO_INLINE -DZSTD_STRIP_ERROR_STRINGS" + make clean && make check MOREFLAGS="-Werror -DZSTD_NO_INLINE -DZSTD_STRIP_ERROR_STRINGS" + make clean && make check ZSTD_LIB_EXCLUDE_COMPRESSORS_DFAST_AND_UP=1 MOREFLAGS="-Werror" + make clean && make check ZSTD_LIB_EXCLUDE_COMPRESSORS_GREEDY_AND_UP=1 MOREFLAGS="-Werror" + + dynamic-bmi2: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: dynamic bmi2 tests + run: | + make clean && make -j check MOREFLAGS="-O0 -Werror -mbmi2" + make clean && make -j check MOREFLAGS="-O0 -Werror -DDYNAMIC_BMI2=1" + make clean && make -j check MOREFLAGS="-O0 -Werror -DDYNAMIC_BMI2=1 -mbmi2" + make clean && make -j check MOREFLAGS="-O0 -Werror -DDYNAMIC_BMI2=0" + make clean && make -j check MOREFLAGS="-O0 -Werror -DDYNAMIC_BMI2=0 -mbmi2" + + test-variants: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: make all variants & validate + run: | + make -j 
-C programs allVariants MOREFLAGS=-O0 + ./tests/test-variants.sh + + qemu-consistency: + name: QEMU ${{ matrix.name }} + runs-on: ubuntu-24.04 + strategy: + fail-fast: false # 'false' means Don't stop matrix workflows even if some matrix failed. + matrix: + include: [ + { name: ARM, xcc_pkg: gcc-arm-linux-gnueabi, xcc: arm-linux-gnueabi-gcc, xemu_pkg: qemu-system-arm, xemu: qemu-arm-static }, + { name: ARM64, xcc_pkg: gcc-aarch64-linux-gnu, xcc: aarch64-linux-gnu-gcc, xemu_pkg: qemu-system-aarch64,xemu: qemu-aarch64-static }, + { name: PPC, xcc_pkg: gcc-powerpc-linux-gnu, xcc: powerpc-linux-gnu-gcc, xemu_pkg: qemu-system-ppc, xemu: qemu-ppc-static }, + { name: PPC64LE, xcc_pkg: gcc-powerpc64le-linux-gnu, xcc: powerpc64le-linux-gnu-gcc, xemu_pkg: qemu-system-ppc, xemu: qemu-ppc64le-static }, + { name: S390X, xcc_pkg: gcc-s390x-linux-gnu, xcc: s390x-linux-gnu-gcc, xemu_pkg: qemu-system-s390x, xemu: qemu-s390x-static }, + { name: MIPS, xcc_pkg: gcc-mips-linux-gnu, xcc: mips-linux-gnu-gcc, xemu_pkg: qemu-system-mips, xemu: qemu-mips-static }, + { name: RISC-V, xcc_pkg: gcc-14-riscv64-linux-gnu, xcc: riscv64-linux-gnu-gcc-14, xemu_pkg: qemu-system-riscv64,xemu: qemu-riscv64-static }, + { name: M68K, xcc_pkg: gcc-m68k-linux-gnu, xcc: m68k-linux-gnu-gcc, xemu_pkg: qemu-system-m68k, xemu: qemu-m68k-static }, + { name: SPARC, xcc_pkg: gcc-sparc64-linux-gnu, xcc: sparc64-linux-gnu-gcc, xemu_pkg: qemu-system-sparc, xemu: qemu-sparc64-static }, + ] + env: # Set environment variables + XCC: ${{ matrix.xcc }} + XEMU: ${{ matrix.xemu }} + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: apt update & install + run: | + sudo apt-get update + sudo apt-get install gcc-multilib g++-multilib qemu-utils qemu-user-static + sudo apt-get install ${{ matrix.xcc_pkg }} ${{ matrix.xemu_pkg }} + - name: Environment info + run: | + echo && which $XCC + echo && $XCC --version + echo && $XCC -v # Show built-in specs + echo && which $XEMU + echo && $XEMU --version + - name: ARM + if: ${{ matrix.name == 'ARM' }} + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + - name: ARM64 + if: ${{ matrix.name == 'ARM64' }} + run: | + make clean + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make -j check + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make -j -C tests test-cli-tests + CFLAGS="-O3 -march=armv8.2-a+sve2" LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make -j check + CFLAGS="-O3 -march=armv8.2-a+sve2" LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make -j -C tests test-cli-tests +# This test is only compatible with standard libraries that support BTI (Branch Target Identification). +# Unfortunately, the standard library provided on Ubuntu 24.04 does not have this feature enabled. 
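+# The BTI variant below is therefore kept commented out, for reference, until the runner's toolchain ships a BTI-enabled libc: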
+# make clean +# LDFLAGS="-static -z force-bti" MOREFLAGS="-mbranch-protection=standard" CC=$XCC QEMU_SYS=$XEMU make check V=1 + - name: PPC + if: ${{ matrix.name == 'PPC' }} + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + - name: PPC64LE + if: ${{ matrix.name == 'PPC64LE' }} + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + - name: S390X + if: ${{ matrix.name == 'S390X' }} + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + - name: MIPS + if: ${{ matrix.name == 'MIPS' }} + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + - name: RISC-V + if: ${{ matrix.name == 'RISC-V' }} + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + CFLAGS="-march=rv64gcv -O3" LDFLAGS="-static -DMEM_FORCE_MEMORY_ACCESS=0" CC=$XCC QEMU_SYS="$XEMU -cpu rv64,v=true,vlen=128" make clean check + CFLAGS="-march=rv64gcv -O3" LDFLAGS="-static -DMEM_FORCE_MEMORY_ACCESS=0" CC=$XCC QEMU_SYS="$XEMU -cpu rv64,v=true,vlen=256" make clean check + CFLAGS="-march=rv64gcv -O3" LDFLAGS="-static -DMEM_FORCE_MEMORY_ACCESS=0" CC=$XCC QEMU_SYS="$XEMU -cpu rv64,v=true,vlen=512" make clean check + - name: M68K + if: ${{ matrix.name == 'M68K' }} + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + - name: SPARC + if: ${{ matrix.name == 'SPARC' }} + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + + mingw-short-test: + runs-on: windows-latest + strategy: + fail-fast: false # 'false' means Don't stop matrix workflows even if some matrix failed. + matrix: + include: [ + { compiler: gcc, msystem: MINGW32, cflags: "-Werror"}, + { compiler: gcc, msystem: MINGW64, cflags: "-Werror"}, + { compiler: clang, msystem: MINGW64, cflags: "--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion -Wno-unused-command-line-argument"}, + ] + defaults: + run: + shell: msys2 {0} + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - uses: msys2/setup-msys2@fb197b72ce45fb24f17bf3f807a388985654d1f2 # tag=v2.29.0 + with: + msystem: ${{ matrix.msystem }} + install: make diffutils + update: true + # Based on https://ariya.io/2020/07/on-github-actions-with-msys2 + - name: install mingw gcc i686 + if: ${{ (matrix.msystem == 'MINGW32') && (matrix.compiler == 'gcc') }} + run: pacman --noconfirm -S mingw-w64-i686-gcc + - name: install mingw gcc x86_64 + if: ${{ (matrix.msystem == 'MINGW64') && (matrix.compiler == 'gcc') }} + run: pacman --noconfirm -S mingw-w64-x86_64-gcc + - name: install mingw clang i686 + if: ${{ (matrix.msystem == 'MINGW32') && (matrix.compiler == 'clang') }} + run: pacman --noconfirm -S mingw-w64-i686-clang + - name: install mingw clang x86_64 + if: ${{ (matrix.msystem == 'MINGW64') && (matrix.compiler == 'clang') }} + run: pacman --noconfirm -S mingw-w64-x86_64-clang + - name: run mingw tests + run: | + make -v + export CC=${{ matrix.compiler }} + $CC --version + CFLAGS="${{ matrix.cflags }}" make -j allzstd + echo "Testing $CC ${{ matrix.msystem }}" + make clean + MSYS="" make check + + visual-runtime-tests: + runs-on: windows-latest + strategy: + matrix: + platform: [x64, Win32] + configuration: [Release] + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: Add MSBuild to PATH + uses: microsoft/setup-msbuild@6fb02220983dee41ce7ae257b6f4d8f9bf5ed4ce # tag=v2.0.0 + - name: Build and run tests + working-directory: ${{env.GITHUB_WORKSPACE}} + env: + ZSTD_BIN: ./zstd.exe + DATAGEN_BIN: ./datagen.exe + # See 
https://docs.microsoft.com/visualstudio/msbuild/msbuild-command-line-reference + run: | + msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v142 /t:Clean,Build /p:Platform=${{matrix.platform}} /p:Configuration=${{matrix.configuration}} + COPY build\VS2010\bin\${{matrix.platform}}_${{matrix.configuration}}\*.exe tests\ + CD tests + sh -e playTests.sh + .\fuzzer.exe -T2m + + # Following instructions at: https://github.com/marketplace/actions/install-cygwin-action + cygwin-tests: + runs-on: windows-latest + steps: + - run: git config --global core.autocrlf input + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - uses: cygwin/cygwin-install-action@f2009323764960f80959895c7bc3bb30210afe4d # tag=v6 + with: + platform: x86_64 + packages: >- + autoconf, + automake, + gcc-g++, + make, + mingw64-x86_64-gcc-g++, + patch, + perl + - name: cygwin tests + shell: C:\cygwin\bin\bash.exe --noprofile --norc -eo pipefail '{0}' + run: >- + export PATH=/usr/bin:$(cygpath ${SYSTEMROOT})/system32 && + export CFLAGS="-Werror -O1" && + ls && + make -j allzstd && + make -C tests fuzzer && + ./tests/fuzzer.exe -v -T1m + - name: cygwin install test + shell: C:\cygwin\bin\bash.exe --noprofile --norc -eo pipefail '{0}' + run: >- + make -j && + make install + + pkg-config: + runs-on: ubuntu-latest + container: + image: debian:testing + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: Install dependencies + run: | + apt -y update + apt -y install --no-install-recommends gcc libc6-dev make pkg-config + - name: Build and install + run: make -C lib install + - name: Test pkg-config + run: | + cc -Wall -Wextra -Wpedantic -Werror -o simple examples/simple_compression.c $(pkg-config --cflags --libs libzstd) + ./simple LICENSE + + versions-compatibility: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: Versions Compatibility Test + run: | + make -C tests versionsTest + + clangbuild: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: make clangbuild + run: | + make clangbuild + + gcc-pgo: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: Build PGO Zstd with GCC + env: + CC: gcc + run: | + make -C programs zstd-pgo + ./programs/zstd -b + + clang-pgo: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: Build PGO Zstd with Clang + env: + CC: clang + run: | + sudo apt install -y llvm + llvm-profdata --version + make -C programs zstd-pgo + ./programs/zstd -b + + musl-build: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: Install musl-tools + run: | + sudo apt install -y musl-tools + - name: Compile with musl-gcc and test-zstd + run: | + CC=musl-gcc CFLAGS="-Werror -O3" CPPFLAGS=-DZDICT_QSORT=ZDICT_QSORT_C90 make -j -C tests test-zstd V=1 + + intel-cet-compatibility: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: Build Zstd + run: | + make -j zstd V=1 + readelf -n zstd + - name: Get Intel SDE + run: | + curl -LO https://downloadmirror.intel.com/813591/sde-external-9.33.0-2024-01-07-lin.tar.xz + tar xJvf sde-external-9.33.0-2024-01-07-lin.tar.xz + - name: Configure 
Permissions + run: | + echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope + - name: Run Under SDE + run: | + sde-external-9.33.0-2024-01-07-lin/sde -cet -cet-raise 0 -cet-endbr-exe -cet-stderr -cet-abort -- ./zstd -b3 + + icx: + # install instructions: https://www.intel.com/content/www/us/en/docs/oneapi/installation-guide-linux/2025-0/apt-005.html + name: icx-check + runs-on: ubuntu-latest + steps: + - name: install icx + run: | + # download the key to system keyring + wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \ + | gpg --dearmor | sudo tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null + + # add signed entry to apt sources and configure the APT client to use Intel repository: + echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list + sudo apt-get update + sudo apt-get install -y intel-basekit intel-hpckit + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: make check + run: | + source /opt/intel/oneapi/setvars.sh + make CC=icx check + make CC=icx -C tests test-cli-tests diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml new file mode 100644 index 00000000000..e85546770f7 --- /dev/null +++ b/.github/workflows/nightly.yml @@ -0,0 +1,38 @@ +name: facebook/zstd/nightly +on: + schedule: + - cron: '0 0 * * *' + push: + branches: + - release + - dev + - '*nightly*' +permissions: read-all +jobs: + regression-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - name: Install dependencies + run: sudo apt-get update && sudo apt-get install -y libcurl4-openssl-dev + - name: Regression Test + run: | + make -C programs zstd + make -C tests/regression test + +# Longer tests + #- make -C tests test-zstd-nolegacy && make clean + #- pyenv global 3.4.4; make -C tests versionsTest && make clean + #- make zlibwrapper && make clean + #- gcc -v; make -C tests test32 MOREFLAGS="-I/usr/include/x86_64-linux-gnu" && make clean + #- make uasan && make clean + #- make asan32 && make clean + #- make -C tests test32 CC=clang MOREFLAGS="-g -fsanitize=address -I/usr/include/x86_64-linux-gnu" +# Valgrind tests + #- CFLAGS="-O1 -g" make -C zlibWrapper valgrindTest && make clean + #- make -C tests valgrindTest && make clean +# ARM, AArch64, PowerPC, PowerPC64 tests + #- make ppctest && make clean + #- make ppc64test && make clean + #- make armtest && make clean + #- make aarch64test && make clean diff --git a/.github/workflows/publish-release-artifacts.yml b/.github/workflows/publish-release-artifacts.yml new file mode 100644 index 00000000000..ee55cd8aa7e --- /dev/null +++ b/.github/workflows/publish-release-artifacts.yml @@ -0,0 +1,73 @@ +name: publish-release-artifacts + +on: + release: + types: + - published + +permissions: read-all + +jobs: + publish-release-artifacts: + permissions: + contents: write # to fetch code and upload artifacts + + runs-on: ubuntu-latest + if: startsWith(github.ref, 'refs/tags/') + + steps: + - name: Checkout + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + + - name: Archive + env: + RELEASE_SIGNING_KEY: ${{ secrets.RELEASE_SIGNING_KEY }} + RELEASE_SIGNING_KEY_PASSPHRASE: ${{ secrets.RELEASE_SIGNING_KEY_PASSPHRASE }} + run: | + # compute file name + export TAG="$(echo "$GITHUB_REF" | sed -n 's_^refs/tags/__p')" + if [ -z "$TAG" ]; then + echo "action must be run on a tag. 
GITHUB_REF is not a tag: $GITHUB_REF" + exit 1 + fi + # Attempt to extract "1.2.3" from "v1.2.3" to maintain artifact name backwards compat. + # Otherwise, degrade to using full tag. + export VERSION="$(echo "$TAG" | sed 's_^v\([0-9]\+\.[0-9]\+\.[0-9]\+\)$_\1_')" + export ZSTD_VERSION="zstd-$VERSION" + + # archive + git archive $TAG \ + --prefix $ZSTD_VERSION/ \ + --format tar \ + -o $ZSTD_VERSION.tar + + # Do the rest of the work in a sub-dir so we can glob everything we want to publish. + mkdir artifacts/ + mv $ZSTD_VERSION.tar artifacts/ + cd artifacts/ + + # compress + zstd -k -19 $ZSTD_VERSION.tar + gzip -k -9 $ZSTD_VERSION.tar + + # we only publish the compressed tarballs + rm $ZSTD_VERSION.tar + + # hash + sha256sum $ZSTD_VERSION.tar.zst > $ZSTD_VERSION.tar.zst.sha256 + sha256sum $ZSTD_VERSION.tar.gz > $ZSTD_VERSION.tar.gz.sha256 + + # sign + if [ -n "$RELEASE_SIGNING_KEY" ]; then + export GPG_BATCH_OPTS="--batch --no-use-agent --pinentry-mode loopback --no-tty --yes" + echo "$RELEASE_SIGNING_KEY" | gpg $GPG_BATCH_OPTS --import + gpg $GPG_BATCH_OPTS --armor --sign --sign-with signing@zstd.net --detach-sig --passphrase "$RELEASE_SIGNING_KEY_PASSPHRASE" --output $ZSTD_VERSION.tar.zst.sig $ZSTD_VERSION.tar.zst + gpg $GPG_BATCH_OPTS --armor --sign --sign-with signing@zstd.net --detach-sig --passphrase "$RELEASE_SIGNING_KEY_PASSPHRASE" --output $ZSTD_VERSION.tar.gz.sig $ZSTD_VERSION.tar.gz + fi + + - name: Publish + uses: skx/github-action-publish-binaries@b9ca5643b2f1d7371a6cba7f35333f1461bbc703 # tag=release-2.0 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + args: artifacts/* diff --git a/.github/workflows/release_check.yml b/.github/workflows/release_check.yml new file mode 100644 index 00000000000..23d5da0a7ee --- /dev/null +++ b/.github/workflows/release_check.yml @@ -0,0 +1,64 @@ +name: release_checks + +on: + push: + branches: + - release + pull_request: + branches: + - release + +permissions: read-all + +jobs: + verify-manual: + runs-on: ubuntu-latest + steps: + - name: Check out repository + uses: actions/checkout@v5 + + - name: Save current manual + run: mv doc/zstd_manual.html doc/zstd_manual_saved.html + + - name: Generate new manual + run: make manual + + - name: Compare manuals + run: | + if ! cmp -s doc/zstd_manual.html doc/zstd_manual_saved.html; then + echo "The API manual was not updated before release !" + exit 1 + fi + + verify-man-pages: + runs-on: ubuntu-latest + steps: + - name: Check out repository + uses: actions/checkout@v5 + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get install -y ruby ruby-dev + sudo gem install ronn + + - name: Display ronn version + run: ronn --version + + - name: Save current man pages + run: | + mv programs/zstd.1 programs/zstd.1.saved + mv programs/zstdgrep.1 programs/zstdgrep.1.saved + mv programs/zstdless.1 programs/zstdless.1.saved + + - name: Generate new manual pages + run: make -C programs man + + - name: Compare man pages + run: | + for file in zstd.1 zstdgrep.1 zstdless.1; do + if ! cmp -s programs/$file programs/$file.saved; then + echo "The man page $file should have been updated." + exit 1 + fi + done diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml new file mode 100644 index 00000000000..5fd3b48705f --- /dev/null +++ b/.github/workflows/scorecards.yml @@ -0,0 +1,64 @@ +name: Scorecards supply-chain security +on: + # Only the default branch is supported. 
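+ # re-run the analysis whenever branch protection settings change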
+ branch_protection_rule: + schedule: + - cron: '22 21 * * 2' + push: + # TODO: Add release branch when supported? + branches: [ "dev" ] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecards analysis + if: github.repository == 'facebook/zstd' + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Used to receive a badge. + id-token: write + # Needed for private repositories. + contents: read + actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # tag=v2.4.1 + with: + results_file: results.sarif + results_format: sarif + # (Optional) Read-only PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecards on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. + # repo_token: ${{ secrets.SCORECARD_READ_TOKEN }} + + # Publish the results for public repositories to enable scorecard badges. For more details, see + # https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories, `publish_results` will automatically be set to `false`, regardless + # of the value entered here. + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # tag=v5.0.0 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard.
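+ # (relies on the security-events: write permission declared for this job above)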
+ - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # tag=v4.31.2 + with: + sarif_file: results.sarif diff --git a/.github/workflows/windows-artifacts.yml b/.github/workflows/windows-artifacts.yml new file mode 100644 index 00000000000..249ef28b280 --- /dev/null +++ b/.github/workflows/windows-artifacts.yml @@ -0,0 +1,119 @@ +name: windows-artifacts + +on: + push: + branches: [ test_artifacts, win_artifacts, release ] + release: + types: + - published + +permissions: read-all + +jobs: + windows-artifacts: + permissions: + contents: write # to fetch code and upload artifacts + # For msys2, see https://ariya.io/2020/07/on-github-actions-with-msys2 + runs-on: ${{ matrix.shell == 'cmake' && 'windows-11-arm' || 'windows-latest' }} + strategy: + # For msys2, see https://github.com/msys2/setup-msys2 + matrix: + include: + - { msystem: mingw64, env: x86_64, ziparch: win64, shell: msys2 } + - { msystem: mingw32, env: i686, ziparch: win32, shell: msys2 } + - { msystem: null, env: arm64, ziparch: win-arm64, shell: cmake } + + defaults: + run: + shell: ${{ matrix.shell == 'cmake' && 'pwsh' || 'msys2 {0}' }} + + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + + # MSYS2 setup + - uses: msys2/setup-msys2@fb197b72ce45fb24f17bf3f807a388985654d1f2 # tag=v2.29.0 + if: matrix.shell == 'msys2' + with: + msystem: ${{ matrix.msystem }} + install: make p7zip git mingw-w64-${{matrix.env}}-gcc + update: true + + - name: display versions (MSYS2) + if: matrix.shell == 'msys2' + run: | + make -v + cc -v + + - name: display versions (CMake) + if: matrix.shell == 'cmake' + run: | + cmake --version + + # Build dependencies (MSYS2 only) + - name: Building zlib to static link + if: matrix.shell == 'msys2' + run: | + git clone --depth 1 --branch v1.3.1 https://github.com/madler/zlib + make -C zlib -f win32/Makefile.gcc libz.a + + - name: Building lz4 to static link + if: matrix.shell == 'msys2' + run: | + git clone --depth 1 --branch v1.10.0 https://github.com/lz4/lz4 + # ensure both libraries use the same version of libxxhash + cp lib/common/xxhash.* lz4/lib + CPPFLAGS=-DXXH_NAMESPACE=LZ4_ make -C lz4/lib liblz4.a V=1 + + # Build zstd + - name: Building zstd programs + if: matrix.shell == 'msys2' + run: | + CPPFLAGS="-I../zlib -I../lz4/lib" LDFLAGS=-static make -j allzstd V=1 HAVE_ZLIB=1 HAVE_LZ4=1 HAVE_LZMA=0 LDLIBS="../zlib/libz.a ../lz4/lib/liblz4.a" + + - name: Build zstd (CMake ARM64) + if: matrix.shell == 'cmake' + run: | + cd build\cmake + mkdir build + cd build + cmake.exe -G "Visual Studio 17 2022" -A ARM64 -DCMAKE_BUILD_TYPE=Release -DZSTD_BUILD_PROGRAMS=ON -DZSTD_BUILD_SHARED=ON -DZSTD_BUILD_STATIC=ON .. + cmake.exe --build . 
--config Release --parallel + + - name: Create artifacts (MSYS2) + if: matrix.shell == 'msys2' + run: | + ./lib/dll/example/build_package.bat || exit 1 + mv bin/ zstd-${{ github.ref_name }}-${{matrix.ziparch}}/ + + - name: Create artifacts (CMake) + if: matrix.shell == 'cmake' + run: | + .\lib\dll\example\build_package.bat + if ($LASTEXITCODE -ne 0) { exit 1 } + mv bin/ zstd-${{ github.ref_name }}-${{matrix.ziparch}}/ + + - name: Publish zstd-$VERSION-${{matrix.ziparch}}.zip for manual inspection + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # tag=v5.0.0 + with: + compression-level: 9 # maximum compression + if-no-files-found: error # defaults to `warn` + path: ${{ github.workspace }}/zstd-${{ github.ref_name }}-${{matrix.ziparch}}/ + name: zstd-${{ github.ref_name }}-${{matrix.ziparch}} + + - name: Package artifact for upload (MSYS2) + if: matrix.shell == 'msys2' + run: | + 7z a -tzip -mx9 "$(cygpath -u '${{ github.workspace }}/zstd-${{ github.ref_name }}-${{ matrix.ziparch }}.zip')" "$(cygpath -u '${{ github.workspace }}/zstd-${{ github.ref_name }}-${{ matrix.ziparch }}')" + + - name: Package artifact for upload (CMake) + if: matrix.shell == 'cmake' + run: | + Compress-Archive -Path "zstd-${{ github.ref_name }}-${{ matrix.ziparch }}" -DestinationPath "zstd-${{ github.ref_name }}-${{ matrix.ziparch }}.zip" -CompressionLevel Optimal + + - name: Upload release asset + if: github.event_name == 'release' + shell: pwsh + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh release upload "${{ github.ref_name }}" "$env:GITHUB_WORKSPACE/zstd-${{ github.ref_name }}-${{ matrix.ziparch }}.zip" --clobber \ No newline at end of file diff --git a/.gitignore b/.gitignore index abdf3b6338e..03ecbde6af8 100644 --- a/.gitignore +++ b/.gitignore @@ -12,33 +12,53 @@ *.so *.so.* *.dylib +*.framework +*.xcframework # Executables -zstd +/zstd zstdmt *.exe *.out *.app -# Test artefacts -tmp* -dictionary* -NUL - # Build artefacts +contrib/linux-kernel/linux/ projects/ bin/ .buckd/ buck-out/ +build-* +*.gcda +cmakebuild/ +cmake-build/ + +# Test artefacts +tmp* +*.zst +*.zstd +dictionary. 
+dictionary +NUL +install/ + +# IDE +.clang_complete +compile_flags.txt +.clang-format + +# Other files .directory _codelite/ _zstdbench/ -.clang_complete *.idea *.swp .DS_Store googletest/ *.d *.vscode +*.code-workspace +compile_commands.json +.clangd +perf.data +perf.data.old diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 3d9c9d99a89..00000000000 --- a/.travis.yml +++ /dev/null @@ -1,214 +0,0 @@ -# Medium Tests: Run on all commits/PRs to dev branch - -language: c -dist: trusty - -git: - depth: 1 - -branches: - only: - - dev - - master - - travisTest - -addons: - apt: - update: true - -env: - global: - - FUZZERTEST=-T2mn - ZSTREAM_TESTTIME=-T2mn - DECODECORPUS_TESTTIME=-T1mn - - -matrix: - fast_finish: true - include: - - name: Trusty (Test All) - script: - - make test - - - name: gcc-6 + gcc-7 compilation - script: - - make gcc6install gcc7install - - CC=gcc-6 CFLAGS=-Werror make -j all - - make clean - - CC=gcc-7 CFLAGS=-Werror make -j all - - - name: gcc-8 + ASan + UBSan + Test Zstd - script: - - make gcc8install - - CC=gcc-8 CFLAGS="-Werror" make -j all - - make clean - - CC=gcc-8 make -j uasan-test-zstd 3 GB) using high levels (--ultra) and multithreading, by @terrelln +perf: Improved decompression speed: x64 : +10% (clang) / +5% (gcc); ARM : from +15% to +50%, depending on SoC, by @terrelln +perf: Automatically downsizes ZSTD_DCtx when too large for too long (#2069, by @bimbashrestha) +perf: Improved fast compression speed on aarch64 (#2040, ~+3%, by @caoyzh) +perf: Small level 1 compression speed gains (depending on compiler) +cli : New --patch-from command, create and apply patches from files, by @bimbashrestha +cli : New --filelist= : Provide a list of files to operate upon from a file +cli : -b -d command can now benchmark decompression on multiple files +cli : New --no-content-size command +cli : New --show-default-cparams information command +api : ZDICT_finalizeDictionary() is promoted to stable (#2111) +api : new experimental parameter ZSTD_d_stableOutBuffer (#2094) +build: Generate a single-file libzstd library (#2065, by @cwoffenden) +build: Relative includes no longer require -I compiler flags for zstd lib subdirs (#2103, by @felixhandte) +build: zstd now compiles cleanly under -pedantic (#2099) +build: zstd now compiles with make-4.3 +build: Support mingw cross-compilation from Linux, by @Ericson2314 +build: Meson multi-thread build fix on windows +build: Some misc icc fixes backed by new ci test on travis +misc: bitflip analyzer tool, by @felixhandte +misc: Extend largeNbDicts benchmark to compression +misc: Edit-distance match finder in contrib/ +doc : Improved beginner CONTRIBUTING.md docs +doc : New issue templates for zstd + +v1.4.4 (Nov 6, 2019) +perf: Improved decompression speed, by > 10%, by @terrelln +perf: Better compression speed when re-using a context, by @felixhandte +perf: Fix compression ratio when compressing large files with small dictionary, by @senhuang42 +perf: zstd reference encoder can generate RLE blocks, by @bimbashrestha +perf: minor generic speed optimization, by @davidbolvansky +api: new ability to extract sequences from the parser for analysis, by @bimbashrestha +api: fixed decoding of magic-less frames, by @terrelln +api: fixed ZSTD_initCStream_advanced() performance with fast modes, reported by @QrczakMK +cli: Named pipes support, by @bimbashrestha +cli: short tar's extension support, by @stokito +cli: command --output-dir-flat= , generates target files into requested directory, by @senhuang42 +cli: commands
--stream-size=# and --size-hint=#, by @nmagerko +cli: command --exclude-compressed, by @shashank0791 +cli: faster `-t` test mode +cli: improved some error messages, by @vangyzen +cli: fix command `-D dictionary` on Windows, reported by @artyompetrov +cli: fix rare deadlock condition within dictionary builder, by @terrelln +build: single-file decoder with emscripten compilation script, by @cwoffenden +build: fixed zlibWrapper compilation on Visual Studio, reported by @bluenlive +build: fixed deprecation warning for certain gcc version, reported by @jasonma163 +build: fix compilation on old gcc versions, by @cemeyer +build: improved installation directories for cmake script, by Dmitri Shubin +pack: modified pkgconfig, for better integration into openwrt, requested by @neheb +misc: Improved documentation : ZSTD_CLEVEL, DYNAMIC_BMI2, ZSTD_CDict, function deprecation, zstd format +misc: fixed educational decoder : accept larger literals section, and removed UNALIGNED() macro + +v1.4.3 (Aug 20, 2019) +bug: Fix Dictionary Compression Ratio Regression by @cyan4973 (#1709) +bug: Fix Buffer Overflow in legacy v0.3 decompression by @felixhandte (#1722) +build: Add support for IAR C/C++ Compiler for Arm by @joseph0918 (#1705) + +v1.4.2 (Jul 26, 2019) +bug: Fix bug in zstd-0.5 decoder by @terrelln (#1696) +bug: Fix seekable decompression in-memory API by @iburinoc (#1695) +misc: Validate blocks are smaller than size limit by @vivekmg (#1685) +misc: Restructure source files by @ephiepark (#1679) + +v1.4.1 (Jul 20, 2019) +bug: Fix data corruption in niche use cases by @terrelln (#1659) +bug: Fuzz legacy modes, fix uncovered bugs by @terrelln (#1593, #1594, #1595) +bug: Fix out of bounds read by @terrelln (#1590) +perf: Improve decode speed by ~7% @mgrice (#1668) +perf: Slightly improved compression ratio of level 3 and 4 (ZSTD_dfast) by @cyan4973 (#1681) +perf: Slightly faster compression speed when re-using a context by @cyan4973 (#1658) +perf: Improve compression ratio for small windowLog by @cyan4973 (#1624) +perf: Faster compression speed in high compression mode for repetitive data by @terrelln (#1635) +api: Add parameter to generate smaller dictionaries by @tyler-tran (#1656) +cli: Recognize symlinks when built in C99 mode by @felixhandte (#1640) +cli: Expose cpu load indicator for each file on -vv mode by @ephiepark (#1631) +cli: Restrict read permissions on destination files by @chungy (#1644) +cli: zstdgrep: handle -f flag by @felixhandte (#1618) +cli: zstdcat: follow symlinks by @vejnar (#1604) +doc: Remove extra size limit on compressed blocks by @felixhandte (#1689) +doc: Fix typo by @yk-tanigawa (#1633) +doc: Improve documentation on streaming buffer sizes by @cyan4973 (#1629) +build: CMake: support building with LZ4 @leeyoung624 (#1626) +build: CMake: install zstdless and zstdgrep by @leeyoung624 (#1647) +build: CMake: respect existing uninstall target by @j301scott (#1619) +build: Make: skip multithread tests when built without support by @michaelforney (#1620) +build: Make: Fix examples/ test target by @sjnam (#1603) +build: Meson: rename options out of deprecated namespace by @lzutao (#1665) +build: Meson: fix build by @lzutao (#1602) +build: Visual Studio: don't export symbols in static lib by @scharan (#1650) +build: Visual Studio: fix linking by @absotively (#1639) +build: Fix MinGW-W64 build by @myzhang1029 (#1600) +misc: Expand decodecorpus coverage by @ephiepark (#1664) + +v1.4.0 (Apr 17, 2019) perf: Improve level 1 compression speed in most scenarios by 6% by @gbtucker and 
@terrelln api: Move the advanced API, including all functions in the staging section, to the stable section api: Make ZSTD_e_flush and ZSTD_e_end block for maximum forward progress @@ -35,7 +437,7 @@ misc: Optimize dictionary memory usage in corner cases misc: Improve the dictionary builder on small or homogeneous data misc: Fix spelling across the repo by @jsoref -v1.3.8 +v1.3.8 (Dec 28, 2018) perf: better decompression speed on large files (+7%) and cold dictionaries (+15%) perf: slightly better compression ratio at high compression modes api : finalized advanced API, last stage before "stable" status @@ -57,14 +459,14 @@ doc : clarified zstd_compression_format.md, by @ulikunitz misc: fixed zstdgrep, returns 1 on failure, by @lzutao misc: NEWS renamed as CHANGELOG, in accordance with fboss -v1.3.7 +v1.3.7 (Oct 20, 2018) perf: slightly better decompression speed on clang (depending on hardware target) fix : performance of dictionary compression for small input < 4 KB at levels 9 and 10 build: no longer build backtrace by default in release mode; restrict further automatic mode build: control backtrace support through build macro BACKTRACE misc: added man pages for zstdless and zstdgrep, by @samrussell -v1.3.6 +v1.3.6 (Oct 6, 2018) perf: much faster dictionary builder, by @jenniferliu perf: faster dictionary compression on small data when using multiple contexts, by @felixhandte perf: faster dictionary decompression when using a very large number of dictionaries simultaneously @@ -78,7 +480,7 @@ build: Read Legacy format is limited to v0.5+ by default. Can be changed at comp doc : zstd_compression_format.md updated to match wording in IETF RFC 8478 misc: tests/paramgrill, a parameter optimizer, by @GeorgeLu97 -v1.3.5 +v1.3.5 (Jun 29, 2018) perf: much faster dictionary compression, by @felixhandte perf: small quality improvement for dictionary generation, by @terrelln perf: slightly improved high compression levels (notably level 19) @@ -93,7 +495,7 @@ build: make and make all are compatible with -j doc : clarify zstd_compression_format.md, updated for IETF RFC process misc: pzstd compatible with reproducible compilation, by @lamby -v1.3.4 +v1.3.4 (Mar 27, 2018) perf: faster speed (especially decoding speed) on recent cpus (haswell+) perf: much better performance associating --long with multi-threading, by @terrelln perf: better compression at levels 13-15 @@ -111,9 +513,9 @@ build: VS2017 scripts, by @HaydnTrigg misc: all /contrib projects fixed misc: added /contrib/docker script by @gyscos -v1.3.3 +v1.3.3 (Dec 21, 2017) perf: faster zstd_opt strategy (levels 16-19) -fix : bug #944 : multithreading with shared ditionary and large data, reported by @gsliepen +fix : bug #944 : multithreading with shared dictionary and large data, reported by @gsliepen cli : fix : content size written in header by default cli : fix : improved LZ4 format support, by @felixhandte cli : new : hidden command `-S`, to benchmark multiple files while generating one result per file @@ -123,7 +525,7 @@ api : change : when setting `pledgedSrcSize`, use `ZSTD_CONTENTSIZE_UNKNOWN` mac build: fix : compilation under rhel6 and centos6, reported by @pixelb build: added `check` target -v1.3.2 +v1.3.2 (Oct 10, 2017) new : long range mode, using --long command, by Stella Lau (@stellamplau) new : ability to generate and decode magicless frames (#591) changed : maximum nb of threads reduced to 200, to avoid address space exhaustion in 32-bits mode @@ -146,7 +548,7 @@ example : added streaming_memory_usage license : changed 
/examples license to BSD + GPLv2 license : fix a few header files to reflect new license (#825) -v1.3.1 +v1.3.1 (Aug 21, 2017) New license : BSD + GPLv2 perf: substantially decreased memory usage in Multi-threading mode, thanks to reports by Tino Reichardt (@mcmilk) perf: Multi-threading supports up to 256 threads. Cap at 256 when more are requested (#760) @@ -161,7 +563,7 @@ new : contrib/adaptive-compression, I/O driven compression strength, by Paul Cru new : contrib/long_distance_matching, statistics by Stella Lau (@stellamplau) updated : contrib/linux-kernel, by Nick Terrell (@terrelln) -v1.3.0 +v1.3.0 (Jul 6, 2017) cli : new : `--list` command, by Paul Cruz cli : changed : xz/lzma support enabled by default cli : changed : `-t *` continue processing list after a decompression error @@ -176,7 +578,7 @@ tools : decodecorpus can generate random dictionary-compressed samples, by Paul new : contrib/seekable_format, demo and API, by Sean Purcell changed : contrib/linux-kernel, updated version and license, by Nick Terrell -v1.2.0 +v1.2.0 (May 5, 2017) cli : changed : Multithreading enabled by default (use target zstd-nomt or HAVE_THREAD=0 to disable) cli : new : command -T0 means "detect and use nb of cores", by Sean Purcell cli : new : zstdmt symlink hardwired to `zstd -T0` @@ -198,7 +600,7 @@ build: enabled Multi-threading support for *BSD, by Baptiste Daroussin tools: updated Paramgrill. Command -O# provides best parameters for sample and speed target. new : contrib/linux-kernel version, by Nick Terrell -v1.1.4 +v1.1.4 (Mar 18, 2017) cli : new : can compress in *.gz format, using --format=gzip command, by Przemyslaw Skibinski cli : new : advanced benchmark command --priority=rt cli : fix : write on sparse-enabled file systems in 32-bits mode, by @ds77 @@ -214,7 +616,7 @@ build : improved cmake script, by @Majlen build : added -Wformat-security flag, as recommended by Padraig Brady doc : new : educational decoder, by Sean Purcell -v1.1.3 +v1.1.3 (Feb 7, 2017) cli : zstd can decompress .gz files (can be disabled with `make zstd-nogz` or `make HAVE_ZLIB=0`) cli : new : experimental target `make zstdmt`, with multi-threading support cli : new : improved dictionary builder "cover" (experimental), by Nick Terrell, based on prior work by Giuseppe Ottaviano. 
@@ -230,7 +632,7 @@ API : fix : all symbols properly exposed in libzstd, by Nick Terrell build : support for Solaris target, by Przemyslaw Skibinski doc : clarified specification, by Sean Purcell -v1.1.2 +v1.1.2 (Dec 15, 2016) API : streaming : decompression : changed : automatic implicit reset when chain-decoding new frames without init API : experimental : added : dictID retrieval functions, and ZSTD_initCStream_srcSize() API : zbuff : changed : prototypes now generate deprecation warnings @@ -247,7 +649,7 @@ zlib_wrapper : added support for gz* functions, by Przemyslaw Skibinski install : better compatibility with FreeBSD, by Dimitry Andric source tree : changed : zbuff source files moved to lib/deprecated -v1.1.1 +v1.1.1 (Nov 2, 2016) New : command -M#, --memory=, --memlimit=, --memlimit-decompress= to limit allowed memory consumption New : doc/zstd_manual.html, by Przemyslaw Skibinski Improved : slightly better compression ratio at --ultra levels (>= 20) @@ -258,7 +660,7 @@ Changed : zstd_errors.h is now installed within /include (and replaces errors_pu Updated man page Fixed : zstd-small, zstd-compress and zstd-decompress compilation targets -v1.1.0 +v1.1.0 (Sep 28, 2016) New : contrib/pzstd, parallel version of zstd, by Nick Terrell added : NetBSD install target (#338) Improved : speed for batches of small files @@ -272,7 +674,7 @@ Fixed : compatibility with OpenBSD, reported by Juan Francisco Cantero Hurtado ( Fixed : compatibility with Hurd, by Przemyslaw Skibinski (#365) Fixed : zstd-pgo, reported by octoploid (#329) -v1.0.0 +v1.0.0 (Sep 1, 2016) Change Licensing, all project is now BSD, Copyright Facebook Small decompression speed improvement API : Streaming API supports legacy format @@ -281,7 +683,7 @@ CLI supports legacy formats v0.4+ Fixed : compression fails on certain huge files, reported by Jesse McGrew Enhanced documentation, by Przemyslaw Skibinski -v0.8.1 +v0.8.1 (Aug 18, 2016) New streaming API Changed : --ultra now enables levels beyond 19 Changed : -i# now selects benchmark time in second @@ -290,7 +692,7 @@ Fixed : speed regression on specific patterns (#272) Fixed : support for Z_SYNC_FLUSH, by Dmitry Krot (#291) Fixed : ICC compilation, by Przemyslaw Skibinski -v0.8.0 +v0.8.0 (Aug 2, 2016) Improved : better speed on clang and gcc -O2, thanks to Eric Biggers New : Build on FreeBSD and DragonFly, thanks to JrMarino Changed : modified API : ZSTD_compressEnd() @@ -303,17 +705,17 @@ Modified : minor compression level adaptations Updated : compression format specification to v0.2.0 changed : zstd.h moved to /lib directory -v0.7.5 +v0.7.5 (Aug 1, 2016) Transition version, supporting decoding of v0.8.x -v0.7.4 +v0.7.4 (Jul 17, 2016) Added : homebrew for Mac, by Daniel Cade Added : more examples Fixed : segfault when using small dictionaries, reported by Felix Handte Modified : default compression level for CLI is now 3 Updated : specification, to v0.1.1 -v0.7.3 +v0.7.3 (Jul 9, 2016) New : compression format specification New : `--` separator, stating that all following arguments are file names. Suggested by Chip Turner. New : `ZSTD_getDecompressedSize()` @@ -325,18 +727,18 @@ fixed : multi-blocks decoding with intermediate uncompressed blocks, reported by modified : removed "mem.h" and "error_public.h" dependencies from "zstd.h" (experimental section) modified : legacy functions no longer need magic number -v0.7.2 +v0.7.2 (Jul 4, 2016) fixed : ZSTD_decompressBlock() using multiple consecutive blocks. Reported by Greg Slazinski. 
fixed : potential segfault on very large files (many gigabytes). Reported by Chip Turner. fixed : CLI displays system error message when destination file cannot be created (#231). Reported by Chip Turner. -v0.7.1 +v0.7.1 (Jun 23, 2016) fixed : ZBUFF_compressEnd() called multiple times with too small `dst` buffer, reported by Christophe Chevalier fixed : dictBuilder fails if first sample is too small, reported by Руслан Ковалёв fixed : corruption issue, reported by cj modified : checksum enabled by default in command line mode -v0.7.0 +v0.7.0 (Jun 17, 2016) New : Support for directory compression, using `-r`, thanks to Przemyslaw Skibinski New : Command `--rm`, to remove source file after successful de/compression New : Visual build scripts, by Christophe Chevalier @@ -349,7 +751,7 @@ API : support for custom malloc/free functions New : controllable Dictionary ID New : Support for skippable frames -v0.6.1 +v0.6.1 (May 13, 2016) New : zlib wrapper API, thanks to Przemyslaw Skibinski New : Ability to compile compressor / decompressor separately Changed : new lib directory structure @@ -359,103 +761,103 @@ Fixed : null-string roundtrip (#176) New : benchmark mode can select directory as input Experimental : midipix support, VMS support -v0.6.0 +v0.6.0 (Apr 13, 2016) Stronger high compression modes, thanks to Przemyslaw Skibinski API : ZSTD_getFrameParams() provides size of decompressed content New : highest compression modes require `--ultra` command to fully unleash their capacity Fixed : zstd cli return error code > 0 and removes dst file artifact when decompression fails, thanks to Chip Turner -v0.5.1 +v0.5.1 (Feb 18, 2016) New : Optimal parsing => Very high compression modes, thanks to Przemyslaw Skibinski Changed : Dictionary builder integrated into libzstd and zstd cli Changed (!) : zstd cli now uses "multiple input files" as default mode. See `zstd -h`. 
Fix : high compression modes for big-endian platforms New : zstd cli : `-t` | `--test` command -v0.5.0 +v0.5.0 (Feb 5, 2016) New : dictionary builder utility Changed : streaming & dictionary API Improved : better compression of small data -v0.4.7 +v0.4.7 (Jan 22, 2016) Improved : small compression speed improvement in HC mode Changed : `zstd_decompress.c` has ZSTD_LEGACY_SUPPORT to 0 by default fix : bt search bug -v0.4.6 +v0.4.6 (Jan 13, 2016) fix : fast compression mode on Windows New : cmake configuration file, thanks to Artyom Dymchenko Improved : high compression mode on repetitive data New : block-level API New : ZSTD_duplicateCCtx() -v0.4.5 +v0.4.5 (Dec 18, 2015) new : -m/--multiple : compress/decompress multiple files -v0.4.4 +v0.4.4 (Dec 14, 2015) Fixed : high compression modes for Windows 32 bits new : external dictionary API extended to buffered mode and accessible through command line new : windows DLL project, thanks to Christophe Chevalier -v0.4.3 : +v0.4.3 (Dec 7, 2015) new : external dictionary API new : zstd-frugal -v0.4.2 : +v0.4.2 (Dec 2, 2015) Generic minor improvements for small blocks Fixed : big-endian compatibility, by Peter Harris (#85) -v0.4.1 +v0.4.1 (Dec 1, 2015) Fixed : ZSTD_LEGACY_SUPPORT=0 build mode (reported by Luben) removed `zstd.c` -v0.4.0 +v0.4.0 (Nov 29, 2015) Command line utility compatible with high compression levels Removed zstdhc => merged into zstd Added : ZBUFF API (see zstd_buffered.h) Rolling buffer support -v0.3.6 +v0.3.6 (Nov 10, 2015) small blocks params -v0.3.5 +v0.3.5 (Nov 9, 2015) minor generic compression improvements -v0.3.4 +v0.3.4 (Nov 6, 2015) Faster fast cLevels -v0.3.3 +v0.3.3 (Nov 5, 2015) Small compression ratio improvement -v0.3.2 +v0.3.2 (Nov 2, 2015) Fixed Visual Studio -v0.3.1 : +v0.3.1 (Nov 2, 2015) Small compression ratio improvement -v0.3 +v0.3 (Oct 30, 2015) HC mode : compression levels 2-26 -v0.2.2 +v0.2.2 (Oct 28, 2015) Fix : Visual Studio 2013 & 2015 release compilation, by Christophe Chevalier -v0.2.1 +v0.2.1 (Oct 24, 2015) Fix : Read errors, advanced fuzzer tests, by Hanno Böck -v0.2.0 +v0.2.0 (Oct 22, 2015) **Breaking format change** Faster decompression speed Can still decode v0.1 format -v0.1.3 +v0.1.3 (Oct 15, 2015) fix uninitialization warning, reported by Evan Nemerson -v0.1.2 +v0.1.2 (Sep 11, 2015) frame concatenation support -v0.1.1 +v0.1.1 (Aug 27, 2015) fix compression bug detects write-flush errors -v0.1.0 +v0.1.0 (Aug 25, 2015) first release diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 00000000000..10fef39b909 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,11 @@ +cmake_minimum_required(VERSION 3.10) + +# Thin wrapper so `cmake -S .` behaves like `cmake -S build/cmake`. +# Policy lives in build/cmake; keep parent project language-less. +project(zstd-superbuild LANGUAGES NONE) + +if(CMAKE_SOURCE_DIR STREQUAL CMAKE_BINARY_DIR) + message(FATAL_ERROR "In-source builds are not supported. Specify -B .") +endif() + +add_subdirectory(build/cmake) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index dd013f8084f..a9ad99ebdab 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,9 +5,9 @@ possible. ## Our Development Process New versions are being developed in the "dev" branch, or in their own feature branch. -When they are deemed ready for a release, they are merged into "master". +When they are deemed ready for a release, they are merged into "release". 
-As a consequences, all contributions must stage first through "dev" +As a consequence, all contributions must stage first through "dev" or their own feature branch. ## Pull Requests @@ -26,6 +26,360 @@ to do this once to work on any of Facebook's open source projects. Complete your CLA here: +## Workflow +Zstd uses a branch-based workflow for making changes to the codebase. Typically, zstd +will use a new branch per sizable topic. For smaller changes, it is okay to lump multiple +related changes into a branch. + +Our contribution process works in three main stages: +1. Local development + * Update: + * Check out your fork of zstd if you have not already + ``` + git clone https://github.com//zstd + cd zstd + ``` + * Update your local dev branch + ``` + git pull https://github.com/facebook/zstd dev + git push origin dev + ``` + * Topic and development: + * Make a new branch on your fork about the topic you're developing for + ``` + # branch names should be concise but sufficiently informative + git checkout -b + git push origin + ``` + * Make commits and push + ``` + # make some changes + git add -u && git commit -m + git push origin + ``` + * Note: run local tests to ensure that your changes didn't break existing functionality + * Quick check + ``` + make check + ``` + * Longer check + ``` + make test + ``` +2. Code Review and CI tests + * Ensure CI tests pass: + * Before sharing anything with the community, create a pull request in your own fork against the dev branch + and make sure that all GitHub Actions CI tests pass. See the Continuous Integration section below for more information. + * Ensure that static analysis passes on your development machine. See the Static Analysis section + below to see how to do this. + * Create a pull request: + * When you are ready to share your changes with the community, create a pull request from your branch + to facebook:dev. You can do this very easily by clicking 'Create Pull Request' on your fork's home + page. + * From there, select the branch where you made changes as your source branch and facebook:dev + as the destination. + * Examine the diff presented between the two branches to make sure there is nothing unexpected. + * Write a good pull request description: + * While there is no strict template that our contributors follow, we would like them to + sufficiently summarize and motivate the changes they are proposing. We recommend that all pull requests, + at least indirectly, address the following points. + * Is this pull request important and why? + * Is it addressing an issue? If so, what issue? (please provide links for convenience) + * Is this a new feature? If so, why is it useful and/or necessary? + * Are there background references and documents that reviewers should be aware of to properly assess this change? + * Note: make sure to point out any design and architectural decisions that you made and the rationale behind them. + * Note: if you have been working with a specific user and would like them to review your work, make sure you mention them using (@) + * Submit the pull request and iterate with feedback. +3. Merge and Release + * Getting approval: + * You will have to iterate on your changes with feedback from other collaborators to reach a point + where your pull request can be safely merged. + * To avoid too many comments on style and convention, make sure that you have a + look at our style section below before creating a pull request.
+ * Eventually, someone from the zstd team will approve your pull request and not long after merge it into + the dev branch. + * Housekeeping: + * Most PRs are linked with one or more GitHub issues. If this is the case for your PR, make sure + the corresponding issue is mentioned. If your change 'fixes' or completely addresses the + issue at hand, then please indicate this by requesting that the issue be closed by commenting. + * Just because your changes have been merged does not mean the topic or larger issue is complete. Remember + that the change must make it to an official zstd release for it to be meaningful. We recommend + that contributors track the activity on their pull request and corresponding issue(s) page(s) until + their change makes it to the next release of zstd. Users will often discover bugs in your code or + suggest ways to refine and improve your initial changes even after the pull request is merged. + +## Static Analysis +Static analysis is a process for examining the correctness or validity of a program without actually +executing it. It usually helps us find many simple bugs. Zstd uses clang's `scan-build` tool for +static analysis. You can install it by following the instructions for your OS at https://clang-analyzer.llvm.org/scan-build. + +Once installed, you can ensure that our static analysis tests pass on your local development machine +by running: +``` +make staticAnalyze +``` + +In general, you can use `scan-build` to statically analyze any build script. For example, to statically analyze +just `contrib/largeNbDicts` and nothing else, you can run: + +``` +scan-build make -C contrib/largeNbDicts largeNbDicts +``` + +### Pitfalls of static analysis +`scan-build` is part of our regular CI suite. Other static analyzers are not. + +It can be useful to look at additional static analyzers once in a while (and we do), but it's not a good idea to multiply the number of analyzers run continuously at each commit and PR. The reasons are : + +- Static analyzers are full of false positives. The signal to noise ratio is actually pretty low. +- A good CI policy is "zero-warning tolerance". That means that all issues must be solved, including false positives. This quickly becomes a tedious workload. +- Multiple static analyzers will feature multiple kinds of false positives, sometimes applying to the same code but in different ways, leading to : + + tortuous code, trying to please multiple constraints, hurting readability and therefore maintenance. Sometimes, such complexity introduces other, more subtle bugs that are just out of scope of the analyzers. + + sometimes, these constraints are mutually exclusive : if one tries to solve one, the other static analyzer will complain; they can't both be happy at the same time. +- As if that was not enough, the list of false positives changes with each version. It's hard enough to follow one static analyzer; following multiple ones, each with its own update agenda, quickly becomes a massive velocity reducer. + +This is different from running a static analyzer once in a while, looking at the output, and __cherry picking__ a few warnings that seem helpful, either because they detected a genuine risk of a bug, or because they help express the code in a way which is more readable or more difficult to misuse. These kinds of reports can be useful, and are accepted. + +## Continuous Integration +CI tests run every time a pull request (PR) is created or updated. The exact tests +that get run will depend on the destination branch you specify.
Some tests take +longer to run than others. Currently, our CI is set up to run a short +series of tests when creating a PR to the dev branch and a longer series of tests +when creating a PR to the release branch. You can look in the configuration files +of the respective CI platform for more information on what gets run when. + +Most people will just want to create a PR with the destination set to their local dev +branch of zstd. You can then find the status of the tests on the PR's page. You can also +re-run tests and cancel running tests from the PR page or from the respective CI's dashboard. + +Almost all of zstd's CI runs on GitHub Actions (configured at `.github/workflows`), which will automatically run on PRs to your +own fork. A small number of tests run on other services (e.g. Travis CI, Circle CI, Appveyor). +These require work to set up on your local fork, and (at least for Travis CI) cost money. +Therefore, if the PR on your local fork passes GitHub Actions, feel free to submit a PR +against the main repo. + +### Third-party CI +A small number of tests cannot run on GitHub Actions, or have yet to be migrated. +For these, we use a variety of third-party services (listed below). It is not necessary to set +these up on your fork in order to contribute to zstd; however, we do link to instructions for those +who want earlier signal. + +| Service | Purpose | Setup Links | Config Path | +|-----------|------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------| +| Travis CI | Used for testing on non-x86 architectures such as PowerPC | https://docs.travis-ci.com/user/tutorial/#to-get-started-with-travis-ci-using-github
https://github.com/marketplace/travis-ci | `.travis.yml` | +| AppVeyor | Used for some Windows testing (e.g. cygwin, mingw) | https://www.appveyor.com/blog/2018/10/02/github-apps-integration/
https://github.com/marketplace/appveyor | `appveyor.yml` | +| Cirrus CI | Used for testing on FreeBSD | https://github.com/marketplace/cirrus-ci/ | `.cirrus.yml` | +| Circle CI | Historically was used to provide faster signal,
but we may be able to migrate these to Github Actions | https://circleci.com/docs/2.0/getting-started/#setting-up-circleci
https://youtu.be/Js3hMUsSZ2c
https://circleci.com/docs/2.0/enable-checks/ | `.circleci/config.yml` | + +Note: the instructions linked above mostly cover how to set up a repository with CI from scratch. +The general idea should be the same for setting up CI on your fork of zstd, but you may have to +follow slightly different steps. In particular, please ignore any instructions related to setting up +config files (since zstd already has configs for each of these services). + +## Performance +Performance is extremely important for zstd and we only merge pull requests whose performance +landscape and corresponding trade-offs have been adequately analyzed, reproduced, and presented. +This high bar for performance means that every PR which has the potential to +impact performance takes a very long time for us to properly review. That being said, we +always welcome contributions to improve performance (or worsen performance for the trade-off of +something else). Please keep the following in mind before submitting a performance-related PR: + +1. Zstd isn't as old as gzip but it has been around for some time now and its evolution is +very well documented via past GitHub issues and pull requests. It may be the case that your +particular performance optimization has already been considered in the past. Please take some +time to search through old issues and pull requests using keywords specific to your +would-be PR. Of course, just because a topic has already been discussed (and perhaps rejected +on some grounds) in the past, doesn't mean it isn't worth bringing up again. But even in that case, +it will be helpful for you to have context from that topic's history before contributing. +2. The distinction between noise and actual performance gains can unfortunately be very subtle, +especially when microbenchmarking extremely small wins or losses. The only remedy to getting +something subtle merged is extensive benchmarking. You will be doing us a great favor if you +take the time to run extensive, long-duration, and potentially cross-(os, platform, process, etc) +benchmarks on your end before submitting a PR. Of course, you will not be able to benchmark +your changes on every single processor and os out there (and neither will we) but do the best +you can :) We've added some things to think about when benchmarking below in the Benchmarking +Performance section which might be helpful for you. +3. Optimizing performance for a certain OS, processor vendor, compiler, or network system is a perfectly +legitimate thing to do as long as it does not harm the overall performance health of Zstd. +This is a hard balance to strike but please keep in mind other aspects of Zstd when +submitting changes that are clang-specific, windows-specific, etc. + +## Benchmarking Performance +Performance microbenchmarking is a tricky subject but also essential for Zstd. We value empirical +testing over theoretical speculation. This guide is not perfect but for most scenarios, it +is a good place to start. + +### Stability +Unfortunately, the most important aspect in being able to benchmark reliably is to have a stable +benchmarking machine. A virtual machine, a machine with shared resources, or your laptop +will typically not be stable enough to obtain reliable benchmark results. If you can get your +hands on a desktop, this is usually a better scenario. + +Of course, benchmarking can be done on non-hyper-stable machines as well. You will just have to +do a little more work to ensure that you are in fact measuring the changes you've made and not +noise.
Here are some things you can do to make your benchmarks more stable: + +1. The simplest thing you can do to drastically improve the stability of your benchmark is +to run it multiple times and then aggregate the results of those runs. As a general rule of +thumb, the smaller the change you are trying to measure, the more samples of benchmark runs +you will have to aggregate over to get reliable results. Here are some additional things to keep in +mind when running multiple trials: + * How you aggregate your samples is important. You might be tempted to use the mean of your + results. While this is certainly going to be a more stable number than a raw single-sample + benchmark number, you might have more luck by taking the median. The mean is not robust to + outliers whereas the median is. Better still, you could simply take the fastest speed your + benchmark achieved on each run since that is likely the fastest your process will be + capable of running your code. In our experience, this (aggregating by just taking the sample + with the fastest running time) has been the most stable approach. + * The more samples you have, the more stable your benchmarks should be. You can verify + your improved stability by looking at the size of your confidence intervals as you + increase your sample count. These should get smaller and smaller, eventually hopefully + smaller than the performance win you are expecting. + * Most processors will take some time to get `hot` when running anything. The observations + you collect during that time period will be very different from the true performance number. Having + a very large number of samples will help alleviate this problem slightly, but you can also + address it directly by simply not including the first `n` iterations of your benchmark in + your aggregations. You can determine `n` by simply looking at the results from each iteration + and then hand-picking a good threshold after which the variance in results seems to stabilize. +2. You cannot really get reliable benchmarks if your host machine is simultaneously running +another cpu/memory-intensive application in the background. If you are running benchmarks on your +personal laptop for instance, you should close all applications (including your code editor and +browser) before running your benchmarks. You might also have invisible background applications +running. You can see what these are by looking at either Activity Monitor on Mac or Task Manager +on Windows. You will get more stable benchmark results if you end those processes as well. + * If you have multiple cores, you can even run your benchmark on a reserved core to prevent + pollution from other OS and user processes. There are a number of ways to do this depending + on your OS: + * On Linux boxes, you can use https://github.com/lpechacek/cpuset. + * On Windows, you can "Set Processor Affinity" using https://www.thewindowsclub.com/processor-affinity-windows + * On Mac, you can try to use their dedicated affinity API https://developer.apple.com/library/archive/releasenotes/Performance/RN-AffinityAPI/#//apple_ref/doc/uid/TP40006635-CH1-DontLinkElementID_2 +3. To benchmark, you will likely end up writing a separate c/c++ program that links libzstd. +Dynamically linking your library will introduce some added variation (not a large amount but +definitely some). Statically linking libzstd will be more stable. Static libraries should +be enabled by default when building zstd. +4. Use a profiler with a good high resolution timer.
See the section below on profiling for +details on this. +5. Disable frequency scaling, turbo boost and address space randomization (this will vary by OS). +6. Try to avoid storage. On some systems you can use tmpfs. Putting the program, inputs and outputs on +tmpfs avoids touching a real storage system, which can have a pretty big variability. + +Also check out LLVM's guide on benchmarking here: https://llvm.org/docs/Benchmarking.html + +### Zstd benchmark +The fastest signal you can get regarding your performance changes is via the built-in zstd CLI +bench option. You can run Zstd as you typically would for your scenario using some set of options +and then additionally also specify the `-b#` option. Doing this will run our benchmarking pipeline +for the options you have just provided. If you want to look at the internals of how this +benchmarking script works, you can check out programs/benchzstd.c. + +For example: say you have made a change that you believe improves the speed of zstd level 1. The +very first thing you should use to assess whether you actually achieved any sort of improvement +is `zstd -b`. You might try to do something like this. Note: you can use the `-i` option to +specify a running time for your benchmark in seconds (default is 3 seconds). +Usually, the longer the running time, the more stable your results will be. + +``` +$ git checkout +$ make && cp zstd zstd-old +$ git checkout +$ make && cp zstd zstd-new +$ zstd-old -i5 -b1 + 1 : 8990 -> 3992 (2.252), 302.6 MB/s , 626.4 MB/s +$ zstd-new -i5 -b1 + 1 : 8990 -> 3992 (2.252), 302.8 MB/s , 628.4 MB/s +``` + +Unless your performance win is large enough to be visible despite the intrinsic noise +on your computer, benchzstd alone will likely not be enough to validate the impact of your +changes. For example, the results of the example above indicate that effectively nothing +changed, but there could be a small <3% improvement that the noise on the host machine +obscured. So unless you see a large performance win (10-15% consistently), this method of +evaluation alone will not be sufficient. + +### Profiling +There are a number of great profilers out there. We're going to briefly mention how you can +profile your code using `instruments` on Mac, `perf` on Linux and `visual studio profiler` +on Windows. + +Say you have an idea for a change that you think will provide some good performance gains +for level 1 compression on Zstd. Typically this means you have identified a section of +code that you think can be made to run faster. + +The first thing you will want to do is make sure that the piece of code is actually taking up +a notable amount of time to run. It is usually not worth optimizing something which accounts for less than +0.0001% of the total running time. Luckily, there are tools to help with this. +Profilers will let you see how much time your code spends inside a particular function. +If your target code snippet is only part of a function, it might be worth trying to +isolate that snippet by moving it to its own function (this is usually not necessary but +might be). + +Most profilers (including the profilers discussed below) will generate a call graph of +functions for you. Your goal will be to find your function of interest in this call graph +and then inspect the time spent inside of it. You might also want to look at the annotated +assembly which most profilers will provide you with.
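+Before reaching for a profiler, it can help to have a small standalone harness to hang it on.
+The sketch below is purely illustrative: it is not part of the zstd code base, and the file name,
+input size, compression level, and run count are all arbitrary choices. It links against libzstd,
+times `ZSTD_compress()` over repeated runs, and aggregates by keeping the fastest run, following
+the advice from the Stability section above.
+
+```
+/* bench_sketch.c : illustrative micro-benchmark harness (not part of zstd).
+ * Build (assuming libzstd is installed) : cc -O2 -g bench_sketch.c -lzstd
+ * Times ZSTD_compress() over many runs and keeps the fastest run. */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <zstd.h>
+
+int main(void)
+{
+    size_t const srcSize = (size_t)1 << 20;     /* 1 MiB of synthetic input */
+    size_t const dstCapacity = ZSTD_compressBound(srcSize);
+    void* const src = malloc(srcSize);
+    void* const dst = malloc(dstCapacity);
+    int const nbRuns = 1000;                    /* more runs => steadier minimum */
+    double best = -1.0;
+    int run;
+    if (src == NULL || dst == NULL) return 1;
+    memset(src, 'A', srcSize);                  /* trivially compressible data */
+    for (run = 0; run < nbRuns; run++) {
+        clock_t const begin = clock();
+        size_t const csize = ZSTD_compress(dst, dstCapacity, src, srcSize, 1);
+        clock_t const end = clock();
+        double const sec = (double)(end - begin) / CLOCKS_PER_SEC;
+        if (ZSTD_isError(csize)) return 1;
+        if (best < 0 || sec < best) best = sec; /* keep the fastest run */
+    }
+    printf("fastest of %d runs : %.3f ms\n", nbRuns, best * 1000.0);
+    free(src);
+    free(dst);
+    return 0;
+}
+```
+
+Because the harness reports the fastest run rather than the mean, warm-up iterations and
+background noise mostly drop out of the result, and it runs long enough overall that you can
+attach any of the profilers below to the process while it executes.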
+ +#### Instruments +We will once again consider the scenario where you think you've identified a piece of code +whose performance can be improved upon. Follow these steps to profile your code using +Instruments. + +1. Open Instruments +2. Select `Time Profiler` from the list of standard templates +3. Close all other applications except for your Instruments window and your terminal +4. Run your benchmarking script from your terminal window + * You will want a benchmark that runs for at least a few seconds (5 seconds will + usually be long enough). This way the profiler will have something to work with + and you will have ample time to attach your profiler to this process :) + * I will just use benchzstd as my benchmarking script for this example: +``` +$ zstd -b1 -i5 # this will run for 5 seconds +``` +5. Once you run your benchmarking script, switch back over to Instruments and attach your +process to the time profiler. You can do this by: + * Clicking on the `All Processes` drop-down in the top left of the toolbar. + * Selecting your process from the dropdown. In my case, it is just going to be labeled + `zstd` + * Hitting the bright red record circle button on the top left of the toolbar +6. Your profiler will now start collecting metrics from your benchmarking script. Once +you think you have collected enough samples (usually this is the case after 3 seconds of +recording), stop your profiler. +7. Make sure that in the toolbar of the bottom window, `profile` is selected. +8. You should be able to see your call graph. + * If you don't see the call graph or an incomplete call graph, make sure you have compiled + zstd and your benchmarking script using debug flags. On Mac and Linux, this just means + you will have to supply the `-g` flag along with your build script. You might also + have to provide the `-fno-omit-frame-pointer` flag. +9. Dig down the graph to find your function call and then inspect it by double-clicking +the list item. You will be able to see the annotated source code and the assembly side by +side. + +#### Perf + +This wiki has a pretty detailed tutorial on getting started working with perf, so we'll +leave you to check that out if you're getting started: + +https://perf.wiki.kernel.org/index.php/Tutorial + +Some general notes on perf: +* Use `perf stat -r # ` to quickly get some relevant timing and +counter statistics. Perf uses a high resolution timer and this is likely one +of the first things your team will run when assessing your PR. +* Perf has a long list of hardware counters that can be viewed with `perf list`. +When measuring optimizations, something worth trying is to make sure the hardware +counters you expect to be impacted by your change are in fact being so. For example, +if you expect the L1 cache misses to decrease with your change, you can look at the +counter `L1-dcache-load-misses`. +* Perf hardware counters will not work on a virtual machine. + +#### Visual Studio + +Build Zstd with symbols first (for example `cmake -S build/cmake -B build -G Ninja -DCMAKE_BUILD_TYPE=RelWithDebInfo && ninja -C build zstd`) so the profiler resolves call stacks. + +* Launch Visual Studio's Performance Profiler (`Alt+F2`), enable CPU Usage (optionally Instrumentation), and point it at the `programs/zstd` benchmark you want to run. +* If you prefer to start the benchmark from a terminal, use "Attach to running process" to latch onto it mid-run; keep frame pointers (`-fno-omit-frame-pointer`) for clean stacks.
+* When you stop the capture, review the call tree, hot path, and annotated source panes. +* Microsoft's [Performance Profiling docs](https://learn.microsoft.com/en-us/visualstudio/profiling/?view=vs-2022) cover deeper sampling, ETW, and collection options if required. + ## Issues We use GitHub issues to track public bugs. Please ensure your description is clear and has sufficient instructions to be able to reproduce the issue. @@ -34,8 +388,106 @@ Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe disclosure of security bugs. In those cases, please go through the process outlined on that page and do not file a public issue. -## Coding Style +## Coding Style +It's a pretty long topic, which is difficult to summarize in a single paragraph. +As a rule of thumb, try to imitate the coding style of +similar lines of code around your contribution. +The following is a non-exhaustive list of rules employed in the zstd code base: + +### C90 +This code base follows the strict C90 standard, +with 2 extensions : 64-bit `long long` types, and variadic macros. +This rule is applied strictly to code within `lib/` and `programs/`. +Sub-projects in `contrib/` are allowed to use other conventions. + +### C++ direct compatibility : symbol mangling +All public symbol declarations must be wrapped in `extern "C" { ... }`, +so that this project can be compiled as C++98 code, +and linked into C++ applications. + +### Minimal Frugal +This design requirement is fundamental to preserve the portability of the code base. +#### Dependencies +- Reduce dependencies to the minimum possible level. + Any dependency should be considered "bad" by default, + and only tolerated because it provides a service in a better way than can be achieved locally. + The only external dependencies this repository tolerates are + standard C libraries, and in rare cases, system level headers. +- Within `lib/`, this policy is even more drastic. + The only external dependencies allowed are ``, ``, ``, + and even then, not directly. + In particular, no function shall ever allocate on the heap directly, + and must instead use `ZSTD_malloc()` and equivalents. + Other accepted non-symbol headers are `` and ``. +- Within the project, there is a strict hierarchy of dependencies that must be respected. + `programs/` is allowed to depend on `lib/`, but only its public API. + Within `lib/`, `lib/common` doesn't depend on any other directory. + `lib/compress` and `lib/decompress` shall not depend on each other. + `lib/dictBuilder` can depend on `lib/common` and `lib/compress`, but not `lib/decompress`. +#### Resources +- Functions in `lib/` must use very little stack space, + several dozens of bytes max. + Everything larger must use the heap allocator, + or require a scratch buffer to be emplaced manually. + +### Naming +* All public symbols are prefixed with `ZSTD_` + + private symbols, with a scope limited to their own unit, are free of this restriction. + However, since `libzstd` source code can be amalgamated, + each symbol name must attempt to be (and remain) unique. + Avoid too-generic names that could become grounds for future collisions. + This generally implies usage of some form of prefix. +* For symbols (functions and variables), the naming convention is `PREFIX_camelCase`.
+ + In some advanced cases, one can also find : + - `PREFIX_prefix2_camelCase` + - `PREFIX_camelCase_extendedQualifier` +* Multi-word names generally consist of an action followed by an object: + - for example : `ZSTD_createCCtx()` +* Prefer positive actions + - `goBackward` rather than `notGoForward` +* Type names (`struct`, etc.) follow a similar convention, + except that they are allowed and even invited to start with an uppercase letter. + Example : `ZSTD_CCtx`, `ZSTD_CDict` +* Macro names are all capital letters. + The same composition rules (`PREFIX_NAME_QUALIFIER`) apply. +* File names are all lowercase letters. + The convention is `snake_case`. + File names **must** be unique across the entire code base, + even when they stand in clearly separated directories. + +### Qualifiers +* This code base is `const` friendly, if not `const` fanatical. + Any variable that can be `const` (aka. read-only) **must** be `const`. + Any pointer whose content will not be modified must be `const`. + This property is then controlled at compiler level. + `const` variables are an important signal to readers that this variable isn't modified. + Conversely, non-const variables are a signal to readers to watch out for modifications later on in the function. +* If a function must be inlined, mention it explicitly, + using the project's own portable macros, such as `FORCE_INLINE_ATTR`, + defined in `lib/common/compiler.h`. + +### Debugging +* **Assertions** are welcome, and should be used very liberally, + to control any condition the code expects for its correct execution. + These assertion checks will be run in debug builds, and disabled in production. +* For traces, this project provides its own debug macros, + in particular `DEBUGLOG(level, ...)`, defined in `lib/common/debug.h`. + +### Code documentation +* Avoid code documentation that merely repeats what the code is already stating. + Whenever applicable, prefer employing the code as the primary way to convey explanations. + Example 1 : `int nbTokens = n;` instead of `int i = n; /* i is a nb of tokens */`. + Example 2 : `assert(size > 0);` instead of `/* here, size should be positive */`. +* At declaration level, the documentation explains how to use the function or variable, + and, when applicable, why it's needed, or the scenarios where it can be useful. +* At implementation level, the documentation explains the general outline of the algorithm employed, + and, when applicable, why this specific choice was preferred. + +### General layout * 4 spaces for indentation rather than tabs +* Code documentation shall directly precede function declaration or implementation +* Function implementations and their code documentation should be preceded and followed by an empty line + ## License By contributing to Zstandard, you agree that your contributions will be licensed diff --git a/LICENSE b/LICENSE index a793a802892..75800288cc2 100644 --- a/LICENSE +++ b/LICENSE @@ -2,7 +2,7 @@ BSD License For Zstandard software -Copyright (c) 2016-present, Facebook, Inc. All rights reserved. +Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: @@ -14,9 +14,9 @@ are permitted provided that the following conditions are met: this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
- * Neither the name Facebook nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. + * Neither the name Facebook, nor Meta, nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED diff --git a/Makefile b/Makefile index f2ec8c9b107..b726f7970ce 100644 --- a/Makefile +++ b/Makefile @@ -1,12 +1,16 @@ # ################################################################ -# Copyright (c) 2015-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the # LICENSE file in the root directory of this source tree) and the GPLv2 (found # in the COPYING file in the root directory of this source tree). +# You may select, at your option, one of the above-listed licenses. # ################################################################ +# verbose mode (print commands) on V=1 or VERBOSE=1 +Q = $(if $(filter 1,$(V) $(VERBOSE)),,@) + PRGDIR = programs ZSTDDIR = lib BUILDIR = build @@ -17,10 +21,20 @@ FUZZDIR = $(TESTDIR)/fuzz # Define nul output VOID = /dev/null -ifneq (,$(filter Windows%,$(OS))) -EXT =.exe +# When cross-compiling from linux to windows, you might +# need to specify this as "Windows." Fedora build fails +# without it. +# +# Note: mingw-w64 build from linux to windows does not +# fail on other tested distros (ubuntu, debian) even +# without manually specifying the TARGET_SYSTEM. +TARGET_SYSTEM ?= $(OS) +CP ?= cp + +ifneq (,$(filter Windows%,$(TARGET_SYSTEM))) + EXT =.exe else -EXT = + EXT = endif ## default: Build lib-release and zstd-release @@ -36,27 +50,27 @@ allmost: allzstd zlibwrapper # skip zwrapper, can't build that on alternate architectures without the proper zlib installed .PHONY: allzstd allzstd: lib - $(MAKE) -C $(PRGDIR) all - $(MAKE) -C $(TESTDIR) all + $(Q)$(MAKE) -C $(PRGDIR) all + $(Q)$(MAKE) -C $(TESTDIR) all .PHONY: all32 all32: $(MAKE) -C $(PRGDIR) zstd32 $(MAKE) -C $(TESTDIR) all32 -.PHONY: lib lib-release libzstd.a -lib lib-release : - @$(MAKE) -C $(ZSTDDIR) $@ +.PHONY: lib lib-release lib-mt lib-nomt +lib lib-release lib-mt lib-nomt: + $(Q)$(MAKE) -C $(ZSTDDIR) $@ .PHONY: zstd zstd-release zstd zstd-release: - @$(MAKE) -C $(PRGDIR) $@ - cp $(PRGDIR)/zstd$(EXT) . 
+ $(Q)$(MAKE) -C $(PRGDIR) $@ + $(Q)ln -sf $(PRGDIR)/zstd$(EXT) zstd$(EXT) .PHONY: zstdmt zstdmt: - @$(MAKE) -C $(PRGDIR) $@ - cp $(PRGDIR)/zstd$(EXT) ./zstdmt$(EXT) + $(Q)$(MAKE) -C $(PRGDIR) $@ + $(Q)$(CP) $(PRGDIR)/zstd$(EXT) ./zstdmt$(EXT) .PHONY: zlibwrapper zlibwrapper: lib @@ -65,24 +79,28 @@ zlibwrapper: lib ## test: run long-duration tests .PHONY: test DEBUGLEVEL ?= 1 -test: MOREFLAGS += -g -DDEBUGLEVEL=$(DEBUGLEVEL) -Werror +test: MOREFLAGS += -g -Werror test: - MOREFLAGS="$(MOREFLAGS)" $(MAKE) -j -C $(PRGDIR) allVariants - $(MAKE) -C $(TESTDIR) $@ - -## shortest: same as `make check` -.PHONY: shortest -shortest: + DEBUGLEVEL=$(DEBUGLEVEL) MOREFLAGS="$(MOREFLAGS)" $(MAKE) -j -C $(PRGDIR) allVariants $(MAKE) -C $(TESTDIR) $@ + ZSTD=../../programs/zstd $(MAKE) -C doc/educational_decoder $@ ## check: run basic tests for `zstd` cli .PHONY: check -check: shortest +check: + $(Q)$(MAKE) -C $(TESTDIR) $@ + +.PHONY: automated_benchmarking +automated_benchmarking: + $(MAKE) -C $(TESTDIR) $@ -## examples: build all examples in `/examples` directory +.PHONY: benchmarking +benchmarking: automated_benchmarking + +## examples: build all examples in `examples/` directory .PHONY: examples examples: lib - CPPFLAGS=-I../lib LDFLAGS=-L../lib $(MAKE) -C examples/ all + $(MAKE) -C examples all ## manual: generate API documentation in html format .PHONY: manual @@ -99,8 +117,11 @@ man: contrib: lib $(MAKE) -C contrib/pzstd all $(MAKE) -C contrib/seekable_format/examples all - $(MAKE) -C contrib/adaptive-compression all + $(MAKE) -C contrib/seekable_format/tests test $(MAKE) -C contrib/largeNbDicts all + $(MAKE) -C contrib/externalSequenceProducer all + cd build/single_file_libs/ ; ./build_decoder_test.sh + cd build/single_file_libs/ ; ./build_library_test.sh .PHONY: cleanTabs cleanTabs: @@ -108,31 +129,36 @@ cleanTabs: .PHONY: clean clean: - @$(MAKE) -C $(ZSTDDIR) $@ > $(VOID) - @$(MAKE) -C $(PRGDIR) $@ > $(VOID) - @$(MAKE) -C $(TESTDIR) $@ > $(VOID) - @$(MAKE) -C $(ZWRAPDIR) $@ > $(VOID) - @$(MAKE) -C examples/ $@ > $(VOID) - @$(MAKE) -C contrib/gen_html $@ > $(VOID) - @$(MAKE) -C contrib/pzstd $@ > $(VOID) - @$(MAKE) -C contrib/seekable_format/examples $@ > $(VOID) - @$(MAKE) -C contrib/adaptive-compression $@ > $(VOID) - @$(MAKE) -C contrib/largeNbDicts $@ > $(VOID) - @$(RM) zstd$(EXT) zstdmt$(EXT) tmp* - @$(RM) -r lz4 + $(Q)$(MAKE) -C $(ZSTDDIR) $@ > $(VOID) + $(Q)$(MAKE) -C $(PRGDIR) $@ > $(VOID) + $(Q)$(MAKE) -C $(TESTDIR) $@ > $(VOID) + $(Q)$(MAKE) -C $(ZWRAPDIR) $@ > $(VOID) + $(Q)$(MAKE) -C examples/ $@ > $(VOID) + $(Q)$(MAKE) -C contrib/gen_html $@ > $(VOID) + $(Q)$(MAKE) -C contrib/pzstd $@ > $(VOID) + $(Q)$(MAKE) -C contrib/seekable_format/examples $@ > $(VOID) + $(Q)$(MAKE) -C contrib/seekable_format/tests $@ > $(VOID) + $(Q)$(MAKE) -C contrib/largeNbDicts $@ > $(VOID) + $(Q)$(MAKE) -C contrib/externalSequenceProducer $@ > $(VOID) + $(Q)$(RM) zstd$(EXT) zstdmt$(EXT) tmp* + $(Q)$(RM) -r lz4 cmakebuild mesonbuild install @echo Cleaning completed +LIBZSTD_MK_DIR = $(ZSTDDIR) +include $(LIBZSTD_MK_DIR)/install_oses.mk # UNAME, INSTALL_OS_LIST + #------------------------------------------------------------------------------ # make install is validated only for Linux, macOS, Hurd and some BSD targets #------------------------------------------------------------------------------ -ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD DragonFly NetBSD MSYS_NT Haiku)) +ifneq (,$(filter $(INSTALL_OS_LIST),$(UNAME))) HOST_OS = POSIX -CMAKE_PARAMS = -DZSTD_BUILD_CONTRIB:BOOL=ON 
-DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_BUILD_TESTS:BOOL=ON -DZSTD_ZLIB_SUPPORT:BOOL=ON -DZSTD_LZMA_SUPPORT:BOOL=ON -DCMAKE_BUILD_TYPE=Release + +MKDIR ?= mkdir -p HAVE_COLORNEVER = $(shell echo a | egrep --color=never a > /dev/null 2> /dev/null && echo 1 || echo 0) EGREP_OPTIONS ?= -ifeq ($HAVE_COLORNEVER, 1) +ifeq ($(HAVE_COLORNEVER), 1) EGREP_OPTIONS += --color=never endif EGREP = egrep $(EGREP_OPTIONS) @@ -143,7 +169,7 @@ EGREP = egrep $(EGREP_OPTIONS) ## list: Print all targets and their descriptions (if provided) .PHONY: list list: - @TARGETS=$$($(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null \ + $(Q)TARGETS=$$($(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null \ | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' \ | $(EGREP) -v -e '^[^[:alnum:]]' | sort); \ { \ @@ -156,36 +182,42 @@ list: done \ } | column -t -s $$'\t' -.PHONY: install armtest usan asan uasan +.PHONY: install armtest usan asan uasan msan asan32 install: - @$(MAKE) -C $(ZSTDDIR) $@ - @$(MAKE) -C $(PRGDIR) $@ + $(Q)$(MAKE) -C $(ZSTDDIR) $@ + $(Q)$(MAKE) -C $(PRGDIR) $@ .PHONY: uninstall uninstall: - @$(MAKE) -C $(ZSTDDIR) $@ - @$(MAKE) -C $(PRGDIR) $@ + $(Q)$(MAKE) -C $(ZSTDDIR) $@ + $(Q)$(MAKE) -C $(PRGDIR) $@ .PHONY: travis-install travis-install: $(MAKE) install PREFIX=~/install_test_dir -.PHONY: gcc5build +.PHONY: clangbuild-darwin-fat +clangbuild-darwin-fat: clean + clang -v + CXX=clang++ CC=clang CFLAGS+="-Werror -Wconversion -Wno-sign-conversion -Wdocumentation -arch arm64" $(MAKE) zstd-release + mv programs/zstd programs/zstd_arm64 + CXX=clang++ CC=clang CFLAGS+="-Werror -Wconversion -Wno-sign-conversion -Wdocumentation -arch x86_64" $(MAKE) zstd-release + mv programs/zstd programs/zstd_x64 + lipo -create programs/zstd_x64 programs/zstd_arm64 -output programs/zstd + +.PHONY: gcc5build gcc6build gcc7build clangbuild m32build armbuild aarch64build ppcbuild ppc64build gcc5build: clean gcc-5 -v - CC=gcc-5 $(MAKE) all MOREFLAGS="-Werror" + CC=gcc-5 $(MAKE) all MOREFLAGS="-Werror $(MOREFLAGS)" -.PHONY: gcc6build gcc6build: clean gcc-6 -v - CC=gcc-6 $(MAKE) all MOREFLAGS="-Werror" + CC=gcc-6 $(MAKE) all MOREFLAGS="-Werror $(MOREFLAGS)" -.PHONY: gcc7build gcc7build: clean gcc-7 -v - CC=gcc-7 $(MAKE) all MOREFLAGS="-Werror" + CC=gcc-7 $(MAKE) all MOREFLAGS="-Werror $(MOREFLAGS)" -.PHONY: clangbuild clangbuild: clean clang -v CXX=clang++ CC=clang CFLAGS="-Werror -Wconversion -Wno-sign-conversion -Wdocumentation" $(MAKE) all @@ -198,109 +230,131 @@ armbuild: clean CC=arm-linux-gnueabi-gcc CFLAGS="-Werror" $(MAKE) allzstd aarch64build: clean - CC=aarch64-linux-gnu-gcc CFLAGS="-Werror" $(MAKE) allzstd + CC=aarch64-linux-gnu-gcc CFLAGS="-Werror -O0" $(MAKE) allzstd ppcbuild: clean - CC=powerpc-linux-gnu-gcc CFLAGS="-m32 -Wno-attributes -Werror" $(MAKE) allzstd + CC=powerpc-linux-gnu-gcc CFLAGS="-m32 -Wno-attributes -Werror" $(MAKE) -j allzstd ppc64build: clean - CC=powerpc-linux-gnu-gcc CFLAGS="-m64 -Werror" $(MAKE) allzstd + CC=powerpc-linux-gnu-gcc CFLAGS="-m64 -Werror" $(MAKE) -j allzstd +.PHONY: armfuzz aarch64fuzz ppcfuzz ppc64fuzz armfuzz: clean - CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest + CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static MOREFLAGS="-static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" $(MAKE) -C $(TESTDIR) fuzztest aarch64fuzz: clean ld -v - CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests 
$(MAKE) -C $(TESTDIR) fuzztest + CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static MOREFLAGS="-static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" $(MAKE) -C $(TESTDIR) fuzztest ppcfuzz: clean - CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest + CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static MOREFLAGS="-static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" $(MAKE) -C $(TESTDIR) fuzztest ppc64fuzz: clean - CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static MOREFLAGS="-m64 -static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest + CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static MOREFLAGS="-m64 -static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" $(MAKE) -C $(TESTDIR) fuzztest -.PHONY: cxxtest +.PHONY: cxxtest gcc5test gcc6test armtest aarch64test ppctest ppc64test cxxtest: CXXFLAGS += -Wall -Wextra -Wundef -Wshadow -Wcast-align -Werror cxxtest: clean $(MAKE) -C $(PRGDIR) all CC="$(CXX) -Wno-deprecated" CFLAGS="$(CXXFLAGS)" # adding -Wno-deprecated to avoid clang++ warning on dealing with C files directly gcc5test: clean gcc-5 -v - $(MAKE) all CC=gcc-5 MOREFLAGS="-Werror" + $(MAKE) all CC=gcc-5 MOREFLAGS="-Werror $(MOREFLAGS)" gcc6test: clean gcc-6 -v - $(MAKE) all CC=gcc-6 MOREFLAGS="-Werror" + $(MAKE) all CC=gcc-6 MOREFLAGS="-Werror $(MOREFLAGS)" armtest: clean $(MAKE) -C $(TESTDIR) datagen # use native, faster - $(MAKE) -C $(TESTDIR) test CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static" FUZZER_FLAGS=--no-big-tests + $(MAKE) -C $(TESTDIR) test CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" aarch64test: $(MAKE) -C $(TESTDIR) datagen # use native, faster - $(MAKE) -C $(TESTDIR) test CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static" FUZZER_FLAGS=--no-big-tests + $(MAKE) -C $(TESTDIR) test CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" ppctest: clean $(MAKE) -C $(TESTDIR) datagen # use native, faster - $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static" FUZZER_FLAGS=--no-big-tests + $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" ppc64test: clean $(MAKE) -C $(TESTDIR) datagen # use native, faster - $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static" FUZZER_FLAGS=--no-big-tests + $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" +.PHONY: arm-ppc-compilation arm-ppc-compilation: - $(MAKE) -C $(PRGDIR) clean zstd CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static" - $(MAKE) -C $(PRGDIR) clean zstd CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static" - $(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static" - $(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= 
MOREFLAGS="-m64 -static" + $(MAKE) -C $(PRGDIR) clean zstd CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static $(MOREFLAGS)" + $(MAKE) -C $(PRGDIR) clean zstd CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static $(MOREFLAGS)" + $(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static $(MOREFLAGS)" + $(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static $(MOREFLAGS)" regressiontest: $(MAKE) -C $(FUZZDIR) regressiontest uasanregressiontest: - $(MAKE) -C $(FUZZDIR) regressiontest CC=clang CXX=clang++ CFLAGS="-O3 -fsanitize=address,undefined" CXXFLAGS="-O3 -fsanitize=address,undefined" + $(MAKE) -C $(FUZZDIR) regressiontest CC=clang CXX=clang++ CFLAGS="-O3 -fsanitize=address,undefined -Werror" CXXFLAGS="-O3 -fsanitize=address,undefined -Werror" msanregressiontest: - $(MAKE) -C $(FUZZDIR) regressiontest CC=clang CXX=clang++ CFLAGS="-O3 -fsanitize=memory" CXXFLAGS="-O3 -fsanitize=memory" - -# run UBsan with -fsanitize-recover=signed-integer-overflow -# due to a bug in UBsan when doing pointer subtraction -# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63303 - + $(MAKE) -C $(FUZZDIR) regressiontest CC=clang CXX=clang++ CFLAGS="-O3 -fsanitize=memory -Werror" CXXFLAGS="-O3 -fsanitize=memory -Werror" + +update_regressionResults : REGRESS_RESULTS_DIR := /tmp/regress_results_dir/ +update_regressionResults: + $(MAKE) -j -C programs zstd + $(MAKE) -j -C tests/regression test + $(RM) -r $(REGRESS_RESULTS_DIR) + $(MKDIR) $(REGRESS_RESULTS_DIR) + ./tests/regression/test \ + --cache tests/regression/cache \ + --output $(REGRESS_RESULTS_DIR)/results.csv \ + --zstd programs/zstd + echo "Showing results differences" + ! 
diff tests/regression/results.csv $(REGRESS_RESULTS_DIR)/results.csv + echo "Updating results.csv" + $(CP) $(REGRESS_RESULTS_DIR)/results.csv tests/regression/results.csv + + +# run UBsan with -fsanitize-recover=pointer-overflow +# this only works with recent compilers such as gcc 8+ usan: clean - $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=signed-integer-overflow -fsanitize=undefined -Werror" + $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=undefined -Werror $(MOREFLAGS)" asan: clean - $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=address -Werror" + $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=address -Werror $(MOREFLAGS)" asan-%: clean - LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=address -Werror" $(MAKE) -C $(TESTDIR) $* + LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=address -Werror $(MOREFLAGS)" $(MAKE) -C $(TESTDIR) $* msan: clean - $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=memory -fno-omit-frame-pointer -Werror" HAVE_LZMA=0 # datagen.c fails this test for no obvious reason + $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=memory -fno-omit-frame-pointer -Werror $(MOREFLAGS)" HAVE_LZMA=0 # datagen.c fails this test for no obvious reason -msan-%: clean - LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=memory -fno-omit-frame-pointer -Werror" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) HAVE_LZMA=0 $* +msan-%: + $(MAKE) clean + LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=memory -fno-omit-frame-pointer -Werror $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" $(MAKE) -j -C $(TESTDIR) HAVE_LZMA=0 $* asan32: clean - $(MAKE) -C $(TESTDIR) test32 CC=clang MOREFLAGS="-g -fsanitize=address" + $(MAKE) -C $(TESTDIR) test32 CC=clang MOREFLAGS="-g -fsanitize=address $(MOREFLAGS)" uasan: clean - $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=signed-integer-overflow -fsanitize=address,undefined -Werror" + $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=address,undefined -Werror $(MOREFLAGS)" uasan-%: clean - LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=signed-integer-overflow -fsanitize=address,undefined -Werror" $(MAKE) -C $(TESTDIR) $* + LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=address,undefined -Werror $(MOREFLAGS)" $(MAKE) -C $(TESTDIR) $* tsan-%: clean - LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=thread -Werror" $(MAKE) -C $(TESTDIR) $* FUZZER_FLAGS=--no-big-tests + LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=thread -Werror $(MOREFLAGS)" $(MAKE) -C $(TESTDIR) $* FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" +.PHONY: apt-install apt-install: + # TODO: uncomment once issue 3011 is resolved and remove hack from Github Actions .yml + # sudo apt-get update sudo apt-get -yq --no-install-suggests --no-install-recommends --force-yes install $(APT_PACKAGES) +.PHONY: apt-add-repo apt-add-repo: sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test sudo apt-get update -y -qq +.PHONY: ppcinstall arminstall valgrindinstall libc6install gcc6install gcc7install gcc8install gpp6install clang38install ppcinstall: APT_PACKAGES="qemu-system-ppc qemu-user-static gcc-powerpc-linux-gnu" $(MAKE) apt-install @@ -328,48 +382,72 @@ gpp6install: apt-add-repo clang38install: APT_PACKAGES="clang-3.8" $(MAKE) apt-install -# Ubuntu 14.04 ships a 
too-old lz4 -lz4install: - [ -e lz4 ] || git clone https://github.com/lz4/lz4 && sudo $(MAKE) -C lz4 install - endif -ifneq (,$(filter MSYS%,$(shell uname))) +ifneq (,$(filter MSYS%,$(shell sh -c 'MSYSTEM="MSYS" uname') )) HOST_OS = MSYS -CMAKE_PARAMS = -G"MSYS Makefiles" -DZSTD_MULTITHREAD_SUPPORT:BOOL=OFF -DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_BUILD_TESTS:BOOL=ON endif - #------------------------------------------------------------------------ # target specific tests #------------------------------------------------------------------------ -ifneq (,$(filter $(HOST_OS),MSYS POSIX)) -cmakebuild: - cmake --version - $(RM) -r $(BUILDIR)/cmake/build - mkdir $(BUILDIR)/cmake/build - cd $(BUILDIR)/cmake/build ; cmake -DCMAKE_INSTALL_PREFIX:PATH=~/install_test_dir $(CMAKE_PARAMS) .. ; $(MAKE) install ; $(MAKE) uninstall +ifneq (,$(filter MSYS POSIX,$(HOST_OS))) -c90build: clean +CMAKE ?= cmake +CMAKE_PARAMS = -DZSTD_BUILD_CONTRIB:BOOL=ON -DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_BUILD_TESTS:BOOL=ON -DZSTD_ZLIB_SUPPORT:BOOL=ON -DZSTD_LZMA_SUPPORT:BOOL=ON + +ifneq (,$(filter MSYS%,$(shell sh -c 'MSYSTEM="MSYS" uname'))) +CMAKE_PARAMS = -G"MSYS Makefiles" -DZSTD_MULTITHREAD_SUPPORT:BOOL=OFF -DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_BUILD_TESTS:BOOL=ON +endif + +.PHONY: cmakebuild +cmakebuild: + $(CMAKE) --version + $(RM) -r cmakebuild install + $(MKDIR) cmakebuild install + cd cmakebuild; $(CMAKE) -Wdev -DCMAKE_BUILD_TYPE=Debug -DCMAKE_C_FLAGS="-Werror -O0" -DCMAKE_INSTALL_PREFIX=install $(CMAKE_PARAMS) ../build/cmake + $(CMAKE) --build cmakebuild --target install -- -j V=1 + cd cmakebuild; ctest -V -L Medium + +MESON ?= meson +NINJA ?= ninja + +.PHONY: mesonbuild +mesonbuild: + $(MESON) setup \ + --buildtype=debugoptimized \ + -Db_lundef=false \ + -Dauto_features=enabled \ + -Dbin_programs=true \ + -Dbin_tests=true \ + -Dbin_contrib=true \ + -Ddefault_library=both \ + build/meson mesonbuild + $(NINJA) -C mesonbuild/ + $(MESON) test -C mesonbuild/ --print-errorlogs + $(MESON) install -C mesonbuild --destdir staging/ + +.PHONY: c89build gnu90build c99build gnu99build c11build bmix64build bmix32build bmi32build staticAnalyze +c89build: clean $(CC) -v - CFLAGS="-std=c90 -Werror" $(MAKE) allmost # will fail, due to missing support for `long long` + CFLAGS="-std=c89 -Werror -Wno-attributes -Wpedantic -Wno-long-long -Wno-variadic-macros -O0" $(MAKE) lib zstd gnu90build: clean $(CC) -v - CFLAGS="-std=gnu90 -Werror" $(MAKE) allmost + CFLAGS="-std=gnu90 -Werror -O0" $(MAKE) allmost c99build: clean $(CC) -v - CFLAGS="-std=c99 -Werror" $(MAKE) allmost + CFLAGS="-std=c99 -Werror -O0" $(MAKE) allmost gnu99build: clean $(CC) -v - CFLAGS="-std=gnu99 -Werror" $(MAKE) allmost + CFLAGS="-std=gnu99 -Werror -O0" $(MAKE) allmost c11build: clean $(CC) -v - CFLAGS="-std=c11 -Werror" $(MAKE) allmost + CFLAGS="-std=c11 -Werror -O0" $(MAKE) allmost bmix64build: clean $(CC) -v @@ -388,5 +466,5 @@ bmi32build: clean staticAnalyze: SCANBUILD ?= scan-build staticAnalyze: $(CC) -v - CC=$(CC) CPPFLAGS=-g $(SCANBUILD) --status-bugs -v $(MAKE) allzstd examples contrib + CC=$(CC) CPPFLAGS=-g $(SCANBUILD) --status-bugs -v $(MAKE) zstd endif diff --git a/Package.swift b/Package.swift new file mode 100644 index 00000000000..97f2c6a5251 --- /dev/null +++ b/Package.swift @@ -0,0 +1,36 @@ +// swift-tools-version:5.0 +// The swift-tools-version declares the minimum version of Swift required to build this package. 
+ +import PackageDescription + +let package = Package( + name: "zstd", + platforms: [ + .macOS(.v10_10), .iOS(.v9), .tvOS(.v9) + ], + products: [ + // Products define the executables and libraries a package produces, and make them visible to other packages. + .library( + name: "libzstd", + targets: [ "libzstd" ]) + ], + dependencies: [ + // Dependencies declare other packages that this package depends on. + // .package(url: /* package url */, from: "1.0.0"), + ], + targets: [ + // Targets are the basic building blocks of a package. A target can define a module or a test suite. + // Targets can depend on other targets in this package, and on products in packages this package depends on. + .target( + name: "libzstd", + path: "lib", + sources: [ "common", "compress", "decompress", "dictBuilder" ], + publicHeadersPath: ".", + cSettings: [ + .headerSearchPath(".") + ]) + ], + swiftLanguageVersions: [.v5], + cLanguageStandard: .gnu11, + cxxLanguageStandard: .gnucxx14 +) diff --git a/README.md b/README.md index 290341cc78e..3aa01a0c1d1 100644 --- a/README.md +++ b/README.md @@ -4,53 +4,60 @@ __Zstandard__, or `zstd` as short version, is a fast lossless compression algori targeting real-time compression scenarios at zlib-level and better compression ratios. It's backed by a very fast entropy stage, provided by [Huff0 and FSE library](https://github.com/Cyan4973/FiniteStateEntropy). -The project is provided as an open-source dual [BSD](LICENSE) and [GPLv2](COPYING) licensed **C** library, +Zstandard's format is stable and documented in [RFC8878](https://datatracker.ietf.org/doc/html/rfc8878). Multiple independent implementations are already available. +This repository represents the reference implementation, provided as an open-source dual [BSD](LICENSE) OR [GPLv2](COPYING) licensed **C** library, and a command line utility producing and decoding `.zst`, `.gz`, `.xz` and `.lz4` files. Should your project require another programming language, -a list of known ports and bindings is provided on [Zstandard homepage](http://www.zstd.net/#other-languages). +a list of known ports and bindings is provided on [Zstandard homepage](https://facebook.github.io/zstd/#other-languages). 
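For a quick illustration of the command line utility just described, here is a typical round-trip; the file name is arbitrary, and the flags shown (`-19` for a high compression level, `-k` to keep the input, `-d` to decompress, `-o` to name the output) are standard `zstd` CLI options:

```bash
# Compress at level 19, keeping the original file; produces example.txt.zst
zstd -19 -k example.txt

# Decompress to an explicitly named output file
zstd -d example.txt.zst -o example.decoded.txt
```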
**Development branch status:** [![Build Status][travisDevBadge]][travisLink] -[![Build status][AppveyorDevBadge]][AppveyorLink] [![Build status][CircleDevBadge]][CircleLink] [![Build status][CirrusDevBadge]][CirrusLink] +[![Fuzzing Status][OSSFuzzBadge]][OSSFuzzLink] -[travisDevBadge]: https://travis-ci.org/facebook/zstd.svg?branch=dev "Continuous Integration test suite" -[travisLink]: https://travis-ci.org/facebook/zstd -[AppveyorDevBadge]: https://ci.appveyor.com/api/projects/status/xt38wbdxjk5mrbem/branch/dev?svg=true "Windows test suite" -[AppveyorLink]: https://ci.appveyor.com/project/YannCollet/zstd-p0yf0 +[travisDevBadge]: https://api.travis-ci.com/facebook/zstd.svg?branch=dev "Continuous Integration test suite" +[travisLink]: https://travis-ci.com/facebook/zstd [CircleDevBadge]: https://circleci.com/gh/facebook/zstd/tree/dev.svg?style=shield "Short test suite" [CircleLink]: https://circleci.com/gh/facebook/zstd [CirrusDevBadge]: https://api.cirrus-ci.com/github/facebook/zstd.svg?branch=dev [CirrusLink]: https://cirrus-ci.com/github/facebook/zstd +[OSSFuzzBadge]: https://oss-fuzz-build-logs.storage.googleapis.com/badges/zstd.svg +[OSSFuzzLink]: https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:zstd ## Benchmarks For reference, several fast compression algorithms were tested and compared -on a server running Arch Linux (`Linux version 5.0.5-arch1-1`), -with a Core i9-9900K CPU @ 5.0GHz, +on a desktop featuring a Core i7-9700K CPU @ 4.9GHz +and running Ubuntu 24.04 (`Linux 6.8.0-53-generic`), using [lzbench], an open-source in-memory benchmark by @inikep -compiled with [gcc] 8.2.1, +compiled with [gcc] 14.2.0, on the [Silesia compression corpus]. [lzbench]: https://github.com/inikep/lzbench -[Silesia compression corpus]: http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia +[Silesia compression corpus]: https://sun.aei.polsl.pl//~sdeor/index.php?page=silesia [gcc]: https://gcc.gnu.org/ | Compressor name | Ratio | Compression| Decompress.| | --------------- | ------| -----------| ---------- | -| **zstd 1.4.0 -1** | 2.884 | 530 MB/s | 1360 MB/s | -| zlib 1.2.11 -1 | 2.743 | 110 MB/s | 440 MB/s | -| brotli 1.0.7 -0 | 2.701 | 430 MB/s | 470 MB/s | -| quicklz 1.5.0 -1 | 2.238 | 600 MB/s | 800 MB/s | -| lzo1x 2.09 -1 | 2.106 | 680 MB/s | 950 MB/s | -| lz4 1.8.3 | 2.101 | 800 MB/s | 4220 MB/s | -| snappy 1.1.4 | 2.073 | 580 MB/s | 2020 MB/s | -| lzf 3.6 -1 | 2.077 | 440 MB/s | 930 MB/s | - -[zlib]: http://www.zlib.net/ -[LZ4]: http://www.lz4.org/ +| **zstd 1.5.7 -1** | 2.896 | 510 MB/s | 1550 MB/s | +| brotli 1.1.0 -1 | 2.883 | 290 MB/s | 425 MB/s | +| [zlib] 1.3.1 -1 | 2.743 | 105 MB/s | 390 MB/s | +| **zstd 1.5.7 --fast=1** | 2.439 | 545 MB/s | 1850 MB/s | +| quicklz 1.5.0 -1 | 2.238 | 520 MB/s | 750 MB/s | +| **zstd 1.5.7 --fast=4** | 2.146 | 665 MB/s | 2050 MB/s | +| lzo1x 2.10 -1 | 2.106 | 650 MB/s | 780 MB/s | +| [lz4] 1.10.0 | 2.101 | 675 MB/s | 3850 MB/s | +| snappy 1.2.1 | 2.089 | 520 MB/s | 1500 MB/s | +| lzf 3.6 -1 | 2.077 | 410 MB/s | 820 MB/s | + +[zlib]: https://www.zlib.net/ +[lz4]: https://lz4.github.io/lz4/ + +The negative compression levels, specified with `--fast=#`, +offer faster compression and decompression speed +at the cost of compression ratio. Zstd can also offer stronger compression ratios at the cost of compression speed. Speed vs Compression trade-off is configurable by small increments. @@ -113,23 +120,55 @@ Dictionary gains are mostly effective in the first few KB. 
Then, the compression ## Build instructions +`make` is the main build system of this project. +It is the reference, and other build systems are periodically updated to stay compatible. +However, small drifts and feature differences can be present, since perfect synchronization is difficult. +For this reason, when your build system allows it, prefer using `make`. + ### Makefile -If your system is compatible with standard `make` (or `gmake`), -invoking `make` in root directory will generate `zstd` cli in root directory. +Assuming your system supports standard `make` (or `gmake`), +just invoking `make` in the root directory generates the `zstd` CLI at the root, +and also builds `libzstd` into `lib/`. + +Other standard targets include: +- `make install` : install the `zstd` CLI, library and man pages +- `make check` : run `zstd` and test its essential behavior on the local platform + +The `Makefile` follows the [GNU Standard Makefile conventions](https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html), +allowing staged installs, standard compilation flags, directory variables and command variables (a short staged-install sketch is shown below, after the VCPKG section). -Other available options include: -- `make install` : create and install zstd cli, library and man pages -- `make check` : create and run `zstd`, tests its behavior on local platform +For advanced use cases, specialized flags that control binary generation and installation paths are documented +in [`lib/README.md`](lib/README.md#modular-build) for the `libzstd` library +and in [`programs/README.md`](programs/README.md#compilation-variables) for the `zstd` CLI. ### cmake -A `cmake` project generator is provided within `build/cmake`. -It can generate Makefiles or other build scripts -to create `zstd` binary, and `libzstd` dynamic and static libraries. +A `cmake` project generator is available for generating Makefiles or other build scripts +to create the `zstd` binary as well as `libzstd` dynamic and static libraries. +The repository root now contains a minimal `CMakeLists.txt` that forwards to `build/cmake`, +so you can configure the project with a standard `cmake -S .` invocation, +while the historical `cmake -S build/cmake` entry point remains fully supported. + +```bash +cmake -S . -B build-cmake +cmake --build build-cmake +``` By default, `CMAKE_BUILD_TYPE` is set to `Release`. +#### Support for Fat (Universal2) Output + +`zstd` can be built and installed with support for both Apple Silicon (M1/M2) and Intel by using CMake's Universal2 support. +To perform a Fat/Universal2 build and install, use the following commands: + +```bash +cmake -S . -B build-cmake-debug -G Ninja -DCMAKE_OSX_ARCHITECTURES="x86_64;x86_64h;arm64" +cd build-cmake-debug +ninja +sudo ninja install +``` + ### Meson A Meson project is provided within [`build/meson`](build/meson). Follow @@ -140,31 +179,66 @@ example about how Meson is used to build this project. Note that default build type is **release**. +### VCPKG +You can build and install zstd using the [vcpkg](https://github.com/Microsoft/vcpkg/) dependency manager: + + git clone https://github.com/Microsoft/vcpkg.git + cd vcpkg + ./bootstrap-vcpkg.sh + ./vcpkg integrate install + ./vcpkg install zstd + +The zstd port in vcpkg is kept up to date by Microsoft team members and community contributors. +If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository.
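Returning to the Makefile conventions noted earlier, here is a minimal staged-install sketch; the staging path is illustrative, and `DESTDIR`/`PREFIX` are assumed to behave as the GNU conventions describe:

```bash
# Build the CLI and library
make

# Stage the install tree under a scratch directory instead of installing system-wide
make install DESTDIR=/tmp/zstd-staging PREFIX=/usr/local

# The staged tree mirrors the final layout, e.g. /tmp/zstd-staging/usr/local/bin/zstd
ls /tmp/zstd-staging/usr/local/bin
```

Such a staged tree can then be inspected, packaged, or copied to the target system as a whole.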
+ +### Conan + +You can install pre-built binaries for zstd or build it from source using [Conan](https://conan.io/). Use the following command: + +```bash +conan install --requires="zstd/[*]" --build=missing +``` + +The zstd Conan recipe is kept up to date by Conan maintainers and community contributors. +If the version is out of date, please [create an issue or pull request](https://github.com/conan-io/conan-center-index) on the ConanCenterIndex repository. + ### Visual Studio (Windows) Going into `build` directory, you will find additional possibilities: -- Projects for Visual Studio 2005, 2008 and 2010. +- Projects for Visual Studio 2008 and 2010. + The VS2010 project is compatible with VS2012, VS2013, VS2015 and VS2017. - Automated build scripts for Visual compiler by [@KrzysFR](https://github.com/KrzysFR), in `build/VS_scripts`, which will build `zstd` cli and `libzstd` library without any need to open Visual Studio solution. +- It is now recommended to generate Visual Studio solutions with `cmake`. ### Buck You can build the zstd binary via buck by executing: `buck build programs:zstd` from the root of the repo. The output binary will be in `buck-out/gen/programs/`. +### Bazel + +You can integrate zstd into your Bazel project by using the module hosted on the [Bazel Central Repository](https://registry.bazel.build/modules/zstd). + +## Testing + +You can run quick local smoke tests by running `make check`. +If you can't use `make`, execute the `playTests.sh` script from the `tests` directory. +Two environment variables, `$ZSTD_BIN` and `$DATAGEN_BIN`, are needed for the test script to locate the `zstd` and `datagen` binaries. +For information on CI testing, please refer to `TESTING.md`. + ## Status -Zstandard is currently deployed within Facebook. It is used continuously to compress large amounts of data in multiple formats and use cases. -Zstandard is considered safe for production environments. +Zstandard is deployed within Meta and many other large cloud infrastructures, +to compress humongous amounts of data in various formats and use cases. +It is also continuously fuzzed for security issues by Google's [oss-fuzz](https://github.com/google/oss-fuzz/tree/master/projects/zstd) program. ## License -Zstandard is dual-licensed under [BSD](LICENSE) and [GPLv2](COPYING). +Zstandard is dual-licensed under [BSD](LICENSE) OR [GPLv2](COPYING). ## Contributing -The "dev" branch is the one where all contributions are merged before reaching "master". -If you plan to propose a patch, please commit into the "dev" branch, or its own feature branch. -Direct commit to "master" are not permitted. +The `dev` branch is the one where all contributions are merged before reaching `release`. +Direct commits to `release` are not permitted. For more information, please read [CONTRIBUTING](CONTRIBUTING.md). diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000000..a5f9a7e1fdb --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,15 @@ +# Reporting and Fixing Security Issues + +Please do not open GitHub issues or pull requests, as this makes the problem immediately visible to everyone, including malicious actors. Security issues in this open source project can be safely reported via the Meta Bug Bounty program: + +https://www.facebook.com/whitehat + +Meta's security team will triage your report and determine whether or not it is eligible for a bounty under our program.
+ +# Receiving Vulnerability Notifications + +If a significant security vulnerability is reported to us, or discovered by us, without being publicly known, we will, at our discretion, notify high-profile, high-exposure users of Zstandard ahead of our public disclosure of the issue and associated fix. + +If you believe your project would benefit from inclusion in this list, please reach out to one of the maintainers. + + diff --git a/TESTING.md b/TESTING.md index 551981b1405..df842cc9411 100644 --- a/TESTING.md +++ b/TESTING.md @@ -11,7 +11,7 @@ They consist of the following tests: - Compilation on all supported targets (x86, x86_64, ARM, AArch64, PowerPC, and PowerPC64) - Compilation on various versions of gcc, clang, and g++ - `tests/playTests.sh` on x86_64, without the tests on long data (CLI tests) -- Small tests (`tests/legacy.c`, `tests/longmatch.c`, `tests/symbols.c`) on x64_64 +- Small tests (`tests/legacy.c`, `tests/longmatch.c`) on x86_64 Medium Tests ------------ @@ -19,15 +19,15 @@ Medium tests run on every commit and pull request to `dev` branch, on TravisCI. They consist of the following tests: - The following tests run with UBsan and Asan on x86_64 and x86, as well as with Msan on x86_64 - - `tests/playTests.sh --test-long-data` + - `tests/playTests.sh --test-large-data` - Fuzzer tests: `tests/fuzzer.c`, `tests/zstreamtest.c`, and `tests/decodecorpus.c` - `tests/zstreamtest.c` under Tsan (streaming mode, including multithreaded mode) -- Valgrind Test (`make -C tests valgrindTest`) (testing CLI and fuzzer under valgrind) +- Valgrind Test (`make -C tests test-valgrind`) (testing the CLI and fuzzer under `valgrind`) - Fuzzer tests (see above) on ARM, AArch64, PowerPC, and PowerPC64 Long Tests ---------- -Long tests run on all commits to `master` branch, +Long tests run on all commits to `release` branch, and once a day on the current version of `dev` branch, on TravisCI.
They consist of the following tests: @@ -40,5 +40,4 @@ They consist of the following tests: - Versions test (ensuring `zstd` can decode files from all previous versions) - `pzstd` with asan and tsan, as well as in 32-bits mode - Testing `zstd` with legacy mode off -- Testing `zbuff` (old streaming API) - Entire test suite and make install on macOS diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 35f019dd6b0..00000000000 --- a/appveyor.yml +++ /dev/null @@ -1,251 +0,0 @@ -- - version: 1.0.{build} - branches: - only: - - master - - appveyorTest - - /visual*/ - environment: - matrix: - - COMPILER: "gcc" - HOST: "mingw" - PLATFORM: "x64" - SCRIPT: "make allzstd MOREFLAGS=-static && make -C tests test-symbols fullbench-lib" - ARTIFACT: "true" - BUILD: "true" - - COMPILER: "gcc" - HOST: "mingw" - PLATFORM: "x86" - SCRIPT: "make allzstd MOREFLAGS=-static" - ARTIFACT: "true" - BUILD: "true" - - COMPILER: "clang" - HOST: "mingw" - PLATFORM: "x64" - SCRIPT: "MOREFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make allzstd" - BUILD: "true" - - - COMPILER: "gcc" - HOST: "mingw" - PLATFORM: "x64" - SCRIPT: "" - TEST: "cmake" - - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "x64" - CONFIGURATION: "Debug" - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "Win32" - CONFIGURATION: "Debug" - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "x64" - CONFIGURATION: "Release" - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "Win32" - CONFIGURATION: "Release" - - install: - - ECHO Installing %COMPILER% %PLATFORM% %CONFIGURATION% - - SET PATH_ORIGINAL=%PATH% - - if [%HOST%]==[mingw] ( - SET "PATH_MINGW32=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin" && - SET "PATH_MINGW64=C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin" && - COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin\make.exe && - COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin\make.exe - ) - - IF [%HOST%]==[visual] IF [%PLATFORM%]==[x64] ( - SET ADDITIONALPARAM=/p:LibraryPath="C:\Program Files\Microsoft SDKs\Windows\v7.1\lib\x64;c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\lib\amd64;C:\Program Files (x86)\Microsoft Visual Studio 10.0\;C:\Program Files (x86)\Microsoft Visual Studio 10.0\lib\amd64;" - ) - - build_script: - - if [%HOST%]==[mingw] ( - ( if [%PLATFORM%]==[x64] ( - SET "PATH=%PATH_MINGW64%;%PATH_ORIGINAL%" - ) else if [%PLATFORM%]==[x86] ( - SET "PATH=%PATH_MINGW32%;%PATH_ORIGINAL%" - ) ) - ) - - if [%HOST%]==[mingw] if [%BUILD%]==[true] ( - make -v && - sh -c "%COMPILER% -v" && - ECHO Building zlib to static link && - SET "CC=%COMPILER%" && - sh -c "cd .. 
&& git clone --depth 1 --branch v1.2.11 https://github.com/madler/zlib" && - sh -c "cd ../zlib && make -f win32/Makefile.gcc libz.a" - ECHO Building zstd && - SET "CPPFLAGS=-I../../zlib" && - SET "LDFLAGS=../../zlib/libz.a" && - sh -c "%SCRIPT%" && - ( if [%COMPILER%]==[gcc] if [%ARTIFACT%]==[true] - ECHO Creating artifacts && - ECHO %cd% && - lib\dll\example\build_package.bat && - make -C programs DEBUGFLAGS= clean zstd && - cd programs\ && 7z a -tzip -mx9 zstd-win-binary-%PLATFORM%.zip zstd.exe && - appveyor PushArtifact zstd-win-binary-%PLATFORM%.zip && - cp zstd.exe ..\bin\zstd.exe && - git clone --depth 1 --branch master https://github.com/facebook/zstd && - cd zstd && - git archive --format=tar master -o zstd-src.tar && - ..\zstd -19 zstd-src.tar && - appveyor PushArtifact zstd-src.tar.zst && - certUtil -hashfile zstd-src.tar.zst SHA256 > zstd-src.tar.zst.sha256.sig && - appveyor PushArtifact zstd-src.tar.zst.sha256.sig && - cd ..\..\bin\ && - 7z a -tzip -mx9 zstd-win-release-%PLATFORM%.zip * && - appveyor PushArtifact zstd-win-release-%PLATFORM%.zip - ) - ) - - if [%HOST%]==[visual] ( - ECHO *** && - ECHO *** Building Visual Studio 2008 %PLATFORM%\%CONFIGURATION% in %APPVEYOR_BUILD_FOLDER% && - ECHO *** && - msbuild "build\VS2008\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v90 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2008\bin\%PLATFORM%\%CONFIGURATION%\*.exe && - MD5sum build/VS2008/bin/%PLATFORM%/%CONFIGURATION%/*.exe && - COPY build\VS2008\bin\%PLATFORM%\%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2008_%PLATFORM%_%CONFIGURATION%.exe && - ECHO *** && - ECHO *** Building Visual Studio 2010 %PLATFORM%\%CONFIGURATION% && - ECHO *** && - msbuild "build\VS2010\zstd.sln" %ADDITIONALPARAM% /m /verbosity:minimal /property:PlatformToolset=v100 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - msbuild "build\VS2010\zstd.sln" %ADDITIONALPARAM% /m /verbosity:minimal /property:PlatformToolset=v100 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2010_%PLATFORM%_%CONFIGURATION%.exe && - ECHO *** && - ECHO *** Building Visual Studio 2012 %PLATFORM%\%CONFIGURATION% && - ECHO *** && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v110 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v110 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && 
- DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2012_%PLATFORM%_%CONFIGURATION%.exe && - ECHO *** && - ECHO *** Building Visual Studio 2013 %PLATFORM%\%CONFIGURATION% && - ECHO *** && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v120 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v120 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2013_%PLATFORM%_%CONFIGURATION%.exe && - ECHO *** && - ECHO *** Building Visual Studio 2015 %PLATFORM%\%CONFIGURATION% && - ECHO *** && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2015_%PLATFORM%_%CONFIGURATION%.exe && - COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe tests\ - ) - - test_script: - - ECHO Testing %COMPILER% %PLATFORM% %CONFIGURATION% - - SET "CC=gcc" - - SET "CXX=g++" - - if [%TEST%]==[cmake] ( - mkdir build\cmake\build && - cd build\cmake\build && - cmake -G "Visual Studio 14 2015 Win64" .. && - cd ..\..\.. 
&& - make clean - ) - - SET "FUZZERTEST=-T30s" - - if [%HOST%]==[visual] if [%CONFIGURATION%]==[Release] ( - CD tests && - SET ZSTD=./zstd.exe && - sh -e playTests.sh --test-large-data && - fullbench.exe -i1 && - fullbench.exe -i1 -P0 && - fuzzer_VS2008_%PLATFORM%_Release.exe %FUZZERTEST% && - fuzzer_VS2010_%PLATFORM%_Release.exe %FUZZERTEST% && - fuzzer_VS2012_%PLATFORM%_Release.exe %FUZZERTEST% && - fuzzer_VS2013_%PLATFORM%_Release.exe %FUZZERTEST% && - fuzzer_VS2015_%PLATFORM%_Release.exe %FUZZERTEST% - ) - -- - version: 1.0.{build} - environment: - matrix: - - COMPILER: "gcc" - HOST: "mingw" - PLATFORM: "x64" - SCRIPT: "CPPFLAGS=-DDEBUGLEVEL=2 CFLAGS=-Werror make -j allzstd DEBUGLEVEL=2" - - COMPILER: "gcc" - HOST: "mingw" - PLATFORM: "x86" - SCRIPT: "CFLAGS=-Werror make -j allzstd" - - COMPILER: "clang" - HOST: "mingw" - PLATFORM: "x64" - SCRIPT: "CFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make -j allzstd" - - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "x64" - CONFIGURATION: "Debug" - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "Win32" - CONFIGURATION: "Debug" - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "x64" - CONFIGURATION: "Release" - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "Win32" - CONFIGURATION: "Release" - - install: - - ECHO Installing %COMPILER% %PLATFORM% %CONFIGURATION% - - SET PATH_ORIGINAL=%PATH% - - if [%HOST%]==[mingw] ( - SET "PATH_MINGW32=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin" && - SET "PATH_MINGW64=C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin" && - COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin\make.exe && - COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin\make.exe - ) - - IF [%HOST%]==[visual] IF [%PLATFORM%]==[x64] ( - SET ADDITIONALPARAM=/p:LibraryPath="C:\Program Files\Microsoft SDKs\Windows\v7.1\lib\x64;c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\lib\amd64;C:\Program Files (x86)\Microsoft Visual Studio 10.0\;C:\Program Files (x86)\Microsoft Visual Studio 10.0\lib\amd64;" - ) - - build_script: - - ECHO Building %COMPILER% %PLATFORM% %CONFIGURATION% - - if [%HOST%]==[mingw] ( - ( if [%PLATFORM%]==[x64] ( - SET "PATH=%PATH_MINGW64%;%PATH_ORIGINAL%" - ) else if [%PLATFORM%]==[x86] ( - SET "PATH=%PATH_MINGW32%;%PATH_ORIGINAL%" - ) ) && - make -v && - sh -c "%COMPILER% -v" && - set "CC=%COMPILER%" && - sh -c "%SCRIPT%" - ) - - if [%HOST%]==[visual] ( - ECHO *** && - ECHO *** Building Visual Studio 2015 %PLATFORM%\%CONFIGURATION% && - ECHO *** && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2015_%PLATFORM%_%CONFIGURATION%.exe && - COPY 
build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe tests\ - ) diff --git a/build/.gitignore b/build/.gitignore index 1ceb70ebcc8..5a18b30a09b 100644 --- a/build/.gitignore +++ b/build/.gitignore @@ -29,3 +29,5 @@ compile_commands.json CTestTestfile.cmake build lib +!cmake/lib +!meson/lib diff --git a/build/LICENSE b/build/LICENSE new file mode 100644 index 00000000000..e69de29bb2d diff --git a/build/VS2008/fullbench/fullbench.vcproj b/build/VS2008/fullbench/fullbench.vcproj index 2246262df80..5e349dce956 100644 --- a/build/VS2008/fullbench/fullbench.vcproj +++ b/build/VS2008/fullbench/fullbench.vcproj @@ -364,6 +364,18 @@ RelativePath="..\..\..\lib\compress\zstd_compress.c" > + + + + + + @@ -451,7 +463,7 @@ > + + + + + + + + diff --git a/build/VS2008/fuzzer/fuzzer.vcproj b/build/VS2008/fuzzer/fuzzer.vcproj index c81791f4cde..32f28468473 100644 --- a/build/VS2008/fuzzer/fuzzer.vcproj +++ b/build/VS2008/fuzzer/fuzzer.vcproj @@ -412,6 +412,18 @@ RelativePath="..\..\..\lib\compress\zstd_compress.c" > + + + + + + @@ -471,7 +483,7 @@ > + + + + + + + + diff --git a/build/VS2008/zstd/zstd.vcproj b/build/VS2008/zstd/zstd.vcproj index 745f2e875fa..de1501d2056 100644 --- a/build/VS2008/zstd/zstd.vcproj +++ b/build/VS2008/zstd/zstd.vcproj @@ -356,6 +356,10 @@ RelativePath="..\..\..\programs\dibio.c" > + + @@ -384,6 +388,10 @@ RelativePath="..\..\..\programs\fileio.c" > + + @@ -424,6 +432,18 @@ RelativePath="..\..\..\lib\compress\zstd_compress.c" > + + + + + + @@ -468,6 +488,10 @@ RelativePath="..\..\..\programs\zstdcli.c" > + + @@ -543,7 +567,7 @@ > + + + + + + + + diff --git a/build/VS2008/zstdlib/zstdlib.vcproj b/build/VS2008/zstdlib/zstdlib.vcproj index 4dc91516e06..88c1aee26d5 100644 --- a/build/VS2008/zstdlib/zstdlib.vcproj +++ b/build/VS2008/zstdlib/zstdlib.vcproj @@ -396,6 +396,18 @@ RelativePath="..\..\..\lib\compress\zstd_compress.c" > + + + + + + @@ -483,7 +495,7 @@ > + + + + + + + + diff --git a/build/VS2010/datagen/datagen.vcxproj b/build/VS2010/datagen/datagen.vcxproj index a66358a0d3a..aaba4788b9c 100644 --- a/build/VS2010/datagen/datagen.vcxproj +++ b/build/VS2010/datagen/datagen.vcxproj @@ -157,6 +157,8 @@ + + diff --git a/build/VS2010/fullbench-dll/fullbench-dll.vcxproj b/build/VS2010/fullbench-dll/fullbench-dll.vcxproj deleted file mode 100644 index befdc044513..00000000000 --- a/build/VS2010/fullbench-dll/fullbench-dll.vcxproj +++ /dev/null @@ -1,189 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {00000000-1CC8-4FD7-9281-6B8DBB9D3DF8} - Win32Proj - fullbench-dll - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - MultiByte - - - Application - true - MultiByte - - - Application - false - true - MultiByte - - - Application - false - true - MultiByte - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(UniversalCRT_IncludePath); - false - - - true - $(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(UniversalCRT_IncludePath); - false - - - false - $(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(UniversalCRT_IncludePath); - false - - - false - 
$(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(UniversalCRT_IncludePath); - false - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;ZSTD_DLL_IMPORT=1;%(PreprocessorDefinitions) - true - false - - - Console - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - libzstd.lib;%(AdditionalDependencies) - false - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;ZSTD_DLL_IMPORT=1;%(PreprocessorDefinitions) - true - false - - - Console - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - libzstd.lib;%(AdditionalDependencies) - - - - - Level4 - - - MaxSpeed - true - true - WIN32;_DEBUG;_CONSOLE;ZSTD_DLL_IMPORT=1;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - libzstd.lib;%(AdditionalDependencies) - false - - - - - Level4 - - - MaxSpeed - true - true - WIN32;_DEBUG;_CONSOLE;ZSTD_DLL_IMPORT=1;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - libzstd.lib;%(AdditionalDependencies) - - - - - - - - - - - - - - - - - - - {00000000-94d5-4bf9-8a50-7bd9929a0850} - - - - - - diff --git a/build/VS2010/fullbench/fullbench.vcxproj b/build/VS2010/fullbench/fullbench.vcxproj index 57ee337160c..c78a72eccdd 100644 --- a/build/VS2010/fullbench/fullbench.vcxproj +++ b/build/VS2010/fullbench/fullbench.vcxproj @@ -167,6 +167,10 @@ + + + + @@ -180,6 +184,7 @@ + @@ -187,12 +192,16 @@ - + + + + + diff --git a/build/VS2010/fuzzer/fuzzer.vcxproj b/build/VS2010/fuzzer/fuzzer.vcxproj index de649de1c44..ccbd2517de9 100644 --- a/build/VS2010/fuzzer/fuzzer.vcxproj +++ b/build/VS2010/fuzzer/fuzzer.vcxproj @@ -167,6 +167,10 @@ + + + + @@ -193,9 +197,13 @@ - + + + + + @@ -204,7 +212,7 @@ - + diff --git a/build/VS2010/libzstd-dll/libzstd-dll.rc b/build/VS2010/libzstd-dll/libzstd-dll.rc index ee9f5628043..13e8746ffb5 100644 --- a/build/VS2010/libzstd-dll/libzstd-dll.rc +++ b/build/VS2010/libzstd-dll/libzstd-dll.rc @@ -32,11 +32,11 @@ BEGIN BEGIN BLOCK "040904B0" BEGIN - VALUE "CompanyName", "Yann Collet, Facebook, Inc." + VALUE "CompanyName", "Meta Platforms, Inc." VALUE "FileDescription", "Zstandard - Fast and efficient compression algorithm" VALUE "FileVersion", ZSTD_VERSION_STRING VALUE "InternalName", "libzstd.dll" - VALUE "LegalCopyright", "Copyright (c) 2013-present, Yann Collet, Facebook, Inc." + VALUE "LegalCopyright", "Copyright (c) Meta Platforms, Inc. and affiliates." 
VALUE "OriginalFilename", "libzstd.dll" VALUE "ProductName", "Zstandard" VALUE "ProductVersion", ZSTD_VERSION_STRING diff --git a/build/VS2010/libzstd-dll/libzstd-dll.vcxproj b/build/VS2010/libzstd-dll/libzstd-dll.vcxproj index bcde584bed8..6925e0f9e58 100644 --- a/build/VS2010/libzstd-dll/libzstd-dll.vcxproj +++ b/build/VS2010/libzstd-dll/libzstd-dll.vcxproj @@ -31,6 +31,10 @@ + + + + @@ -41,9 +45,6 @@ - - - @@ -61,12 +62,11 @@ - + - @@ -78,6 +78,10 @@ + + + + @@ -166,7 +170,6 @@ Level4 Disabled ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) - true EnableFastChecks MultiThreadedDebugDLL EditAndContinue diff --git a/build/VS2010/libzstd/libzstd.vcxproj b/build/VS2010/libzstd/libzstd.vcxproj index 6b985bafeb1..82a2d8268fa 100644 --- a/build/VS2010/libzstd/libzstd.vcxproj +++ b/build/VS2010/libzstd/libzstd.vcxproj @@ -31,6 +31,10 @@ + + + + @@ -41,9 +45,6 @@ - - - @@ -61,12 +62,11 @@ - + - @@ -78,6 +78,10 @@ + + + + @@ -159,7 +163,6 @@ Level4 Disabled ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) - true EnableFastChecks MultiThreadedDebugDLL EditAndContinue diff --git a/build/VS2010/zstd.sln b/build/VS2010/zstd.sln index 12032db44fc..528aae8aa01 100644 --- a/build/VS2010/zstd.sln +++ b/build/VS2010/zstd.sln @@ -7,11 +7,6 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fuzzer", "fuzzer\fuzzer.vcx EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fullbench", "fullbench\fullbench.vcxproj", "{61ABD629-1CC8-4FD7-9281-6B8DBB9D3DF8}" EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fullbench-dll", "fullbench-dll\fullbench-dll.vcxproj", "{00000000-1CC8-4FD7-9281-6B8DBB9D3DF8}" - ProjectSection(ProjectDependencies) = postProject - {00000000-94D5-4BF9-8A50-7BD9929A0850} = {00000000-94D5-4BF9-8A50-7BD9929A0850} - EndProjectSection -EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "datagen", "datagen\datagen.vcxproj", "{037E781E-81A6-494B-B1B3-438AB1200523}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libzstd", "libzstd\libzstd.vcxproj", "{8BFD8150-94D5-4BF9-8A50-7BD9929A0850}" diff --git a/build/VS2010/zstd/zstd.rc b/build/VS2010/zstd/zstd.rc index f5e404730d2..a2118c2df10 100644 --- a/build/VS2010/zstd/zstd.rc +++ b/build/VS2010/zstd/zstd.rc @@ -32,11 +32,11 @@ BEGIN BEGIN BLOCK "040904B0" BEGIN - VALUE "CompanyName", "Yann Collet, Facebook, Inc." + VALUE "CompanyName", "Meta Platforms, Inc." VALUE "FileDescription", "Zstandard - Fast and efficient compression algorithm" VALUE "FileVersion", ZSTD_VERSION_STRING VALUE "InternalName", "zstd.exe" - VALUE "LegalCopyright", "Copyright (c) 2013-present, Yann Collet, Facebook, Inc." + VALUE "LegalCopyright", "Copyright (c) Meta Platforms, Inc. and affiliates." 
VALUE "OriginalFilename", "zstd.exe" VALUE "ProductName", "Zstandard" VALUE "ProductVersion", ZSTD_VERSION_STRING diff --git a/build/VS2010/zstd/zstd.vcxproj b/build/VS2010/zstd/zstd.vcxproj index 6e7ddca1e33..0558687f5f6 100644 --- a/build/VS2010/zstd/zstd.vcxproj +++ b/build/VS2010/zstd/zstd.vcxproj @@ -32,6 +32,10 @@ + + + + @@ -59,22 +63,29 @@ + + + - + - + + + + + @@ -105,6 +116,7 @@ zstd $(SolutionDir)bin\$(Platform)_$(Configuration)\ $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ + NotSet @@ -178,6 +190,7 @@ ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true false + $(InstructionSet) Console @@ -194,6 +207,7 @@ ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true false + $(InstructionSet) Console @@ -213,6 +227,7 @@ false false MultiThreaded + $(InstructionSet) Console @@ -235,6 +250,7 @@ false MultiThreaded /DZSTD_MULTITHREAD %(AdditionalOptions) + $(InstructionSet) Console diff --git a/build/VS_scripts/build.VSPreview.cmd b/build/VS_scripts/build.VSPreview.cmd new file mode 100644 index 00000000000..c594bb12c2b --- /dev/null +++ b/build/VS_scripts/build.VSPreview.cmd @@ -0,0 +1,7 @@ +@echo off + +rem build 32-bit +call "%~p0%build.generic.cmd" preview Win32 Release v143 + +rem build 64-bit +call "%~p0%build.generic.cmd" preview x64 Release v143 \ No newline at end of file diff --git a/build/VS_scripts/build.generic.cmd b/build/VS_scripts/build.generic.cmd index a7ca4d0670c..0912d2347a8 100644 --- a/build/VS_scripts/build.generic.cmd +++ b/build/VS_scripts/build.generic.cmd @@ -2,7 +2,7 @@ IF "%1%" == "" GOTO display_help -SETLOCAL +SETLOCAL ENABLEDELAYEDEXPANSION SET msbuild_version=%1 @@ -19,29 +19,34 @@ GOTO build :display_help echo Syntax: build.generic.cmd msbuild_version msbuild_platform msbuild_configuration msbuild_toolset -echo msbuild_version: VS installed version (VS2012, VS2013, VS2015, VS2017, ...) +echo msbuild_version: VS installed version (latest, VS2012, VS2013, VS2015, VS2017, VS2019, VS2022, ...) echo msbuild_platform: Platform (x64 or Win32) echo msbuild_configuration: VS configuration (Release or Debug) -echo msbuild_toolset: Platform Toolset (v100, v110, v120, v140, v141) +echo msbuild_toolset: Platform Toolset (v100, v110, v120, v140, v141, v142, v143, ...) 
EXIT /B 1 :build SET msbuild="%windir%\Microsoft.NET\Framework\v4.0.30319\MSBuild.exe" -SET msbuild_vs2017community="%programfiles(x86)%\Microsoft Visual Studio\2017\Community\MSBuild\15.0\Bin\MSBuild.exe" -SET msbuild_vs2017professional="%programfiles(x86)%\Microsoft Visual Studio\2017\Professional\MSBuild\15.0\Bin\MSBuild.exe" -SET msbuild_vs2017enterprise="%programfiles(x86)%\Microsoft Visual Studio\2017\Enterprise\MSBuild\15.0\Bin\MSBuild.exe" IF %msbuild_version% == VS2013 SET msbuild="%programfiles(x86)%\MSBuild\12.0\Bin\MSBuild.exe" IF %msbuild_version% == VS2015 SET msbuild="%programfiles(x86)%\MSBuild\14.0\Bin\MSBuild.exe" -IF %msbuild_version% == VS2017Community SET msbuild=%msbuild_vs2017community% -IF %msbuild_version% == VS2017Professional SET msbuild=%msbuild_vs2017professional% -IF %msbuild_version% == VS2017Enterprise SET msbuild=%msbuild_vs2017enterprise% -IF %msbuild_version% == VS2017 ( - IF EXIST %msbuild_vs2017community% SET msbuild=%msbuild_vs2017community% - IF EXIST %msbuild_vs2017professional% SET msbuild=%msbuild_vs2017professional% - IF EXIST %msbuild_vs2017enterprise% SET msbuild=%msbuild_vs2017enterprise% +IF %msbuild_version% == VS2017 SET vswhere_params=-version [15,16) -products * +IF %msbuild_version% == VS2017Community SET vswhere_params=-version [15,16) -products Community +IF %msbuild_version% == VS2017Enterprise SET vswhere_params=-version [15,16) -products Enterprise +IF %msbuild_version% == VS2017Professional SET vswhere_params=-version [15,16) -products Professional +IF %msbuild_version% == VS2019 SET vswhere_params=-version [16,17) -products * +IF %msbuild_version% == VS2022 SET vswhere_params=-version [17,18) -products * +REM Add the next Visual Studio version here. +IF %msbuild_version% == latest SET vswhere_params=-latest -products * +IF %msbuild_version% == preview SET vswhere_params=-prerelease -products * + +IF NOT DEFINED vswhere_params GOTO skip_vswhere +SET vswhere="%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" +FOR /F "USEBACKQ TOKENS=*" %%F IN (`%vswhere% !vswhere_params! -requires Microsoft.Component.MSBuild -find MSBuild\**\Bin\MSBuild.exe`) DO ( + SET msbuild="%%F" ) +:skip_vswhere SET project="%~p0\..\VS2010\zstd.sln" diff --git a/build/cmake/CMakeLists.txt b/build/cmake/CMakeLists.txt index 33c05aee856..7cb0b684378 100644 --- a/build/cmake/CMakeLists.txt +++ b/build/cmake/CMakeLists.txt @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2016-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -7,147 +7,75 @@ # in the COPYING file in the root directory of this source tree). # ################################################################ -cmake_minimum_required(VERSION 2.8.9 FATAL_ERROR) - -# As of 2018-12-26 ZSTD has been validated to build with cmake version 3.13.2 new policies. 
-# Set and use the newest cmake policies that are validated to work -set(ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION "3") -set(ZSTD_MAX_VALIDATED_CMAKE_MINOR_VERSION "13") #Policies never changed at PATCH level -if("${CMAKE_MAJOR_VERSION}" LESS 3) - set(ZSTD_CMAKE_POLICY_VERSION "${CMAKE_VERSION}") -elseif( "${ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION}" EQUAL "${CMAKE_MAJOR_VERSION}" AND - "${ZSTD_MAX_VALIDATED_CMAKE_MINOR_VERSION}" GREATER "${CMAKE_MINOR_VERSION}") - set(ZSTD_CMAKE_POLICY_VERSION "${CMAKE_VERSION}") -else() - set(ZSTD_CMAKE_POLICY_VERSION "${ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION}.${ZSTD_MAX_VALIDATED_CMAKE_MINOR_VERSION}.0") -endif() -cmake_policy(VERSION ${ZSTD_CMAKE_POLICY_VERSION}) +cmake_minimum_required(VERSION 3.10 FATAL_ERROR) +#----------------------------------------------------------------------------- +# Setup CMake environment +#----------------------------------------------------------------------------- +set(CMAKE_BUILD_WITH_INSTALL_RPATH ON) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules") + +# Define project paths set(ZSTD_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../..") set(LIBRARY_DIR ${ZSTD_SOURCE_DIR}/lib) -# Parse version -include(GetZstdLibraryVersion) -GetZstdLibraryVersion(${LIBRARY_DIR}/zstd.h zstd_VERSION_MAJOR zstd_VERSION_MINOR zstd_VERSION_PATCH) -if( CMAKE_MAJOR_VERSION LESS 3 ) - ## Provide cmake 3+ behavior for older versions of cmake - project(zstd) - set(PROJECT_VERSION_MAJOR ${zstd_VERSION_MAJOR}) - set(PROJECT_VERSION_MINOR ${zstd_VERSION_MINOR}) - set(PROJECT_VERSION_PATCH ${zstd_VERSION_PATCH}) - set(PROJECT_VERSION "${zstd_VERSION_MAJOR}.${zstd_VERSION_MINOR}.${zstd_VERSION_PATCH}") - enable_language(C) # Main library is in C - enable_language(CXX) # Testing contributed code also utilizes CXX -else() - project(zstd - VERSION "${zstd_VERSION_MAJOR}.${zstd_VERSION_MINOR}.${zstd_VERSION_PATCH}" - LANGUAGES C # Main library is in C - CXX # Testing contributed code also utilizes CXX - ) -endif() -message(STATUS "ZSTD VERSION: ${zstd_VERSION}") -set(zstd_HOMEPAGE_URL "http://www.zstd.net") -set(zstd_DESCRIPTION "Zstandard is a real-time compression algorithm, providing high compression ratios.") +#----------------------------------------------------------------------------- +# Configure CMake policies and version +#----------------------------------------------------------------------------- +include(ZstdVersion) + +#----------------------------------------------------------------------------- +# Project declaration +#----------------------------------------------------------------------------- +project(zstd + VERSION "${ZSTD_FULL_VERSION}" + LANGUAGES C # Main library is in C and ASM, ASM is enabled conditionally + HOMEPAGE_URL "${zstd_HOMEPAGE_URL}" + DESCRIPTION "${zstd_DESCRIPTION}" +) -# Set a default build type if none was specified +#----------------------------------------------------------------------------- +# Build type configuration +#----------------------------------------------------------------------------- if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) - message(STATUS "Setting build type to 'Release' as none was specified.") - set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." 
FORCE) - # Set the possible values of build type for cmake-gui - set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo") + message(STATUS "Setting build type to 'Release' as none was specified.") + set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." FORCE) + set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo") endif() -include(GNUInstallDirs) - #----------------------------------------------------------------------------- -# Add extra compilation flags +# Include standard modules #----------------------------------------------------------------------------- -include(AddZstdCompilationFlags) -ADD_ZSTD_COMPILATION_FLAGS() - -# Always hide XXHash symbols -add_definitions(-DXXH_NAMESPACE=ZSTD_) +include(GNUInstallDirs) #----------------------------------------------------------------------------- -# Installation variables +# Display installation information #----------------------------------------------------------------------------- message(STATUS "CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}") message(STATUS "CMAKE_INSTALL_LIBDIR: ${CMAKE_INSTALL_LIBDIR}") #----------------------------------------------------------------------------- -# Options +# Configure build options #----------------------------------------------------------------------------- +include(ZstdOptions) -# Legacy support -option(ZSTD_LEGACY_SUPPORT "LEGACY SUPPORT" OFF) - -if (ZSTD_LEGACY_SUPPORT) - message(STATUS "ZSTD_LEGACY_SUPPORT defined!") - add_definitions(-DZSTD_LEGACY_SUPPORT=5) -else () - message(STATUS "ZSTD_LEGACY_SUPPORT not defined!") - add_definitions(-DZSTD_LEGACY_SUPPORT=0) -endif () - -# Multi-threading support -option(ZSTD_MULTITHREAD_SUPPORT "MULTITHREADING SUPPORT" ON) - -if (ZSTD_MULTITHREAD_SUPPORT) - message(STATUS "ZSTD_MULTITHREAD_SUPPORT is enabled") -else () - message(STATUS "ZSTD_MULTITHREAD_SUPPORT is disabled") -endif () - -option(ZSTD_BUILD_PROGRAMS "BUILD PROGRAMS" ON) -option(ZSTD_BUILD_CONTRIB "BUILD CONTRIB" OFF) -option(ZSTD_BUILD_TESTS "BUILD TESTS" OFF) -if (MSVC) - option(ZSTD_USE_STATIC_RUNTIME "LINK TO STATIC RUN-TIME LIBRARIES" OFF) -endif () - #----------------------------------------------------------------------------- -# External dependencies -#----------------------------------------------------------------------------- -if (ZSTD_MULTITHREAD_SUPPORT AND UNIX) - set(THREADS_PREFER_PTHREAD_FLAG ON) - find_package(Threads REQUIRED) - if(CMAKE_USE_PTHREADS_INIT) - set(THREADS_LIBS "${CMAKE_THREAD_LIBS_INIT}") - else() - message(SEND_ERROR "ZSTD currently does not support thread libraries other than pthreads") - endif() -endif () +# Configure compilation flags +#----------------------------------------------------------------------------- +include(AddZstdCompilationFlags) +ADD_ZSTD_COMPILATION_FLAGS(ON ZSTD_ENABLE_CXX ON) #----------------------------------------------------------------------------- -# Add source directories +# Configure dependencies #----------------------------------------------------------------------------- -add_subdirectory(lib) - -if (ZSTD_BUILD_PROGRAMS) - if (NOT ZSTD_BUILD_STATIC) - message(SEND_ERROR "You need to build static library to build zstd CLI") - endif () - - add_subdirectory(programs) -endif () - -if (ZSTD_BUILD_TESTS) - if (NOT ZSTD_BUILD_STATIC) - message(SEND_ERROR "You need to build static library to build tests") - endif () +include(ZstdDependencies) - add_subdirectory(tests) -endif () - -if (ZSTD_BUILD_CONTRIB) - 
add_subdirectory(contrib) -endif () +#----------------------------------------------------------------------------- +# Configure build targets +#----------------------------------------------------------------------------- +include(ZstdBuild) #----------------------------------------------------------------------------- -# Add clean-all target +# Configure package generation #----------------------------------------------------------------------------- -add_custom_target(clean-all - COMMAND ${CMAKE_BUILD_TOOL} clean - COMMAND rm -rf ${CMAKE_BINARY_DIR}/ -) +include(ZstdPackage) diff --git a/build/cmake/CMakeModules/AddZstdCompilationFlags.cmake b/build/cmake/CMakeModules/AddZstdCompilationFlags.cmake index 6cdf2b3afb0..3e7bcce5974 100644 --- a/build/cmake/CMakeModules/AddZstdCompilationFlags.cmake +++ b/build/cmake/CMakeModules/AddZstdCompilationFlags.cmake @@ -1,7 +1,18 @@ -include(CheckCXXCompilerFlag) include(CheckCCompilerFlag) +if(CMAKE_CXX_COMPILER) + include(CheckCXXCompilerFlag) +endif() -function(EnableCompilerFlag _flag _C _CXX) +if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.18) + set(ZSTD_HAVE_CHECK_LINKER_FLAG true) +else () + set(ZSTD_HAVE_CHECK_LINKER_FLAG false) +endif () +if (ZSTD_HAVE_CHECK_LINKER_FLAG) + include(CheckLinkerFlag) +endif() + +function(EnableCompilerFlag _flag _C _CXX _LD) string(REGEX REPLACE "\\+" "PLUS" varname "${_flag}") string(REGEX REPLACE "[^A-Za-z0-9]+" "_" varname "${varname}") string(REGEX REPLACE "^_+" "" varname "${varname}") @@ -12,37 +23,94 @@ function(EnableCompilerFlag _flag _C _CXX) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_flag}" PARENT_SCOPE) endif () endif () - if (_CXX) + if (_CXX AND CMAKE_CXX_COMPILER) CHECK_CXX_COMPILER_FLAG(${_flag} CXX_FLAG_${varname}) if (CXX_FLAG_${varname}) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${_flag}" PARENT_SCOPE) endif () endif () + if (_LD) + # We never add a linker flag with CMake < 3.18. We will + # implement CHECK_LINKER_FLAG() like feature for CMake < 3.18 + # or require CMake >= 3.18 when we need to add a required + # linker flag in future. + # + # We also skip linker flags check for MSVC compilers (which includes + # clang-cl) since currently check_linker_flag() doesn't give correct + # results for this configuration, + # see: https://gitlab.kitware.com/cmake/cmake/-/issues/22023 + if (ZSTD_HAVE_CHECK_LINKER_FLAG AND NOT MSVC) + CHECK_LINKER_FLAG(C ${_flag} LD_FLAG_${varname}) + else () + set(LD_FLAG_${varname} false) + endif () + if (LD_FLAG_${varname}) + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${_flag}" PARENT_SCOPE) + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${_flag}" PARENT_SCOPE) + endif () + endif () endfunction() -macro(ADD_ZSTD_COMPILATION_FLAGS) +macro(ADD_ZSTD_COMPILATION_FLAGS _C _CXX _LD) + # We set ZSTD_HAS_NOEXECSTACK if we are certain we've set all the required + # compiler flags to mark the stack as non-executable. + set(ZSTD_HAS_NOEXECSTACK false) + if (CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang" OR MINGW) #Not only UNIX but also WIN32 for MinGW - #Set c++11 by default - EnableCompilerFlag("-std=c++11" false true) - #Set c99 by default - EnableCompilerFlag("-std=c99" true false) - EnableCompilerFlag("-Wall" true true) - EnableCompilerFlag("-Wextra" true true) - EnableCompilerFlag("-Wundef" true true) - EnableCompilerFlag("-Wshadow" true true) - EnableCompilerFlag("-Wcast-align" true true) - EnableCompilerFlag("-Wcast-qual" true true) - EnableCompilerFlag("-Wstrict-prototypes" true false) + # It's possible to select the exact standard used for compilation. 
+ # It's not necessary, but can be employed for specific purposes. + # Note that zstd source code is compatible with both C++98 and above + # and C-gnu90 (c90 + long long + variadic macros ) and above + # EnableCompilerFlag("-std=c++11" false true) # Set C++ compilation to c++11 standard + # EnableCompilerFlag("-std=c99" true false) # Set C compilation to c99 standard + if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND MSVC) + # clang-cl normally maps -Wall to -Weverything. + EnableCompilerFlag("/clang:-Wall" _C _CXX false) + else () + EnableCompilerFlag("-Wall" _C _CXX false) + endif () + EnableCompilerFlag("-Wextra" _C _CXX false) + EnableCompilerFlag("-Wundef" _C _CXX false) + EnableCompilerFlag("-Wshadow" _C _CXX false) + EnableCompilerFlag("-Wcast-align" _C _CXX false) + EnableCompilerFlag("-Wcast-qual" _C _CXX false) + EnableCompilerFlag("-Wstrict-prototypes" _C false false) + # Enable asserts in Debug mode + if (CMAKE_BUILD_TYPE MATCHES "Debug") + EnableCompilerFlag("-DDEBUGLEVEL=1" _C _CXX false) + endif () + # Add noexecstack flags + # LDFLAGS + EnableCompilerFlag("-Wl,-z,noexecstack" false false _LD) + # CFLAGS & CXXFLAGS + EnableCompilerFlag("-Qunused-arguments" _C _CXX false) + EnableCompilerFlag("-Wa,--noexecstack" _C _CXX false) + # NOTE: Using 3 nested ifs because the variables are sometimes + # empty if the condition is false, and sometimes equal to false. + # This implicitly converts them to truthy values. There may be + # a better way to do this, but this reliably works. + if (${LD_FLAG_WL_Z_NOEXECSTACK}) + if (${C_FLAG_WA_NOEXECSTACK}) + if (${CXX_FLAG_WA_NOEXECSTACK}) + # We've succeeded in marking the stack as non-executable + set(ZSTD_HAS_NOEXECSTACK true) + endif() + endif() + endif() elseif (MSVC) # Add specific compilation flags for Windows Visual set(ACTIVATE_MULTITHREADED_COMPILATION "ON" CACHE BOOL "activate multi-threaded compilation (/MP flag)") if (CMAKE_GENERATOR MATCHES "Visual Studio" AND ACTIVATE_MULTITHREADED_COMPILATION) - EnableCompilerFlag("/MP" true true) + EnableCompilerFlag("/MP" _C _CXX false) endif () - + # UNICODE SUPPORT - EnableCompilerFlag("/D_UNICODE" true true) - EnableCompilerFlag("/DUNICODE" true true) + EnableCompilerFlag("/D_UNICODE" _C _CXX false) + EnableCompilerFlag("/DUNICODE" _C _CXX false) + # Enable asserts in Debug mode + if (CMAKE_BUILD_TYPE MATCHES "Debug") + EnableCompilerFlag("/DDEBUGLEVEL=1" _C _CXX false) + endif () endif () # Remove duplicates compilation flags @@ -52,7 +120,6 @@ macro(ADD_ZSTD_COMPILATION_FLAGS) CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) if( ${flag_var} ) separate_arguments(${flag_var}) - list(REMOVE_DUPLICATES ${flag_var}) string(REPLACE ";" " " ${flag_var} "${${flag_var}}") endif() endforeach () diff --git a/build/cmake/CMakeModules/FindLibLZ4.cmake b/build/cmake/CMakeModules/FindLibLZ4.cmake new file mode 100644 index 00000000000..d0fac06da1b --- /dev/null +++ b/build/cmake/CMakeModules/FindLibLZ4.cmake @@ -0,0 +1,49 @@ +# Find LibLZ4 +# +# Find LibLZ4 headers and library +# +# Result Variables +# +# LIBLZ4_FOUND - True if lz4 is found +# LIBLZ4_INCLUDE_DIRS - lz4 headers directories +# LIBLZ4_LIBRARIES - lz4 libraries +# LIBLZ4_VERSION_MAJOR - The major version of lz4 +# LIBLZ4_VERSION_MINOR - The minor version of lz4 +# LIBLZ4_VERSION_RELEASE - The release version of lz4 +# LIBLZ4_VERSION_STRING - version number string (e.g. 
1.8.3) +# +# Hints +# +# Set ``LZ4_ROOT_DIR`` to the directory of lz4.h and lz4 library + +set(_LIBLZ4_ROOT_HINTS + ENV LZ4_ROOT_DIR) + +find_path( LIBLZ4_INCLUDE_DIR lz4.h + HINTS ${_LIBLZ4_ROOT_HINTS}) +find_library( LIBLZ4_LIBRARY NAMES lz4 liblz4 liblz4_static + HINTS ${_LIBLZ4_ROOT_HINTS}) + +if(LIBLZ4_INCLUDE_DIR) + file(STRINGS "${LIBLZ4_INCLUDE_DIR}/lz4.h" LIBLZ4_HEADER_CONTENT REGEX "#define LZ4_VERSION_[A-Z]+ +[0-9]+") + + string(REGEX REPLACE ".*#define LZ4_VERSION_MAJOR +([0-9]+).*" "\\1" LIBLZ4_VERSION_MAJOR "${LIBLZ4_HEADER_CONTENT}") + string(REGEX REPLACE ".*#define LZ4_VERSION_MINOR +([0-9]+).*" "\\1" LIBLZ4_VERSION_MINOR "${LIBLZ4_HEADER_CONTENT}") + string(REGEX REPLACE ".*#define LZ4_VERSION_RELEASE +([0-9]+).*" "\\1" LIBLZ4_VERSION_RELEASE "${LIBLZ4_HEADER_CONTENT}") + + set(LIBLZ4_VERSION_STRING "${LIBLZ4_VERSION_MAJOR}.${LIBLZ4_VERSION_MINOR}.${LIBLZ4_VERSION_RELEASE}") + unset(LIBLZ4_HEADER_CONTENT) +endif() + +include(FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(LibLZ4 REQUIRED_VARS LIBLZ4_INCLUDE_DIR + LIBLZ4_LIBRARY + VERSION_VAR LIBLZ4_VERSION_STRING + FAIL_MESSAGE "Could NOT find LZ4, try to set the paths to lz4.h and lz4 library in environment variable LZ4_ROOT_DIR") + +if (LIBLZ4_FOUND) + set(LIBLZ4_LIBRARIES ${LIBLZ4_LIBRARY}) + set(LIBLZ4_INCLUDE_DIRS ${LIBLZ4_INCLUDE_DIR}) +endif () + +mark_as_advanced( LIBLZ4_INCLUDE_DIR LIBLZ4_LIBRARY ) diff --git a/build/cmake/CMakeModules/JoinPaths.cmake b/build/cmake/CMakeModules/JoinPaths.cmake new file mode 100644 index 00000000000..c68d91b84db --- /dev/null +++ b/build/cmake/CMakeModules/JoinPaths.cmake @@ -0,0 +1,23 @@ +# This module provides function for joining paths +# known from most languages +# +# SPDX-License-Identifier: (MIT OR CC0-1.0) +# Copyright 2020 Jan Tojnar +# https://github.com/jtojnar/cmake-snips +# +# Modelled after Python’s os.path.join +# https://docs.python.org/3.7/library/os.path.html#os.path.join +# Windows not supported +function(join_paths joined_path first_path_segment) + set(temp_path "${first_path_segment}") + foreach(current_segment IN LISTS ARGN) + if(NOT ("${current_segment}" STREQUAL "")) + if(IS_ABSOLUTE "${current_segment}") + set(temp_path "${current_segment}") + else() + set(temp_path "${temp_path}/${current_segment}") + endif() + endif() + endforeach() + set(${joined_path} "${temp_path}" PARENT_SCOPE) +endfunction() diff --git a/build/cmake/CMakeModules/ZstdBuild.cmake b/build/cmake/CMakeModules/ZstdBuild.cmake new file mode 100644 index 00000000000..ada44a966ba --- /dev/null +++ b/build/cmake/CMakeModules/ZstdBuild.cmake @@ -0,0 +1,42 @@ +# ################################################################ +# ZSTD Build Targets Configuration +# ################################################################ + +# Always build the library first (this defines ZSTD_BUILD_STATIC/SHARED options) +add_subdirectory(lib) + +# Validate build configuration after lib options are defined +if(ZSTD_BUILD_PROGRAMS) + if(NOT ZSTD_BUILD_STATIC AND NOT ZSTD_PROGRAMS_LINK_SHARED) + message(SEND_ERROR "Static library required to build zstd CLI programs") + elseif(NOT ZSTD_BUILD_SHARED AND ZSTD_PROGRAMS_LINK_SHARED) + message(SEND_ERROR "Shared library required to build zstd CLI programs") + endif() +endif() + +if(ZSTD_BUILD_TESTS AND NOT ZSTD_BUILD_STATIC) + message(SEND_ERROR "Static library required to build test suite") +endif() + +# Add programs if requested +if(ZSTD_BUILD_PROGRAMS) + add_subdirectory(programs) +endif() + +# Add tests if requested 
+if(ZSTD_BUILD_TESTS) + enable_testing() + add_subdirectory(tests) +endif() + +# Add contrib utilities if requested +if(ZSTD_BUILD_CONTRIB) + add_subdirectory(contrib) +endif() + +# Clean-all target for thorough cleanup +add_custom_target(clean-all + COMMAND ${CMAKE_BUILD_TOOL} clean + COMMAND ${CMAKE_COMMAND} -E remove_directory ${CMAKE_BINARY_DIR}/ + COMMENT "Performing complete clean including build directory" +) diff --git a/build/cmake/CMakeModules/ZstdDependencies.cmake b/build/cmake/CMakeModules/ZstdDependencies.cmake new file mode 100644 index 00000000000..4e90c5f1b98 --- /dev/null +++ b/build/cmake/CMakeModules/ZstdDependencies.cmake @@ -0,0 +1,30 @@ +# ################################################################ +# ZSTD Dependencies Configuration +# ################################################################ + +# Function to handle HP-UX thread configuration +function(setup_hpux_threads) + find_package(Threads) + if(NOT Threads_FOUND) + set(CMAKE_USE_PTHREADS_INIT 1 PARENT_SCOPE) + set(CMAKE_THREAD_LIBS_INIT -lpthread PARENT_SCOPE) + set(CMAKE_HAVE_THREADS_LIBRARY 1 PARENT_SCOPE) + set(Threads_FOUND TRUE PARENT_SCOPE) + endif() +endfunction() + +# Configure threading support +if(ZSTD_MULTITHREAD_SUPPORT AND UNIX) + if(CMAKE_SYSTEM_NAME MATCHES "HP-UX") + setup_hpux_threads() + else() + set(THREADS_PREFER_PTHREAD_FLAG ON) + find_package(Threads REQUIRED) + endif() + + if(CMAKE_USE_PTHREADS_INIT) + set(THREADS_LIBS "${CMAKE_THREAD_LIBS_INIT}") + else() + message(SEND_ERROR "ZSTD currently does not support thread libraries other than pthreads") + endif() +endif() diff --git a/build/cmake/CMakeModules/ZstdOptions.cmake b/build/cmake/CMakeModules/ZstdOptions.cmake new file mode 100644 index 00000000000..3fca543a78e --- /dev/null +++ b/build/cmake/CMakeModules/ZstdOptions.cmake @@ -0,0 +1,68 @@ +# ################################################################ +# ZSTD Build Options Configuration +# ################################################################ + +# Legacy support configuration +option(ZSTD_LEGACY_SUPPORT "Enable legacy format support" ON) + +if(ZSTD_LEGACY_SUPPORT) + message(STATUS "ZSTD_LEGACY_SUPPORT enabled") + set(ZSTD_LEGACY_LEVEL 5 CACHE STRING "Legacy support level") + add_definitions(-DZSTD_LEGACY_SUPPORT=${ZSTD_LEGACY_LEVEL}) +else() + message(STATUS "ZSTD_LEGACY_SUPPORT disabled") + add_definitions(-DZSTD_LEGACY_SUPPORT=0) +endif() + +# Platform-specific options +if(APPLE) + option(ZSTD_FRAMEWORK "Build as Apple Framework" OFF) +endif() + +# Android-specific configuration +if(ANDROID) + set(ZSTD_MULTITHREAD_SUPPORT_DEFAULT OFF) + # Handle old Android API levels + if((NOT ANDROID_PLATFORM_LEVEL) OR (ANDROID_PLATFORM_LEVEL VERSION_LESS 24)) + message(STATUS "Configuring for old Android API - disabling fseeko/ftello") + add_compile_definitions(LIBC_NO_FSEEKO) + endif() +else() + set(ZSTD_MULTITHREAD_SUPPORT_DEFAULT ON) +endif() + +# Multi-threading support +option(ZSTD_MULTITHREAD_SUPPORT "Enable multi-threading support" ${ZSTD_MULTITHREAD_SUPPORT_DEFAULT}) + +if(ZSTD_MULTITHREAD_SUPPORT) + message(STATUS "Multi-threading support enabled") +else() + message(STATUS "Multi-threading support disabled") +endif() + +# Build component options +option(ZSTD_BUILD_PROGRAMS "Build command-line programs" ON) +option(ZSTD_BUILD_CONTRIB "Build contrib utilities" OFF) +option(ZSTD_PROGRAMS_LINK_SHARED "Link programs against shared library" OFF) + +# Test configuration +if(BUILD_TESTING) + set(ZSTD_BUILD_TESTS_default ON) +else() + set(ZSTD_BUILD_TESTS_default OFF) 
+endif() +option(ZSTD_BUILD_TESTS "Build test suite" ${ZSTD_BUILD_TESTS_default}) + +# MSVC-specific options +if(MSVC) + option(ZSTD_USE_STATIC_RUNTIME "Link to static runtime libraries" OFF) +endif() + +# C++ support (needed for tests) +set(ZSTD_ENABLE_CXX ${ZSTD_BUILD_TESTS}) +if(ZSTD_ENABLE_CXX) + enable_language(CXX) +endif() + +# Set global definitions +add_definitions(-DXXH_NAMESPACE=ZSTD_) diff --git a/build/cmake/CMakeModules/ZstdPackage.cmake b/build/cmake/CMakeModules/ZstdPackage.cmake new file mode 100644 index 00000000000..5e40dd2c1fd --- /dev/null +++ b/build/cmake/CMakeModules/ZstdPackage.cmake @@ -0,0 +1,42 @@ +# ################################################################ +# ZSTD Package Configuration +# ################################################################ + +include(CMakePackageConfigHelpers) + +# Generate version file +write_basic_package_version_file( + "${CMAKE_CURRENT_BINARY_DIR}/zstdConfigVersion.cmake" + VERSION ${zstd_VERSION} + COMPATIBILITY SameMajorVersion +) + +# Export targets for build directory +export(EXPORT zstdExports + FILE "${CMAKE_CURRENT_BINARY_DIR}/zstdTargets.cmake" + NAMESPACE zstd:: +) + +# Configure package for installation +set(ConfigPackageLocation ${CMAKE_INSTALL_LIBDIR}/cmake/zstd) + +# Install exported targets +install(EXPORT zstdExports + FILE zstdTargets.cmake + NAMESPACE zstd:: + DESTINATION ${ConfigPackageLocation} +) + +# Configure and install package config file +configure_package_config_file( + zstdConfig.cmake.in + "${CMAKE_CURRENT_BINARY_DIR}/zstdConfig.cmake" + INSTALL_DESTINATION ${ConfigPackageLocation} +) + +# Install config files +install(FILES + "${CMAKE_CURRENT_BINARY_DIR}/zstdConfig.cmake" + "${CMAKE_CURRENT_BINARY_DIR}/zstdConfigVersion.cmake" + DESTINATION ${ConfigPackageLocation} +) diff --git a/build/cmake/CMakeModules/ZstdVersion.cmake b/build/cmake/CMakeModules/ZstdVersion.cmake new file mode 100644 index 00000000000..fceb0ec0c23 --- /dev/null +++ b/build/cmake/CMakeModules/ZstdVersion.cmake @@ -0,0 +1,31 @@ +# ################################################################ +# ZSTD Version Configuration +# ################################################################ + +# Setup CMake policy version +set(ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION "3") +set(ZSTD_MAX_VALIDATED_CMAKE_MINOR_VERSION "13") + +# Determine appropriate policy version +if("${ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION}" EQUAL "${CMAKE_MAJOR_VERSION}" AND + "${ZSTD_MAX_VALIDATED_CMAKE_MINOR_VERSION}" GREATER "${CMAKE_MINOR_VERSION}") + set(ZSTD_CMAKE_POLICY_VERSION "${CMAKE_VERSION}") +else() + set(ZSTD_CMAKE_POLICY_VERSION "${ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION}.${ZSTD_MAX_VALIDATED_CMAKE_MINOR_VERSION}.0") +endif() + +cmake_policy(VERSION ${ZSTD_CMAKE_POLICY_VERSION}) + +# Parse version from header file +include(GetZstdLibraryVersion) +GetZstdLibraryVersion(${LIBRARY_DIR}/zstd.h zstd_VERSION_MAJOR zstd_VERSION_MINOR zstd_VERSION_PATCH) + +# Set version variables +set(ZSTD_SHORT_VERSION "${zstd_VERSION_MAJOR}.${zstd_VERSION_MINOR}") +set(ZSTD_FULL_VERSION "${zstd_VERSION_MAJOR}.${zstd_VERSION_MINOR}.${zstd_VERSION_PATCH}") + +# Project metadata +set(zstd_HOMEPAGE_URL "https://facebook.github.io/zstd") +set(zstd_DESCRIPTION "Zstandard is a real-time compression algorithm, providing high compression ratios.") + +message(STATUS "ZSTD VERSION: ${zstd_VERSION_MAJOR}.${zstd_VERSION_MINOR}.${zstd_VERSION_PATCH}") diff --git a/build/cmake/README.md b/build/cmake/README.md index 681b14cef39..41c8bbb1a65 100644 --- a/build/cmake/README.md +++ 
b/build/cmake/README.md
@@ -1,13 +1,103 @@
 # Cmake contributions
 
 Contributions to the cmake build configurations are welcome. Please
-use case sensitivity that matches modern (ie. cmake version 2.6 and above)
+use case sensitivity that matches modern (i.e. cmake version 2.6 and above)
 conventions of using lower-case for commands, and upper-case for variables.
 
-# CMake Style Recommendations
+## How to build
 
-## Indent all code correctly, i.e. the body of
+You can configure the project from the repository root thanks to the forwarding
+`CMakeLists.txt`:
+```sh
+cmake -S . -B build-cmake
+cmake --build build-cmake
+```
+The historical workflow that starts configuration from `build/cmake` continues
+to work as described below.
+
+As cmake doesn't support a command like `cmake clean`, it's recommended to perform an "out-of-source build".
+To do this, you can create a new directory and build in it:
+```sh
+cd build/cmake
+mkdir builddir
+cd builddir
+cmake ..
+make
+```
+Then you can clean all cmake caches by simply deleting the new directory:
+```sh
+rm -rf build/cmake/builddir
+```
+
+And of course, you can directly build in build/cmake:
+```sh
+cd build/cmake
+cmake .
+make
+```
+
+To show the available cmake build options, run:
+```sh
+cd build/cmake/builddir
+cmake -LH ..
+```
+
+Boolean options can be set to `ON`/`OFF` with `-D[option]=[ON/OFF]`. You can configure cmake options like this:
+```sh
+cd build/cmake/builddir
+cmake -DZSTD_BUILD_TESTS=ON -DZSTD_LEGACY_SUPPORT=OFF ..
+make
+```
+
+**Apple Frameworks**
+It's generally recommended to use a CMake version higher than 3.14 for [iOS-derived platforms](https://cmake.org/cmake/help/latest/manual/cmake-toolchains.7.html#id27).
+```sh
+cmake -S. -B build-cmake -DZSTD_FRAMEWORK=ON -DCMAKE_SYSTEM_NAME=iOS
+```
+Alternatively, you can use the [iOS-CMake](https://github.com/leetal/ios-cmake) toolchain for CMake versions lower than 3.14:
+```sh
+cmake -B build -G Xcode -DCMAKE_TOOLCHAIN_FILE=<toolchain-file> -DPLATFORM=OS64 -DZSTD_FRAMEWORK=ON
+```
+
+### How to use it with CMake FetchContent
+
+All the build options described above are available here as well; set them before making the content available, for example:
+```cmake
+include(FetchContent)
+
+set(ZSTD_BUILD_STATIC ON)
+set(ZSTD_BUILD_SHARED OFF)
+
+FetchContent_Declare(
+    zstd
+    URL "https://github.com/facebook/zstd/releases/download/v1.5.5/zstd-1.5.5.tar.gz"
+    DOWNLOAD_EXTRACT_TIMESTAMP TRUE
+    SOURCE_SUBDIR build/cmake
+)
+
+FetchContent_MakeAvailable(zstd)
+
+target_link_libraries(
+    ${PROJECT_NAME}
+    PRIVATE
+    libzstd_static
+)
+
+# On Windows and macOS this is needed
+target_include_directories(
+    ${PROJECT_NAME}
+    PRIVATE
+    ${zstd_SOURCE_DIR}/lib
+)
+```
+
+### References
+[Looking for a 'cmake clean' command to clear up CMake output](https://stackoverflow.com/questions/9680420/looking-for-a-cmake-clean-command-to-clear-up-cmake-output)
+
+## CMake Style Recommendations
+
+### Indent all code correctly, i.e. the body of
 
 * if/else/endif
 * foreach/endforeach
 
@@ -18,7 +108,7 @@ variables.
 Use spaces for indenting, 2, 3 or 4 spaces preferably. Use the same amount of
 spaces for indenting as is used in the rest of the file. Do not use tabs.
 
-## Upper/lower casing
+### Upper/lower casing
 
 Most important: use consistent upper- or lowercasing within one file !
 
@@ -38,7 +128,7 @@ Add_Executable(hello hello.c)
 aDd_ExEcUtAbLe(blub blub.c)
 ```
 
-## End commands
+### End commands
 
 To make the code easier to read, use empty commands for endforeach(), endif(),
 endfunction(), endmacro() and endwhile(). Also, use empty else() commands.
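To make the recommendation concrete, here is a minimal sketch of the preferred form with empty end commands (`FOOVAR`, `some_command`, and `another_command` are hypothetical placeholders):

```cmake
# Empty closers: the reader doesn't have to re-check the condition
if(FOOVAR)
  some_command(arg)
else()
  another_command(arg)
endif()
```

The discouraged form, which repeats the condition inside `endif()`, appears in the hunk context just below.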
@@ -60,6 +150,6 @@ if(BARVAR) endif(BARVAR) ``` -## Other resources for best practices +### Other resources for best practices -`https://cmake.org/cmake/help/latest/manual/cmake-developer.7.html#modules` +https://cmake.org/cmake/help/latest/manual/cmake-developer.7.html#modules diff --git a/build/cmake/contrib/CMakeLists.txt b/build/cmake/contrib/CMakeLists.txt index f7631d08ca6..8df2a17b3a8 100644 --- a/build/cmake/contrib/CMakeLists.txt +++ b/build/cmake/contrib/CMakeLists.txt @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2016-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/build/cmake/contrib/gen_html/CMakeLists.txt b/build/cmake/contrib/gen_html/CMakeLists.txt index 8fdd61131ea..d1ff6c64bba 100644 --- a/build/cmake/contrib/gen_html/CMakeLists.txt +++ b/build/cmake/contrib/gen_html/CMakeLists.txt @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2015-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/build/cmake/contrib/pzstd/CMakeLists.txt b/build/cmake/contrib/pzstd/CMakeLists.txt index 5c30a91b139..e1c8e0672fe 100644 --- a/build/cmake/contrib/pzstd/CMakeLists.txt +++ b/build/cmake/contrib/pzstd/CMakeLists.txt @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2016-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -18,13 +18,20 @@ set(PZSTD_DIR ${ZSTD_SOURCE_DIR}/contrib/pzstd) include_directories(${PROGRAMS_DIR} ${LIBRARY_DIR} ${LIBRARY_DIR}/common ${PZSTD_DIR}) add_executable(pzstd ${PROGRAMS_DIR}/util.c ${PZSTD_DIR}/main.cpp ${PZSTD_DIR}/Options.cpp ${PZSTD_DIR}/Pzstd.cpp ${PZSTD_DIR}/SkippableFrame.cpp) +target_compile_features(pzstd PRIVATE cxx_std_11) set_property(TARGET pzstd APPEND PROPERTY COMPILE_DEFINITIONS "NDEBUG") set_property(TARGET pzstd APPEND PROPERTY COMPILE_OPTIONS "-Wno-shadow") +if (ZSTD_BUILD_SHARED) + set(ZSTD_LIB libzstd_shared) +else() + set(ZSTD_LIB libzstd_static) +endif() + set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) if (CMAKE_USE_PTHREADS_INIT) - target_link_libraries(pzstd libzstd_shared ${CMAKE_THREAD_LIBS_INIT}) + target_link_libraries(pzstd ${ZSTD_LIB} ${CMAKE_THREAD_LIBS_INIT}) else() message(SEND_ERROR "ZSTD currently does not support thread libraries other than pthreads") endif() diff --git a/build/cmake/lib/CMakeLists.txt b/build/cmake/lib/CMakeLists.txt index e415c159006..0cd02d6230c 100644 --- a/build/cmake/lib/CMakeLists.txt +++ b/build/cmake/lib/CMakeLists.txt @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2015-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -7,79 +7,76 @@ # in the COPYING file in the root directory of this source tree). 
# ################################################################ -project(libzstd) +project(libzstd C) # ASM language is conditionally enabled where supported set(CMAKE_INCLUDE_CURRENT_DIR TRUE) option(ZSTD_BUILD_STATIC "BUILD STATIC LIBRARIES" ON) option(ZSTD_BUILD_SHARED "BUILD SHARED LIBRARIES" ON) +option(ZSTD_BUILD_COMPRESSION "BUILD COMPRESSION MODULE" ON) +option(ZSTD_BUILD_DECOMPRESSION "BUILD DECOMPRESSION MODULE" ON) +option(ZSTD_BUILD_DICTBUILDER "BUILD DICTBUILDER MODULE" ON) +option(ZSTD_BUILD_DEPRECATED "BUILD DEPRECATED MODULE" OFF) + +set(ZSTDLIB_VISIBLE "" CACHE STRING "Visibility for ZSTDLIB API") +set(ZSTDERRORLIB_VISIBLE "" CACHE STRING "Visibility for ZSTDERRORLIB_VISIBLE API") +set(ZDICTLIB_VISIBLE "" CACHE STRING "Visibility for ZDICTLIB_VISIBLE API") +set(ZSTDLIB_STATIC_API "" CACHE STRING "Visibility for ZSTDLIB_STATIC_API API") +set(ZDICTLIB_STATIC_API "" CACHE STRING "Visibility for ZDICTLIB_STATIC_API API") + +set_property(CACHE ZSTDLIB_VISIBLE PROPERTY STRINGS "" "hidden" "default" "protected" "internal") +set_property(CACHE ZSTDERRORLIB_VISIBLE PROPERTY STRINGS "" "hidden" "default" "protected" "internal") +set_property(CACHE ZDICTLIB_VISIBLE PROPERTY STRINGS "" "hidden" "default" "protected" "internal") +set_property(CACHE ZSTDLIB_STATIC_API PROPERTY STRINGS "" "hidden" "default" "protected" "internal") +set_property(CACHE ZDICTLIB_STATIC_API PROPERTY STRINGS "" "hidden" "default" "protected" "internal") if(NOT ZSTD_BUILD_SHARED AND NOT ZSTD_BUILD_STATIC) message(SEND_ERROR "You need to build at least one flavor of libzstd") endif() -# Define library directory, where sources and header files are located -include_directories(${LIBRARY_DIR} ${LIBRARY_DIR}/common) - -set(Sources - ${LIBRARY_DIR}/common/entropy_common.c - ${LIBRARY_DIR}/common/fse_decompress.c - ${LIBRARY_DIR}/common/threading.c - ${LIBRARY_DIR}/common/pool.c - ${LIBRARY_DIR}/common/zstd_common.c - ${LIBRARY_DIR}/common/error_private.c - ${LIBRARY_DIR}/common/xxhash.c - ${LIBRARY_DIR}/compress/hist.c - ${LIBRARY_DIR}/compress/fse_compress.c - ${LIBRARY_DIR}/compress/huf_compress.c - ${LIBRARY_DIR}/compress/zstd_compress.c - ${LIBRARY_DIR}/compress/zstdmt_compress.c - ${LIBRARY_DIR}/compress/zstd_fast.c - ${LIBRARY_DIR}/compress/zstd_double_fast.c - ${LIBRARY_DIR}/compress/zstd_lazy.c - ${LIBRARY_DIR}/compress/zstd_opt.c - ${LIBRARY_DIR}/compress/zstd_ldm.c - ${LIBRARY_DIR}/decompress/huf_decompress.c - ${LIBRARY_DIR}/decompress/zstd_decompress.c - ${LIBRARY_DIR}/decompress/zstd_decompress_block.c - ${LIBRARY_DIR}/decompress/zstd_ddict.c - ${LIBRARY_DIR}/dictBuilder/cover.c - ${LIBRARY_DIR}/dictBuilder/fastcover.c - ${LIBRARY_DIR}/dictBuilder/divsufsort.c - ${LIBRARY_DIR}/dictBuilder/zdict.c - ${LIBRARY_DIR}/deprecated/zbuff_common.c - ${LIBRARY_DIR}/deprecated/zbuff_compress.c - ${LIBRARY_DIR}/deprecated/zbuff_decompress.c) - -set(Headers - ${LIBRARY_DIR}/zstd.h - ${LIBRARY_DIR}/common/debug.h - ${LIBRARY_DIR}/common/pool.h - ${LIBRARY_DIR}/common/threading.h - ${LIBRARY_DIR}/common/bitstream.h - ${LIBRARY_DIR}/common/error_private.h - ${LIBRARY_DIR}/common/zstd_errors.h - ${LIBRARY_DIR}/common/fse.h - ${LIBRARY_DIR}/common/huf.h - ${LIBRARY_DIR}/common/mem.h - ${LIBRARY_DIR}/common/zstd_internal.h - ${LIBRARY_DIR}/compress/hist.h - ${LIBRARY_DIR}/compress/zstd_compress_internal.h - ${LIBRARY_DIR}/compress/zstd_fast.h - ${LIBRARY_DIR}/compress/zstd_double_fast.h - ${LIBRARY_DIR}/compress/zstd_lazy.h - ${LIBRARY_DIR}/compress/zstd_opt.h - ${LIBRARY_DIR}/compress/zstd_ldm.h - 
${LIBRARY_DIR}/compress/zstdmt_compress.h - ${LIBRARY_DIR}/decompress/zstd_decompress_internal.h - ${LIBRARY_DIR}/decompress/zstd_decompress_block.h - ${LIBRARY_DIR}/decompress/zstd_ddict.h - ${LIBRARY_DIR}/dictBuilder/zdict.h - ${LIBRARY_DIR}/dictBuilder/cover.h - ${LIBRARY_DIR}/deprecated/zbuff.h) +file(GLOB CommonSources ${LIBRARY_DIR}/common/*.c) +file(GLOB CompressSources ${LIBRARY_DIR}/compress/*.c) +file(GLOB DecompressSources ${LIBRARY_DIR}/decompress/*.c) +if (MSVC) + add_compile_options(-DZSTD_DISABLE_ASM) +else () + if(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|AMD64.*|x86_64.*|X86_64.*" AND ${ZSTD_HAS_NOEXECSTACK}) + enable_language(ASM) + set(DecompressSources ${DecompressSources} ${LIBRARY_DIR}/decompress/huf_decompress_amd64.S) + else() + add_compile_options(-DZSTD_DISABLE_ASM) + endif() +endif () +file(GLOB DictBuilderSources ${LIBRARY_DIR}/dictBuilder/*.c) +file(GLOB DeprecatedSources ${LIBRARY_DIR}/deprecated/*.c) + +file(GLOB PublicHeaders ${LIBRARY_DIR}/*.h) +file(GLOB CommonHeaders ${LIBRARY_DIR}/common/*.h) +file(GLOB CompressHeaders ${LIBRARY_DIR}/compress/*.h) +file(GLOB DecompressHeaders ${LIBRARY_DIR}/decompress/*.h) +file(GLOB DictBuilderHeaders ${LIBRARY_DIR}/dictBuilder/*.h) +file(GLOB DeprecatedHeaders ${LIBRARY_DIR}/deprecated/*.h) + +set(Sources ${CommonSources}) +set(Headers ${PublicHeaders} ${CommonHeaders}) +if (ZSTD_BUILD_COMPRESSION) + set(Sources ${Sources} ${CompressSources}) + set(Headers ${Headers} ${CompressHeaders}) +endif() +if (ZSTD_BUILD_DECOMPRESSION) + set(Sources ${Sources} ${DecompressSources}) + set(Headers ${Headers} ${DecompressHeaders}) +endif() +if (ZSTD_BUILD_DICTBUILDER) + set(Sources ${Sources} ${DictBuilderSources}) + set(Headers ${Headers} ${DictBuilderHeaders}) +endif() +if (ZSTD_BUILD_DEPRECATED) + set(Sources ${Sources} ${DeprecatedSources}) + set(Headers ${Headers} ${DeprecatedHeaders}) +endif() if (ZSTD_LEGACY_SUPPORT) set(LIBRARY_LEGACY_DIR ${LIBRARY_DIR}/legacy) - include_directories(${LIBRARY_LEGACY_DIR}) set(Sources ${Sources} ${LIBRARY_LEGACY_DIR}/zstd_v01.c @@ -101,29 +98,91 @@ if (ZSTD_LEGACY_SUPPORT) ${LIBRARY_LEGACY_DIR}/zstd_v07.h) endif () -if (MSVC) +if (MSVC AND NOT (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")) set(MSVC_RESOURCE_DIR ${ZSTD_SOURCE_DIR}/build/VS2010/libzstd-dll) set(PlatformDependResources ${MSVC_RESOURCE_DIR}/libzstd-dll.rc) +else() + set(PlatformDependResources) endif () +# Explicitly set the language to C for all files, including ASM files. +# Our assembly expects to be compiled by a C compiler, and is only enabled for +# __GNUC__ compatible compilers. Otherwise all the ASM code is disabled by +# macros. 
+if(NOT CMAKE_ASM_COMPILER STREQUAL CMAKE_C_COMPILER)
+    set_source_files_properties(${Sources} PROPERTIES LANGUAGE C)
+endif()
+
+macro (add_definition target var)
+    if (NOT ("${${var}}" STREQUAL ""))
+        target_compile_definitions(${target} PUBLIC "${var}=__attribute__((visibility(\"${${var}}\")))")
+    endif ()
+endmacro ()
+
+# Define directories containing the library's public headers
+set(PUBLIC_INCLUDE_DIRS ${LIBRARY_DIR})
+set(CMAKE_RC_FLAGS "${CMAKE_RC_FLAGS} /I \"${LIBRARY_DIR}\"")
 
 # Split project to static and shared libraries build
+set(library_targets)
 if (ZSTD_BUILD_SHARED)
     add_library(libzstd_shared SHARED ${Sources} ${Headers} ${PlatformDependResources})
+    target_include_directories(libzstd_shared INTERFACE $<BUILD_INTERFACE:${PUBLIC_INCLUDE_DIRS}>)
+    list(APPEND library_targets libzstd_shared)
     if (ZSTD_MULTITHREAD_SUPPORT)
-        set_property(TARGET libzstd_shared APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_MULTITHREAD")
+        target_compile_definitions(libzstd_shared PUBLIC ZSTD_MULTITHREAD)
         if (UNIX)
             target_link_libraries(libzstd_shared ${THREADS_LIBS})
         endif ()
-    endif()
+    endif ()
+    add_definition(libzstd_shared ZSTDLIB_VISIBLE)
+    add_definition(libzstd_shared ZSTDERRORLIB_VISIBLE)
+    add_definition(libzstd_shared ZDICTLIB_VISIBLE)
 endif ()
 
 if (ZSTD_BUILD_STATIC)
     add_library(libzstd_static STATIC ${Sources} ${Headers})
+    target_include_directories(libzstd_static INTERFACE $<BUILD_INTERFACE:${PUBLIC_INCLUDE_DIRS}>)
+    list(APPEND library_targets libzstd_static)
     if (ZSTD_MULTITHREAD_SUPPORT)
-        set_property(TARGET libzstd_static APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_MULTITHREAD")
+        target_compile_definitions(libzstd_static PUBLIC ZSTD_MULTITHREAD)
         if (UNIX)
             target_link_libraries(libzstd_static ${THREADS_LIBS})
         endif ()
     endif ()
+    add_definition(libzstd_static ZSTDLIB_VISIBLE)
+    add_definition(libzstd_static ZSTDERRORLIB_VISIBLE)
+    add_definition(libzstd_static ZDICTLIB_VISIBLE)
+    add_definition(libzstd_static ZSTDLIB_STATIC_API)
+    add_definition(libzstd_static ZDICTLIB_STATIC_API)
+endif ()
+if (ZSTD_BUILD_SHARED AND NOT ZSTD_BUILD_STATIC)
+    if (NOT BUILD_SHARED_LIBS)
+        message(WARNING "BUILD_SHARED_LIBS is OFF, but ZSTD_BUILD_SHARED is ON and ZSTD_BUILD_STATIC is OFF, which takes precedence, so libzstd is a shared library")
+    endif ()
+    add_library(libzstd INTERFACE)
+    target_link_libraries(libzstd INTERFACE libzstd_shared)
+    list(APPEND library_targets libzstd)
+endif ()
+if (ZSTD_BUILD_STATIC AND NOT ZSTD_BUILD_SHARED)
+    if (BUILD_SHARED_LIBS)
+        message(WARNING "BUILD_SHARED_LIBS is ON, but ZSTD_BUILD_SHARED is OFF and ZSTD_BUILD_STATIC is ON, which takes precedence, so libzstd is a static library")
+    endif ()
+    add_library(libzstd INTERFACE)
+    target_link_libraries(libzstd INTERFACE libzstd_static)
+    list(APPEND library_targets libzstd)
+endif ()
+if (ZSTD_BUILD_SHARED AND ZSTD_BUILD_STATIC)
+    # If both ZSTD_BUILD_SHARED and ZSTD_BUILD_STATIC are set, which is the
+    # default, fallback to using BUILD_SHARED_LIBS to determine whether to
+    # set libzstd to static or shared.
+ if (BUILD_SHARED_LIBS) + add_library(libzstd INTERFACE) + target_link_libraries(libzstd INTERFACE libzstd_shared) + list(APPEND library_targets libzstd) + else () + add_library(libzstd INTERFACE) + target_link_libraries(libzstd INTERFACE libzstd_static) + list(APPEND library_targets libzstd) + endif () endif () # Add specific compile definitions for MSVC project @@ -137,7 +196,7 @@ if (MSVC) endif () # With MSVC static library needs to be renamed to avoid conflict with import library -if (MSVC) +if (MSVC OR (WIN32 AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND NOT MINGW)) set(STATIC_LIBRARY_BASE_NAME zstd_static) else () set(STATIC_LIBRARY_BASE_NAME zstd) @@ -149,49 +208,83 @@ if (ZSTD_BUILD_SHARED) libzstd_shared PROPERTIES OUTPUT_NAME zstd - VERSION ${zstd_VERSION_MAJOR}.${zstd_VERSION_MINOR}.${zstd_VERSION_PATCH} + VERSION ${ZSTD_FULL_VERSION} SOVERSION ${zstd_VERSION_MAJOR}) + + if (ZSTD_FRAMEWORK) + set_target_properties( + libzstd_shared + PROPERTIES + FRAMEWORK TRUE + FRAMEWORK_VERSION "${ZSTD_FULL_VERSION}" + PRODUCT_BUNDLE_IDENTIFIER "github.com/facebook/zstd" + XCODE_ATTRIBUTE_INSTALL_PATH "@rpath" + PUBLIC_HEADER "${PublicHeaders}" + OUTPUT_NAME "zstd" + XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "" + XCODE_ATTRIBUTE_CODE_SIGNING_ALLOWED "NO" + XCODE_ATTRIBUTE_CODE_SIGNING_REQUIRED "NO" + MACOSX_FRAMEWORK_IDENTIFIER "github.com/facebook/zstd" + MACOSX_FRAMEWORK_BUNDLE_VERSION "${ZSTD_FULL_VERSION}" + MACOSX_FRAMEWORK_SHORT_VERSION_STRING "${ZSTD_SHORT_VERSION}" + MACOSX_RPATH TRUE + RESOURCE ${PublicHeaders}) + endif () endif () if (ZSTD_BUILD_STATIC) set_target_properties( libzstd_static PROPERTIES + POSITION_INDEPENDENT_CODE On OUTPUT_NAME ${STATIC_LIBRARY_BASE_NAME}) -endif () -if (UNIX) - # pkg-config - set(PREFIX "${CMAKE_INSTALL_PREFIX}") - set(LIBDIR "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") - set(INCLUDEDIR "${CMAKE_INSTALL_PREFIX}/include") - set(VERSION "${zstd_VERSION_MAJOR}.${zstd_VERSION_MINOR}.${zstd_VERSION_PATCH}") - add_custom_target(libzstd.pc ALL - ${CMAKE_COMMAND} -DIN="${LIBRARY_DIR}/libzstd.pc.in" -DOUT="libzstd.pc" - -DPREFIX="${PREFIX}" -DLIBDIR="${LIBDIR}" -DINCLUDEDIR="${INCLUDEDIR}" -DVERSION="${VERSION}" - -P "${CMAKE_CURRENT_SOURCE_DIR}/pkgconfig.cmake" - COMMENT "Creating pkg-config file") - - install(FILES "${CMAKE_CURRENT_BINARY_DIR}/libzstd.pc" DESTINATION "${LIBDIR}/pkgconfig") + if (ZSTD_FRAMEWORK) + set_target_properties( + libzstd_static + PROPERTIES + FRAMEWORK TRUE + FRAMEWORK_VERSION "${ZSTD_FULL_VERSION}" + PRODUCT_BUNDLE_IDENTIFIER "github.com/facebook/zstd/${STATIC_LIBRARY_BASE_NAME}" + XCODE_ATTRIBUTE_INSTALL_PATH "@rpath" + PUBLIC_HEADER "${PublicHeaders}" + OUTPUT_NAME "${STATIC_LIBRARY_BASE_NAME}" + XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "" + XCODE_ATTRIBUTE_CODE_SIGNING_ALLOWED "NO" + XCODE_ATTRIBUTE_CODE_SIGNING_REQUIRED "NO" + MACOSX_FRAMEWORK_IDENTIFIER "github.com/facebook/zstd/${STATIC_LIBRARY_BASE_NAME}" + MACOSX_FRAMEWORK_BUNDLE_VERSION "${ZSTD_FULL_VERSION}" + MACOSX_FRAMEWORK_SHORT_VERSION_STRING "${ZSTD_SHORT_VERSION}" + MACOSX_RPATH TRUE + RESOURCE ${PublicHeaders}) + endif () endif () +# pkg-config +include(JoinPaths) # can be replaced by cmake_path(APPEND) in CMake 3.20 +set(PREFIX "${CMAKE_INSTALL_PREFIX}") +set(EXEC_PREFIX "\${prefix}") +join_paths(LIBDIR "\${exec_prefix}" "${CMAKE_INSTALL_LIBDIR}") +join_paths(INCLUDEDIR "\${prefix}" "${CMAKE_INSTALL_INCLUDEDIR}") +set(LIBS_PRIVATE "${THREADS_LIBS}") +set(VERSION "${zstd_VERSION}") + 
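A side note on the `join_paths()` helper used just above (defined in `JoinPaths.cmake` earlier in this patch): relative segments are appended with `/`, while an absolute segment discards everything accumulated so far, and the escaped `\${exec_prefix}` is kept as a literal string so pkg-config can expand it later. A minimal sketch of the behavior, with hypothetical values:

```cmake
join_paths(LIBDIR "\${exec_prefix}" "lib")       # LIBDIR is the literal string "${exec_prefix}/lib"
join_paths(LIBDIR "\${exec_prefix}" "/usr/lib")  # absolute segment wins: LIBDIR is "/usr/lib"
```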
+configure_file("${LIBRARY_DIR}/libzstd.pc.in" "${CMAKE_CURRENT_BINARY_DIR}/libzstd.pc" @ONLY) +install(FILES "${CMAKE_CURRENT_BINARY_DIR}/libzstd.pc" DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") + # install target -install(FILES - ${LIBRARY_DIR}/zstd.h - ${LIBRARY_DIR}/deprecated/zbuff.h - ${LIBRARY_DIR}/dictBuilder/zdict.h - ${LIBRARY_DIR}/dictBuilder/cover.h - ${LIBRARY_DIR}/common/zstd_errors.h - DESTINATION "include") +install(FILES ${PublicHeaders} DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}") -if (ZSTD_BUILD_SHARED) - install(TARGETS libzstd_shared RUNTIME DESTINATION "bin" - LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}" - ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}") -endif() -if (ZSTD_BUILD_STATIC) - install(TARGETS libzstd_static ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}") -endif () +install(TARGETS ${library_targets} + EXPORT zstdExports + INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}" + ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" + LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}" + RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}" + BUNDLE DESTINATION "${CMAKE_INSTALL_BINDIR}" + FRAMEWORK DESTINATION "${CMAKE_INSTALL_LIBDIR}" COMPONENT runtime OPTIONAL + PUBLIC_HEADER DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}" + ) # uninstall target if (NOT TARGET uninstall) diff --git a/build/cmake/lib/pkgconfig.cmake b/build/cmake/lib/pkgconfig.cmake deleted file mode 100644 index 8f805a19703..00000000000 --- a/build/cmake/lib/pkgconfig.cmake +++ /dev/null @@ -1 +0,0 @@ -configure_file("${IN}" "${OUT}" @ONLY) diff --git a/build/cmake/programs/CMakeLists.txt b/build/cmake/programs/CMakeLists.txt index 03194fa0441..5e239e32a3d 100644 --- a/build/cmake/programs/CMakeLists.txt +++ b/build/cmake/programs/CMakeLists.txt @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2015-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -7,7 +7,7 @@ # in the COPYING file in the root directory of this source tree). 
# ################################################################ -project(programs) +project(programs C) set(CMAKE_INCLUDE_CURRENT_DIR TRUE) @@ -21,25 +21,38 @@ if (ZSTD_LEGACY_SUPPORT) include_directories(${PROGRAMS_LEGACY_DIR} ${LIBRARY_DIR}/legacy) endif () +if (ZSTD_PROGRAMS_LINK_SHARED) + set(PROGRAMS_ZSTD_LINK_TARGET libzstd_shared) +else () + set(PROGRAMS_ZSTD_LINK_TARGET libzstd_static) +endif () + if (MSVC) set(MSVC_RESOURCE_DIR ${ZSTD_SOURCE_DIR}/build/VS2010/zstd) set(PlatformDependResources ${MSVC_RESOURCE_DIR}/zstd.rc) endif () -add_executable(zstd ${PROGRAMS_DIR}/zstdcli.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${PROGRAMS_DIR}/fileio.c ${PROGRAMS_DIR}/benchfn.c ${PROGRAMS_DIR}/benchzstd.c ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/dibio.c ${PlatformDependResources}) -target_link_libraries(zstd libzstd_static) +file(GLOB ZSTD_PROGRAM_SRCS "${PROGRAMS_DIR}/*.c") +if (MSVC AND ZSTD_PROGRAMS_LINK_SHARED) + list(APPEND ZSTD_PROGRAM_SRCS ${LIBRARY_DIR}/common/pool.c ${LIBRARY_DIR}/common/threading.c) +endif () + +add_executable(zstd ${ZSTD_PROGRAM_SRCS}) +target_link_libraries(zstd ${PROGRAMS_ZSTD_LINK_TARGET}) if (CMAKE_SYSTEM_NAME MATCHES "(Solaris|SunOS)") target_link_libraries(zstd rt) endif () -install(TARGETS zstd RUNTIME DESTINATION "bin") +install(TARGETS zstd + RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}" + BUNDLE DESTINATION "${CMAKE_INSTALL_BINDIR}") if (UNIX) add_custom_target(zstdcat ALL ${CMAKE_COMMAND} -E create_symlink zstd zstdcat DEPENDS zstd COMMENT "Creating zstdcat symlink") add_custom_target(unzstd ALL ${CMAKE_COMMAND} -E create_symlink zstd unzstd DEPENDS zstd COMMENT "Creating unzstd symlink") - install(FILES ${CMAKE_CURRENT_BINARY_DIR}/zstdcat DESTINATION "bin") - install(FILES ${CMAKE_CURRENT_BINARY_DIR}/unzstd DESTINATION "bin") - install(PROGRAMS ${PROGRAMS_DIR}/zstdgrep DESTINATION "bin") - install(PROGRAMS ${PROGRAMS_DIR}/zstdless DESTINATION "bin") + install(FILES ${CMAKE_CURRENT_BINARY_DIR}/zstdcat DESTINATION "${CMAKE_INSTALL_BINDIR}") + install(FILES ${CMAKE_CURRENT_BINARY_DIR}/unzstd DESTINATION "${CMAKE_INSTALL_BINDIR}") + install(PROGRAMS ${PROGRAMS_DIR}/zstdgrep DESTINATION "${CMAKE_INSTALL_BINDIR}") + install(PROGRAMS ${PROGRAMS_DIR}/zstdless DESTINATION "${CMAKE_INSTALL_BINDIR}") add_custom_target(zstd.1 ALL ${CMAKE_COMMAND} -E copy ${PROGRAMS_DIR}/zstd.1 . 
@@ -56,18 +69,22 @@ if (UNIX) # Define MAN_INSTALL_DIR if necessary if (MAN_INSTALL_DIR) else () - set(MAN_INSTALL_DIR ${CMAKE_INSTALL_PREFIX}/share/man/man1) + set(MAN_INSTALL_DIR ${CMAKE_INSTALL_MANDIR}/man1) endif () - install(FILES ${CMAKE_CURRENT_BINARY_DIR}/zstd.1 DESTINATION "${MAN_INSTALL_DIR}") - install(FILES ${CMAKE_CURRENT_BINARY_DIR}/zstdcat.1 DESTINATION "${MAN_INSTALL_DIR}") - install(FILES ${CMAKE_CURRENT_BINARY_DIR}/unzstd.1 DESTINATION "${MAN_INSTALL_DIR}") - install(FILES ${CMAKE_CURRENT_BINARY_DIR}/zstdgrep.1 DESTINATION "${MAN_INSTALL_DIR}") - install(FILES ${CMAKE_CURRENT_BINARY_DIR}/zstdless.1 DESTINATION "${MAN_INSTALL_DIR}") - - add_executable(zstd-frugal ${PROGRAMS_DIR}/zstdcli.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${PROGRAMS_DIR}/fileio.c) - target_link_libraries(zstd-frugal libzstd_static) - set_property(TARGET zstd-frugal APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_NOBENCH;ZSTD_NODICT") + install(FILES + ${CMAKE_CURRENT_BINARY_DIR}/zstd.1 + ${CMAKE_CURRENT_BINARY_DIR}/zstdcat.1 + ${CMAKE_CURRENT_BINARY_DIR}/unzstd.1 + ${CMAKE_CURRENT_BINARY_DIR}/zstdgrep.1 + ${CMAKE_CURRENT_BINARY_DIR}/zstdless.1 + DESTINATION "${MAN_INSTALL_DIR}") + + add_executable(zstd-frugal ${PROGRAMS_DIR}/zstdcli.c + ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c + ${PROGRAMS_DIR}/fileio.c ${PROGRAMS_DIR}/fileio_asyncio.c) + target_link_libraries(zstd-frugal ${PROGRAMS_ZSTD_LINK_TARGET}) + set_property(TARGET zstd-frugal APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_NOBENCH;ZSTD_NODICT;ZSTD_NOTRACE") endif () # Add multi-threading support definitions @@ -79,13 +96,15 @@ if (ZSTD_MULTITHREAD_SUPPORT) target_link_libraries(zstd ${THREADS_LIBS}) add_custom_target(zstdmt ALL ${CMAKE_COMMAND} -E create_symlink zstd zstdmt DEPENDS zstd COMMENT "Creating zstdmt symlink") - install(FILES ${CMAKE_CURRENT_BINARY_DIR}/zstdmt DESTINATION "bin") + install(FILES ${CMAKE_CURRENT_BINARY_DIR}/zstdmt DESTINATION "${CMAKE_INSTALL_BINDIR}") endif () endif () option(ZSTD_ZLIB_SUPPORT "ZLIB SUPPORT" OFF) option(ZSTD_LZMA_SUPPORT "LZMA SUPPORT" OFF) +option(ZSTD_LZ4_SUPPORT "LZ4 SUPPORT" OFF) +# Add gzip support if (ZSTD_ZLIB_SUPPORT) find_package(ZLIB REQUIRED) @@ -98,6 +117,7 @@ if (ZSTD_ZLIB_SUPPORT) endif () endif () +# Add lzma support if (ZSTD_LZMA_SUPPORT) find_package(LibLZMA REQUIRED) @@ -109,3 +129,16 @@ if (ZSTD_LZMA_SUPPORT) message(SEND_ERROR "lzma library is missing") endif () endif () + +# Add lz4 support +if (ZSTD_LZ4_SUPPORT) + find_package(LibLZ4 REQUIRED) + + if (LIBLZ4_FOUND) + include_directories(${LIBLZ4_INCLUDE_DIRS}) + target_link_libraries(zstd ${LIBLZ4_LIBRARIES}) + set_property(TARGET zstd APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_LZ4COMPRESS;ZSTD_LZ4DECOMPRESS") + else () + message(SEND_ERROR "lz4 library is missing") + endif () +endif () diff --git a/build/cmake/tests/.gitignore b/build/cmake/tests/.gitignore index 2ab62a3e1ef..ca2947f61f3 100644 --- a/build/cmake/tests/.gitignore +++ b/build/cmake/tests/.gitignore @@ -3,5 +3,4 @@ datagen fullbench fuzzer paramgrill -zbufftest diff --git a/build/cmake/tests/CMakeLists.txt b/build/cmake/tests/CMakeLists.txt index 077d824b52d..56104a4e341 100644 --- a/build/cmake/tests/CMakeLists.txt +++ b/build/cmake/tests/CMakeLists.txt @@ -1,6 +1,6 @@ # ################################################################ # zstd - Makefile -# Copyright (C) Yann Collet 2014-present +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. 
#
# BSD license
@@ -27,11 +27,21 @@
 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #
 # You can contact the author at :
-# - zstd homepage : http://www.zstd.net/
+# - zstd homepage : https://facebook.github.io/zstd/
 # ################################################################
 
 project(tests)
 
+# name: Cache variable name. The value is expected to be a semicolon-separated
+# list of command line flags
+# default_value: Value to initialize the option with. Can be space separated.
+function(AddTestFlagsOption name default_value doc)
+    string(STRIP "${default_value}" default_value)
+    string(REGEX REPLACE " +" ";" default_value "${default_value}")
+    set(${name} ${default_value} CACHE STRING "${doc}")
+    mark_as_advanced(${name})
+endfunction()
+
 set(CMAKE_INCLUDE_CURRENT_DIR TRUE)
 
 # Define programs directory, where sources and header files are located
@@ -40,16 +50,69 @@ set(PROGRAMS_DIR ${ZSTD_SOURCE_DIR}/programs)
 set(TESTS_DIR ${ZSTD_SOURCE_DIR}/tests)
 include_directories(${TESTS_DIR} ${PROGRAMS_DIR} ${LIBRARY_DIR} ${LIBRARY_DIR}/common ${LIBRARY_DIR}/compress ${LIBRARY_DIR}/dictBuilder)
 
-add_executable(datagen ${PROGRAMS_DIR}/datagen.c ${TESTS_DIR}/datagencli.c)
+add_executable(datagen ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/lorem.c ${TESTS_DIR}/loremOut.c ${TESTS_DIR}/datagencli.c)
 target_link_libraries(datagen libzstd_static)
 
-add_executable(fullbench ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${PROGRAMS_DIR}/benchfn.c ${PROGRAMS_DIR}/benchzstd.c ${TESTS_DIR}/fullbench.c)
+#
+# fullbench
+#
+add_executable(fullbench ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/lorem.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${PROGRAMS_DIR}/benchfn.c ${PROGRAMS_DIR}/benchzstd.c ${TESTS_DIR}/fullbench.c)
+if (NOT MSVC)
+    target_compile_options(fullbench PRIVATE "-Wno-deprecated-declarations")
+endif()
 target_link_libraries(fullbench libzstd_static)
+add_test(NAME fullbench COMMAND "$<TARGET_FILE:fullbench>" ${ZSTD_FULLBENCH_FLAGS})
 
+#
+# fuzzer
+#
 add_executable(fuzzer ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${TESTS_DIR}/fuzzer.c)
+if (NOT MSVC)
+    target_compile_options(fuzzer PRIVATE "-Wno-deprecated-declarations")
+endif()
 target_link_libraries(fuzzer libzstd_static)
+AddTestFlagsOption(ZSTD_FUZZER_FLAGS "$ENV{FUZZERTEST} $ENV{FUZZER_FLAGS}"
+    "Semicolon-separated list of flags to pass to the fuzzer test (see `fuzzer -h` for usage)")
+add_test(NAME fuzzer COMMAND "$<TARGET_FILE:fuzzer>" ${ZSTD_FUZZER_FLAGS})
+# Disable the timeout since the run time is too long for the default timeout of
+# 1500 seconds and varies considerably between low-end and high-end CPUs.
+set_tests_properties(fuzzer PROPERTIES TIMEOUT 0)
+
+#
+# zstreamtest
+#
+add_executable(zstreamtest ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${TESTS_DIR}/seqgen.c ${TESTS_DIR}/zstreamtest.c ${TESTS_DIR}/external_matchfinder.c)
+if (NOT MSVC)
+    target_compile_options(zstreamtest PRIVATE "-Wno-deprecated-declarations")
+endif()
+target_link_libraries(zstreamtest libzstd_static)
+AddTestFlagsOption(ZSTD_ZSTREAM_FLAGS "$ENV{ZSTREAM_TESTTIME} $ENV{FUZZER_FLAGS}"
+    "Semicolon-separated list of flags to pass to the zstreamtest test (see `zstreamtest -h` for usage)")
+add_test(NAME zstreamtest COMMAND "$<TARGET_FILE:zstreamtest>" ${ZSTD_ZSTREAM_FLAGS})
+
+#
+# playTests.sh
+#
+AddTestFlagsOption(ZSTD_PLAYTESTS_FLAGS "$ENV{PLAYTESTS_FLAGS}"
+    "Semicolon-separated list of flags to pass to the playTests.sh test")
+add_test(NAME playTests COMMAND sh -c "\"${TESTS_DIR}/playTests.sh\" ${ZSTD_PLAYTESTS_FLAGS}")
+find_program(UNAME uname) # Run script only in unix shell environments
+if (ZSTD_BUILD_PROGRAMS AND UNAME)
+    set_property(TEST playTests APPEND PROPERTY ENVIRONMENT
+        "ZSTD_BIN=$<TARGET_FILE:zstd>"
+        "DATAGEN_BIN=$<TARGET_FILE:datagen>"
+    )
+else()
+    message(STATUS "Disabling playTests.sh test because requirements not met")
+    set_tests_properties(playTests PROPERTIES DISABLED YES)
+endif()
+
+# Label the "Medium" set of tests (see TESTING.md)
+set_property(TEST fuzzer zstreamtest playTests APPEND PROPERTY LABELS Medium)
+add_executable(paramgrill ${PROGRAMS_DIR}/benchfn.c ${PROGRAMS_DIR}/benchzstd.c ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/lorem.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${TESTS_DIR}/paramgrill.c)
 if (UNIX)
-    add_executable(paramgrill ${PROGRAMS_DIR}/benchfn.c ${PROGRAMS_DIR}/benchzstd.c ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${TESTS_DIR}/paramgrill.c)
     target_link_libraries(paramgrill libzstd_static m) #m is math library
+else()
+    target_link_libraries(paramgrill libzstd_static)
 endif ()
diff --git a/build/cmake/zstdConfig.cmake.in b/build/cmake/zstdConfig.cmake.in
new file mode 100644
index 00000000000..f4190f989ba
--- /dev/null
+++ b/build/cmake/zstdConfig.cmake.in
@@ -0,0 +1,10 @@
+@PACKAGE_INIT@
+
+include(CMakeFindDependencyMacro)
+if(@ZSTD_MULTITHREAD_SUPPORT@ AND "@UNIX@")
+  find_dependency(Threads)
+endif()
+
+include("${CMAKE_CURRENT_LIST_DIR}/zstdTargets.cmake")
+
+check_required_components("zstd")
diff --git a/build/meson/README.md b/build/meson/README.md
index d79ed49696f..d393a063fc0 100644
--- a/build/meson/README.md
+++ b/build/meson/README.md
@@ -17,7 +17,7 @@ It outputs one `libzstd`, either shared or static, depending on
 `cd` to this meson directory (`build/meson`)
 
 ```sh
-meson --buildtype=release -Dbuild_{programs,contrib}=true builddir
+meson setup -Dbin_programs=true -Dbin_contrib=true builddir
 cd builddir
 ninja # to build
 ninja install # to install
diff --git a/build/meson/contrib/gen_html/meson.build b/build/meson/contrib/gen_html/meson.build
index cabff209dc9..3f302538d8e 100644
--- a/build/meson/contrib/gen_html/meson.build
+++ b/build/meson/contrib/gen_html/meson.build
@@ -17,6 +17,7 @@ gen_html_includes = include_directories(join_paths(zstd_rootdir, 'programs'),
 gen_html = executable('gen_html',
     join_paths(zstd_rootdir, 'contrib/gen_html/gen_html.cpp'),
     include_directories: gen_html_includes,
+    native: true,
     install: false)
 
 # Update zstd manual
diff --git a/build/meson/contrib/pzstd/meson.build b/build/meson/contrib/pzstd/meson.build
index 8f3822fd776..c3ee3d60a0e 100644
--- a/build/meson/contrib/pzstd/meson.build
+++
b/build/meson/contrib/pzstd/meson.build @@ -18,7 +18,8 @@ pzstd_sources = [join_paths(zstd_rootdir, 'programs/util.c'), join_paths(zstd_rootdir, 'contrib/pzstd/SkippableFrame.cpp')] pzstd = executable('pzstd', pzstd_sources, - cpp_args: [ '-DNDEBUG', '-Wno-shadow', '-pedantic' ], + cpp_args: pzstd_warning_flags, include_directories: pzstd_includes, - dependencies: [ libzstd_dep, thread_dep ], + dependencies: [ libzstd_internal_dep, thread_dep ], + override_options: ['b_ndebug=true'], install: true) diff --git a/build/meson/lib/meson.build b/build/meson/lib/meson.build index f8014c62586..81fc313f954 100644 --- a/build/meson/lib/meson.build +++ b/build/meson/lib/meson.build @@ -14,8 +14,7 @@ libzstd_includes = [include_directories(join_paths(zstd_rootdir,'lib'), join_paths(zstd_rootdir, 'lib/common'), join_paths(zstd_rootdir, 'lib/compress'), join_paths(zstd_rootdir, 'lib/decompress'), - join_paths(zstd_rootdir, 'lib/dictBuilder'), - join_paths(zstd_rootdir, 'lib/deprecated'))] + join_paths(zstd_rootdir, 'lib/dictBuilder'))] libzstd_sources = [join_paths(zstd_rootdir, 'lib/common/entropy_common.c'), join_paths(zstd_rootdir, 'lib/common/fse_decompress.c'), @@ -28,6 +27,10 @@ libzstd_sources = [join_paths(zstd_rootdir, 'lib/common/entropy_common.c'), join_paths(zstd_rootdir, 'lib/compress/fse_compress.c'), join_paths(zstd_rootdir, 'lib/compress/huf_compress.c'), join_paths(zstd_rootdir, 'lib/compress/zstd_compress.c'), + join_paths(zstd_rootdir, 'lib/compress/zstd_compress_literals.c'), + join_paths(zstd_rootdir, 'lib/compress/zstd_compress_sequences.c'), + join_paths(zstd_rootdir, 'lib/compress/zstd_compress_superblock.c'), + join_paths(zstd_rootdir, 'lib/compress/zstd_preSplit.c'), join_paths(zstd_rootdir, 'lib/compress/zstdmt_compress.c'), join_paths(zstd_rootdir, 'lib/compress/zstd_fast.c'), join_paths(zstd_rootdir, 'lib/compress/zstd_double_fast.c'), @@ -41,10 +44,16 @@ libzstd_sources = [join_paths(zstd_rootdir, 'lib/common/entropy_common.c'), join_paths(zstd_rootdir, 'lib/dictBuilder/cover.c'), join_paths(zstd_rootdir, 'lib/dictBuilder/fastcover.c'), join_paths(zstd_rootdir, 'lib/dictBuilder/divsufsort.c'), - join_paths(zstd_rootdir, 'lib/dictBuilder/zdict.c'), - join_paths(zstd_rootdir, 'lib/deprecated/zbuff_common.c'), - join_paths(zstd_rootdir, 'lib/deprecated/zbuff_compress.c'), - join_paths(zstd_rootdir, 'lib/deprecated/zbuff_decompress.c')] + join_paths(zstd_rootdir, 'lib/dictBuilder/zdict.c')] + +# really we need anything that defines __GNUC__ as that is what ZSTD_ASM_SUPPORTED is gated on +# but these are the two compilers that are supported in tree and actually handle this correctly +# Otherwise, explicitly disable assembly. 
+if [compiler_gcc, compiler_clang].contains(cc_id) + libzstd_sources += join_paths(zstd_rootdir, 'lib/decompress/huf_decompress_amd64.S') +else + add_project_arguments('-DZSTD_DISABLE_ASM', language: 'c') +endif # Explicit define legacy support add_project_arguments('-DZSTD_LEGACY_SUPPORT=@0@'.format(legacy_level), @@ -75,7 +84,8 @@ libzstd_c_args = [] if cc_id == compiler_msvc if default_library_type != 'static' libzstd_sources += [windows_mod.compile_resources( - join_paths(zstd_rootdir, 'build/VS2010/libzstd-dll/libzstd-dll.rc'))] + join_paths(zstd_rootdir, 'build/VS2010/libzstd-dll/libzstd-dll.rc'), + include_directories: libzstd_includes)] libzstd_c_args += ['-DZSTD_DLL_EXPORT=1', '-DZSTD_HEAPMODE=0', '-D_CONSOLE', @@ -109,21 +119,57 @@ libzstd = library('zstd', libzstd_sources, include_directories: libzstd_includes, c_args: libzstd_c_args, + gnu_symbol_visibility: 'hidden', dependencies: libzstd_deps, install: true, version: zstd_libversion) libzstd_dep = declare_dependency(link_with: libzstd, - include_directories: libzstd_includes) + include_directories: join_paths(zstd_rootdir,'lib')) # Do not expose private headers + +if meson.version().version_compare('>=0.54.0') + meson.override_dependency('libzstd', libzstd_dep) +endif + +# we link to both: +# - the shared library (for public symbols) +# - the static library (for private symbols) +# +# this is needed because internally private symbols are used all the time, and +# -fvisibility=hidden means those cannot be found +if get_option('default_library') == 'static' + libzstd_static = libzstd + libzstd_internal_dep = declare_dependency(link_with: libzstd, + include_directories: libzstd_includes) +else + if get_option('default_library') == 'shared' + libzstd_static = static_library('zstd_objlib', + objects: libzstd.extract_all_objects(recursive: true), + build_by_default: false) + else + libzstd_static = libzstd.get_static_lib() + endif + + if cc_id == compiler_msvc + # msvc does not actually support linking to both, but errors out with: + # error LNK2005: ZSTD_ already defined in zstd.lib(zstd-1.dll) + libzstd_internal_dep = declare_dependency(link_with: libzstd_static, + include_directories: libzstd_includes) + else + libzstd_internal_dep = declare_dependency(link_with: libzstd, + # the static library must be linked after the shared one + dependencies: declare_dependency(link_with: libzstd_static), + include_directories: libzstd_includes) + endif +endif pkgconfig.generate(libzstd, name: 'libzstd', filebase: 'libzstd', description: 'fast lossless compression algorithm library', version: zstd_libversion, - url: 'http://www.zstd.net/') + url: 'https://facebook.github.io/zstd/') install_headers(join_paths(zstd_rootdir, 'lib/zstd.h'), - join_paths(zstd_rootdir, 'lib/deprecated/zbuff.h'), - join_paths(zstd_rootdir, 'lib/dictBuilder/zdict.h'), - join_paths(zstd_rootdir, 'lib/common/zstd_errors.h')) + join_paths(zstd_rootdir, 'lib/zdict.h'), + join_paths(zstd_rootdir, 'lib/zstd_errors.h')) diff --git a/build/meson/meson.build b/build/meson/meson.build index 7178403c2fc..a9768d43a53 100644 --- a/build/meson/meson.build +++ b/build/meson/meson.build @@ -10,19 +10,27 @@ project('zstd', ['c', 'cpp'], - license: ['BSD', 'GPLv2'], + license: 'BSD-3-Clause OR GPL-2.0-only', default_options : [ - 'c_std=gnu99', + # There shouldn't be any need to force a C standard convention for zstd + # but in case one would want that anyway, this can be done here. 
+ # 'c_std=gnu99', + # c++11 standard is useful for pzstd 'cpp_std=c++11', - 'buildtype=release' + 'buildtype=release', + 'warning_level=3', + # -Wdocumentation does not actually pass, nor do the test binaries, + # so this isn't safe + #'werror=true' ], - version: 'DUMMY', - meson_version: '>=0.47.0') + version: run_command( + find_program('GetZstdLibraryVersion.py'), '../../lib/zstd.h', + check: true).stdout().strip(), + meson_version: '>=0.50.0') cc = meson.get_compiler('c') cxx = meson.get_compiler('cpp') pkgconfig = import('pkgconfig') -python3 = import('python').find_installation() windows_mod = import('windows') host_machine_os = host_machine.system() @@ -39,16 +47,6 @@ compiler_msvc = 'msvc' zstd_version = meson.project_version() -zstd_h_file = join_paths(meson.current_source_dir(), '../../lib/zstd.h') -GetZstdLibraryVersion_py = files('GetZstdLibraryVersion.py') -r = run_command(python3, GetZstdLibraryVersion_py, zstd_h_file) -if r.returncode() == 0 - zstd_version = r.stdout().strip() - message('Project version is now: @0@'.format(zstd_version)) -else - error('Cannot find project version in @0@'.format(zstd_h_file)) -endif - zstd_libversion = zstd_version # ============================================================================= @@ -67,7 +65,7 @@ zstd_docdir = join_paths(zstd_datadir, 'doc', meson.project_name()) # Built-in options use_debug = get_option('debug') -buildtype = get_option('buildtype') +default_library_type = get_option('default_library') # Custom options debug_level = get_option('debug_level') @@ -75,9 +73,9 @@ legacy_level = get_option('legacy_level') use_backtrace = get_option('backtrace') use_static_runtime = get_option('static_runtime') -build_programs = get_option('build_programs') -build_contrib = get_option('build_contrib') -build_tests = get_option('build_tests') +bin_programs = get_option('bin_programs') +bin_contrib = get_option('bin_contrib') +bin_tests = get_option('bin_tests') feature_multi_thread = get_option('multi_thread') feature_zlib = get_option('zlib') @@ -88,9 +86,14 @@ feature_lz4 = get_option('lz4') # Dependencies # ============================================================================= -libm_dep = cc.find_library('m', required: build_tests) -thread_dep = dependency('threads', required: feature_multi_thread) -use_multi_thread = thread_dep.found() +libm_dep = cc.find_library('m', required: false) +if host_machine_os == os_windows + thread_dep = dependency('', required: false) + use_multi_thread = not feature_multi_thread.disabled() +else + thread_dep = dependency('threads', required: feature_multi_thread) + use_multi_thread = thread_dep.found() +endif # Arguments in dependency should be equivalent to those passed to pkg-config zlib_dep = dependency('zlib', required: feature_zlib) use_zlib = zlib_dep.found() @@ -105,23 +108,29 @@ use_lz4 = lz4_dep.found() add_project_arguments('-DXXH_NAMESPACE=ZSTD_', language: ['c']) +pzstd_warning_flags = [] if [compiler_gcc, compiler_clang].contains(cc_id) - common_warning_flags = [ '-Wextra', '-Wundef', '-Wshadow', '-Wcast-align', '-Wcast-qual' ] + common_warning_flags = [ '-Wundef', '-Wshadow', '-Wcast-align', '-Wcast-qual' ] + pzstd_warning_flags = ['-Wno-shadow', '-Wno-deprecated-declarations'] if cc_id == compiler_clang - # Should use Meson's own --werror build option - #common_warning_flags += '-Werror' common_warning_flags += ['-Wconversion', '-Wno-sign-conversion', '-Wdocumentation'] endif - cc_compile_flags = cc.get_supported_arguments(common_warning_flags + ['-Wstrict-prototypes']) - 
cxx_compile_flags = cxx.get_supported_arguments(common_warning_flags) + noexecstack_flags = ['-Wa,--noexecstack' ] + noexecstack_link_flags = ['-Wl,-z,noexecstack'] + cc_compile_flags = cc.get_supported_arguments(common_warning_flags + noexecstack_flags + ['-Wstrict-prototypes']) + cxx_compile_flags = cxx.get_supported_arguments(common_warning_flags + noexecstack_flags) add_project_arguments(cc_compile_flags, language : 'c') add_project_arguments(cxx_compile_flags, language : 'cpp') + cc_link_flags = cc.get_supported_link_arguments(noexecstack_link_flags) + cxx_link_flags = cxx.get_supported_link_arguments(noexecstack_link_flags) + add_project_link_arguments(cc_link_flags, language: 'c') + add_project_link_arguments(cxx_link_flags, language: 'cpp') elif cc_id == compiler_msvc msvc_compile_flags = [ '/D_UNICODE', '/DUNICODE' ] if use_multi_thread msvc_compile_flags += '/MP' endif - if enable_static_runtime + if use_static_runtime msvc_compile_flags += '/MT' endif add_project_arguments(msvc_compile_flags, language: ['c', 'cpp']) @@ -133,14 +142,14 @@ endif subdir('lib') -if build_programs +if bin_programs or bin_tests subdir('programs') endif -if build_tests +if bin_tests subdir('tests') endif -if build_contrib +if bin_contrib subdir('contrib') endif diff --git a/build/meson/meson_options.txt b/build/meson/meson_options.txt index 349d915c7c4..4705178279f 100644 --- a/build/meson/meson_options.txt +++ b/build/meson/meson_options.txt @@ -10,24 +10,24 @@ # Read guidelines from https://wiki.gnome.org/Initiatives/GnomeGoals/MesonPorting -option('legacy_level', type: 'integer', min: 0, max: 7, value: '5', +option('legacy_level', type: 'integer', min: 0, max: 7, value: 5, description: 'Support any legacy format: 7 to 1 for v0.7+ to v0.1+') option('debug_level', type: 'integer', min: 0, max: 9, value: 1, description: 'Enable run-time debug. 
See lib/common/debug.h') -option('backtrace', type: 'boolean', value: false, +option('backtrace', type: 'feature', value: 'disabled', description: 'Display a stack backtrace when execution generates a runtime exception') option('static_runtime', type: 'boolean', value: false, description: 'Link to static run-time libraries on MSVC') -option('build_programs', type: 'boolean', value: true, +option('bin_programs', type: 'boolean', value: true, description: 'Enable programs build') -option('build_tests', type: 'boolean', value: false, +option('bin_tests', type: 'boolean', value: false, description: 'Enable tests build') -option('build_contrib', type: 'boolean', value: false, +option('bin_contrib', type: 'boolean', value: false, description: 'Enable contrib build') option('multi_thread', type: 'feature', value: 'enabled', - description: 'Enable multi-threading when pthread is detected') + description: 'Enable multi-threading when pthread or Windows is detected') option('zlib', type: 'feature', value: 'auto', description: 'Enable zlib support') option('lzma', type: 'feature', value: 'auto', diff --git a/build/meson/programs/meson.build b/build/meson/programs/meson.build index 363818f9d64..e103a629539 100644 --- a/build/meson/programs/meson.build +++ b/build/meson/programs/meson.build @@ -14,17 +14,27 @@ zstd_programs_sources = [join_paths(zstd_rootdir, 'programs/zstdcli.c'), join_paths(zstd_rootdir, 'programs/util.c'), join_paths(zstd_rootdir, 'programs/timefn.c'), join_paths(zstd_rootdir, 'programs/fileio.c'), + join_paths(zstd_rootdir, 'programs/fileio_asyncio.c'), join_paths(zstd_rootdir, 'programs/benchfn.c'), join_paths(zstd_rootdir, 'programs/benchzstd.c'), join_paths(zstd_rootdir, 'programs/datagen.c'), - join_paths(zstd_rootdir, 'programs/dibio.c')] + join_paths(zstd_rootdir, 'programs/lorem.c'), + join_paths(zstd_rootdir, 'programs/dibio.c'), + join_paths(zstd_rootdir, 'programs/zstdcli_trace.c')] +zstd_deps = [ libzstd_internal_dep ] zstd_c_args = libzstd_debug_cflags + +zstd_frugal_deps = [ libzstd_internal_dep ] +zstd_frugal_c_args = [ '-DZSTD_NOBENCH', '-DZSTD_NODICT', '-DZSTD_NOTRACE' ] + if use_multi_thread + zstd_deps += [ thread_dep ] zstd_c_args += [ '-DZSTD_MULTITHREAD' ] + zstd_frugal_deps += [ thread_dep ] + zstd_frugal_c_args += [ '-DZSTD_MULTITHREAD' ] endif -zstd_deps = [ libzstd_dep ] if use_zlib zstd_deps += [ zlib_dep ] zstd_c_args += [ '-DZSTD_GZCOMPRESS', '-DZSTD_GZDECOMPRESS' ] @@ -42,7 +52,8 @@ endif export_dynamic_on_windows = false # explicit backtrace enable/disable for Linux & Darwin -if not use_backtrace +have_execinfo = cc.has_header('execinfo.h', required: use_backtrace) +if not have_execinfo zstd_c_args += '-DBACKTRACE_ENABLE=0' elif use_debug and host_machine_os == os_windows # MinGW target zstd_c_args += '-DBACKTRACE_ENABLE=1' @@ -52,7 +63,8 @@ endif if cc_id == compiler_msvc if default_library_type != 'static' zstd_programs_sources += [windows_mod.compile_resources( - join_paths(zstd_rootdir, 'build/VS2010/zstd/zstd.rc'))] + join_paths(zstd_rootdir, 'build/VS2010/zstd/zstd.rc'), + include_directories: libzstd_includes)] endif endif @@ -61,19 +73,27 @@ zstd = executable('zstd', c_args: zstd_c_args, dependencies: zstd_deps, export_dynamic: export_dynamic_on_windows, # Since Meson 0.45.0 - install: true) + build_by_default: bin_programs, + install: bin_programs) + +if not bin_programs + # we generate rules to build the programs, but don't install anything + # so do not continue to installing scripts and manpages + subdir_done() +endif zstd_frugal_sources = 
[join_paths(zstd_rootdir, 'programs/zstdcli.c'), join_paths(zstd_rootdir, 'programs/timefn.c'), join_paths(zstd_rootdir, 'programs/util.c'), - join_paths(zstd_rootdir, 'programs/fileio.c')] + join_paths(zstd_rootdir, 'programs/fileio.c'), + join_paths(zstd_rootdir, 'programs/fileio_asyncio.c')] # Minimal target, with only zstd compression and decompression. # No bench. No legacy. executable('zstd-frugal', zstd_frugal_sources, - dependencies: libzstd_dep, - c_args: [ '-DZSTD_NOBENCH', '-DZSTD_NODICT' ], + dependencies: zstd_frugal_deps, + c_args: zstd_frugal_c_args, install: true) install_data(join_paths(zstd_rootdir, 'programs/zstdgrep'), diff --git a/build/meson/tests/meson.build b/build/meson/tests/meson.build index 64eba6028d6..71ffc50693d 100644 --- a/build/meson/tests/meson.build +++ b/build/meson/tests/meson.build @@ -21,7 +21,6 @@ FUZZER_FLAGS = ['--no-big-tests'] FUZZERTEST = '-T200s' ZSTREAM_TESTTIME = '-T90s' DECODECORPUS_TESTTIME = '-T30' -ZSTDRTTEST = ['--test-large-data'] # ============================================================================= # Executables @@ -29,81 +28,72 @@ ZSTDRTTEST = ['--test-large-data'] test_includes = [ include_directories(join_paths(zstd_rootdir, 'programs')) ] -datagen_sources = [join_paths(zstd_rootdir, 'programs/datagen.c'), - join_paths(zstd_rootdir, 'tests/datagencli.c')] +testcommon_sources = [join_paths(zstd_rootdir, 'programs/datagen.c'), + join_paths(zstd_rootdir, 'programs/lorem.c'), + join_paths(zstd_rootdir, 'programs/util.c'), + join_paths(zstd_rootdir, 'programs/timefn.c'), + join_paths(zstd_rootdir, 'programs/benchfn.c'), + join_paths(zstd_rootdir, 'programs/benchzstd.c')] + +testcommon = static_library('testcommon', + testcommon_sources, + # needed due to use of private symbol + -fvisibility=hidden + link_with: libzstd_static) + +testcommon_dep = declare_dependency(link_with: testcommon, + dependencies: libzstd_deps, + include_directories: libzstd_includes) + +datagen_sources = [join_paths(zstd_rootdir, 'tests/datagencli.c'), + join_paths(zstd_rootdir, 'tests/loremOut.c')] datagen = executable('datagen', datagen_sources, c_args: [ '-DNDEBUG' ], include_directories: test_includes, - dependencies: libzstd_dep, + dependencies: testcommon_dep, install: false) -fullbench_sources = [join_paths(zstd_rootdir, 'programs/datagen.c'), - join_paths(zstd_rootdir, 'programs/util.c'), - join_paths(zstd_rootdir, 'programs/timefn.c'), - join_paths(zstd_rootdir, 'programs/benchfn.c'), - join_paths(zstd_rootdir, 'programs/benchzstd.c'), - join_paths(zstd_rootdir, 'tests/fullbench.c')] +fullbench_sources = [join_paths(zstd_rootdir, 'tests/fullbench.c')] fullbench = executable('fullbench', fullbench_sources, include_directories: test_includes, - dependencies: libzstd_dep, + dependencies: testcommon_dep, install: false) -fuzzer_sources = [join_paths(zstd_rootdir, 'programs/datagen.c'), - join_paths(zstd_rootdir, 'programs/util.c'), - join_paths(zstd_rootdir, 'programs/timefn.c'), - join_paths(zstd_rootdir, 'tests/fuzzer.c')] +fuzzer_sources = [join_paths(zstd_rootdir, 'tests/fuzzer.c')] fuzzer = executable('fuzzer', fuzzer_sources, include_directories: test_includes, - dependencies: libzstd_dep, - install: false) - -zbufftest_sources = [join_paths(zstd_rootdir, 'programs/datagen.c'), - join_paths(zstd_rootdir, 'programs/util.c'), - join_paths(zstd_rootdir, 'programs/timefn.c'), - join_paths(zstd_rootdir, 'tests/zbufftest.c')] -zbufftest = executable('zbufftest', - zbufftest_sources, - c_args: ['-Wno-deprecated-declarations'], - include_directories: 
test_includes, - dependencies: libzstd_dep, + dependencies: [ testcommon_dep, thread_dep ], install: false) -zstreamtest_sources = [join_paths(zstd_rootdir, 'programs/datagen.c'), - join_paths(zstd_rootdir, 'programs/util.c'), - join_paths(zstd_rootdir, 'programs/timefn.c'), +zstreamtest_sources = [ join_paths(zstd_rootdir, 'tests/seqgen.c'), - join_paths(zstd_rootdir, 'tests/zstreamtest.c')] + join_paths(zstd_rootdir, 'tests/zstreamtest.c'), + join_paths(zstd_rootdir, 'tests/external_matchfinder.c')] zstreamtest = executable('zstreamtest', zstreamtest_sources, include_directories: test_includes, - dependencies: libzstd_dep, + dependencies: testcommon_dep, install: false) -paramgrill_sources = [join_paths(zstd_rootdir, 'programs/benchfn.c'), - join_paths(zstd_rootdir, 'programs/timefn.c'), - join_paths(zstd_rootdir, 'programs/benchzstd.c'), - join_paths(zstd_rootdir, 'programs/datagen.c'), - join_paths(zstd_rootdir, 'programs/util.c'), - join_paths(zstd_rootdir, 'tests/paramgrill.c')] +paramgrill_sources = [join_paths(zstd_rootdir, 'tests/paramgrill.c')] paramgrill = executable('paramgrill', paramgrill_sources, include_directories: test_includes, - dependencies: [ libzstd_dep, libm_dep ], + dependencies: [ testcommon_dep, libm_dep ], install: false) roundTripCrash_sources = [join_paths(zstd_rootdir, 'tests/roundTripCrash.c')] roundTripCrash = executable('roundTripCrash', roundTripCrash_sources, - dependencies: [ libzstd_dep ], + dependencies: [ testcommon_dep ], install: false) longmatch_sources = [join_paths(zstd_rootdir, 'tests/longmatch.c')] longmatch = executable('longmatch', longmatch_sources, - dependencies: [ libzstd_dep ], + dependencies: [ libzstd_internal_dep ], install: false) invalidDictionaries_sources = [join_paths(zstd_rootdir, 'tests/invalidDictionaries.c')] @@ -122,34 +112,18 @@ if 0 < legacy_level and legacy_level <= 4 install: false) endif -decodecorpus_sources = [join_paths(zstd_rootdir, 'programs/util.c'), - join_paths(zstd_rootdir, 'programs/timefn.c'), - join_paths(zstd_rootdir, 'tests/decodecorpus.c')] +decodecorpus_sources = [join_paths(zstd_rootdir, 'tests/decodecorpus.c')] decodecorpus = executable('decodecorpus', decodecorpus_sources, include_directories: test_includes, - dependencies: [ libzstd_dep, libm_dep ], + dependencies: [ testcommon_dep, libm_dep ], install: false) -symbols_sources = [join_paths(zstd_rootdir, 'tests/symbols.c')] -symbols = executable('symbols', - symbols_sources, - include_directories: test_includes, - c_args: host_machine_os == os_windows ? 
'-DZSTD_DLL_IMPORT=1' : [], - dependencies: [ libzstd_dep ], - install: false) - -poolTests_sources = [join_paths(zstd_rootdir, 'programs/util.c'), - join_paths(zstd_rootdir, 'programs/timefn.c'), - join_paths(zstd_rootdir, 'tests/poolTests.c'), - join_paths(zstd_rootdir, 'lib/common/pool.c'), - join_paths(zstd_rootdir, 'lib/common/threading.c'), - join_paths(zstd_rootdir, 'lib/common/zstd_common.c'), - join_paths(zstd_rootdir, 'lib/common/error_private.c')] +poolTests_sources = [join_paths(zstd_rootdir, 'tests/poolTests.c')] poolTests = executable('poolTests', poolTests_sources, include_directories: test_includes, - dependencies: [ libzstd_dep, thread_dep ], + dependencies: [ testcommon_dep, thread_dep ], install: false) checkTag_sources = [join_paths(zstd_rootdir, 'tests/checkTag.c')] @@ -163,24 +137,38 @@ checkTag = executable('checkTag', # ============================================================================= if tests_supported_oses.contains(host_machine_os) - valgrind_prog = find_program('valgrind', ['/usr/bin/valgrind'], required: true) + valgrind_prog = find_program('valgrind', ['/usr/bin/valgrind'], required: false) valgrindTest_py = files('valgrindTest.py') - test('valgrindTest', - valgrindTest_py, - args: [valgrind_prog.path(), zstd, datagen, fuzzer, fullbench], - depends: [zstd, datagen, fuzzer, fullbench], - timeout: 600) # Timeout should work on HDD drive + if valgrind_prog.found() + test('valgrindTest', + valgrindTest_py, + args: [valgrind_prog.path(), zstd, datagen, fuzzer, fullbench], + depends: [zstd, datagen, fuzzer, fullbench], + timeout: 600) # Timeout should work on HDD drive + endif endif if host_machine_os != os_windows playTests_sh = find_program(join_paths(zstd_rootdir, 'tests/playTests.sh'), required: true) - test('test-zstd', - playTests_sh, - args: ZSTDRTTEST, - env: ['ZSTD=' + zstd.full_path()], - depends: [datagen], - workdir: meson.current_build_dir(), - timeout: 2800) # Timeout should work on HDD drive + + # add slow tests only if the meson version is new enough to support + # test setups with default-excluded suites + if meson.version().version_compare('>=0.57.0') + matrix = {'fast': [], 'slow': ['--test-large-data']} + else + matrix = {'fast': []} + endif + + foreach suite, opt: matrix + test('test-zstd-'+suite, + playTests_sh, + args: opt, + env: ['ZSTD_BIN=' + zstd.full_path(), 'DATAGEN_BIN=./datagen'], + depends: [datagen, zstd], + suite: suite, + workdir: meson.current_build_dir(), + timeout: 2800) # Timeout should work on HDD drive + endforeach endif test('test-fullbench-1', @@ -201,25 +189,16 @@ if use_zlib timeout: 480) endif -test('test-zbuff', - zbufftest, - args: [ZSTREAM_TESTTIME], - timeout: 120) test('test-zstream-1', zstreamtest, args: ['-v', ZSTREAM_TESTTIME] + FUZZER_FLAGS, timeout: 240) -test('test-zstream-2', - zstreamtest, - args: ['-mt', '-t1', ZSTREAM_TESTTIME] + FUZZER_FLAGS, - timeout: 120) test('test-zstream-3', zstreamtest, args: ['--newapi', '-t1', ZSTREAM_TESTTIME] + FUZZER_FLAGS, timeout: 120) test('test-longmatch', longmatch, timeout: 36) test('test-invalidDictionaries', invalidDictionaries) # should be fast -test('test-symbols', symbols) # should be fast if 0 < legacy_level and legacy_level <= 4 test('test-legacy', legacy) # should be fast endif @@ -228,3 +207,11 @@ test('test-decodecorpus', args: ['-t', DECODECORPUS_TESTTIME], timeout: 60) test('test-poolTests', poolTests) # should be fast + +if meson.version().version_compare('>=0.57.0') + add_test_setup('fast', + is_default: true, + exclude_suites: ['slow']) + 
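+  # (Illustration, assuming these setups are registered: a plain `meson test`
+  # runs only the default 'fast' suite, while `meson test --setup=slow`
+  # opts in to the large-data run that 'fast' excludes.)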
add_test_setup('slow', + exclude_suites: ['fast']) +endif diff --git a/build/meson/tests/valgrindTest.py b/build/meson/tests/valgrindTest.py index 218f7458bbf..05d84878b9d 100644 --- a/build/meson/tests/valgrindTest.py +++ b/build/meson/tests/valgrindTest.py @@ -21,7 +21,7 @@ def valgrindTest(valgrind, datagen, fuzzer, zstd, fullbench): if subprocess.call([*VALGRIND_ARGS, zstd], stdout=subprocess.DEVNULL) == 0: - raise subprocess.CalledProcessError('zstd without argument should have failed') + raise subprocess.SubprocessError('zstd without argument should have failed') with subprocess.Popen([datagen, '-g80'], stdout=subprocess.PIPE) as p1, \ subprocess.Popen([*VALGRIND_ARGS, zstd, '-', '-c'], @@ -30,7 +30,7 @@ def valgrindTest(valgrind, datagen, fuzzer, zstd, fullbench): p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits. p2.communicate() if p2.returncode != 0: - raise subprocess.CalledProcessError() + raise subprocess.SubprocessError() with subprocess.Popen([datagen, '-g16KB'], stdout=subprocess.PIPE) as p1, \ subprocess.Popen([*VALGRIND_ARGS, zstd, '-vf', '-', '-c'], @@ -39,7 +39,7 @@ def valgrindTest(valgrind, datagen, fuzzer, zstd, fullbench): p1.stdout.close() p2.communicate() if p2.returncode != 0: - raise subprocess.CalledProcessError() + raise subprocess.SubprocessError() with tempfile.NamedTemporaryFile() as tmp_fd: with subprocess.Popen([datagen, '-g2930KB'], stdout=subprocess.PIPE) as p1, \ @@ -48,7 +48,7 @@ def valgrindTest(valgrind, datagen, fuzzer, zstd, fullbench): p1.stdout.close() p2.communicate() if p2.returncode != 0: - raise subprocess.CalledProcessError() + raise subprocess.SubprocessError() subprocess.check_call([*VALGRIND_ARGS, zstd, '-vdf', tmp_fd.name, '-c'], stdout=subprocess.DEVNULL) @@ -60,7 +60,7 @@ def valgrindTest(valgrind, datagen, fuzzer, zstd, fullbench): p1.stdout.close() p2.communicate() if p2.returncode != 0: - raise subprocess.CalledProcessError() + raise subprocess.SubprocessError() subprocess.check_call([*VALGRIND_ARGS, fuzzer, '-T1mn', '-t1']) subprocess.check_call([*VALGRIND_ARGS, fullbench, '-i1']) diff --git a/build/single_file_libs/.gitignore b/build/single_file_libs/.gitignore new file mode 100644 index 00000000000..b769e108924 --- /dev/null +++ b/build/single_file_libs/.gitignore @@ -0,0 +1,10 @@ + +# build artifacts +zstddeclib.c +zstdenclib.c +zstd.c +zstd.h +zstd_errors.h + +# test artifacts +temp* diff --git a/build/single_file_libs/README.md b/build/single_file_libs/README.md new file mode 100644 index 00000000000..64c973a68d6 --- /dev/null +++ b/build/single_file_libs/README.md @@ -0,0 +1,33 @@ +# Single File Zstandard Libraries + +The script `combine.sh` creates an _amalgamated_ source file that can be used with or without `zstd.h`. This isn't a _header-only_ file but it does offer a similar level of simplicity when integrating into a project. + +All it now takes to support Zstd in your own projects is the addition of a single file, two if using the header, with no configuration or further build steps. + +Decompressor +------------ + +This is the most common use case. The decompression library is small, adding, for example, 26kB to an Emscripten compiled WebAssembly project. Native implementations add a little more, 40-70kB depending on the compiler and platform. + +Create `zstddeclib.c` from the Zstd source using: +``` +cd zstd/build/single_file_libs +python3 combine.py -r ../../lib -x legacy/zstd_legacy.h -o zstddeclib.c zstddeclib-in.c +``` +Then add the resulting file to your project (see the [example files](examples)). 
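+
+For a feel of what the generated file exposes, here is a rough sketch (not part
+of this patch) of decoding with it; `decompress_frame` is a hypothetical helper
+and assumes the input is a single complete frame whose content size was
+recorded at compression time:
+```
+#include <stddef.h>
+#include <stdlib.h>
+
+#include "zstddeclib.c" /* the amalgamated decoder generated above */
+
+/* Decompresses one complete zstd frame; returns NULL on any failure.
+ * (Hypothetical helper for illustration, not part of the library.) */
+static void* decompress_frame(const void* src, size_t srcSize, size_t* dstSize) {
+    unsigned long long const rawSize = ZSTD_getFrameContentSize(src, srcSize);
+    if (rawSize == ZSTD_CONTENTSIZE_ERROR || rawSize == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;
+    void* const dst = malloc((size_t) rawSize);
+    if (dst == NULL) return NULL;
+    size_t const written = ZSTD_decompress(dst, (size_t) rawSize, src, srcSize);
+    if (ZSTD_isError(written)) { free(dst); return NULL; }
+    *dstSize = written;
+    return dst;
+}
+```
+The whole regular `zstd.h` API is available this way, since the tool inlines
+the header into the generated file.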
+ +`create_single_file_decoder.sh` will run the above script, creating the file `zstddeclib.c` (`build_decoder_test.sh` will also create `zstddeclib.c`, then compile and test the result). + +Full Library +------------ + +The same tool can amalgamate the entire Zstd library for ease of adding both compression and decompression to a project. The [roundtrip example](examples/roundtrip.c) uses the original `zstd.h` with the remaining source files combined into `zstd.c` (currently just over 1.2MB) created from `zstd-in.c`. As with the standalone decoder the most useful compile flags have already been rolled-in and the resulting file can be added to a project as-is. + +Create `zstd.c` from the Zstd source using: +``` +cd zstd/build/single_file_libs +python3 combine.py -r ../../lib -x legacy/zstd_legacy.h -k zstd.h -o zstd.c zstd-in.c +``` +It's possible to create a compressor-only library but since the decompressor is so small in comparison this doesn't bring much of a gain (but for the curious, simply remove the files in the _decompress_ section at the end of `zstd-in.c`). + +`create_single_file_library.sh` will run the script to create `zstd.c` (`build_library_test.sh` will also create `zstd.c`, then compile and test the result). diff --git a/build/single_file_libs/build_decoder_test.sh b/build/single_file_libs/build_decoder_test.sh new file mode 100755 index 00000000000..c4ca55fa5dd --- /dev/null +++ b/build/single_file_libs/build_decoder_test.sh @@ -0,0 +1,91 @@ +#!/bin/sh + +# Temporary compiled binary +OUT_FILE="tempbin" + +# Optional temporary compiled WebAssembly +OUT_WASM="temp.wasm" + +# Source files to compile using Emscripten. +IN_FILES="examples/emscripten.c" + +# Emscripten build using emcc. +emscripten_emcc_build() { + # Compile the same example as above + CC_FLAGS="-Wall -Wextra -Wshadow -Werror -Os -g0 -flto" + emcc $CC_FLAGS -s WASM=1 -I. -o $OUT_WASM $IN_FILES + # Did compilation work? + if [ $? -ne 0 ]; then + echo "Compiling ${IN_FILES}: FAILED" + exit 1 + fi + echo "Compiling ${IN_FILES}: PASSED" + rm -f $OUT_WASM +} + +# Emscripten build using docker. +emscripten_docker_build() { + docker container run --rm \ + --volume $PWD:/code \ + --workdir /code \ + emscripten/emsdk:latest \ + emcc $CC_FLAGS -s WASM=1 -I. -o $OUT_WASM $IN_FILES + # Did compilation work? + if [ $? -ne 0 ]; then + echo "Compiling ${IN_FILES} (using docker): FAILED" + exit 1 + fi + echo "Compiling ${IN_FILES} (using docker): PASSED" + rm -f $OUT_WASM +} + +# Try Emscripten build using emcc or docker. +try_emscripten_build() { + which emcc > /dev/null + if [ $? -eq 0 ]; then + emscripten_emcc_build + return $? + fi + + which docker > /dev/null + if [ $? -eq 0 ]; then + emscripten_docker_build + return $? + fi + + echo "(Skipping Emscripten test)" +} + +# Amalgamate the sources +./create_single_file_decoder.sh +# Did combining work? +if [ $? -ne 0 ]; then + echo "Single file decoder creation script: FAILED" + exit 1 +fi +echo "Single file decoder creation script: PASSED" + +# Compile the generated output +cc -Wall -Wextra -Wshadow -Werror -Os -g0 -o $OUT_FILE examples/simple.c +# Did compilation work? +if [ $? -ne 0 ]; then + echo "Compiling simple.c: FAILED" + exit 1 +fi +echo "Compiling simple.c: PASSED" + +# Run then delete the compiled output +./$OUT_FILE +retVal=$? +rm -f $OUT_FILE +# Did the test work? +if [ $retVal -ne 0 ]; then + echo "Running simple.c: FAILED" + exit 1 +fi +echo "Running simple.c: PASSED" + +# Try Emscripten build if emcc or docker command is available. 
+try_emscripten_build + +exit 0 diff --git a/build/single_file_libs/build_library_test.sh b/build/single_file_libs/build_library_test.sh new file mode 100755 index 00000000000..b67cd99a0f2 --- /dev/null +++ b/build/single_file_libs/build_library_test.sh @@ -0,0 +1,98 @@ +#!/bin/sh + +# Where to find the sources (only used to copy zstd.h) +ZSTD_SRC_ROOT="../../lib" + +# Temporary compiled binary +OUT_FILE="tempbin" + +# Optional temporary compiled WebAssembly +OUT_WASM="temp.wasm" + +# Source files to compile using Emscripten. +IN_FILES="zstd.c examples/roundtrip.c" + +# Emscripten build using emcc. +emscripten_emcc_build() { + # Compile the same example as above + CC_FLAGS="-Wall -Wextra -Wshadow -Werror -Os -g0 -flto" + emcc $CC_FLAGS -s WASM=1 -I. -o $OUT_WASM $IN_FILES + # Did compilation work? + if [ $? -ne 0 ]; then + echo "Compiling ${IN_FILES}: FAILED" + exit 1 + fi + echo "Compiling ${IN_FILES}: PASSED" + rm -f $OUT_WASM +} + +# Emscripten build using docker. +emscripten_docker_build() { + docker container run --rm \ + --volume $PWD:/code \ + --workdir /code \ + emscripten/emsdk:latest \ + emcc $CC_FLAGS -s WASM=1 -I. -o $OUT_WASM $IN_FILES + # Did compilation work? + if [ $? -ne 0 ]; then + echo "Compiling ${IN_FILES} (using docker): FAILED" + exit 1 + fi + echo "Compiling ${IN_FILES} (using docker): PASSED" + rm -f $OUT_WASM +} + +# Try Emscripten build using emcc or docker. +try_emscripten_build() { + which emcc > /dev/null + if [ $? -eq 0 ]; then + emscripten_emcc_build + return $? + fi + + which docker > /dev/null + if [ $? -eq 0 ]; then + emscripten_docker_build + return $? + fi + + echo "(Skipping Emscripten test)" +} + +# Amalgamate the sources +./create_single_file_library.sh +# Did combining work? +if [ $? -ne 0 ]; then + echo "Single file library creation script: FAILED" + exit 1 +fi +echo "Single file library creation script: PASSED" + +# Copy the header to here (for the tests) +cp "$ZSTD_SRC_ROOT/zstd.h" examples/zstd.h +cp "$ZSTD_SRC_ROOT/zstd_errors.h" examples/zstd_errors.h + +# Compile the generated output +cc -Wall -Wextra -Werror -Wshadow -pthread -I. -Os -g0 -o $OUT_FILE zstd.c examples/roundtrip.c +# Did compilation work? +if [ $? -ne 0 ]; then + echo "Compiling roundtrip.c: FAILED" + exit 1 +fi +echo "Compiling roundtrip.c: PASSED" + +# Run then delete the compiled output +./$OUT_FILE +retVal=$? +rm -f $OUT_FILE +# Did the test work? +if [ $retVal -ne 0 ]; then + echo "Running roundtrip.c: FAILED" + exit 1 +fi +echo "Running roundtrip.c: PASSED" + +# Try Emscripten build if emcc or docker command is available. +try_emscripten_build + +exit 0 diff --git a/build/single_file_libs/combine.py b/build/single_file_libs/combine.py new file mode 100755 index 00000000000..771dd20bfb1 --- /dev/null +++ b/build/single_file_libs/combine.py @@ -0,0 +1,234 @@ +#!/usr/bin/env python3 + +# Tool to bundle multiple C/C++ source files, inlining any includes. +# +# Note: there are two types of exclusion options: the '-x' flag, which besides +# excluding a file also adds an #error directive in place of the #include, and +# the '-k' flag, which keeps the #include and doesn't inline the file. The +# intended use cases are: '-x' for files that would normally be #if'd out, so +# features that 100% won't be used in the amalgamated file, for which every +# occurrence adds the error, and '-k' for headers that we wish to manually +# include, such as a project's public API, for which occurrences after the first +# are removed. 
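+# As a concrete illustration (this is the exact command the sibling README
+# uses to build the full library):
+#
+#   python3 combine.py -r ../../lib -x legacy/zstd_legacy.h -k zstd.h -o zstd.c zstd-in.c
+#
+# here '-x' replaces any '#include "legacy/zstd_legacy.h"' with an #error,
+# while '-k' keeps '#include "zstd.h"' as a plain include instead of
+# inlining it.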
+# +# Todo: the error handling could be better, which currently throws and halts +# (which is functional just not very friendly). +# +# Author: Carl Woffenden, Numfum GmbH (this script is released under a CC0 license/Public Domain) + +import argparse, re, sys + +from pathlib import Path +from typing import Any, List, Optional, Pattern, Set, TextIO + +# Set of file roots when searching (equivalent to -I paths for the compiler). +roots: Set[Path] = set() + +# Set of (canonical) file Path objects to exclude from inlining (and not only +# exclude but to add a compiler error directive when they're encountered). +excludes: Set[Path] = set() + +# Set of (canonical) file Path objects to keep as include directives. +keeps: Set[Path] = set() + +# Whether to keep the #pragma once directives (unlikely, since this will result +# in a warning, but the option is there). +keep_pragma: bool = False + +# Destination file object (or stdout if no output file was supplied). +destn: TextIO = sys.stdout + +# Set of file Path objects previously inlined (and to ignore if reencountering). +found: Set[Path] = set() + +# Compiled regex Pattern to handle "#pragma once" in various formats: +# +# #pragma once +# #pragma once +# # pragma once +# #pragma once +# #pragma once // comment +# +# Ignoring commented versions, same as include_regex. +# +pragma_regex: Pattern = re.compile(r'^\s*#\s*pragma\s*once\s*') + +# Compiled regex Pattern to handle the following type of file includes: +# +# #include "file" +# #include "file" +# # include "file" +# #include "file" +# #include "file" // comment +# #include "file" // comment with quote " +# +# And all combinations of, as well as ignoring the following: +# +# #include +# //#include "file" +# /*#include "file"*/ +# +# We don't try to catch errors since the compiler will do this (and the code is +# expected to be valid before processing) and we don't care what follows the +# file (whether it's a valid comment or not, since anything after the quoted +# string is ignored) +# +include_regex: Pattern = re.compile(r'^\s*#\s*include\s*"(.+?)"') + +# Simple tests to prove include_regex's cases. +# +def test_match_include() -> bool: + if (include_regex.match('#include "file"') and + include_regex.match(' #include "file"') and + include_regex.match('# include "file"') and + include_regex.match('#include "file"') and + include_regex.match('#include "file" // comment')): + if (not include_regex.match('#include ') and + not include_regex.match('//#include "file"') and + not include_regex.match('/*#include "file"*/')): + found = include_regex.match('#include "file" // "') + if (found and found.group(1) == 'file'): + print('#include match valid') + return True + return False + +# Simple tests to prove pragma_regex's cases. +# +def test_match_pragma() -> bool: + if (pragma_regex.match('#pragma once') and + pragma_regex.match(' #pragma once') and + pragma_regex.match('# pragma once') and + pragma_regex.match('#pragma once') and + pragma_regex.match('#pragma once // comment')): + if (not pragma_regex.match('//#pragma once') and + not pragma_regex.match('/*#pragma once*/')): + print('#pragma once match valid') + return True + return False + +# Finds 'file'. First the list of 'root' paths are searched, followed by the +# currently processing file's 'parent' path, returning a valid Path in +# canonical form. If no match is found None is returned. 
+# +def resolve_include(file: str, parent: Optional[Path] = None) -> Optional[Path]: + for root in roots: + found = root.joinpath(file).resolve() + if (found.is_file()): + return found + if (parent): + found = parent.joinpath(file).resolve(); + else: + found = Path(file) + if (found.is_file()): + return found + return None + +# Helper to resolve lists of files. 'file_list' is passed in from the arguments +# and each entry resolved to its canonical path (like any include entry, either +# from the list of root paths or the owning file's 'parent', which in this case +# is case is the input file). The results are stored in 'resolved'. +# +def resolve_excluded_files(file_list: Optional[List[str]], resolved: Set[Path], parent: Optional[Path] = None) -> None: + if (file_list): + for filename in file_list: + found = resolve_include(filename, parent) + if (found): + resolved.add(found) + else: + error_line(f'Warning: excluded file not found: {filename}') + +# Writes 'line' to the open 'destn' (or stdout). +# +def write_line(line: str) -> None: + print(line, file=destn) + +# Logs 'line' to stderr. This is also used for general notifications that we +# don't want to go to stdout (so the source can be piped). +# +def error_line(line: Any) -> None: + print(line, file=sys.stderr) + +# Inline the contents of 'file' (with any of its includes also inlined, etc.). +# +# Note: text encoding errors are ignored and replaced with ? when reading the +# input files. This isn't ideal, but it's more than likely in the comments than +# code and a) the text editor has probably also failed to read the same content, +# and b) the compiler probably did too. +# +def add_file(file: Path, file_name: str = None) -> None: + if (file.is_file()): + if (not file_name): + file_name = file.name + error_line(f'Processing: {file_name}') + with file.open('r', errors='replace') as opened: + for line in opened: + line = line.rstrip('\n') + match_include = include_regex.match(line); + if (match_include): + # We have a quoted include directive so grab the file + inc_name = match_include.group(1) + resolved = resolve_include(inc_name, file.parent) + if (resolved): + if (resolved in excludes): + # The file was excluded so error if the compiler uses it + write_line(f'#error Using excluded file: {inc_name} (re-amalgamate source to fix)') + error_line(f'Excluding: {inc_name}') + else: + if (resolved not in found): + # The file was not previously encountered + found.add(resolved) + if (resolved in keeps): + # But the include was flagged to keep as included + write_line(f'/**** *NOT* inlining {inc_name} ****/') + write_line(line) + error_line(f'Not inlining: {inc_name}') + else: + # The file was neither excluded nor seen before so inline it + write_line(f'/**** start inlining {inc_name} ****/') + add_file(resolved, inc_name) + write_line(f'/**** ended inlining {inc_name} ****/') + else: + write_line(f'/**** skipping file: {inc_name} ****/') + else: + # The include file didn't resolve to a file + write_line(f'#error Unable to find: {inc_name}') + error_line(f'Error: Unable to find: {inc_name}') + else: + # Skip any 'pragma once' directives, otherwise write the source line + if (keep_pragma or not pragma_regex.match(line)): + write_line(line) + else: + error_line(f'Error: Invalid file: {file}') + +# Start here +parser = argparse.ArgumentParser(description='Amalgamate Tool', epilog=f'example: {sys.argv[0]} -r ../my/path -r ../other/path -o out.c in.c') +parser.add_argument('-r', '--root', action='append', type=Path, help='file root search path') 
+parser.add_argument('-x', '--exclude', action='append', help='file to completely exclude from inlining') +parser.add_argument('-k', '--keep', action='append', help='file to exclude from inlining but keep the include directive') +parser.add_argument('-p', '--pragma', action='store_true', default=False, help='keep any "#pragma once" directives (removed by default)') +parser.add_argument('-o', '--output', type=argparse.FileType('w'), help='output file (otherwise stdout)') +parser.add_argument('input', type=Path, help='input file') +args = parser.parse_args() + +# Fail early on an invalid input (and store it so we don't recurse) +args.input = args.input.resolve(strict=True) +found.add(args.input) + +# Resolve all of the root paths upfront (we'll halt here on invalid roots) +if (args.root): + for path in args.root: + roots.add(path.resolve(strict=True)) + +# The remaining params: so resolve the excluded files and #pragma once directive +resolve_excluded_files(args.exclude, excludes, args.input.parent) +resolve_excluded_files(args.keep, keeps, args.input.parent) +keep_pragma = args.pragma; + +# Then recursively process the input file +try: + if (args.output): + destn = args.output + add_file(args.input) +finally: + if (destn): + destn.close() diff --git a/build/single_file_libs/combine.sh b/build/single_file_libs/combine.sh new file mode 100755 index 00000000000..222c2c32a51 --- /dev/null +++ b/build/single_file_libs/combine.sh @@ -0,0 +1,249 @@ +#!/bin/sh -e + +# Tool to bundle multiple C/C++ source files, inlining any includes. +# +# TODO: ROOTS, FOUND, etc., as arrays (since they fail on paths with spaces) +# +# Author: Carl Woffenden, Numfum GmbH (this script is released under a CC0 license/Public Domain) + +# Common file roots +ROOTS="." + +# -x option excluded includes +XINCS="" + +# -k option includes to keep as include directives +KINCS="" + +# Files previously visited +FOUND="" + +# Optional destination file (empty string to write to stdout) +DESTN="" + +# Whether the "#pragma once" directives should be written to the output +PONCE=0 + +# Prints the script usage then exits +usage() { + echo "Usage: $0 [-r ] [-x
<header>] [-k <header>
] [-o ] infile" + echo " -r file root search path" + echo " -x file to completely exclude from inlining" + echo " -k file to exclude from inlining but keep the include directive" + echo " -p keep any '#pragma once' directives (removed by default)" + echo " -o output file (otherwise stdout)" + echo "Example: $0 -r ../my/path - r ../other/path -o out.c in.c" + exit 1 +} + +# Tests that the grep implementation works as expected (older OSX grep fails) +test_deps() { + if ! echo '#include "foo"' | grep -Eq '^\s*#\s*include\s*".+"'; then + echo "Aborting: the grep implementation fails to parse include lines" + exit 1 + fi + if ! echo '"foo.h"' | sed -E 's/"([^"]+)"/\1/' | grep -Eq '^foo\.h$'; then + echo "Aborting: sed is unavailable or non-functional" + exit 1 + fi +} + +# Test if glob pattern $1 matches subject $2 (see fnmatch(3)) +fnmatch() { + case "$2" in + $1) + return 0 + ;; + esac + return 1 +} + +# Test if line $1 is local include directive +is_include_line() { + fnmatch "*#*include*" "$1" || return 1 + printf "%s\n" "$1" | grep -Eq '^\s*#\s*include\s*".+"' +} + +# Test if line $1 is pragma once directive +is_pragma_once_line() { + fnmatch "*#*pragma*once*" "$1" || return 1 + printf "%s\n" "$1" | grep -Eq '^\s*#\s*pragma\s*once\s*' +} + +# Tests if list $1 has item $2 (returning zero on a match) +# (originally used grep -Eq "(^|\s*)$2(\$|\s*)) +readonly list_FS="$IFS" +list_has_item() { + # Re: escaping glob pattern special characters in item string: + # + # bash (tested 3.2.57, 5.1.4), dash (tested 0.5.10.2), NetBSD /bin/sh + # (tested 8.2), and Solaris /bin/sh (tested 11.4) require escaping + # backslashes in a bracket expression despite POSIX specifying that + # backslash loses significance in a bracket expression. + # + # Conversely, neither FreeBSD /bin/sh (tested 12.2) nor OpenBSD /bin/sh + # (tested 7.1) obey backslash-escaping in case statement patterns even + # outside bracket expressions, so escape special characters using bracket + # expressions. + # + # Solaris /bin/sh (tested 11.4) requires vertical bar (|) to be escaped. + # + # All accommodations should behave as expected under strict POSIX semantics. + if fnmatch "*[\\*?[|]*" "$2"; then + set -- "$1" "$(printf '%s\n' "$2" | sed -e 's/[*?[|]/[&]/g; s/[\]/[\\&]/g')" + fi + for item_P in "*[$list_FS]$2[$list_FS]*" "*[$list_FS]$2" "$2[$list_FS]*" "$2"; do + fnmatch "${item_P}" "$1" && return 0 + done + return 1 +} + +# Adds a new line with the supplied arguments to $DESTN (or stdout) +write_line() { + if [ -n "$DESTN" ]; then + printf '%s\n' "$@" >> "$DESTN" + else + printf '%s\n' "$@" + fi +} + +log_line() { + echo $@ >&2 +} + +# Find this file! +resolve_include() { + local srcdir=$1 + local inc=$2 + for root in $srcdir $ROOTS; do + if [ -f "$root/$inc" ]; then + # Try to reduce the file path into a canonical form (so that multiple) + # includes of the same file are successfully deduplicated, even if they + # are expressed differently. + local relpath="$(realpath --relative-to . "$root/$inc" 2>/dev/null)" + if [ "$relpath" != "" ]; then # not all realpaths support --relative-to + echo "$relpath" + return 0 + fi + local relpath="$(realpath "$root/$inc" 2>/dev/null)" + if [ "$relpath" != "" ]; then # not all distros have realpath... + echo "$relpath" + return 0 + fi + # Fallback on Python to reduce the path if the above fails. + local relpath=$(python -c "import os,sys; print os.path.relpath(sys.argv[1])" "$root/$inc" 2>/dev/null) + if [ "$relpath" != "" ]; then # not all distros have realpath... 
+ echo "$relpath" + return 0 + fi + # Worst case, fall back to just the root + relative include path. The + # problem with this is that it is possible to emit multiple different + # resolved paths to the same file, depending on exactly how its included. + # Since the main loop below keeps a list of the resolved paths it's + # already included, in order to avoid repeated includes, this failure to + # produce a canonical/reduced path can lead to multiple inclusions of the + # same file. But it seems like the resulting single file library still + # works (hurray include guards!), so I guess it's ok. + echo "$root/$inc" + return 0 + fi + done + return 1 +} + +# Adds the contents of $1 with any of its includes inlined +add_file() { + local file=$1 + if [ -n "$file" ]; then + log_line "Processing: $file" + # Get directory of the current so we can resolve relative includes + local srcdir="$(dirname "$file")" + # Read the file + local line= + while IFS= read -r line; do + if is_include_line "$line"; then + # We have an include directive so strip the (first) file + local inc=$(echo "$line" | grep -Eo '".*"' | sed -E 's/"([^"]+)"/\1/' | head -1) + local res_inc="$(resolve_include "$srcdir" "$inc")" + if list_has_item "$XINCS" "$inc"; then + # The file was excluded so error if the source attempts to use it + write_line "#error Using excluded file: $inc (re-amalgamate source to fix)" + log_line "Excluding: $inc" + else + if ! list_has_item "$FOUND" "$res_inc"; then + # The file was not previously encountered + FOUND="$FOUND $res_inc" + if list_has_item "$KINCS" "$inc"; then + # But the include was flagged to keep as included + write_line "/**** *NOT* inlining $inc ****/" + write_line "$line" + log_line "Not Inlining: $inc" + else + # The file was neither excluded nor seen before so inline it + write_line "/**** start inlining $inc ****/" + add_file "$res_inc" + write_line "/**** ended inlining $inc ****/" + fi + else + write_line "/**** skipping file: $inc ****/" + fi + fi + else + # Skip any 'pragma once' directives, otherwise write the source line + local write=$PONCE + if [ $write -eq 0 ]; then + if ! is_pragma_once_line "$line"; then + write=1 + fi + fi + if [ $write -ne 0 ]; then + write_line "$line" + fi + fi + done < "$file" + else + write_line "#error Unable to find \"$1\"" + log_line "Error: Unable to find: \"$1\"" + fi +} + +while getopts ":r:x:k:po:" opts; do + case $opts in + r) + ROOTS="$ROOTS $OPTARG" + ;; + x) + XINCS="$XINCS $OPTARG" + ;; + k) + KINCS="$KINCS $OPTARG" + ;; + p) + PONCE=1 + ;; + o) + DESTN="$OPTARG" + ;; + *) + usage + ;; + esac +done +shift $((OPTIND-1)) + +if [ -n "$1" ]; then + if [ -f "$1" ]; then + if [ -n "$DESTN" ]; then + printf "" > "$DESTN" + fi + test_deps + log_line "Processing using the slower shell script; this might take a while" + add_file "$1" + else + echo "Input file not found: \"$1\"" + exit 1 + fi +else + usage +fi +exit 0 diff --git a/build/single_file_libs/create_single_file_decoder.sh b/build/single_file_libs/create_single_file_decoder.sh new file mode 100755 index 00000000000..3c0c577df5f --- /dev/null +++ b/build/single_file_libs/create_single_file_decoder.sh @@ -0,0 +1,19 @@ +#!/bin/sh + +# Where to find the sources +ZSTD_SRC_ROOT="../../lib" + +# Amalgamate the sources +echo "Amalgamating files..." 
+# Using the faster Python script if we have 3.8 or higher
+if python3 -c 'import sys; assert sys.version_info >= (3,8)' 2>/dev/null; then
+  ./combine.py -r "$ZSTD_SRC_ROOT" -x legacy/zstd_legacy.h -o zstddeclib.c zstddeclib-in.c
+else
+  ./combine.sh -r "$ZSTD_SRC_ROOT" -x legacy/zstd_legacy.h -o zstddeclib.c zstddeclib-in.c
+fi
+# Did combining work?
+if [ $? -ne 0 ]; then
+  echo "Combine script: FAILED"
+  exit 1
+fi
+echo "Combine script: PASSED"
diff --git a/build/single_file_libs/create_single_file_library.sh b/build/single_file_libs/create_single_file_library.sh
new file mode 100755
index 00000000000..a6f71f0f085
--- /dev/null
+++ b/build/single_file_libs/create_single_file_library.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+# Where to find the sources
+ZSTD_SRC_ROOT="../../lib"
+
+# Amalgamate the sources
+echo "Amalgamating files..."
+# Using the faster Python script if we have 3.8 or higher
+if python3 -c 'import sys; assert sys.version_info >= (3,8)' 2>/dev/null; then
+  ./combine.py -r "$ZSTD_SRC_ROOT" -x legacy/zstd_legacy.h -o zstd.c zstd-in.c
+else
+  ./combine.sh -r "$ZSTD_SRC_ROOT" -x legacy/zstd_legacy.h -o zstd.c zstd-in.c
+fi
+# Did combining work?
+if [ $? -ne 0 ]; then
+  echo "Combine script: FAILED"
+  exit 1
+fi
+echo "Combine script: PASSED"
diff --git a/build/single_file_libs/examples/README.md b/build/single_file_libs/examples/README.md
new file mode 100644
index 00000000000..e93bee35f1e
--- /dev/null
+++ b/build/single_file_libs/examples/README.md
@@ -0,0 +1,11 @@
+# Single File Zstandard Examples
+
+The examples `#include` the generated `zstddeclib.c` directly but work equally well when including `zstd.h` and compiling the amalgamated source separately.
+
+`simple.c` is the most basic example of decompressing content and verifying the result.
+
+`emscripten.c` is a bare-bones [Emscripten](https://github.com/emscripten-core/emscripten) compiled WebGL demo using Zstd to further compress a DXT1 texture (see the [original PNG image](testcard.png)). The 256x256 texture would normally be 32kB, but even when bundled with the Zstd decompressor the resulting WebAssembly weighs in at 41kB (`shell.html` is a support file to run the Wasm).
+
+`roundtrip.c` is an example to use with the optional [amalgamated library](../create_single_file_library.sh) showing compression then decompression.
+
+The example files in this directory are released under a [Creative Commons Zero license](https://creativecommons.org/publicdomain/zero/1.0/) (or Public Domain, whichever is applicable in your jurisdiction).
diff --git a/build/single_file_libs/examples/emscripten.c b/build/single_file_libs/examples/emscripten.c
new file mode 100644
index 00000000000..10dae86899a
--- /dev/null
+++ b/build/single_file_libs/examples/emscripten.c
@@ -0,0 +1,340 @@
+/**
+ * \file emscripten.c
+ * Emscripten example of using the single-file \c zstddeclib. Draws a rotating
+ * textured quad with data from the in-line Zstd compressed DXT1 texture (DXT1
+ * being hardware compression, further compressed with Zstd).
+ * \n
+ * Compile using:
+ * \code
+ * export CC_FLAGS="-Wall -Wextra -Werror -Os -g0 -flto --llvm-lto 3 -lGL -DNDEBUG=1"
+ * export EM_FLAGS="-s WASM=1 -s ENVIRONMENT=web --shell-file shell.html --closure 1"
+ * emcc $CC_FLAGS $EM_FLAGS -o out.html emscripten.c
+ * \endcode
+ *
+ * \author Carl Woffenden, Numfum GmbH (released under a CC0 license)
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <emscripten/emscripten.h>
+#include <emscripten/html5.h>
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+#include "../zstddeclib.c"
+
+//************************* Test Data (DXT texture) **************************/
+
+/**
+ * Zstd compressed DXT1 256x256 texture source.
+ * \n
+ * See \c testcard.png for the original.
+ */
+static uint8_t const srcZstd[] = {
+#include "testcard-zstd.inl"
+};
+
+/**
+ * Uncompressed size of \c #srcZstd.
+ */
+#define DXT1_256x256 32768
+
+/**
+ * Destination for decoding \c #srcZstd.
+ */
+static uint8_t dstDxt1[DXT1_256x256] = {};
+
+#ifndef ZSTD_VERSION_MAJOR
+/**
+ * For the case where the decompression library hasn't been included we add a
+ * dummy function to fake the process and stop the buffers being optimised out.
+ */
+size_t ZSTD_decompress(void* dst, size_t dstLen, const void* src, size_t srcLen) {
+    return (memcmp(dst, src, (srcLen < dstLen) ? srcLen : dstLen)) ? dstLen : 0;
+}
+#endif
+
+//*************************** Program and Shaders ***************************/
+
+/**
+ * Program object ID.
+ */
+static GLuint progId = 0;
+
+/**
+ * Vertex shader ID.
+ */
+static GLuint vertId = 0;
+
+/**
+ * Fragment shader ID.
+ */
+static GLuint fragId = 0;
+
+//********************************* Uniforms *********************************/
+
+/**
+ * Quad rotation angle ID.
+ */
+static GLint uRotId = -1;
+
+/**
+ * Draw colour ID.
+ */
+static GLint uTx0Id = -1;
+
+//******************************* Shader Source ******************************/
+
+/**
+ * Vertex shader to draw texture mapped polys with an applied rotation.
+ */
+static GLchar const vertShader2D[] =
+#if GL_ES_VERSION_2_0
+    "#version 100\n"
+    "precision mediump float;\n"
+#else
+    "#version 120\n"
+#endif
+    "uniform float uRot;"   // rotation
+    "attribute vec2 aPos;"  // vertex position coords
+    "attribute vec2 aUV0;"  // vertex texture UV0
+    "varying vec2 vUV0;"    // (passed to fragment shader)
+    "void main() {"
+    "   float cosA = cos(radians(uRot));"
+    "   float sinA = sin(radians(uRot));"
+    "   mat3 rot = mat3(cosA, -sinA, 0.0,"
+    "                   sinA,  cosA, 0.0,"
+    "                    0.0,   0.0, 1.0);"
+    "   gl_Position = vec4(rot * vec3(aPos, 1.0), 1.0);"
+    "   vUV0 = aUV0;"
+    "}";
+
+/**
+ * Fragment shader for the above polys.
+ */
+static GLchar const fragShader2D[] =
+#if GL_ES_VERSION_2_0
+    "#version 100\n"
+    "precision mediump float;\n"
+#else
+    "#version 120\n"
+#endif
+    "uniform sampler2D uTx0;"
+    "varying vec2 vUV0;"    // (passed from fragment shader)
+    "void main() {"
+    "   gl_FragColor = texture2D(uTx0, vUV0);"
+    "}";
+
+/**
+ * Helper to compile a shader.
+ * + * \param type shader type + * \param text shader source + * \return the shader ID (or zero if compilation failed) + */ +static GLuint compileShader(GLenum const type, const GLchar* text) { + GLuint shader = glCreateShader(type); + if (shader) { + glShaderSource (shader, 1, &text, NULL); + glCompileShader(shader); + GLint compiled; + glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled); + if (compiled) { + return shader; + } else { + GLint logLen; + glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &logLen); + if (logLen > 1) { + GLchar* logStr = malloc(logLen); + glGetShaderInfoLog(shader, logLen, NULL, logStr); + #ifndef NDEBUG + printf("Shader compilation error: %s\n", logStr); + #endif + free(logStr); + } + glDeleteShader(shader); + } + } + return 0; +} + +//********************************** Helpers *********************************/ + +/** + * Vertex position index. + */ +#define GL_VERT_POSXY_ID 0 + +/** + * Vertex UV0 index. + */ +#define GL_VERT_TXUV0_ID 1 + + /** + * \c GL vec2 storage type. + */ +struct vec2 { + float x; + float y; +}; + +/** + * Combined 2D vertex and 2D texture coordinates. + */ +struct posTex2d { + struct vec2 pos; + struct vec2 uv0; +}; + +//****************************************************************************/ + +/** + * Current quad rotation angle (in degrees, updated per frame). + */ +static float rotDeg = 0.0f; + +/** + * Emscripten (single) GL context. + */ +static EMSCRIPTEN_WEBGL_CONTEXT_HANDLE glCtx = 0; + +/** + * Emscripten resize handler. + */ +static EM_BOOL resize(int type, const EmscriptenUiEvent* e, void* data) { + double surfaceW; + double surfaceH; + if (emscripten_get_element_css_size ("#canvas", &surfaceW, &surfaceH) == EMSCRIPTEN_RESULT_SUCCESS) { + emscripten_set_canvas_element_size("#canvas", surfaceW, surfaceH); + if (glCtx) { + glViewport(0, 0, (int) surfaceW, (int) surfaceH); + } + } + (void) type; + (void) data; + (void) e; + return EM_FALSE; +} + +/** + * Boilerplate to create a WebGL context. + */ +static EM_BOOL initContext() { + // Default attributes + EmscriptenWebGLContextAttributes attr; + emscripten_webgl_init_context_attributes(&attr); + if ((glCtx = emscripten_webgl_create_context("#canvas", &attr))) { + // Bind the context and fire a resize to get the initial size + emscripten_webgl_make_context_current(glCtx); + emscripten_set_resize_callback(EMSCRIPTEN_EVENT_TARGET_DOCUMENT, NULL, EM_FALSE, resize); + resize(0, NULL, NULL); + return EM_TRUE; + } + return EM_FALSE; +} + +/** + * Called once per frame (clears the screen and draws the rotating quad). + */ +static void tick() { + glClearColor(1.0f, 0.0f, 1.0f, 1.0f); + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + if (uRotId >= 0) { + glUniform1f(uRotId, rotDeg); + rotDeg += 0.1f; + if (rotDeg >= 360.0f) { + rotDeg -= 360.0f; + } + } + + glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, 0); + glFlush(); +} + +/** + * Creates the GL context, shaders and quad data, decompresses the Zstd data + * and 'uploads' the resulting texture. + * + * As a (naive) comparison, removing Zstd and building with "-Os -g0 s WASM=1 + * -lGL emscripten.c" results in a 15kB WebAssembly file; re-adding Zstd + * increases the Wasm by 26kB. 
+ */
+int main() {
+    if (initContext()) {
+        // Compile shaders and set the initial GL state
+        if ((progId = glCreateProgram())) {
+            vertId = compileShader(GL_VERTEX_SHADER,   vertShader2D);
+            fragId = compileShader(GL_FRAGMENT_SHADER, fragShader2D);
+
+            glBindAttribLocation(progId, GL_VERT_POSXY_ID, "aPos");
+            glBindAttribLocation(progId, GL_VERT_TXUV0_ID, "aUV0");
+
+            glAttachShader(progId, vertId);
+            glAttachShader(progId, fragId);
+            glLinkProgram (progId);
+            glUseProgram  (progId);
+            uRotId = glGetUniformLocation(progId, "uRot");
+            uTx0Id = glGetUniformLocation(progId, "uTx0");
+            if (uTx0Id >= 0) {
+                glUniform1i(uTx0Id, 0);
+            }
+
+            glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
+            glEnable(GL_BLEND);
+            glDisable(GL_DITHER);
+
+            glCullFace(GL_BACK);
+            glEnable(GL_CULL_FACE);
+        }
+
+        GLuint vertsBuf = 0;
+        GLuint indexBuf = 0;
+        GLuint txName   = 0;
+        // Create the textured quad (vert positions then UVs)
+        struct posTex2d verts2d[] = {
+            {{-0.85f, -0.85f}, {0.0f, 0.0f}}, // BL
+            {{ 0.85f, -0.85f}, {1.0f, 0.0f}}, // BR
+            {{-0.85f,  0.85f}, {0.0f, 1.0f}}, // TL
+            {{ 0.85f,  0.85f}, {1.0f, 1.0f}}, // TR
+        };
+        uint16_t index2d[] = {
+            0, 1, 2,
+            2, 1, 3,
+        };
+        glGenBuffers(1, &vertsBuf);
+        glBindBuffer(GL_ARRAY_BUFFER, vertsBuf);
+        glBufferData(GL_ARRAY_BUFFER,
+            sizeof(verts2d), verts2d, GL_STATIC_DRAW);
+        glVertexAttribPointer(GL_VERT_POSXY_ID, 2,
+            GL_FLOAT, GL_FALSE, sizeof(struct posTex2d), 0);
+        glVertexAttribPointer(GL_VERT_TXUV0_ID, 2,
+            GL_FLOAT, GL_FALSE, sizeof(struct posTex2d),
+                (void*) offsetof(struct posTex2d, uv0));
+        glGenBuffers(1, &indexBuf);
+        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuf);
+        glBufferData(GL_ELEMENT_ARRAY_BUFFER,
+            sizeof(index2d), index2d, GL_STATIC_DRAW);
+        glEnableVertexAttribArray(GL_VERT_POSXY_ID);
+        glEnableVertexAttribArray(GL_VERT_TXUV0_ID);
+
+        // Decode the Zstd data and create the texture
+        if (ZSTD_decompress(dstDxt1, DXT1_256x256, srcZstd, sizeof srcZstd) == DXT1_256x256) {
+            glGenTextures(1, &txName);
+            glBindTexture(GL_TEXTURE_2D, txName);
+            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+            glCompressedTexImage2D(GL_TEXTURE_2D, 0,
+                GL_COMPRESSED_RGB_S3TC_DXT1_EXT,
+                    256, 256, 0, DXT1_256x256, dstDxt1);
+        } else {
+            printf("Failed to decode Zstd data\n");
+        }
+        emscripten_set_main_loop(tick, 0, EM_FALSE);
+        emscripten_exit_with_live_runtime();
+    }
+    return EXIT_FAILURE;
+}
diff --git a/build/single_file_libs/examples/roundtrip.c b/build/single_file_libs/examples/roundtrip.c
new file mode 100644
index 00000000000..b228de88e63
--- /dev/null
+++ b/build/single_file_libs/examples/roundtrip.c
@@ -0,0 +1,83 @@
+/*
+ * \file roundtrip.c
+ * In this example we include \c zstd.h and compile separately the amalgamated
+ * \c zstd.c:
+ * \code
+ * cc -Wall -Wextra -Werror -I. -Os -g0 zstd.c examples/roundtrip.c
+ * \endcode
+ *
+ * \author Carl Woffenden, Numfum GmbH (released under a CC0 license)
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "zstd.h"
+
+//************************** Test Data (DXT texture) **************************/
+
+/**
+ * Raw test data (borrowed from the Emscripten example).
+ * \n
+ * See \c testcard.png for the original.
+ */
+static uint8_t const rawData[] = {
+#include "testcard-dxt1.inl"
+};
+
+#ifndef ZSTD_VERSION_MAJOR
+/*
+ * For the case where the decompression library hasn't been included we add
+ * dummy functions to fake the process and stop the buffers being optimised out.
+ */
+size_t ZSTD_compressBound(size_t maxSrc) {
+	return maxSrc + 32;
+}
+int ZSTD_maxCLevel(void) {
+	return 20;
+}
+size_t ZSTD_compress(void* dst, size_t dstLen, const void* src, size_t srcLen, int level) {
+	return (memcmp(dst, src, (srcLen < dstLen) ? srcLen : dstLen)) ? level : dstLen;
+}
+unsigned ZSTD_isError(size_t code) {
+	return ((int) code) < 0;
+}
+size_t ZSTD_decompress(void* dst, size_t dstLen, const void* src, size_t srcLen) {
+	return (memcmp(dst, src, (srcLen < dstLen) ? srcLen : dstLen)) ? 0 : dstLen;
+}
+#endif
+
+//*****************************************************************************/
+
+/**
+ * Simple single-file test to compress \c rawData, decompress the result, then
+ * compare the decompressed version with the original.
+ */
+int main() {
+	size_t bounds = ZSTD_compressBound(sizeof rawData);
+	void* compBuf = malloc(bounds);
+	void* testBuf = malloc(sizeof rawData);
+	int compare = -1;
+	if (compBuf && testBuf) {
+		size_t compSize = ZSTD_compress(compBuf, bounds, rawData, sizeof rawData, ZSTD_maxCLevel());
+		if (!ZSTD_isError(compSize)) {
+			printf("Compression: PASSED (size: %lu, uncompressed: %lu)\n", (unsigned long) compSize, (unsigned long) (sizeof rawData));
+			size_t decSize = ZSTD_decompress(testBuf, sizeof rawData, compBuf, compSize);
+			if (!ZSTD_isError(decSize)) {
+				printf("Decompression: PASSED\n");
+				compare = memcmp(rawData, testBuf, decSize);
+				printf("Byte comparison: %s\n", (compare == 0) ? "PASSED" : "FAILED");
+			} else {
+				printf("Decompression: FAILED\n");
+			}
+		} else {
+			printf("Compression: FAILED\n");
+		}
+		free(compBuf);
+		free(testBuf);
+	}
+	return (compare == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
+}
diff --git a/build/single_file_libs/examples/shell.html b/build/single_file_libs/examples/shell.html
new file mode 100644
index 00000000000..5beb0e6d557
--- /dev/null
+++ b/build/single_file_libs/examples/shell.html
@@ -0,0 +1,31 @@
+<!doctype html>
+<html lang="en-us">
+	<head>
+		<meta charset="utf-8">
+		<title>Emscripten Shell</title>
+	</head>
+	<body>
+		<canvas id="canvas" oncontextmenu="event.preventDefault()"></canvas>
+		{{{ SCRIPT }}}
+	</body>
+</html>
diff --git a/build/single_file_libs/examples/simple.c b/build/single_file_libs/examples/simple.c
new file mode 100644
index 00000000000..ee63416a6e5
--- /dev/null
+++ b/build/single_file_libs/examples/simple.c
@@ -0,0 +1,75 @@
+/**
+ * \file simple.c
+ * Simple standalone example of using the single-file \c zstddeclib.
+ *
+ * \note In this simple example we include the amalgamated source and compile
+ * just this single file, but we could equally (and more conventionally)
+ * include \c zstd.h and compile both this file and \c zstddeclib.c (the
+ * resulting binaries differ slightly in size but perform the same).
+ *
+ * \author Carl Woffenden, Numfum GmbH (released under a CC0 license)
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "../zstddeclib.c"
+
+//************************* Test Data (DXT texture) **************************/
+
+/**
+ * Raw 256x256 DXT1 data (used to compare the result).
+ * \n
+ * See \c testcard.png for the original.
+ */
+static uint8_t const rawDxt1[] = {
+#include "testcard-dxt1.inl"
+};
+
+/**
+ * Zstd compressed version of \c #rawDxt1.
+ * \n
+ * See \c testcard.png for the original.
+ */
+static uint8_t const srcZstd[] = {
+#include "testcard-zstd.inl"
+};
+
+/**
+ * Destination for decoding \c #srcZstd.
+ */
+static uint8_t dstDxt1[sizeof rawDxt1] = {};
+
+#ifndef ZSTD_VERSION_MAJOR
+/**
+ * For the case where the decompression library hasn't been included we add a
+ * dummy function to fake the process and stop the buffers being optimised out.
+ */
+size_t ZSTD_decompress(void* dst, size_t dstLen, const void* src, size_t srcLen) {
+	return (memcmp(dst, src, (srcLen < dstLen) ? srcLen : dstLen)) ? 0 : dstLen;
+}
+#endif
+
+//****************************************************************************/
+
+/**
+ * Simple single-file test to decompress \c #srcZstd into \c #dstDxt1 then
+ * compare the resulting bytes with \c #rawDxt1.
+ * \n
+ * As a (naive) comparison, removing Zstd and building with "-Os -g0 simple.c"
+ * results in a 44kB binary (macOS 10.14, Clang 10); re-adding Zstd increases
+ * the binary by 56kB (after calling \c strip).
+ */
+int main() {
+	size_t size = ZSTD_decompress(dstDxt1, sizeof dstDxt1, srcZstd, sizeof srcZstd);
+	int compare = memcmp(rawDxt1, dstDxt1, sizeof dstDxt1);
+	printf("Decompressed size: %s\n", (size == sizeof dstDxt1) ? "PASSED" : "FAILED");
+	printf("Byte comparison: %s\n", (compare == 0) ? "PASSED" : "FAILED");
+	if (size == sizeof dstDxt1 && compare == 0) {
+		return EXIT_SUCCESS;
+	}
+	return EXIT_FAILURE;
+}
diff --git a/build/single_file_libs/examples/testcard-dxt1.inl b/build/single_file_libs/examples/testcard-dxt1.inl
new file mode 100644
index 00000000000..e099e66fcc1
--- /dev/null
+++ b/build/single_file_libs/examples/testcard-dxt1.inl
@@ -0,0 +1,2731 @@
+0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff,
+0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff,
+0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55,
+0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55,
+0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff,
+0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
+0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55,
+0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff,
+0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff,
+0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55,
+0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55,
+0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff,
+0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
+0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 
0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0x30, 0x84, 0x00, 0x55, 0x55, 0xb5, +0xde, 0xf7, 0x30, 0x84, 0x00, 0x15, 0x35, 0x20, 0xdf, 0xff, 0x30, 0x84, +0x00, 0xd4, 0x80, 0x54, 0xde, 0xf7, 0x30, 0x84, 0x00, 0x2b, 0x7e, 0x55, +0xde, 0xf7, 0x10, 0x84, 0x00, 0xa0, 0x55, 0x55, 0xdf, 0xff, 0xe7, 0x39, +0x00, 0x2a, 0x3f, 0x3f, 0x82, 0x10, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0xff, 0xff, 0x28, 0x42, 0x00, 0xff, 0xff, 0xff, 0x41, 0x08, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0x8a, 0x52, 0x00, 0x3f, 0x3f, 0x3f, +0xff, 0xff, 0x08, 0x42, 0x00, 0xa8, 0xfc, 0xfc, 0xde, 0xf7, 0x30, 0x84, +0x00, 0x02, 0x55, 0x55, 0xff, 0xff, 0x10, 0x84, 0x00, 0xfa, 0xaf, 0x55, +0xdf, 0xff, 0x30, 0x84, 0x00, 0x15, 0x20, 0x35, 0xde, 0xf7, 0x30, 0x84, +0x00, 0x54, 0x54, 0x80, 0xff, 0xff, 0x30, 0x84, 0x00, 0x55, 0x55, 0x57, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x10, 0x84, 0x15, 0x15, 0x15, 0x2d, 0xff, 0xff, 0x30, 0x84, +0x54, 0xd4, 0x88, 0x5c, 0xff, 0xff, 0x10, 0x84, 0xb5, 0xe2, 0x57, 0x55, +0x7d, 0xef, 0x30, 0x84, 0xe0, 0x55, 0x55, 0x55, 0x41, 0x08, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0xff, 0xff, 0x30, 0x84, 0xd4, 0x54, 0x54, 0x54, +0x9d, 0xef, 0x30, 0x84, 0x82, 0xd5, 0x55, 0x55, 0xff, 0xff, 0x10, 0x84, +0x57, 0xe2, 0xb5, 0x55, 0xff, 0xff, 0x30, 0x84, 0x15, 0x15, 0x38, 0x2d, +0xff, 0xff, 0x10, 0x84, 0x54, 0x54, 0x54, 0x5c, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 
+0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x41, 0x08, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0x9e, 0xf7, 0x10, 0x84, 0x55, 0x55, 0x35, 0xe2, +0x5c, 0xe7, 0x10, 0x84, 0xb5, 0x83, 0x5c, 0x55, 0xff, 0xff, 0x30, 0x84, +0x38, 0x15, 0x15, 0x15, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0xde, 0xf7, 0x30, 0x84, +0x88, 0x94, 0x54, 0x54, 0x5c, 0xe7, 0x10, 0x84, 0x57, 0x70, 0x0d, 0xd5, +0xbe, 0xf7, 0x30, 0x84, 0x55, 0x55, 0x57, 0x62, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0xff, 0xff, 0x08, 0x42, +0x3f, 0x3f, 0x2f, 0x00, 0x51, 0x8c, 0xdf, 0xff, 
0x61, 0x25, 0x01, 0x55, +0xff, 0xff, 0x49, 0x4a, 0xfe, 0xff, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0xff, 0xff, 0x69, 0x4a, +0x2f, 0xff, 0xff, 0x00, 0x51, 0x8c, 0xff, 0xff, 0x42, 0x66, 0x60, 0x55, +0xff, 0xff, 0x28, 0x42, 0xfc, 0xfc, 0xfc, 0x00, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0x99, 0xff, 0x00, 0x5a, +0xff, 0xff, 0xff, 0xff, 0x99, 0xff, 0x00, 0x5a, 0xff, 0xff, 0xff, 0xff, +0x99, 0xff, 0x00, 0x5a, 0xff, 0xff, 0xff, 0xff, 0x99, 0xff, 0x00, 0x5a, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0xde, 0xf7, 0x10, 0x84, 0x00, 0x55, 0xd5, 0x25, 0xbe, 0xf7, 0x30, 0x84, +0x00, 0xc9, 0x58, 0x57, 0xff, 0xff, 0x08, 0x42, 0x00, 0x3f, 0x3f, 0x3f, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 
0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, +0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, +0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x08, 0x42, +0x00, 0xbc, 0xfc, 0xfc, 0xbe, 0xf7, 0x10, 0x84, 0x00, 0x58, 0xc9, 0x35, +0xff, 0xff, 0x10, 0x84, 0x00, 0x55, 0x55, 0x56, 0x00, 0x00, 0xff, 0xff, +0x55, 0x6a, 0x6a, 0x6a, 0xf6, 0xff, 0xc1, 0x59, 0xff, 0xff, 0xff, 0xff, +0xf6, 0xff, 0xc1, 0x59, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xff, 0xc1, 0x59, +0xff, 0xff, 0xff, 0xff, 0xf6, 0xff, 0xc1, 0x59, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0x99, 0xff, 0x00, 0x5a, 0xff, 0xff, 0xff, 0xff, +0x99, 0xff, 0x00, 0x5a, 0xff, 0xff, 0xff, 0xff, 0x99, 0xff, 0x00, 0x5a, +0xff, 0xff, 0xff, 0xff, 0xe9, 0x93, 0xc7, 0x8b, 0xaa, 0xaa, 0xaa, 0x2a, +0xbe, 0xf7, 0x10, 0x84, 0x54, 0x94, 0x8c, 0x70, 0x7d, 0xef, 0x10, 0x84, +0x63, 0x5c, 0x55, 0x55, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, +0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, +0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 
+0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0x71, 0x8c, 0xcf, 0x7b, 0xbf, 0xff, 0xff, 0xff, 0x9d, 0xef, 0x10, 0x84, +0x72, 0x8d, 0x95, 0x55, 0xbe, 0xf7, 0x10, 0x84, 0x15, 0x15, 0x1c, 0x23, +0xf6, 0xff, 0xc1, 0x59, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xff, 0xc1, 0x59, +0xff, 0xff, 0xff, 0xff, 0xf6, 0xff, 0xc1, 0x59, 0xff, 0xff, 0xff, 0xff, +0xf6, 0xff, 0xc1, 0x59, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0x99, 0xff, 0x00, 0x5a, 0xff, 0xff, 0xff, 0xff, 0x99, 0xff, 0x00, 0x5a, +0xff, 0xff, 0xff, 0xff, 0x56, 0xd6, 0xa1, 0x72, 0xff, 0xff, 0xff, 0x3f, +0x5c, 0xef, 0x2a, 0x94, 0x15, 0x8d, 0x73, 0x54, 0x41, 0x08, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, +0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, +0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 
0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x30, 0x84, 0x25, 0x15, 0x15, 0x15, 0x7c, 0xf7, 0x09, 0x94, +0x55, 0x5c, 0x73, 0x85, 0x09, 0x94, 0xa7, 0x8b, 0xff, 0xff, 0xff, 0xfe, +0xf6, 0xff, 0xc1, 0x59, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xff, 0xc1, 0x59, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0x99, 0xff, 0x00, 0x5a, +0xff, 0xff, 0xff, 0xff, 0x7c, 0xf7, 0xc8, 0x8b, 0x55, 0x55, 0xd5, 0x35, +0x3b, 0xef, 0x09, 0x94, 0x05, 0x61, 0x58, 0x57, 0x2a, 0x9c, 0xc7, 0x8b, +0xfc, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, +0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, +0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x55, 0xf9, 0xe6, 0x60, 0x6a, 0x3f, 0xff, 0xff, 0xff, +0x9d, 0xf7, 0x09, 0x94, 0x56, 0x52, 0x49, 0x35, 0xc2, 0x72, 0xce, 0xac, +0xaa, 0xaa, 0xaa, 0xa9, 0xf6, 0xff, 0xc1, 0x59, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xdf, 0xff, 0x10, 0x84, 0x00, 0x54, 0xd4, 0x34, +0xde, 0xf7, 0xef, 0x7b, 0x00, 0x73, 0x5c, 0x57, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, +0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, +0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0xde, 0xf7, 0xef, 0x7b, 0x00, 0x73, 0xcd, 0x35, +0xff, 0xff, 0x10, 0x84, 0x00, 0x15, 0x15, 0x17, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0x5d, 0xf7, 0xef, 0xaa, 0x55, 0x55, 0xd5, 0x35, +0xde, 0xf7, 0xef, 0x7b, 0xcc, 0x70, 0x5c, 0x54, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, +0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, +0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x2c, 0x63, 0xd3, 0x9c, 0x6a, 0xaa, 0xaa, 0xaa, 0xde, 0xf7, 0xef, 0x7b, +0x1c, 0x33, 0x0d, 0x35, 0xd4, 0x62, 0x57, 0x94, 0xaa, 0xaa, 0xaa, 0xa9, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xd8, 0xdd, 0xcb, 0x99, 
0xff, 0xff, 0xff, 0x3f, +0xdb, 0xee, 0x30, 0xb3, 0x85, 0x61, 0x50, 0x54, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, +0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, +0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x1d, 0xe7, 0xb6, 0x83, 0x54, 0x52, 0x41, 0x85, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0x30, 0xb3, 0x0f, 0xb3, 0xff, 0xff, 0xff, 0xbf, +0x9e, 0xf7, 0x30, 0xb3, 0x35, 0xcd, 0x71, 0x5a, 0xf2, 0xbb, 0xae, 0xaa, +0xfe, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 
0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, +0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, +0x76, 0xfe, 0x01, 0xd8, 0xff, 0xff, 0xff, 0xff, 0x76, 0xfe, 0x01, 0xd8, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, +0xff, 0xff, 0xff, 0xff, 0xda, 0xf7, 0x81, 0xf7, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, 0x5e, 0xef, 0x96, 0x7b, +0x15, 0xd5, 0x55, 0x55, 0x9e, 0xf7, 0xb6, 0x7b, 0x57, 0x5c, 0x53, 0x69, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0x6a, 0x6a, 0x6a, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0x7d, 0xf7, 0x2f, 0xb3, 0x95, 0x35, 0xc5, 0x69, 0xdb, 0xee, 0x0f, 0xb3, +0x54, 0x57, 0x55, 0x55, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 
+0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0x6a, 0x6a, 0x6a, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0x5e, 0xef, 0xf2, 0x49, 0x8f, 0x3f, 0xbf, 0xff, 0x7e, 0xef, 0x96, 0x7b, +0x55, 0x57, 0x54, 0x5a, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x7d, 0xf7, 0x2f, 0xb3, 0x55, 0x55, 0x95, 0x35, 0x7d, 0xf7, 0x30, 0xb3, +0x53, 0x5c, 0x56, 0x55, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 
0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0x7e, 0xef, 0xb6, 0x83, 0x71, 0x4d, 0xa5, 0x15, +0x73, 0x5a, 0xb8, 0x9c, 0xaa, 0xaa, 0xaa, 0xa9, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0xfc, 0xee, 0x70, 0xb3, +0xc5, 0x49, 0x51, 0x58, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 
0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0x19, 0xa5, 0xaf, 0x28, 0x2a, 0xaa, 0xaa, 0xaa, 0x1d, 0xe7, 0xd6, 0x83, +0x54, 0x58, 0x51, 0x49, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0xff, 0xff, 0x10, 0x84, +0x15, 0x15, 0x35, 0x00, 0x9e, 0xf7, 0x30, 0xb3, 0x54, 0x57, 0x55, 0x55, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0x41, 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x41, 0x08, 0x00, 0x00, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 
+0x6a, 0x6a, 0x6a, 0x55, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0x3d, 0xe7, 0xb6, 0x7b, 0xc5, 0x35, 0x15, 0xd5, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0x08, 0x42, +0x00, 0xff, 0xff, 0xff, 0xdf, 0xff, 0x71, 0x8c, 0x00, 0x11, 0x1b, 0x14, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0x6a, 0x6a, 0x6a, 0x1f, 0x62, 0x1e, 0x5a, 0xea, 0xaa, 0xaa, 0xea, +0x1f, 0x62, 0xfd, 0x59, 0xfe, 0xfa, 0xfe, 0xfe, 0x1e, 0x5a, 0xfd, 0x59, +0x5f, 0x57, 0x57, 0x57, 0xfd, 0x59, 0xdb, 0x51, 0xfa, 0xfa, 0xfa, 0xfa, +0xfc, 0x51, 0xba, 0x51, 0xfe, 0xfe, 0xfa, 0xfe, 0xda, 0x51, 0x99, 0x49, +0xea, 0xea, 0xe8, 0xea, 0xb9, 0x51, 0x98, 0x49, 0x7e, 0x7e, 0x7e, 0x7e, +0x97, 0x49, 0x76, 0x41, 0xe8, 0xe8, 0xe8, 0xe8, 0x97, 0x49, 0x75, 0x41, +0x5f, 0x5f, 0x5f, 0x5f, 0x75, 0x41, 0x32, 0x39, 0xea, 0xea, 0xea, 0xea, +0x53, 0x39, 0x32, 0x39, 0x7e, 0x7e, 0x5e, 0x7e, 0x31, 0x39, 0x10, 0x31, +0xfa, 0xfa, 0x78, 0x7a, 0x0f, 0x31, 0xcd, 0x28, 0xa0, 0x80, 0x80, 0x80, +0xee, 0x30, 0xed, 0x28, 0x5e, 0x7e, 0x5e, 0x5e, 0xcc, 0x28, 0xaa, 0x20, +0xa0, 0xa0, 0xa0, 0xa0, 0xcc, 0x28, 0xaa, 0x20, 0x5f, 0x5f, 0x5f, 0x5f, +0xaa, 0x20, 0x88, 0x18, 0xfa, 0xfa, 0xfa, 0xfa, 0x88, 0x18, 0x66, 0x18, +0xe8, 0xe8, 0xe8, 0xe8, 0x87, 0x18, 0x45, 0x10, 0xfa, 0xfa, 0xfa, 0xfa, +0x66, 0x10, 0x44, 0x10, 0x7e, 0x7e, 0x7e, 0x7e, 0x44, 0x10, 0x22, 0x08, +0xa8, 0xe8, 0xa8, 0xe8, 0x44, 0x10, 0x22, 0x08, 0x5f, 0x7f, 0x7f, 0x7f, +0x22, 0x08, 0x01, 0x08, 0xea, 0xfa, 0xfa, 0xea, 0x01, 0x08, 0x00, 0x00, +0xaa, 0xea, 0xaa, 0xaa, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xff, 0x51, 0x8c, +0x00, 0x50, 0x78, 0x44, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x3c, 0xe7, 0x69, 0x4a, 0xbf, 0x3f, 0x2f, 0x8f, +0xff, 0xff, 0xef, 0x7b, 0x16, 0x17, 0x15, 0x15, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x1f, 0x62, 0x1e, 0x5a, 0xaa, 0xea, 0xaa, 0xea, 0x1f, 0x62, 0xfd, 0x59, +0xfe, 0xfe, 0xfe, 0xfe, 0x1e, 0x5a, 0xfd, 0x59, 0x57, 0x57, 0x57, 0x57, +0xfd, 0x59, 0xdb, 0x51, 0xfa, 0xfa, 0xfa, 0xfa, 0xdb, 0x51, 0xb9, 0x51, +0xa0, 0xa0, 0xa0, 0x80, 0xba, 0x51, 0xb9, 0x49, 0xe8, 0xe8, 0xe8, 0xe8, +0xda, 0x51, 0x98, 0x49, 0x5f, 0x7f, 0x7f, 0x5f, 0x97, 0x49, 0x76, 0x41, +0xe8, 0xe8, 0xe8, 0xe0, 0x97, 0x49, 0x75, 0x41, 0x57, 0x57, 0x5f, 0x5f, +0x75, 0x41, 0x32, 0x39, 0xea, 0xea, 0xea, 0xea, 0x53, 0x39, 0x32, 0x39, +0x5c, 0x5e, 0x7e, 0x5e, 0x32, 0x39, 0x0f, 0x31, 0xfa, 0xfa, 0xfa, 0xfa, +0x0f, 0x31, 0xee, 0x28, 0xe0, 0xe0, 0xa0, 0xa0, 0x10, 0x31, 0xed, 0x28, +0x5f, 0x5f, 0x5f, 0x5f, 0xcc, 0x28, 0xcb, 0x20, 0xe0, 0xe8, 0xe8, 0xe0, +0xcc, 0x28, 0xaa, 0x20, 0x5f, 0x5f, 0x5f, 0x5f, 0xa9, 0x20, 0x88, 0x18, +0xe8, 0xe8, 0xe8, 0xe8, 0x88, 0x18, 0x66, 0x18, 0xe8, 0xe8, 0xe8, 0xe8, +0x67, 0x18, 0x65, 0x10, 0xfe, 0xfa, 0xfe, 0xfa, 0x66, 0x10, 0x44, 0x10, +0x7e, 0x7e, 0x7e, 0x7e, 0x44, 0x10, 0x22, 0x08, 0xa8, 0xa8, 0xa8, 0xa8, +0x44, 0x10, 0x22, 0x08, 0x5f, 0x7f, 0x7f, 0x7f, 0x22, 0x08, 0x01, 0x08, +0xf8, 0xe8, 0xea, 0xea, 0x01, 0x08, 0x00, 0x00, 0xaa, 0xe8, 0xea, 0xe8, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x10, 0x84, 0xe4, 0x34, 0x94, 0xd4, +0x3c, 0xe7, 0x69, 0x4a, 0xff, 0xff, 0xfe, 0xfc, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x9d, 0xef, 0x30, 0x84, 0x4d, 0x79, 0x51, 0x5a, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 
0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x1f, 0x62, 0x1e, 0x5a, +0xea, 0xea, 0xea, 0xea, 0x1f, 0x62, 0xfd, 0x59, 0xfe, 0xfe, 0xfa, 0xfe, +0x1e, 0x5a, 0xfd, 0x59, 0x57, 0x5f, 0x57, 0x57, 0xfd, 0x59, 0xdb, 0x51, +0xfa, 0xfa, 0xfa, 0xfa, 0xdb, 0x51, 0xba, 0x51, 0xe0, 0xe0, 0xe0, 0xe0, +0xba, 0x51, 0xb9, 0x49, 0xe8, 0xe8, 0xe8, 0xe8, 0xda, 0x51, 0x98, 0x49, +0x5f, 0x5f, 0x5f, 0x5f, 0x97, 0x49, 0x76, 0x41, 0xe8, 0xe0, 0xe8, 0xe0, +0x97, 0x49, 0x75, 0x41, 0x5f, 0x5f, 0x5f, 0x5f, 0x75, 0x41, 0x32, 0x39, +0xea, 0xea, 0xea, 0xea, 0x53, 0x39, 0x32, 0x39, 0x7e, 0x7a, 0x7a, 0x7e, +0x31, 0x39, 0x10, 0x31, 0x78, 0xf8, 0xf8, 0x78, 0x0f, 0x31, 0xee, 0x28, +0xa0, 0xe0, 0xa0, 0xe0, 0x10, 0x31, 0xed, 0x28, 0x5f, 0x5f, 0x5f, 0x5f, +0xed, 0x28, 0xaa, 0x20, 0xea, 0xea, 0xea, 0xea, 0xcc, 0x28, 0xaa, 0x20, +0x5f, 0x5f, 0x5f, 0x5f, 0xaa, 0x20, 0x88, 0x18, 0xfa, 0xfa, 0xfa, 0xfa, +0x88, 0x18, 0x66, 0x18, 0xe8, 0xe8, 0xe8, 0xe8, 0x67, 0x18, 0x65, 0x10, +0xfa, 0xfa, 0xf8, 0xfa, 0x66, 0x10, 0x44, 0x10, 0x7e, 0x7e, 0x7e, 0x7e, +0x44, 0x10, 0x22, 0x08, 0xa8, 0xa8, 0xa8, 0xa8, 0x44, 0x10, 0x22, 0x08, +0x5f, 0x5f, 0x7f, 0x5f, 0x22, 0x08, 0x01, 0x08, 0xea, 0xea, 0xea, 0xe8, +0x01, 0x08, 0x00, 0x00, 0xea, 0xea, 0xe8, 0xea, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x08, 0x42, 0xfc, 0xfc, 0xfc, 0xfc, 0x9e, 0xf7, 0x30, 0x84, +0x5c, 0x5b, 0x51, 0x69, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0xff, 0xff, 0xef, 0x7b, 0x55, 0xd5, 0x95, 0x00, 0xbe, 0xf7, 0x10, 0x84, +0x5c, 0x54, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x55, 0x1f, 0x62, 0x1e, 0x5a, 0xea, 0xaa, 0xea, 0xea, +0x1f, 0x62, 0xfd, 0x59, 0xfe, 0xfe, 0xfe, 0xfe, 0x1e, 0x5a, 0xfd, 0x59, 
+0x57, 0x57, 0x57, 0x57, 0xfd, 0x59, 0xdb, 0x51, 0xea, 0xfa, 0xfa, 0xea, +0xdb, 0x51, 0xb9, 0x51, 0x80, 0x80, 0xa0, 0x80, 0xba, 0x51, 0xb8, 0x49, +0xa8, 0xa8, 0xa8, 0xa0, 0xb9, 0x51, 0x98, 0x49, 0x7e, 0x7e, 0x7e, 0x7e, +0x97, 0x49, 0x76, 0x41, 0xa8, 0xe8, 0xe8, 0xe0, 0x97, 0x49, 0x75, 0x41, +0x5f, 0x5f, 0x57, 0x5f, 0x75, 0x41, 0x32, 0x39, 0xea, 0xea, 0xea, 0xea, +0x53, 0x39, 0x32, 0x39, 0x58, 0x7e, 0x7a, 0x7a, 0x32, 0x39, 0x0f, 0x31, +0xfa, 0xfa, 0xfa, 0xfa, 0x0f, 0x31, 0xee, 0x28, 0xa0, 0xe0, 0xe0, 0xe0, +0xef, 0x30, 0xed, 0x28, 0x5e, 0x7f, 0x5e, 0x7f, 0xed, 0x28, 0xaa, 0x20, +0xea, 0xea, 0xea, 0xea, 0xcc, 0x28, 0xaa, 0x20, 0x5f, 0x5f, 0x5f, 0x5f, +0xa9, 0x20, 0x88, 0x18, 0xf8, 0xe8, 0xe8, 0xe8, 0x88, 0x18, 0x66, 0x18, +0xe8, 0xe8, 0xe8, 0xe8, 0x67, 0x18, 0x65, 0x10, 0xfa, 0xfa, 0xfa, 0xfa, +0x66, 0x10, 0x44, 0x10, 0x7e, 0x7e, 0x7e, 0x7e, 0x44, 0x10, 0x22, 0x08, +0xa8, 0xa8, 0xa8, 0xe0, 0x44, 0x10, 0x22, 0x08, 0x7f, 0x7f, 0x5f, 0x5f, +0x22, 0x08, 0x01, 0x08, 0xfa, 0xf8, 0xfa, 0xf8, 0x01, 0x08, 0x00, 0x00, +0xa8, 0xa8, 0xa8, 0xa8, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0xbe, 0xf7, 0x10, 0x84, 0x4d, 0xc5, 0xb5, 0x00, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, 0xff, 0xff, 0x30, 0x84, +0x00, 0xe5, 0x45, 0x6d, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, +0x8c, 0xf8, 0x6b, 0xf0, 0xaa, 0xaa, 0xaa, 0xea, 0x8c, 0xf0, 0x6b, 0xf0, +0xff, 0xdf, 0xff, 0xff, 0x8b, 0xf0, 0x6b, 0xe8, 0x5f, 0x5f, 0x7f, 0x5f, +0x6b, 0xe8, 0x6a, 0xd8, 0xfa, 0xfa, 0xfa, 0xfa, 0x6a, 0xd8, 0x6a, 0xd0, +0xe0, 0xa0, 0xe0, 0xe0, 0x6a, 0xd0, 0x69, 0xc8, 0xf8, 0xf8, 0x78, 0xf8, +0x69, 0xc8, 0x49, 0xb8, 0xa8, 0xe8, 0xe8, 0xe8, 0x69, 0xc0, 0x48, 0xb0, +0xfa, 0xfa, 0xfa, 0xfe, 0x69, 0xb8, 0x48, 0xa8, 0x5f, 0x5f, 0x5f, 0x5f, +0x48, 0xa8, 0x47, 0x98, 0x7a, 0xfa, 0xfa, 0xfa, 0x47, 0x98, 0x47, 0x90, +0x7c, 0x5c, 0x58, 0x5c, 0x47, 0x90, 0x46, 0x80, 
0x7a, 0x7e, 0x7a, 0x7e, +0x46, 0x80, 0x45, 0x70, 0xf8, 0xf8, 0xf8, 0xf8, 0x45, 0x70, 0x25, 0x68, +0x78, 0x78, 0x78, 0x78, 0x25, 0x68, 0x24, 0x58, 0xfa, 0xfa, 0xfa, 0xfa, +0x24, 0x58, 0x24, 0x50, 0x78, 0x78, 0x78, 0x78, 0x24, 0x50, 0x23, 0x40, +0xfa, 0xfa, 0xfa, 0xfa, 0x23, 0x40, 0x23, 0x38, 0x78, 0x78, 0x78, 0x78, +0x23, 0x38, 0x02, 0x28, 0xea, 0xea, 0xea, 0xea, 0x22, 0x30, 0x02, 0x20, +0xfa, 0xfe, 0xfa, 0xfa, 0x02, 0x20, 0x01, 0x18, 0xe8, 0xe8, 0xe8, 0xf8, +0x01, 0x18, 0x01, 0x10, 0xf8, 0xf8, 0xf8, 0xe8, 0x01, 0x10, 0x00, 0x08, +0xe8, 0xe8, 0xe8, 0xe0, 0x01, 0x10, 0x00, 0x00, 0xfe, 0xfe, 0xfe, 0xfe, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0xff, 0xff, 0x10, 0x84, 0x00, 0xd5, 0x55, 0x55, 0xff, 0xff, 0x10, 0x84, +0x00, 0x56, 0x54, 0x5e, 0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0x7d, 0xef, 0x10, 0x84, 0x79, 0x51, 0x53, 0x5a, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x6c, 0xf8, 0x6b, 0xf0, +0xe8, 0xa8, 0xe8, 0xa8, 0x8b, 0xf8, 0x6b, 0xe8, 0xea, 0xfa, 0xfa, 0xea, +0x8b, 0xf0, 0x6b, 0xe8, 0x57, 0x57, 0x57, 0x57, 0x6b, 0xe8, 0x6a, 0xd8, +0xfa, 0xfa, 0xfa, 0xfa, 0x6a, 0xd8, 0x6a, 0xd0, 0xa0, 0xa0, 0xa0, 0xe0, +0x6a, 0xd0, 0x69, 0xc8, 0x60, 0x68, 0xf8, 0xe8, 0x6a, 0xd0, 0x69, 0xc0, +0x5f, 0x5f, 0x5f, 0x5f, 0x69, 0xc0, 0x48, 0xb0, 0xfa, 0xfa, 0xfa, 0xfa, +0x68, 0xb0, 0x48, 0xa8, 0x7e, 0x7a, 0x7e, 0x7a, 0x48, 0xa8, 0x47, 0x98, +0x7a, 0xfa, 0x7a, 0xfa, 0x47, 0x90, 0x47, 0x98, 0x09, 0x09, 0x09, 0x09, +0x47, 0x90, 0x46, 0x80, 0x7e, 0x7e, 0x7e, 0x7e, 0x46, 0x80, 0x25, 0x70, +0xe8, 0xe8, 0xe8, 0xf8, 0x46, 0x78, 0x25, 0x68, 0x7e, 0x7e, 0x7e, 0x7e, +0x45, 0x68, 0x24, 0x58, 0xfa, 0xfe, 0xfa, 0xfa, 0x24, 0x58, 0x24, 0x50, +0x78, 0x78, 0x78, 0x78, 0x24, 0x50, 0x23, 0x40, 0xfa, 0xfa, 0xfa, 0xfa, +0x23, 0x40, 0x23, 0x38, 0x78, 0x78, 0x78, 0x78, 0x23, 0x38, 0x02, 0x28, +0xea, 0xea, 0xfa, 0xea, 0x02, 0x30, 0x22, 0x20, 0xfe, 0xfe, 0xfa, 0xfa, +0x22, 0x28, 0x01, 0x18, 
0x7e, 0x7e, 0xfe, 0xfe, 0x01, 0x18, 0x01, 0x10, +0xe8, 0xe8, 0xe8, 0xe8, 0x01, 0x10, 0x00, 0x08, 0xe8, 0xe8, 0xe8, 0xe0, +0x01, 0x08, 0x00, 0x08, 0xff, 0xff, 0xfb, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x7d, 0xef, 0x10, 0x84, 0x5b, 0x51, 0x71, 0x69, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0xdf, 0xff, 0x10, 0x84, 0x54, 0x54, 0xd4, 0x94, +0x1c, 0xe7, 0xef, 0x7b, 0x5c, 0x54, 0x54, 0x56, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x8c, 0xf8, 0x6b, 0xf0, 0xfa, 0xea, 0xea, 0xea, +0x8c, 0xf0, 0x6b, 0xf0, 0xfa, 0xfe, 0xfa, 0xff, 0x8b, 0xf0, 0x6b, 0xe8, +0x5f, 0x5f, 0x5e, 0x5f, 0x6b, 0xe8, 0x6a, 0xd8, 0xfa, 0xfa, 0xfa, 0xfa, +0x6a, 0xd8, 0x6a, 0xd0, 0xe0, 0xa0, 0xa0, 0xe0, 0x6a, 0xd0, 0x69, 0xc8, +0xf8, 0xe8, 0xf8, 0xf8, 0x6a, 0xc8, 0x69, 0xc0, 0x7e, 0x7e, 0x5e, 0x5e, +0x69, 0xc0, 0x48, 0xb0, 0xfa, 0xfa, 0xfa, 0xfa, 0x68, 0xb0, 0x48, 0xa8, +0x7e, 0x5e, 0x7a, 0x7e, 0x48, 0xa8, 0x47, 0x98, 0x7a, 0x7a, 0x7a, 0xfa, +0x47, 0x90, 0x47, 0x98, 0x09, 0x09, 0x09, 0x09, 0x47, 0x90, 0x46, 0x80, +0x7e, 0x7e, 0x7a, 0x7e, 0x46, 0x80, 0x25, 0x70, 0xfa, 0xea, 0xea, 0xea, +0x46, 0x78, 0x25, 0x68, 0x7e, 0x7e, 0x7e, 0x7e, 0x45, 0x68, 0x24, 0x58, +0xfe, 0xfa, 0xfe, 0xfe, 0x24, 0x58, 0x24, 0x50, 0x78, 0x78, 0x78, 0x78, +0x24, 0x50, 0x23, 0x40, 0xfa, 0xfa, 0xfa, 0xfa, 0x23, 0x40, 0x23, 0x38, +0x78, 0x78, 0x78, 0x78, 0x23, 0x38, 0x22, 0x28, 0xea, 0xfa, 0xfa, 0xfa, +0x22, 0x30, 0x02, 0x20, 0xfa, 0xfe, 0xfa, 0xfa, 0x22, 0x28, 0x01, 0x18, +0xfe, 0xfe, 0xfe, 0xfe, 0x01, 0x18, 0x01, 0x10, 0xe8, 0xe8, 0xe8, 0xf8, +0x01, 0x10, 0x00, 0x08, 0xe8, 0xe8, 0xe0, 0xe8, 0x01, 0x10, 0x00, 0x00, +0xfe, 0xfa, 0xfa, 0xfe, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 
+0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x7d, 0xef, 0x10, 0x84, 0x4d, 0x45, 0xe5, 0xa5, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0xff, 0xff, 0x10, 0x84, 0x94, 0x14, 0xb4, 0x00, 0xdf, 0xff, 0xef, 0x7b, +0x57, 0x55, 0x55, 0x00, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, +0x8c, 0xf8, 0x6b, 0xf0, 0xea, 0xea, 0xea, 0xea, 0x8c, 0xf0, 0x6b, 0xf0, +0xff, 0xff, 0xff, 0xfb, 0x6b, 0xf0, 0x6b, 0xe0, 0xea, 0xfa, 0xea, 0xfa, +0x6b, 0xe8, 0x6a, 0xd8, 0xfa, 0xfa, 0xfa, 0xfa, 0x6a, 0xd8, 0x6a, 0xd0, +0xe0, 0xe0, 0xa0, 0xe0, 0x6a, 0xd0, 0x69, 0xc8, 0x70, 0xe8, 0x78, 0xe8, +0x6a, 0xd0, 0x69, 0xc0, 0x5f, 0x5f, 0x5f, 0x5f, 0x69, 0xc0, 0x48, 0xb0, +0xfe, 0xfe, 0xfa, 0x7a, 0x68, 0xb0, 0x48, 0xa8, 0x7a, 0x7a, 0x7e, 0x7a, +0x48, 0xa8, 0x47, 0x98, 0xfa, 0xfa, 0xfa, 0xfa, 0x47, 0x90, 0x47, 0x98, +0x09, 0x09, 0x09, 0x09, 0x47, 0x90, 0x46, 0x80, 0x7a, 0x7a, 0x7e, 0x7e, +0x46, 0x80, 0x25, 0x70, 0xe8, 0xe8, 0xe8, 0xe8, 0x45, 0x70, 0x25, 0x68, +0x78, 0x78, 0x78, 0x78, 0x25, 0x68, 0x24, 0x58, 0xfa, 0xfa, 0xfa, 0xfa, +0x24, 0x58, 0x24, 0x50, 0x78, 0x78, 0x78, 0x78, 0x24, 0x50, 0x23, 0x40, +0xfa, 0xfa, 0xfa, 0xfa, 0x23, 0x40, 0x23, 0x38, 0x78, 0x78, 0x78, 0x78, +0x23, 0x38, 0x02, 0x28, 0xea, 0xfa, 0xea, 0xea, 0x22, 0x30, 0x02, 0x20, +0xfa, 0xfa, 0xfa, 0xfe, 0x22, 0x28, 0x01, 0x18, 0xfe, 0xfe, 0xfe, 0x7e, +0x01, 0x18, 0x01, 0x10, 0xe8, 0xe8, 0xf8, 0xe8, 0x01, 0x10, 0x00, 0x08, +0xa8, 0xe8, 0xa0, 0xa8, 0x01, 0x10, 0x00, 0x00, 0xfe, 0xfe, 0xfe, 0xfe, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 
0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0xff, 0xff, 0x10, 0x84, +0xb5, 0x15, 0x95, 0x00, 0xff, 0xff, 0x10, 0x84, 0x15, 0x15, 0x17, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xf7, 0x30, 0x84, +0x00, 0x44, 0x44, 0x6c, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0x6a, 0x6a, 0x6a, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0x6a, 0x6a, 0x6a, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x41, 0x08, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0xbe, 0xf7, 0x10, 0x84, 
0x00, 0x14, 0x14, 0x1c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0xde, 0xf7, 0x30, 0x84, 0x6c, 0x78, 0x78, 0x50, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xde, 0xf7, 0x30, 0x84, +0x1e, 0x1b, 0x1b, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0xdf, 0xff, 0x10, 0x84, 0x50, 0x50, 0x58, 0x58, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0xde, 0xff, 0xc8, 0x88, 
+0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xdf, 0xff, 0x30, 0x84, 0x11, 0x11, 0x39, 0x39, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x08, 0x42, +0xf8, 0xf8, 0xf8, 0x00, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x55, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 
0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x55, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0xff, 0xff, 0x08, 0x42, 0x2b, 0x2b, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0xa2, 0x10, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0x6a, 0x6a, 0x6a, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 
0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x04, 0x21, 0xbe, 0xf7, +0x55, 0x5a, 0x5a, 0x5a, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0xff, 0xff, 0x28, 0x42, 0xfc, 0xfc, 0xfc, 0xfc, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, +0xde, 0xff, 0xc8, 0x88, 0xff, 0xff, 0xff, 0xff, 0xde, 0xff, 0xc8, 0x88, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 
+0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x71, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x39, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xde, 0xf7, 0x49, 0x4a, 0x0f, 0x0f, 0x0f, 0x0f, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0xff, 0xff, 0x28, 0x42, +0xfc, 0xfc, 0xfc, 0xfc, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0xfb, 0xd5, 0x0a, 0x04, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xd5, 0x0a, 0x04, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xd5, 0x0a, 0x04, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xd5, 0x0a, 0x04, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, 
0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0xda, 0xfd, 0x60, 0x3b, 0xff, 0xff, 0xff, 0xff, +0xda, 0xfd, 0x60, 0x3b, 0xff, 0xff, 0xff, 0xff, 0xda, 0xfd, 0x60, 0x3b, +0xff, 0xff, 0xff, 0xff, 0xda, 0xfd, 0x60, 0x3b, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x10, 0x84, 0xdf, 0xff, 0x50, 0x50, 0x50, 0x50, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0xa2, 0x10, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, +0xfb, 0xd5, 0x0a, 0x04, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xd5, 0x0a, 0x04, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xd5, 0x0a, 0x04, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xd5, 0x0a, 0x04, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x55, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0xd3, 0x9d, 0x02, 0x10, +0xff, 0xff, 0xff, 0xff, 0xd3, 0x9d, 0x02, 0x10, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 
0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, +0xda, 0xfd, 0x60, 0x3b, 0xff, 0xff, 0xff, 0xff, 0xda, 0xfd, 0x60, 0x3b, +0xff, 0xff, 0xff, 0xff, 0xda, 0xfd, 0x60, 0x3b, 0xff, 0xff, 0xff, 0xff, +0xda, 0xfd, 0x60, 0x3b, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0xbe, 0xf7, 0xaa, 0x52, +0x0f, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0xff, 0xff, 0x08, 0x42, 0x00, 0xf8, 0xf8, 0xf8, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, 0xfb, 0xd5, 0x0a, 0x04, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xd5, 0x0a, 0x04, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xd5, 0x0a, 0x04, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xd5, 0x0a, 0x04, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0xd6, 0xb5, 0x21, 0x08, 0xff, 0xff, 0xff, 0xff, +0xd6, 0xb5, 0x21, 0x08, 0xff, 0xff, 0xff, 0xff, 0xd6, 0xb5, 0x21, 0x08, +0xff, 0xff, 0xff, 0xff, 0xd6, 0xb5, 0x21, 0x08, 0xff, 0xff, 0xff, 0xff, +0xd6, 0xb5, 0x21, 0x08, 0xff, 0xff, 0xff, 0xff, 0xd6, 0xb5, 0x21, 0x08, +0xff, 0xff, 0xff, 0xff, 0xd6, 0xb5, 0x21, 0x08, 0xff, 0xff, 0xff, 0xff, +0xd6, 0xb5, 0x21, 0x08, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xde, 0xf5, 0x14, 0xa6, 0xff, 0xff, 0xff, 0xff, 0xde, 0xf5, 0x14, 0xa6, +0xff, 0xff, 0xff, 0xff, 0xde, 0xf5, 0x14, 0xa6, 0xff, 0xff, 0xff, 0xff, +0xde, 0xf5, 0x14, 0xa6, 0xff, 0xff, 0xff, 0xff, 0xde, 0xf5, 0x14, 0xa6, +0xff, 0xff, 0xff, 0xff, 0xde, 0xf5, 0x14, 0xa6, 0xff, 0xff, 0xff, 0xff, +0xde, 0xf5, 0x14, 0xa6, 0xff, 0xff, 0xff, 0xff, 0xde, 0xf5, 0x14, 0xa6, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, +0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, 0xda, 0xfd, 0x60, 0x3b, +0xff, 0xff, 0xff, 0xff, 0xda, 0xfd, 0x60, 0x3b, 0xff, 0xff, 0xff, 0xff, +0xda, 0xfd, 0x60, 0x3b, 0xff, 0xff, 0xff, 0xff, 0xda, 0xfd, 0x60, 0x3b, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 
+0x55, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0x08, 0x42, 0x00, 0x2b, 0x2b, 0x2b,
+[… binary payload elided: this hunk continues with several thousand more bytes of the same hex byte array, dominated by repeating 4-byte groups (e.g. 0xfb, 0xdf, 0x0a, 0x52 / 0xff, 0xff, 0xff, 0xff / runs of 0x00, 0x55, and 0xaa), through to the end of the added data …]
+0x55, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0x55, 0xa9, 0xa9, 0xa9, +0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, +0x55, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0xff, 0xff, 0x55, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x41, 0x08, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0x9e, 0xf7, 0x10, 0x84, 0xe2, 0x35, 0x55, 0x55, 0x3c, 0xe7, 0x10, 0x84, +0x55, 0x5c, 0x83, 0xb5, 0xff, 0xff, 0x30, 0x84, 0x15, 0x15, 0x15, 0x38, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0xde, 0xf7, 0x30, 0x84, 0x54, 0x54, 0x94, 0x88, +0x5c, 0xe7, 0x10, 0x84, 0xd5, 0x0d, 0x70, 0x57, 0xbe, 0xf7, 0x30, 0x84, +0x62, 0x57, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 
0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0x10, 0x84, 0x2d, 0x15, 0x15, 0x15, 0xff, 0xff, 0x30, 0x84, +0x5c, 0x88, 0xd4, 0x54, 0xff, 0xff, 0x10, 0x84, 0x55, 0x57, 0xe2, 0xb5, +0x9d, 0xef, 0x30, 0x84, 0x55, 0x55, 0x55, 0xe0, 0x41, 0x08, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0xff, 0xff, 0x30, 0x84, 0x54, 0x54, 0x54, 0xd4, +0x9d, 0xef, 0x30, 0x84, 0x55, 0x55, 0xd5, 0x82, 0xff, 0xff, 0x10, 0x84, +0x55, 0xb5, 0xe2, 0x57, 0xff, 0xff, 0x30, 0x84, 0x2d, 0x38, 0x15, 0x15, +0xff, 0xff, 0x10, 0x84, 0x5c, 0x54, 0x54, 0x54, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, +0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0xa9, 0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, +0xfb, 0xdf, 0x0a, 0x52, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 
0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0xff, 0xff, 0x30, 0x84, +0xb5, 0x55, 0x55, 0x00, 0xde, 0xf7, 0x30, 0x84, 0x20, 0x35, 0x15, 0x00, +0xdf, 0xff, 0x30, 0x84, 0x54, 0x80, 0xd4, 0x00, 0xde, 0xf7, 0x30, 0x84, +0x55, 0x7e, 0x2b, 0x00, 0xdf, 0xff, 0x10, 0x84, 0x55, 0x55, 0xa0, 0x00, +0xef, 0x7b, 0xdf, 0xff, 0x40, 0x40, 0x6a, 0x55, 0x82, 0x10, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0xff, 0xff, 0x28, 0x42, 0xff, 0xff, 0xff, 0x00, +0x21, 0x08, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0xc3, 0x18, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x55, 0xff, 0xff, 0x08, 0x42, 0xfc, 0xfc, 0xa8, 0x00, +0xde, 0xf7, 0x30, 0x84, 0x55, 0x55, 0x02, 0x00, 0xff, 0xff, 0x10, 0x84, +0x55, 0xaf, 0xfa, 0x00, 0xdf, 0xff, 0x30, 0x84, 0x35, 0x20, 0x15, 0x00, +0xde, 0xf7, 0x30, 0x84, 0x80, 0x54, 0x54, 0x00, 0xff, 0xff, 0x30, 0x84, +0x57, 0x55, 0x55, 0x00, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, +0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, +0xa9, 0xa9, 0xa9, 0x55, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0x6a, 0x6a, 0x6a, 0x55, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa9, 0xa9, 0x55, +0x00, 0x00, 0xff, 0xff, 0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, +0xaa, 0xaa, 0xaa, 0x55, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6a, 0x6a, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, +0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55, +0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0x55, 0x55 \ No newline at end of file diff --git a/build/single_file_libs/examples/testcard-zstd.inl b/build/single_file_libs/examples/testcard-zstd.inl new file mode 100644 index 00000000000..9e4388365c1 --- /dev/null +++ b/build/single_file_libs/examples/testcard-zstd.inl @@ -0,0 +1,261 @@ +0x28, 0xb5, 0x2f, 0xfd, 0x60, 0x00, 0x7f, 0x6d, 0x61, 0x00, 0x0a, 0x66, +0xec, 0x17, 0x48, 0x60, 0x1c, 0x5a, 0xc9, 0x5d, 0x1a, 0x38, 0x07, 0xe8, +0xc5, 0x82, 0x99, 0x68, 0xe6, 0x95, 0x45, 0x58, 0x0d, 0x0c, 0xf3, 0x36, +0xc8, 0xd9, 0x0f, 0x46, 0x2d, 0x68, 0x11, 0xf8, 0x31, 0x10, 0xa1, 0x1a, +0x2f, 0x99, 0x5c, 0x84, 0xfd, 0x92, 0x02, 0xe6, 0x3b, 0x44, 0x9b, 0x01, +0x5d, 0x92, 0xff, 0x38, 0x26, 0x00, 0x6a, 0x6b, 0xc3, 0x53, 0xb2, 0x0c, +0x25, 0xf3, 0xd8, 0x59, 0x68, 0x9b, 0x14, 0x8a, 0x89, 0x75, 0x18, 0x03, +0x1d, 0xc9, 0x0f, 0x63, 0x01, 0x73, 0x01, 0x72, 0x01, 0x4f, 0x66, 0x31, +0x58, 0x0f, 0x97, 0x4b, 0x0c, 0x4c, 0x06, 0xac, 0x07, 0x0b, 0x68, 0xd4, +0xad, 0x80, 0x64, 0x13, 0x74, 0xa1, 0x12, 0x16, 0x58, 0xcf, 0x1a, 0x95, +0x5f, 0x0d, 0x26, 0x55, 0xd0, 0x9c, 0xf4, 0x52, 0x35, 0x2e, 0x20, 0xc1, +0x06, 0x69, 0x03, 0x0a, 0x93, 0x83, 0x5e, 0x27, 0x9b, 0x4c, 0x6d, 0xee, +0x87, 0x03, 0x30, 0x6c, 0x46, 0xd7, 0x50, 0x5c, 0xca, 0xe6, 0xa6, 0x4d, +0xa8, 0xf6, 0xab, 0xd7, 0x0e, 0x27, 0x27, 0x90, 0xc4, 0xb2, 0xd1, 0x10, +0xfa, 0x43, 0x82, 0xc8, 0xf2, 0xe5, 0xff, 0xff, 0xd5, 0x52, 0x62, 0x43, +0x87, 0x26, 0x2a, 0x05, 0x70, 0x0e, 0xb0, 0x2f, 0xc4, 0x56, 0xef, 0xb5, +0xca, 0xb8, 0x53, 0xb7, 0x96, 0x0e, 0xe7, 0x00, 0x2c, 0xa8, 0xda, 0x3b, +0x07, 0x70, 0xa7, 0x78, 0x38, 0x60, 0x87, 0x7a, 0x01, 0x3b, 0x75, 0xec, +0xfa, 0x77, 0xe2, 0x46, 0x94, 0x61, 0x8e, 0x0d, 0x0c, 0xfb, 0xe7, 0x8b, +0x13, 0x50, 0x31, 0xa9, 0x27, 0xcd, 0x27, 0xef, 0x6b, 0xa6, 0xab, 0x9c, +0x4d, 0x95, 0x6c, 0x3a, 0xbb, 0x8e, 0x96, 0x92, 0x18, 0x5a, 0x7c, 0x4f, +0xff, 0x7b, 0x38, 0xf2, 0xdb, 0x86, 0xde, 0xff, 0x1f, 0x2f, 
0x21, 0x86, +0x7d, 0xbf, 0x45, 0xd0, 0x6e, 0x77, 0x0a, 0xee, 0x0a, 0xee, 0x14, 0x9a, +0xb8, 0x84, 0xf3, 0xac, 0xbe, 0xc8, 0x7f, 0x8d, 0xff, 0xff, 0xcf, 0x2a, +0xfb, 0x69, 0xfc, 0xfb, 0xfd, 0x7a, 0x10, 0x22, 0x36, 0xfc, 0xff, 0x3f, +0xcf, 0xd0, 0xf1, 0x7f, 0xfe, 0xff, 0x3d, 0x24, 0xdf, 0x78, 0x4a, 0xff, +0xda, 0x9c, 0x39, 0xcf, 0xef, 0xe7, 0xfd, 0x52, 0x98, 0xb5, 0x40, 0x92, +0xee, 0xdd, 0x99, 0xf5, 0x53, 0x5b, 0x65, 0x6b, 0xb5, 0xd8, 0x7b, 0xae, +0xfa, 0xc1, 0x0f, 0x0c, 0x7f, 0x4f, 0x55, 0xa3, 0xad, 0x2c, 0xa0, 0xbd, +0xf7, 0x2a, 0x0e, 0xe8, 0xbd, 0xc7, 0x5e, 0xf5, 0xd8, 0x54, 0x9e, 0x56, +0xa3, 0xd6, 0x59, 0xd5, 0xfe, 0x1f, 0xc0, 0x30, 0x8c, 0xfc, 0x46, 0x04, +0xae, 0x60, 0xbc, 0xe8, 0xcf, 0xec, 0x3d, 0xde, 0xf9, 0xf0, 0xfe, 0xef, +0x7d, 0xcc, 0xf7, 0x2b, 0xe5, 0x1b, 0x70, 0xff, 0xff, 0x7e, 0x3f, 0x6e, +0xe4, 0x02, 0x07, 0xfc, 0x1b, 0x7a, 0xff, 0xe7, 0x58, 0xfc, 0x7e, 0x3a, +0xdc, 0x97, 0xfd, 0x57, 0xef, 0xa3, 0xfc, 0x2a, 0xc7, 0x4d, 0xf3, 0xcb, +0x9d, 0xce, 0xac, 0xfe, 0xeb, 0x2e, 0x86, 0xb9, 0x69, 0x54, 0xef, 0xf9, +0x55, 0xcf, 0xff, 0x48, 0x24, 0x72, 0x3a, 0x9d, 0x72, 0x2f, 0x2f, 0x2f, +0xff, 0x3f, 0xe7, 0x01, 0x6c, 0x4d, 0x6c, 0xcd, 0x2d, 0x5b, 0x53, 0xb7, +0x59, 0x22, 0x08, 0x0b, 0xa7, 0x92, 0x15, 0x75, 0x93, 0xb0, 0x5d, 0xaf, +0x2a, 0x63, 0x95, 0x1d, 0x25, 0xd2, 0xd2, 0xa8, 0x1c, 0x84, 0xc9, 0xdc, +0x72, 0xba, 0xd7, 0xfc, 0x69, 0xf5, 0xc7, 0x19, 0xa9, 0xbe, 0xfa, 0x26, +0x55, 0x25, 0x75, 0xb7, 0x60, 0xa3, 0xd8, 0x68, 0x54, 0xb7, 0x1b, 0xa5, +0x54, 0x62, 0xb1, 0x49, 0xde, 0xe2, 0xac, 0xa2, 0xe8, 0x7b, 0xff, 0x5f, +0x75, 0x4e, 0xb8, 0xa2, 0xdd, 0x6a, 0xb7, 0xda, 0x6e, 0x2e, 0x04, 0xcd, +0x08, 0x2f, 0xec, 0x8e, 0x49, 0xaf, 0x49, 0x6f, 0x8b, 0x4f, 0x2e, 0x1a, +0xc5, 0x62, 0x7b, 0x6b, 0x3e, 0x32, 0x3e, 0x32, 0xbe, 0x08, 0x35, 0x4d, +0x63, 0x93, 0xa6, 0xc8, 0x42, 0xe6, 0x21, 0xcc, 0x59, 0xc8, 0x4c, 0xe5, +0x86, 0xe1, 0x03, 0x06, 0xa4, 0xec, 0xff, 0xb7, 0x78, 0x7e, 0x62, 0x43, +0xc7, 0x2c, 0x50, 0x30, 0x4a, 0xc8, 0x9b, 0xf3, 0xbf, 0xe6, 0x62, 0xa0, +0x50, 0xa6, 0x9c, 0xe3, 0x6e, 0x5b, 0xaf, 0x77, 0x8b, 0xbb, 0xe1, 0x70, +0xaa, 0xaa, 0xaa, 0x92, 0xb4, 0x52, 0xad, 0x14, 0x87, 0x93, 0x0b, 0xe6, +0x82, 0x39, 0x11, 0xb9, 0x20, 0x9a, 0x16, 0x34, 0x22, 0x68, 0xb2, 0x68, +0xb2, 0x76, 0xd3, 0xe8, 0x6e, 0xda, 0x6b, 0x62, 0x34, 0x2e, 0x8d, 0xbd, +0x2d, 0x3d, 0x6b, 0x4c, 0x26, 0x33, 0xda, 0x33, 0xf3, 0x91, 0x51, 0x46, +0x79, 0xbd, 0x3e, 0x39, 0x9f, 0xcf, 0xd2, 0xb8, 0x5c, 0xfa, 0x22, 0xf8, +0x8e, 0xb6, 0xbe, 0x08, 0x40, 0x14, 0x49, 0x40, 0x14, 0xf2, 0x0c, 0x2d, +0x30, 0x85, 0x3c, 0x63, 0x29, 0xd3, 0x98, 0x85, 0x6c, 0xb5, 0xdb, 0xad, +0x5c, 0x63, 0x9e, 0x72, 0xcf, 0x43, 0xe6, 0xaf, 0x77, 0xa6, 0xe2, 0x21, +0x0c, 0x4d, 0xd3, 0x49, 0x1e, 0xc2, 0x14, 0x6f, 0xee, 0xdb, 0x7b, 0x7b, +0x08, 0xb3, 0xa4, 0x60, 0x3b, 0x9d, 0x6e, 0x8b, 0x37, 0x4b, 0x0a, 0x74, +0x35, 0x33, 0xbc, 0xf9, 0x64, 0x85, 0x63, 0x32, 0x29, 0x20, 0x59, 0x0c, +0x3c, 0x96, 0x67, 0x62, 0xb7, 0x8a, 0x92, 0x4d, 0xa0, 0xd3, 0xf3, 0xd1, +0x85, 0x80, 0x38, 0xcb, 0x64, 0x60, 0xc9, 0xb5, 0xaf, 0x97, 0x8d, 0x20, +0x45, 0x28, 0xb8, 0xab, 0xe8, 0xc9, 0x0a, 0x88, 0x1f, 0xd6, 0x47, 0x54, +0xf1, 0xd3, 0xfb, 0x62, 0xa7, 0xfd, 0xf2, 0x8b, 0xfd, 0xb6, 0xe4, 0x2e, +0xb6, 0x91, 0x73, 0x1c, 0xd0, 0x7b, 0xba, 0x83, 0xc9, 0xac, 0x51, 0x39, +0x92, 0xc5, 0x4f, 0x30, 0x1e, 0x2e, 0xd5, 0xf1, 0xa8, 0xa6, 0xa5, 0x80, +0x70, 0xb9, 0xbc, 0xb7, 0xc2, 0x52, 0x32, 0x6c, 0xe3, 0x3d, 0xed, 0x41, +0xa4, 0x4b, 0x31, 0x2a, 0xe6, 0x62, 0x11, 0x19, 0x95, 0x73, 0x1d, 0xbf, +0xe1, 0x6c, 0xfc, 0x47, 0x75, 0x6c, 0x37, 0x63, 0x02, 0xf8, 0x34, 0x40, +0x9a, 0x00, 0x1d, 0xf7, 0x32, 0x56, 
0x77, 0xda, 0x5b, 0x9f, 0x9f, 0x0f, +0xbb, 0x91, 0x5b, 0xbd, 0xe7, 0x58, 0x82, 0x4a, 0x20, 0xcd, 0x4f, 0x47, +0x15, 0xf3, 0x51, 0xf1, 0x43, 0x51, 0x10, 0x96, 0xae, 0xba, 0xf7, 0x21, +0x50, 0xef, 0x55, 0x27, 0x0c, 0x1f, 0xe0, 0x54, 0xf8, 0xc9, 0x69, 0xef, +0xb9, 0x53, 0xf7, 0x83, 0x73, 0x9d, 0xce, 0x86, 0x07, 0x83, 0x44, 0x61, +0x37, 0x35, 0x35, 0x33, 0x4e, 0x33, 0x33, 0x3e, 0x9f, 0x50, 0x48, 0x24, +0xe6, 0xd0, 0x79, 0x49, 0xc3, 0x2d, 0xa0, 0x46, 0x31, 0x9a, 0x72, 0xc3, +0x84, 0xff, 0x7a, 0x95, 0xbb, 0x00, 0x22, 0xcc, 0x14, 0x00, 0x04, 0xac, +0x60, 0x64, 0x86, 0xe4, 0x6f, 0xb1, 0x58, 0xf4, 0xdc, 0xb0, 0x05, 0x00, +0x72, 0x38, 0xf8, 0xce, 0xce, 0x8e, 0xcf, 0x37, 0x33, 0x43, 0x24, 0x0a, +0x85, 0x33, 0x35, 0x35, 0x37, 0xc2, 0xa8, 0x28, 0x27, 0x1b, 0x9d, 0xce, +0x29, 0x18, 0xe4, 0xfc, 0x09, 0x53, 0xa5, 0x51, 0xad, 0x74, 0x79, 0x7b, +0xb5, 0x4a, 0x65, 0x94, 0x36, 0x89, 0xf5, 0x62, 0x9b, 0xd8, 0x9a, 0xe8, +0x2b, 0xff, 0xa1, 0x73, 0xfe, 0x2e, 0x2c, 0x41, 0x45, 0x37, 0xef, 0x6e, +0x7b, 0x38, 0xca, 0xa5, 0xd2, 0xb8, 0x1c, 0x3b, 0x96, 0x21, 0xbb, 0x5d, +0xad, 0xd1, 0xdb, 0x1c, 0xf6, 0x5e, 0x4b, 0xd9, 0x59, 0x1b, 0x67, 0xf5, +0x7a, 0x7c, 0x9a, 0x91, 0x3e, 0x8e, 0xe3, 0xee, 0x7b, 0xb9, 0xa4, 0xb9, +0xf5, 0x70, 0xee, 0x1d, 0x4e, 0x4f, 0xcc, 0xd6, 0x7b, 0x07, 0x71, 0x48, +0xf2, 0x06, 0xd0, 0x10, 0x82, 0x21, 0xe4, 0x55, 0x2e, 0xa5, 0x1d, 0xbe, +0x48, 0x8c, 0x69, 0xbb, 0x24, 0x40, 0x68, 0x9b, 0xba, 0x5a, 0x5a, 0xa7, +0xe2, 0xd1, 0xac, 0xc2, 0xd8, 0x87, 0x9c, 0xe3, 0x78, 0xee, 0xa6, 0xcd, +0xdd, 0x68, 0x6e, 0xdd, 0xdb, 0x2e, 0xc6, 0xbb, 0x8b, 0xe9, 0xc1, 0xd9, +0xf6, 0x62, 0x7e, 0x72, 0x7e, 0x72, 0x34, 0xe4, 0x68, 0xc8, 0x11, 0x32, +0x10, 0x32, 0x20, 0x32, 0xf0, 0x12, 0x19, 0x90, 0xe0, 0x45, 0x91, 0xe0, +0x25, 0x83, 0xba, 0xc9, 0x20, 0x26, 0x87, 0xa5, 0x72, 0xa9, 0x64, 0x72, +0x80, 0x21, 0x54, 0x13, 0xeb, 0x29, 0x18, 0x42, 0x34, 0x84, 0x94, 0x34, +0x84, 0xa4, 0x1d, 0xa4, 0x1d, 0x82, 0x1c, 0xef, 0xaf, 0x0e, 0x63, 0x47, +0x6d, 0x10, 0xb9, 0xec, 0xd8, 0x2d, 0x3b, 0x9e, 0x21, 0xb1, 0x67, 0x48, +0x34, 0x24, 0x1a, 0x52, 0xdb, 0xa4, 0x6d, 0x62, 0xd3, 0xfa, 0xff, 0xfa, +0x03, 0x34, 0xef, 0x9d, 0xc9, 0xe1, 0x5d, 0xec, 0xf5, 0xf1, 0x79, 0x8c, +0x97, 0xd2, 0x24, 0xc1, 0x2d, 0xe0, 0x39, 0x16, 0x1e, 0xa9, 0x41, 0xc3, +0xbf, 0x4a, 0xd9, 0x3c, 0xea, 0x77, 0x96, 0x55, 0xe6, 0x95, 0xc3, 0xf1, +0x8e, 0x7b, 0x4f, 0xad, 0x61, 0xf8, 0xe7, 0x01, 0xad, 0x46, 0xf5, 0x2c, +0xac, 0x55, 0x2c, 0x94, 0xaa, 0x46, 0xfb, 0x5e, 0xcd, 0xaa, 0x1f, 0x78, +0x4f, 0x2f, 0xd1, 0xc9, 0x02, 0xd6, 0x2c, 0x67, 0xef, 0x3f, 0x54, 0xab, +0xda, 0x03, 0x79, 0x1f, 0xab, 0xfd, 0x0c, 0x38, 0x3c, 0xbc, 0xe1, 0xd5, +0x01, 0xf6, 0xfb, 0xfb, 0xf1, 0x70, 0xee, 0xfd, 0x90, 0x13, 0x97, 0xc4, +0xbc, 0x08, 0xe7, 0x4b, 0x88, 0x34, 0xf7, 0x56, 0x1e, 0x0c, 0xdb, 0xe4, +0x9c, 0x78, 0xf1, 0xf4, 0x62, 0x4c, 0xb5, 0xf7, 0xdd, 0xd9, 0x4c, 0x5a, +0x69, 0xa6, 0x36, 0x27, 0x03, 0xbe, 0x86, 0xc2, 0x72, 0xa2, 0x60, 0x73, +0xf1, 0xe0, 0x17, 0x50, 0xb5, 0x93, 0x81, 0xac, 0xf1, 0xc9, 0xd4, 0x66, +0x73, 0x71, 0xce, 0x63, 0xa8, 0x60, 0x98, 0xda, 0x86, 0x46, 0x72, 0xec, +0x54, 0xc1, 0xe6, 0x8a, 0x10, 0x23, 0x2d, 0x0c, 0xdd, 0x44, 0x0b, 0xa0, +0x44, 0xa4, 0x9d, 0x0e, 0x64, 0x31, 0x30, 0x45, 0xb1, 0x97, 0x4c, 0x6d, +0x9f, 0x43, 0x99, 0x71, 0xa8, 0x9a, 0xe6, 0xbd, 0x3a, 0xe1, 0xff, 0x7f, +0x33, 0x61, 0xf1, 0x48, 0xda, 0x48, 0x28, 0x10, 0x23, 0x9b, 0x24, 0xeb, +0xee, 0xbd, 0x35, 0x0e, 0x6e, 0x75, 0x23, 0x20, 0xd6, 0x95, 0x8c, 0xa5, +0x24, 0xec, 0x44, 0x67, 0x52, 0x2e, 0x78, 0x2a, 0xba, 0x3e, 0x3a, 0x4e, +0xc9, 0x5c, 0x23, 0xb0, 0xd5, 0xfb, 0x29, 0xaf, 0x9a, 0x0b, 0x90, 0x89, +0xd8, 0xec, 
0xa9, 0xa8, 0x13, 0xfc, 0x22, 0xfa, 0xf2, 0x74, 0x2f, 0x4e, +0x35, 0xb0, 0x6d, 0x6c, 0xfd, 0xc4, 0xfe, 0xd0, 0x98, 0x3d, 0xe5, 0x43, +0x0a, 0xd0, 0x33, 0x26, 0x3b, 0x12, 0x7d, 0x65, 0xa1, 0xff, 0xff, 0x6f, +0x53, 0x0e, 0x28, 0x84, 0xa7, 0xa2, 0x2e, 0xf8, 0x4a, 0xb6, 0xa1, 0x47, +0xf5, 0xd0, 0x13, 0x9d, 0xd9, 0x50, 0xef, 0x9f, 0x31, 0xb4, 0x13, 0x67, +0xf9, 0x0a, 0x99, 0xb5, 0x80, 0xec, 0x4a, 0x1a, 0x59, 0x21, 0x3b, 0xce, +0xc5, 0x7e, 0x96, 0x85, 0x92, 0xc5, 0xb0, 0x75, 0xaa, 0x41, 0x16, 0xa9, +0xd3, 0xde, 0x13, 0x7d, 0xd9, 0x61, 0x30, 0x1c, 0x73, 0x61, 0x54, 0x90, +0xcf, 0xd4, 0xe8, 0xfe, 0xbf, 0xbf, 0x36, 0x57, 0x26, 0x38, 0xab, 0x06, +0xc4, 0x7e, 0x3c, 0x5b, 0x35, 0xd2, 0x7c, 0x2c, 0x0a, 0x82, 0xf2, 0xa8, +0xf2, 0x8b, 0x48, 0xa9, 0x90, 0x00, 0x01, 0x10, 0x10, 0x14, 0x04, 0x00, +0x32, 0xa2, 0x06, 0x82, 0x28, 0x90, 0xc2, 0xb4, 0x85, 0xda, 0x01, 0x52, +0xe9, 0x18, 0x85, 0x60, 0x00, 0x00, 0x20, 0x0c, 0x00, 0x14, 0x41, 0x00, +0x40, 0xa0, 0x04, 0x40, 0x00, 0x80, 0x50, 0x03, 0x01, 0x10, 0x18, 0x01, +0x80, 0xd4, 0x14, 0x99, 0x01, 0xfd, 0x07, 0xf8, 0x16, 0x0e, 0xd9, 0x5d, +0xa3, 0x70, 0xfe, 0xda, 0x17, 0xfa, 0xce, 0x46, 0x9a, 0x99, 0x81, 0x1a, +0x39, 0xba, 0x63, 0xb1, 0x18, 0x11, 0x58, 0xd7, 0xc7, 0xba, 0x03, 0x3e, +0x01, 0xf2, 0xf9, 0x4e, 0x12, 0xa3, 0x50, 0x7b, 0xaf, 0x7b, 0x60, 0x5c, +0x83, 0x23, 0xd2, 0x60, 0x27, 0x84, 0xad, 0xb8, 0x02, 0xed, 0xfe, 0xb4, +0x9c, 0x08, 0x9f, 0x49, 0xae, 0x55, 0x02, 0x94, 0xc0, 0x1b, 0x90, 0x75, +0x0b, 0x90, 0xc4, 0xc7, 0x43, 0x9e, 0x67, 0x25, 0x70, 0x61, 0x0d, 0xb8, +0x7a, 0x97, 0x43, 0xfc, 0xd1, 0x7e, 0x68, 0xed, 0x03, 0xb7, 0x1e, 0x75, +0xe9, 0x4d, 0x7a, 0x23, 0x18, 0x37, 0x63, 0x6f, 0xab, 0x5f, 0x7c, 0x5b, +0x1c, 0x05, 0xdf, 0x3f, 0x00, 0x86, 0x37, 0xa0, 0xfa, 0x0c, 0xe0, 0xed, +0x35, 0x35, 0x2f, 0xd8, 0xd1, 0x75, 0xba, 0x37, 0x34, 0x7e, 0xb0, 0x84, +0x2a, 0x01, 0x0c, 0x98, 0xed, 0x47, 0xf9, 0x86, 0x81, 0x74, 0x00, 0x5d, +0x8b, 0x4c, 0x18, 0x8a, 0x31, 0xcd, 0xae, 0x07, 0x44, 0xb5, 0xd5, 0x07, +0xa0, 0xdf, 0xf4, 0xfa, 0xa6, 0x42, 0xd0, 0x4f, 0x17, 0xd8, 0xdf, 0xb6, +0x34, 0x44, 0xe3, 0x01, 0xc4, 0xb6, 0x2d, 0xb5, 0x56, 0xc6, 0x2a, 0x1f, +0x05, 0x6c, 0x35, 0xe0, 0x09, 0x31, 0xef, 0x60, 0xfe, 0xaf, 0x07, 0x80, +0x32, 0xa0, 0xe9, 0xd3, 0x96, 0x45, 0xa7, 0xaa, 0xb6, 0xfb, 0x03, 0x10, +0xe3, 0x97, 0x96, 0x8d, 0x3a, 0x01, 0xdd, 0x58, 0x58, 0x78, 0x00, 0xab, +0xff, 0x06, 0xa0, 0xd6, 0x01, 0x58, 0x08, 0xb7, 0xdc, 0x2d, 0xa7, 0xfb, +0x22, 0xa8, 0x67, 0x00, 0xe3, 0xcf, 0x82, 0x43, 0xfc, 0x96, 0x1b, 0x40, +0x63, 0xcf, 0x9d, 0x42, 0x5d, 0x66, 0x40, 0xaa, 0xaf, 0x28, 0x94, 0xd3, +0x2a, 0xd4, 0x02, 0x13, 0xd2, 0xdf, 0x03, 0x9c, 0x60, 0x6b, 0x16, 0x94, +0xb4, 0xbe, 0x62, 0xc2, 0x35, 0x60, 0x45, 0x09, 0x23, 0x5a, 0xe0, 0x85, +0xb3, 0x03, 0x50, 0x68, 0x0c, 0x20, 0xa5, 0xf9, 0x94, 0xd2, 0x35, 0x80, +0xad, 0x4c, 0x4e, 0x40, 0x41, 0x97, 0x92, 0x75, 0xbe, 0x0c, 0x03, 0x50, +0x85, 0x08, 0xaf, 0x36, 0x00, 0x68, 0xaf, 0x09, 0xb3, 0x0c, 0x20, 0x4f, +0x81, 0x6a, 0x6a, 0xf5, 0x0d, 0x70, 0x69, 0x00, 0x4c, 0xb4, 0x0f, 0x59, +0xe7, 0x31, 0x0a, 0x45, 0x9f, 0xde, 0x90, 0xd6, 0x38, 0x80, 0x6b, 0x2c, +0xb9, 0x2f, 0xd4, 0x01, 0x40, 0x14, 0xd5, 0xed, 0x8e, 0x01, 0x53, 0xbf, +0x03, 0x18, 0x1e, 0xb0, 0xc1, 0x85, 0x32, 0xec, 0x78, 0x2b, 0xf0, 0xbb, +0xbb, 0x6c, 0xf3, 0x4d, 0xdc, 0x73, 0x40, 0xfd, 0x10, 0x09, 0x9e, 0x20, +0xe2, 0x12, 0x8c, 0xe0, 0xd2, 0xed, 0x80, 0x6b, 0xcc, 0x78, 0x20, 0x03, +0xd0, 0x5e, 0x06, 0xf4, 0xb0, 0xc4, 0x0e, 0x15, 0x1d, 0x80, 0xb4, 0x76, +0xdf, 0x49, 0x03, 0x50, 0x82, 0xad, 0xda, 0x8b, 0x5a, 0x61, 0xc2, 0x5e, +0xb5, 0x1e, 0x46, 0xc0, 0xde, 0xaa, 0x0e, 0x15, 0x06, 0xd2, 
0xf4, 0xb2, +0xd1, 0xed, 0x38, 0x0a, 0x03, 0x18, 0x33, 0x1a, 0x80, 0x61, 0x3e, 0xec, +0x7c, 0x74, 0xa8, 0x1d, 0x80, 0x1a, 0xce, 0x25, 0x1d, 0x41, 0xd1, 0xc1, +0x03, 0x28, 0xb5, 0xaf, 0x72, 0x9c, 0x59, 0x7a, 0xe1, 0x7d, 0xc0, 0xa5, +0x08, 0x1e, 0x18, 0x24, 0xfa, 0xbd, 0x99, 0x4a, 0x31, 0xa0, 0xea, 0xee, +0xf8, 0x36, 0x60, 0x98, 0xc9, 0x10, 0xd1, 0xa7, 0x35, 0x00, 0x8d, 0x40, +0x8e, 0x5a, 0x35, 0x0f, 0x80, 0xb1, 0xd4, 0x32, 0x79, 0x40, 0x34, 0x05, +0x7e, 0x98, 0xc6, 0x80, 0x3e, 0x90, 0x01, 0x65, 0xf4, 0x80, 0x73, 0x08, +0x64, 0xd7, 0x36, 0xc1, 0x7c, 0xc0, 0x5c, 0x75, 0x00, 0xc5, 0x09, 0x58, +0x9c, 0x13, 0x01, 0x72, 0x37, 0x9b, 0x79, 0xe4, 0x05, 0xd1, 0x01, 0x04, +0x98, 0x08, 0x74, 0xfd, 0xfc, 0x3f, 0x1c, 0x00, 0x73, 0x01, 0xfc, 0x1c, +0xcc, 0x16, 0x43, 0x19, 0x1d, 0xac, 0x61, 0x4b, 0x11, 0xc2, 0xa0, 0xf2, +0x01, 0x0b, 0x7b, 0x3b, 0xf4, 0xfc, 0x58, 0x5d, 0x2d, 0x5c, 0x01, 0x8c, +0x62, 0x17, 0x78, 0xbe, 0x60, 0x8c, 0x01, 0x6f, 0x91, 0x49, 0x65, 0x54, +0x92, 0xe9, 0x01, 0x1e, 0x10, 0x77, 0x35, 0x00, 0xa8, 0xd4, 0xc7, 0x71, +0x07, 0xd8, 0xcd, 0xa3, 0x7d, 0x69, 0x20, 0xac, 0x07, 0x00, 0x35, 0xc7, +0x62, 0xee, 0x8c, 0x7d, 0x0c, 0xb8, 0x43, 0x0e, 0x00, 0x08, 0xfb, 0xe7, +0xec, 0x33, 0x37, 0x04, 0x80, 0x2d, 0x1d, 0xa6, 0x13, 0x34, 0x1b, 0x1d, +0xc0, 0xca, 0x00, 0x92, 0xed, 0x2e, 0x56, 0xbe, 0x91, 0x80, 0x0c, 0x88, +0xa6, 0x01, 0xdf, 0x7f, 0x90, 0x49, 0xed, 0x0c, 0xe0, 0x08, 0x73, 0x28, +0x74, 0xc7, 0xe1, 0xb1, 0x03, 0x5d, 0xc5, 0xab, 0x61, 0x42, 0xdf, 0x03, +0x43, 0xf3, 0x35, 0x04, 0xcf, 0xc6, 0x1d, 0x79, 0x07, 0x40, 0x22, 0xe4, +0x68, 0x0d, 0x01, 0x95, 0xad, 0x72, 0x69, 0x00, 0x39, 0x9d, 0x53, 0x8f, +0x13, 0x0d, 0xb0, 0x29, 0x79, 0x1a, 0x39, 0x20, 0x12, 0x28, 0x9b, 0x02, +0x8f, 0x74, 0x90, 0x4c, 0xe8, 0xd1, 0x57, 0xf4, 0x01, 0x44, 0x04, 0xe0, +0x0c, 0x82, 0x91, 0xc5, 0x4f, 0x8f, 0xc6, 0x00, 0x43, 0x85, 0x65, 0xc8, +0xe6, 0x34, 0x1d, 0x80, 0xc0, 0xca, 0xdb, 0x57, 0x6c, 0x00, 0x72, 0x42, +0x5f, 0xd0, 0x49, 0x57, 0x47, 0xd4, 0x97, 0x18, 0x18, 0x80, 0x68, 0x8e, +0x0a, 0xf1, 0x6b, 0x34, 0xf1, 0x60, 0x2c, 0x41, 0x29, 0xd3, 0x3d, 0x55, +0x95, 0xb1, 0x3c, 0xd4, 0x95, 0x42, 0xef, 0xe7, 0xca, 0x00, 0x2e, 0xce, +0x25, 0xc2, 0xca, 0xf5, 0x00, 0x17, 0x3b, 0x8c, 0x42, 0x88, 0x03, 0xde, +0x97, 0xe1, 0x3a, 0x74, 0xb0, 0x33, 0xe0, 0x8f, 0x47, 0xeb, 0x2a, 0x5f, +0x36, 0x3e, 0x5a, 0xff, 0xc5, 0x80, 0xb9, 0x13, 0xa9, 0x1f, 0xf8, 0x86, +0xc9, 0x51, 0xf8, 0x4c, 0xaa, 0xe1, 0x65, 0x80, 0xb0, 0x8b, 0x91, 0xec, +0xcc, 0xbf, 0x70, 0x19, 0x98, 0x03, 0x10, 0xf0, 0x38, 0x40, 0xc4, 0x65, +0xbe, 0x41, 0xb2, 0x58, 0x3f, 0xe0, 0xcc, 0x0e, 0x08, 0x2b, 0x73, 0xf4, +0xdd, 0x86, 0x06, 0xa0, 0xc6, 0x8f, 0x1a, 0x32, 0x66, 0x50, 0x8e, 0xe1, +0x59, 0x67, 0x00, 0xed, 0x66, 0x1d, 0xdd, 0xfa, 0x7b, 0xe2, 0x56, 0x89, +0xd9, 0xa0, 0x4f, 0x41, 0x94, 0x28, 0xb8, 0xc6, 0xc7, 0x64, 0xde, 0x9b, +0x64, 0x44, 0x33, 0x39, 0xb5, 0x6c, 0xb9, 0x42, 0xe7, 0x7e, 0x16, 0xd2, +0x01, 0x12, 0x03, 0xb3, 0x48, 0x47, 0x6b, 0x75, 0x26, 0x19, 0x8c, 0xac, +0x6f, 0xb1, 0x6f, 0xdc, 0x04, 0x27, 0x3a, 0x00, 0xd6, 0xae, 0xfa, 0xe1, +0xf7, 0x30, 0xa4, 0xdb, 0xd5, 0x86, 0x5a, 0x07, 0x11, 0xde, 0xea, 0xf4, +0xb0, 0x83, 0x16, 0xbb, 0xc6, 0x00, 0x6e, 0xf2, 0x6b, 0x40, 0x81, 0x01, +0x67, 0x0e, 0xa9, 0x82, 0x23, 0x04, 0x34, 0xed, 0x02, 0xf5, 0xe4, 0x0e, +0x58, 0xe8, 0x8a, 0x58, 0x57, 0xb0, 0x56, 0x65, 0x3d, 0x40, 0x64, 0x03, +0x6e, 0x7b, 0x07, 0x20, 0x99, 0x90, 0x36, 0x95, 0x9f, 0xdf, 0x3d, 0xe8, +0x00, 0xa0, 0x57, 0x8f, 0x6d, 0xa4, 0xb3, 0x1d, 0x7a, 0x06, 0xa8, 0x26, +0x41, 0xb0, 0x8c, 0x9c, 0x10, 0x85, 0x6c, 0xb4, 0x31, 0xa6, 0x5b, 0x7a, +0x10, 0x51, 0x15, 0x3c, 0xa2, 0x42, 
0xd3, 0x23, 0x02, 0xc0, 0x17, 0x7e, +0x03, 0x28, 0xba, 0xce, 0x9b, 0xae, 0xdd, 0x1a, 0x19, 0xd0, 0x15, 0xac, +0xeb, 0x20, 0x3c, 0x3a, 0x00, 0xc6, 0xbb, 0x8a, 0xd5, 0x64, 0xc2, 0x21, +0x1d, 0x6c, 0x15, 0x5a, 0xd3, 0x44, 0x98, 0x14, 0x95, 0xb3, 0xb7, 0xdd, +0xa6, 0xea, 0x06, 0x54, 0x78, 0xc3, 0xe8, 0x79, 0x9b, 0x86, 0x29, 0x76, +0x8b, 0x6b, 0xaa, 0x0d, 0xa8, 0x2f, 0x22, 0x2a, 0xeb, 0x68, 0x81, 0x6c, +0x56, 0xfd, 0x79, 0xac, 0x79, 0x4b, 0xa0, 0x01, 0x3f, 0x17, 0x43, 0x82, +0xb4, 0xd5, 0x00, 0x14, 0xb7, 0xf5, 0x00, 0xf4, 0x15, 0xa8, 0xd7, 0x4b, +0xb1, 0xbc, 0xa8, 0x36, 0x98, 0xf0, 0x8c, 0xe7, 0xf4, 0x7b, 0x35, 0xd8, +0xad, 0x0d, 0x5f, 0x9d, 0x96, 0xab, 0xed, 0x48, 0xe2, 0xdc, 0x1c, 0xbe, +0x12, 0xfa, 0x41, 0x6f, 0xf5, 0x1e, 0xb6, 0x9f, 0xee, 0xac, 0x21, 0xf4, +0xf6, 0x00, 0x38, 0xb1, 0x1f, 0xfd, 0xd0, 0x0e, 0xc7, 0xdd, 0xa0, 0x39, +0x07, 0x8c, 0x35, 0x1f, 0x7e, 0xcc, 0xbf, 0xf6, 0xe0, 0x06, 0x66, 0x7d, +0x10, 0x3f, 0xc5, 0x3e, 0xde, 0x42, 0xf9, 0x3d, 0x00, 0x54, 0x81, 0x67, +0x8a, 0xe6, 0x63, 0x0d, 0x01, 0xd0, 0x31, 0xe0, 0x6e, 0xd0, 0xe1, 0x59, +0xf6, 0x1b, 0xf7, 0x0d, 0x52, 0x06, 0x80, 0x61, 0x4f, 0xe8, 0x77, 0xdd, +0x6f, 0x48, 0x20, 0x1d, 0xbb, 0x2a, 0x16, 0x8b, 0x54, 0x87, 0x92, 0x83, +0xe6, 0x8f, 0x55, 0x59, 0x06, 0x00, 0xe9, 0xc5, 0xce, 0x21, 0x63, 0x87, +0xaf, 0x86, 0xcc, 0xba, 0xd6, 0xe7, 0x00, 0xf6, 0x91, 0x92, 0x92, 0xea, +0xe8, 0x42, 0x06, 0x69, 0x13, 0xf5, 0x00, 0xd0, 0xb0, 0xa7, 0xcb, 0x4c, +0xb0, 0xd2, 0x2d, 0x28, 0x63, 0xf0, 0x6a, 0xc7, 0x80, 0x6a, 0x19, 0xb2, +0x66, 0x51, 0xf3, 0xb1, 0x21, 0xa0, 0x48, 0xad, 0x1e, 0x80, 0x62, 0xaf, +0x00, 0xf4, 0xa5, 0x4e, 0x83, 0x75, 0x1b, 0xfe, 0x00, 0xc4, 0xcf, 0x55, +0xb2, 0x50, 0xa6, 0xeb, 0x38, 0xed, 0x8f, 0xd3, 0x1d, 0x00, 0xf6, 0xe3, +0x90, 0x1c, 0x60, 0x9e, 0x8e, 0xeb, 0x0b, 0xba, 0x44, 0x06, 0x68, 0xbb, +0xd3, 0x10, 0x63, 0x35, 0xe1, 0x86, 0x5c, 0x5c, 0x2b, 0x85, 0xa6, 0xe7, +0x38, 0x2c, 0x18, 0x83, 0x1f, 0x8f, 0x9b, 0x8e, 0x4d, 0x26, 0xcd, 0x34, +0x0c, 0x66, 0x1d, 0x70, 0xb7, 0x01, 0xe6, 0x02, 0xa8, 0x51, 0x63, 0xcf, +0xbb, 0x03, 0xca, 0x85, 0xc6, 0x9c, 0xf6, 0xf1, 0x51, 0xe0, 0x60, 0x07, +0x40, 0x86, 0xf0, 0x1e, 0x6e, 0xef, 0x61, 0x10, 0xd9, 0x36, 0xcc, 0xfc, +0x58, 0xe2, 0x37, 0x0d, 0x58, 0xb7, 0xbe, 0xca, 0xc9, 0xd8, 0xcd, 0xaa, +0xd5, 0x5b, 0x77, 0x83, 0xcb, 0x0e, 0x30, 0xce, 0xc8, 0xb8, 0xcd, 0xbf, +0x1e, 0x63, 0x04, 0xad, 0xb7, 0xcd, 0x43, 0x62, 0x4c, 0xe0, 0x1a, 0xd4, +0x21, 0xe2, 0xdd, 0x33, 0xdf, 0xb1, 0xdd, 0xdc, 0x01, 0x22, 0x18, 0xce, +0xa1, 0xd8, 0xcb, 0x67, 0xd5, 0x38, 0x4a, 0xbc, 0xd5, 0x81, 0x3d, 0x03, +0x98, 0x35, 0x60, 0x41, 0x85, 0x0c, 0x1d, 0xe7, 0x76, 0xf8, 0x11, 0x52, +0x76, 0xf6, 0x06, 0x16, 0x02, 0x45, 0xc8, 0xd8, 0x2f, 0x5e, 0x57, 0xbc, +0x3b, 0x89, 0x97, 0x09, 0x3e, 0x03, 0x34, 0x1a, 0x9d, 0x37, 0x87, 0x48, +0x0a, 0xe0, 0xa7, 0x4f, 0x8c, 0x3a, 0xa2, 0xaf, 0xfd, 0x7b, 0x80, 0xcf, +0xe5, 0x18, 0x61, 0x68, 0xba, 0x61, 0x8b, 0x09, 0xaa, 0xa3, 0x0c, 0x47, +0x3c, 0x43, 0x03, 0xac, 0xa3, 0x2e, 0x5e, 0x72, 0x0c, 0x80, 0x19, 0x61, +0xe6, 0x6e, 0x0e, 0xd9, 0xe8, 0xe8, 0xaf, 0x11, 0x9b, 0x4a, 0x73, 0x7a, +0x61, 0x66, 0xf0, 0x54, 0x1d, 0x18, 0xc8, 0x23, 0x36, 0xbf, 0xb5, 0xf4, +0x86, 0x54, 0xed, 0xb5, 0x91, 0xee, 0xb8, 0xbc, 0xde, 0xc3, 0x87, 0x9b, +0x2f, 0x81, 0xf2, 0xee, 0xa3, 0xec, 0x02 \ No newline at end of file diff --git a/build/single_file_libs/examples/testcard.png b/build/single_file_libs/examples/testcard.png new file mode 100755 index 00000000000..43088bb1696 Binary files /dev/null and b/build/single_file_libs/examples/testcard.png differ diff --git a/build/single_file_libs/zstd-in.c 
b/build/single_file_libs/zstd-in.c new file mode 100644 index 00000000000..f381ecc4f56 --- /dev/null +++ b/build/single_file_libs/zstd-in.c @@ -0,0 +1,91 @@ +/** + * \file zstd.c + * Single-file Zstandard library. + * + * Generate using: + * \code + * python combine.py -r ../../lib -x legacy/zstd_legacy.h -o zstd.c zstd-in.c + * \endcode + */ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ +/* + * Settings to bake for the single library file. + * + * Note: It's important that none of these affects 'zstd.h' (only the + * implementation files we're amalgamating). + * + * Note: MEM_MODULE stops xxhash redefining BYTE, U16, etc., which are also + * defined in mem.h (breaking C99 compatibility). + * + * Note: the undefs for xxHash allow Zstd's implementation to coincide with + * standalone xxHash usage (with global defines). + * + * Note: if you enable ZSTD_LEGACY_SUPPORT the combine.py script will need + * re-running without the "-x legacy/zstd_legacy.h" option (it excludes the + * legacy support at the source level). + * + * Note: multithreading is enabled for all platforms apart from Emscripten. + */ +#define DEBUGLEVEL 0 +#define MEM_MODULE +#undef XXH_NAMESPACE +#define XXH_NAMESPACE ZSTD_ +#undef XXH_PRIVATE_API +#define XXH_PRIVATE_API +#undef XXH_INLINE_ALL +#define XXH_INLINE_ALL +#define ZSTD_LEGACY_SUPPORT 0 +#ifndef __EMSCRIPTEN__ +#define ZSTD_MULTITHREAD +#endif +#define ZSTD_TRACE 0 +/* TODO: Can't amalgamate ASM function */ +#define ZSTD_DISABLE_ASM 1 + +/* Include zstd_deps.h first with all the options we need enabled. */ +#define ZSTD_DEPS_NEED_MALLOC +#define ZSTD_DEPS_NEED_MATH64 +#include "common/zstd_deps.h" + +#include "common/debug.c" +#include "common/entropy_common.c" +#include "common/error_private.c" +#include "common/fse_decompress.c" +#include "common/threading.c" +#include "common/pool.c" +#include "common/zstd_common.c" + +#include "compress/fse_compress.c" +#include "compress/hist.c" +#include "compress/huf_compress.c" +#include "compress/zstd_compress_literals.c" +#include "compress/zstd_compress_sequences.c" +#include "compress/zstd_compress_superblock.c" +#include "compress/zstd_preSplit.c" +#include "compress/zstd_compress.c" +#include "compress/zstd_double_fast.c" +#include "compress/zstd_fast.c" +#include "compress/zstd_lazy.c" +#include "compress/zstd_ldm.c" +#include "compress/zstd_opt.c" +#ifdef ZSTD_MULTITHREAD +#include "compress/zstdmt_compress.c" +#endif + +#include "decompress/huf_decompress.c" +#include "decompress/zstd_ddict.c" +#include "decompress/zstd_decompress.c" +#include "decompress/zstd_decompress_block.c" + +#include "dictBuilder/cover.c" +#include "dictBuilder/divsufsort.c" +#include "dictBuilder/fastcover.c" +#include "dictBuilder/zdict.c" diff --git a/build/single_file_libs/zstddeclib-in.c b/build/single_file_libs/zstddeclib-in.c new file mode 100644 index 00000000000..8d9c1f54bb7 --- /dev/null +++ b/build/single_file_libs/zstddeclib-in.c @@ -0,0 +1,62 @@ +/** + * \file zstddeclib.c + * Single-file Zstandard decompressor. 
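+ * + * Illustrative note (not in the original header): the amalgamated file keeps the + * public decompression API from zstd.h, so existing callers of e.g. + * ZSTD_decompress() and ZSTD_getFrameContentSize() can build against it unchanged.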
+ * + * Generate using: + * \code + * python combine.py -r ../../lib -x legacy/zstd_legacy.h -o zstddeclib.c zstddeclib-in.c + * \endcode + */ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ +/* + * Settings to bake for the standalone decompressor. + * + * Note: It's important that none of these affects 'zstd.h' (only the + * implementation files we're amalgamating). + * + * Note: MEM_MODULE stops xxhash redefining BYTE, U16, etc., which are also + * defined in mem.h (breaking C99 compatibility). + * + * Note: the undefs for xxHash allow Zstd's implementation to coincide with + * standalone xxHash usage (with global defines). + * + * Note: if you enable ZSTD_LEGACY_SUPPORT the combine.py script will need + * re-running without the "-x legacy/zstd_legacy.h" option (it excludes the + * legacy support at the source level). + */ +#define DEBUGLEVEL 0 +#define MEM_MODULE +#undef XXH_NAMESPACE +#define XXH_NAMESPACE ZSTD_ +#undef XXH_PRIVATE_API +#define XXH_PRIVATE_API +#undef XXH_INLINE_ALL +#define XXH_INLINE_ALL +#define ZSTD_LEGACY_SUPPORT 0 +#define ZSTD_STRIP_ERROR_STRINGS +#define ZSTD_TRACE 0 +/* TODO: Can't amalgamate ASM function */ +#define ZSTD_DISABLE_ASM 1 + +/* Include zstd_deps.h first with all the options we need enabled. */ +#define ZSTD_DEPS_NEED_MALLOC +#include "common/zstd_deps.h" + +#include "common/debug.c" +#include "common/entropy_common.c" +#include "common/error_private.c" +#include "common/fse_decompress.c" +#include "common/zstd_common.c" + +#include "decompress/huf_decompress.c" +#include "decompress/zstd_ddict.c" +#include "decompress/zstd_decompress.c" +#include "decompress/zstd_decompress_block.c" diff --git a/contrib/VS2005/fullbench/fullbench.vcproj b/contrib/VS2005/fullbench/fullbench.vcproj index c67490c6f44..98f8593769a 100644 --- a/contrib/VS2005/fullbench/fullbench.vcproj +++ b/contrib/VS2005/fullbench/fullbench.vcproj diff --git a/contrib/adaptive-compression/README.md b/contrib/adaptive-compression/README.md deleted file mode 100644 --- a/contrib/adaptive-compression/README.md +++ /dev/null tmp.zst"` - -If a file is provided, it is also possible to force writing to stdout using the `-c` flag like so: - -`zstd-adaptive -c FILE | ssh "cat - > tmp.zst"` - -Several options described below can be used to control the behavior of `zstd-adaptive`. More specifically, using the `-l#` and `-u#` flags will set lower and upper bounds so that the compression level will always be within that range. The `-i#` flag can also be used to change the initial compression level. If an initial compression level is not provided, the initial compression level will be chosen such that it is within the appropriate range (it becomes equal to the lower bound).
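As a minimal sketch of that rule (hypothetical helper names; the real tool may report an out-of-range `-i#` differently):

```c
#include <stdio.h>

/* Pick the starting compression level from the -l#/-u#/-i# options.
 * requested == 0 stands in for "no -i# provided": start at the lower bound.
 * An explicit level outside [lower, upper] is rejected. */
static int pickInitialLevel(int requested, int lower, int upper, int* error)
{
    *error = 0;
    if (requested == 0) return lower;             /* default to the lower bound */
    if (requested < lower || requested > upper) { /* must be within the bounds */
        *error = 1;
        return lower;
    }
    return requested;
}

int main(void)
{
    int err;
    int const level = pickInitialLevel(0, 5, 18, &err); /* like -l5 -u18, no -i# */
    printf("start at level %d (err=%d)\n", level, err); /* starts at level 5 */
    return 0;
}
```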
- -### Options -`-oFILE` : write output to `FILE` - -`-i#` : provide initial compression level (must be within the appropriate bounds) - -`-h` : display help/information - -`-f` : force the compression level to stay constant - -`-c` : force write to `stdout` - -`-p` : hide progress bar - -`-q` : quiet mode -- do not show progress bar or other information - -`-l#` : set a lower bound on the compression level (default is 1) - -`-u#` : set an upper bound on the compression level (default is 22) -### Benchmarking / Test results -#### Artificial Tests -These artificial tests were run using the `pv` command line utility in order to limit pipe speeds (25 MB/s read and 5 MB/s write limits were chosen to mimic severe throughput constraints). A 40 GB backup file was sent through a pipeline, compressed, and written out to a file. Compression time, size, and ratio were computed. Data for `zstd -15` was excluded from these tests because the test runs quite long. - -
25 MB/s read limit
- -| Compressor Name | Ratio | Compressed Size | Compression Time | -|:----------------|------:|----------------:|-----------------:| -| zstd -3 | 2.108 | 20.718 GB | 29m 48.530s | -| zstd-adaptive | 2.230 | 19.581 GB | 29m 48.798s | - -
- -
5 MB/s write limit
- -| Compressor Name | Ratio | Compressed Size | Compression Time | -|:----------------|------:|----------------:|-----------------:| -| zstd -3 | 2.108 | 20.718 GB | 1h 10m 43.076s | -| zstd-adaptive | 2.249 | 19.412 GB | 1h 06m 15.577s | - -
- -The commands used for this test generally followed the form: - -`cat FILE | pv -L 25m -q | COMPRESSION | pv -q > tmp.zst # impose 25 MB/s read limit` - -`cat FILE | pv -q | COMPRESSION | pv -L 5m -q > tmp.zst # impose 5 MB/s write limit` - -#### SSH Tests - -The following tests were performed by piping a relatively large backup file (approximately 80 GB) through compression and over SSH to be stored on a server. The test data includes statistics for time and compressed size on `zstd` at several compression levels, as well as `zstd-adaptive`. The data highlights the potential advantages that `zstd-adaptive` has over using a low static compression level and the negative impacts that using an excessively high static compression level can have on -pipe throughput. - -| Compressor Name | Ratio | Compressed Size | Compression Time | -|:----------------|------:|----------------:|-----------------:| -| zstd -3 | 2.212 | 32.426 GB | 1h 17m 59.756s | -| zstd -15 | 2.374 | 30.213 GB | 2h 56m 59.441s | -| zstd-adaptive | 2.315 | 30.993 GB | 1h 18m 52.860s | - -The commands used for this test generally followed the form: - -`cat FILE | COMPRESSION | ssh dev "cat - > tmp.zst"` diff --git a/contrib/adaptive-compression/adapt.c b/contrib/adaptive-compression/adapt.c deleted file mode 100644 index 8fb4047e996..00000000000 --- a/contrib/adaptive-compression/adapt.c +++ /dev/null @@ -1,1129 +0,0 @@ -/* - * Copyright (c) 2017-present, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - */ - -#include <stdio.h> /* fprintf */ -#include <stdlib.h> /* malloc, free */ -#include <pthread.h> /* pthread functions */ -#include <string.h> /* memset */ -#include "zstd_internal.h" -#include "util.h" -#include "timefn.h" /* UTIL_time_t, UTIL_getTime, UTIL_getSpanTimeMicro */ - -#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) -#define PRINT(...) fprintf(stdout, __VA_ARGS__) -#define DEBUG(l, ...)
{ if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); } } -#define FILE_CHUNK_SIZE 4 << 20 -#define MAX_NUM_JOBS 2 -#define stdinmark "/*stdin*\\" -#define stdoutmark "/*stdout*\\" -#define MAX_PATH 256 -#define DEFAULT_DISPLAY_LEVEL 1 -#define DEFAULT_COMPRESSION_LEVEL 6 -#define MAX_COMPRESSION_LEVEL_CHANGE 2 -#define CONVERGENCE_LOWER_BOUND 5 -#define CLEVEL_DECREASE_COOLDOWN 5 -#define CHANGE_BY_TWO_THRESHOLD 0.1 -#define CHANGE_BY_ONE_THRESHOLD 0.65 - -#ifndef DEBUG_MODE -static int g_displayLevel = DEFAULT_DISPLAY_LEVEL; -#else -static int g_displayLevel = DEBUG_MODE; -#endif - -static unsigned g_compressionLevel = DEFAULT_COMPRESSION_LEVEL; -static UTIL_time_t g_startTime; -static size_t g_streamedSize = 0; -static unsigned g_useProgressBar = 1; -static unsigned g_forceCompressionLevel = 0; -static unsigned g_minCLevel = 1; -static unsigned g_maxCLevel; - -typedef struct { - void* start; - size_t size; - size_t capacity; -} buffer_t; - -typedef struct { - size_t filled; - buffer_t buffer; -} inBuff_t; - -typedef struct { - buffer_t src; - buffer_t dst; - unsigned jobID; - unsigned lastJobPlusOne; - size_t compressedSize; - size_t dictSize; -} jobDescription; - -typedef struct { - pthread_mutex_t pMutex; - int noError; -} mutex_t; - -typedef struct { - pthread_cond_t pCond; - int noError; -} cond_t; - -typedef struct { - unsigned compressionLevel; - unsigned numJobs; - unsigned nextJobID; - unsigned threadError; - - /* - * JobIDs for the next jobs to be created, compressed, and written - */ - unsigned jobReadyID; - unsigned jobCompressedID; - unsigned jobWriteID; - unsigned allJobsCompleted; - - /* - * counter for how many jobs in a row the compression level has not changed - * if the counter becomes >= CONVERGENCE_LOWER_BOUND, the next time the - * compression level tries to change (by non-zero amount) resets the counter - * to 1 and does not apply the change - */ - unsigned convergenceCounter; - - /* - * cooldown counter in order to prevent rapid successive decreases in compression level - * whenever compression level is decreased, cooldown is set to CLEVEL_DECREASE_COOLDOWN - * whenever adaptCompressionLevel() is called and cooldown != 0, it is decremented - * as long as cooldown != 0, the compression level cannot be decreased - */ - unsigned cooldown; - - /* - * XWaitYCompletion - * Range from 0.0 to 1.0 - * if the value is not 1.0, then this implies that thread X waited on thread Y to finish - * and thread Y was XWaitYCompletion finished at the time of the wait (i.e. compressWaitWriteCompletion=0.5 - * implies that the compression thread waited on the write thread and it was only 50% finished writing a job) - */ - double createWaitCompressionCompletion; - double compressWaitCreateCompletion; - double compressWaitWriteCompletion; - double writeWaitCompressionCompletion; - - /* - * Completion values - * Range from 0.0 to 1.0 - * Jobs are divided into mini-chunks in order to measure completion - * these values are updated each time a thread finishes its operation on the - * mini-chunk (i.e. finishes writing out, compressing, etc. this mini-chunk). 
- */ - double compressionCompletion; - double writeCompletion; - double createCompletion; - - mutex_t jobCompressed_mutex; - cond_t jobCompressed_cond; - mutex_t jobReady_mutex; - cond_t jobReady_cond; - mutex_t allJobsCompleted_mutex; - cond_t allJobsCompleted_cond; - mutex_t jobWrite_mutex; - cond_t jobWrite_cond; - mutex_t compressionCompletion_mutex; - mutex_t createCompletion_mutex; - mutex_t writeCompletion_mutex; - mutex_t compressionLevel_mutex; - size_t lastDictSize; - inBuff_t input; - jobDescription* jobs; - ZSTD_CCtx* cctx; -} adaptCCtx; - -typedef struct { - adaptCCtx* ctx; - FILE* dstFile; -} outputThreadArg; - -typedef struct { - FILE* srcFile; - adaptCCtx* ctx; - outputThreadArg* otArg; -} fcResources; - -static void freeCompressionJobs(adaptCCtx* ctx) -{ - unsigned u; - for (u=0; u<ctx->numJobs; u++) { - jobDescription job = ctx->jobs[u]; - free(job.dst.start); - free(job.src.start); - } -} - -static int destroyMutex(mutex_t* mutex) -{ - if (mutex->noError) { - int const ret = pthread_mutex_destroy(&mutex->pMutex); - return ret; - } - return 0; -} - -static int destroyCond(cond_t* cond) -{ - if (cond->noError) { - int const ret = pthread_cond_destroy(&cond->pCond); - return ret; - } - return 0; -} - -static int freeCCtx(adaptCCtx* ctx) -{ - if (!ctx) return 0; - { - int error = 0; - error |= destroyMutex(&ctx->jobCompressed_mutex); - error |= destroyCond(&ctx->jobCompressed_cond); - error |= destroyMutex(&ctx->jobReady_mutex); - error |= destroyCond(&ctx->jobReady_cond); - error |= destroyMutex(&ctx->allJobsCompleted_mutex); - error |= destroyCond(&ctx->allJobsCompleted_cond); - error |= destroyMutex(&ctx->jobWrite_mutex); - error |= destroyCond(&ctx->jobWrite_cond); - error |= destroyMutex(&ctx->compressionCompletion_mutex); - error |= destroyMutex(&ctx->createCompletion_mutex); - error |= destroyMutex(&ctx->writeCompletion_mutex); - error |= destroyMutex(&ctx->compressionLevel_mutex); - error |= ZSTD_isError(ZSTD_freeCCtx(ctx->cctx)); - free(ctx->input.buffer.start); - if (ctx->jobs){ - freeCompressionJobs(ctx); - free(ctx->jobs); - } - free(ctx); - return error; - } -} - -static int initMutex(mutex_t* mutex) -{ - int const ret = pthread_mutex_init(&mutex->pMutex, NULL); - mutex->noError = !ret; - return ret; -} - -static int initCond(cond_t* cond) -{ - int const ret = pthread_cond_init(&cond->pCond, NULL); - cond->noError = !ret; - return ret; -} - -static int initCCtx(adaptCCtx* ctx, unsigned numJobs) -{ - ctx->compressionLevel = g_compressionLevel; - { - int pthreadError = 0; - pthreadError |= initMutex(&ctx->jobCompressed_mutex); - pthreadError |= initCond(&ctx->jobCompressed_cond); - pthreadError |= initMutex(&ctx->jobReady_mutex); - pthreadError |= initCond(&ctx->jobReady_cond); - pthreadError |= initMutex(&ctx->allJobsCompleted_mutex); - pthreadError |= initCond(&ctx->allJobsCompleted_cond); - pthreadError |= initMutex(&ctx->jobWrite_mutex); - pthreadError |= initCond(&ctx->jobWrite_cond); - pthreadError |= initMutex(&ctx->compressionCompletion_mutex); - pthreadError |= initMutex(&ctx->createCompletion_mutex); - pthreadError |= initMutex(&ctx->writeCompletion_mutex); - pthreadError |= initMutex(&ctx->compressionLevel_mutex); - if (pthreadError) return pthreadError; - } - ctx->numJobs = numJobs; - ctx->jobReadyID = 0; - ctx->jobCompressedID = 0; - ctx->jobWriteID = 0; - ctx->lastDictSize = 0; - - - ctx->createWaitCompressionCompletion = 1; - ctx->compressWaitCreateCompletion = 1; - ctx->compressWaitWriteCompletion = 1; - ctx->writeWaitCompressionCompletion = 1; - 
ctx->createCompletion = 1; - ctx->writeCompletion = 1; - ctx->compressionCompletion = 1; - ctx->convergenceCounter = 0; - ctx->cooldown = 0; - - ctx->jobs = calloc(1, numJobs*sizeof(jobDescription)); - - if (!ctx->jobs) { - DISPLAY("Error: could not allocate space for jobs during context creation\n"); - return 1; - } - - /* initializing jobs */ - { - unsigned jobNum; - for (jobNum=0; jobNum<numJobs; jobNum++) { - jobDescription* job = &ctx->jobs[jobNum]; - job->src.start = malloc(2 * FILE_CHUNK_SIZE); - job->dst.start = malloc(ZSTD_compressBound(FILE_CHUNK_SIZE)); - job->lastJobPlusOne = 0; - if (!job->src.start || !job->dst.start) { - DISPLAY("Could not allocate buffers for jobs\n"); - return 1; - } - job->src.capacity = FILE_CHUNK_SIZE; - job->dst.capacity = ZSTD_compressBound(FILE_CHUNK_SIZE); - } - } - - ctx->nextJobID = 0; - ctx->threadError = 0; - ctx->allJobsCompleted = 0; - - ctx->cctx = ZSTD_createCCtx(); - if (!ctx->cctx) { - DISPLAY("Error: could not allocate ZSTD_CCtx\n"); - return 1; - } - - ctx->input.filled = 0; - ctx->input.buffer.capacity = 2 * FILE_CHUNK_SIZE; - - ctx->input.buffer.start = malloc(ctx->input.buffer.capacity); - if (!ctx->input.buffer.start) { - DISPLAY("Error: could not allocate input buffer\n"); - return 1; - } - return 0; -} - -static adaptCCtx* createCCtx(unsigned numJobs) -{ - - adaptCCtx* const ctx = calloc(1, sizeof(adaptCCtx)); - if (ctx == NULL) { - DISPLAY("Error: could not allocate space for context\n"); - return NULL; - } - { - int const error = initCCtx(ctx, numJobs); - if (error) { - freeCCtx(ctx); - return NULL; - } - return ctx; - } -} - -static void signalErrorToThreads(adaptCCtx* ctx) -{ - ctx->threadError = 1; - pthread_mutex_lock(&ctx->jobReady_mutex.pMutex); - pthread_cond_signal(&ctx->jobReady_cond.pCond); - pthread_mutex_unlock(&ctx->jobReady_mutex.pMutex); - - pthread_mutex_lock(&ctx->jobCompressed_mutex.pMutex); - pthread_cond_broadcast(&ctx->jobCompressed_cond.pCond); - pthread_mutex_unlock(&ctx->jobCompressed_mutex.pMutex); - - pthread_mutex_lock(&ctx->jobWrite_mutex.pMutex); - pthread_cond_signal(&ctx->jobWrite_cond.pCond); - pthread_mutex_unlock(&ctx->jobWrite_mutex.pMutex); - - pthread_mutex_lock(&ctx->allJobsCompleted_mutex.pMutex); - pthread_cond_signal(&ctx->allJobsCompleted_cond.pCond); - pthread_mutex_unlock(&ctx->allJobsCompleted_mutex.pMutex); -} - -static void waitUntilAllJobsCompleted(adaptCCtx* ctx) -{ - if (!ctx) return; - pthread_mutex_lock(&ctx->allJobsCompleted_mutex.pMutex); - while (ctx->allJobsCompleted == 0 && !ctx->threadError) { - pthread_cond_wait(&ctx->allJobsCompleted_cond.pCond, &ctx->allJobsCompleted_mutex.pMutex); - } - pthread_mutex_unlock(&ctx->allJobsCompleted_mutex.pMutex); -} - -/* map completion percentages to values for changing compression level */ -static unsigned convertCompletionToChange(double completion) -{ - if (completion < CHANGE_BY_TWO_THRESHOLD) { - return 2; - } - else if (completion < CHANGE_BY_ONE_THRESHOLD) { - return 1; - } - else { - return 0; - } -} - -/* - * Compression level is changed depending on which part of the compression process is lagging - * Currently, three threads exist for job creation, compression, and file writing respectively.
- * adaptCompressionLevel() increments or decrements compression level based on which of the threads is lagging - * job creation or file writing lag => increased compression level - * compression thread lag => decreased compression level - * detecting which thread is lagging is done by keeping track of how many calls each thread makes to pthread_cond_wait - */ -static void adaptCompressionLevel(adaptCCtx* ctx) -{ - double createWaitCompressionCompletion; - double compressWaitCreateCompletion; - double compressWaitWriteCompletion; - double writeWaitCompressionCompletion; - double const threshold = 0.00001; - unsigned prevCompressionLevel; - - pthread_mutex_lock(&ctx->compressionLevel_mutex.pMutex); - prevCompressionLevel = ctx->compressionLevel; - pthread_mutex_unlock(&ctx->compressionLevel_mutex.pMutex); - - - if (g_forceCompressionLevel) { - pthread_mutex_lock(&ctx->compressionLevel_mutex.pMutex); - ctx->compressionLevel = g_compressionLevel; - pthread_mutex_unlock(&ctx->compressionLevel_mutex.pMutex); - return; - } - - - DEBUG(2, "adapting compression level %u\n", prevCompressionLevel); - - /* read and reset completion measurements */ - pthread_mutex_lock(&ctx->compressionCompletion_mutex.pMutex); - DEBUG(2, "createWaitCompressionCompletion %f\n", ctx->createWaitCompressionCompletion); - DEBUG(2, "writeWaitCompressionCompletion %f\n", ctx->writeWaitCompressionCompletion); - createWaitCompressionCompletion = ctx->createWaitCompressionCompletion; - writeWaitCompressionCompletion = ctx->writeWaitCompressionCompletion; - pthread_mutex_unlock(&ctx->compressionCompletion_mutex.pMutex); - - pthread_mutex_lock(&ctx->writeCompletion_mutex.pMutex); - DEBUG(2, "compressWaitWriteCompletion %f\n", ctx->compressWaitWriteCompletion); - compressWaitWriteCompletion = ctx->compressWaitWriteCompletion; - pthread_mutex_unlock(&ctx->writeCompletion_mutex.pMutex); - - pthread_mutex_lock(&ctx->createCompletion_mutex.pMutex); - DEBUG(2, "compressWaitCreateCompletion %f\n", ctx->compressWaitCreateCompletion); - compressWaitCreateCompletion = ctx->compressWaitCreateCompletion; - pthread_mutex_unlock(&ctx->createCompletion_mutex.pMutex); - DEBUG(2, "convergence counter: %u\n", ctx->convergenceCounter); - - assert(g_minCLevel <= prevCompressionLevel && g_maxCLevel >= prevCompressionLevel); - - /* adaptation logic */ - if (ctx->cooldown) ctx->cooldown--; - - if ((1-createWaitCompressionCompletion > threshold || 1-writeWaitCompressionCompletion > threshold) && ctx->cooldown == 0) { - /* create or write waiting on compression */ - /* use whichever one waited less because it was slower */ - double const completion = MAX(createWaitCompressionCompletion, writeWaitCompressionCompletion); - unsigned const change = convertCompletionToChange(completion); - unsigned const boundChange = MIN(change, prevCompressionLevel - g_minCLevel); - if (ctx->convergenceCounter >= CONVERGENCE_LOWER_BOUND && boundChange != 0) { - /* reset convergence counter, might have been a spike */ - ctx->convergenceCounter = 0; - DEBUG(2, "convergence counter reset, no change applied\n"); - } - else if (boundChange != 0) { - pthread_mutex_lock(&ctx->compressionLevel_mutex.pMutex); - ctx->compressionLevel -= boundChange; - pthread_mutex_unlock(&ctx->compressionLevel_mutex.pMutex); - ctx->cooldown = CLEVEL_DECREASE_COOLDOWN; - ctx->convergenceCounter = 1; - - DEBUG(2, "create or write threads waiting on compression, tried to decrease compression level by %u\n\n", boundChange); - } - } - else if (1-compressWaitWriteCompletion > threshold || 
1-compressWaitCreateCompletion > threshold) { - /* compress waiting on write */ - double const completion = MIN(compressWaitWriteCompletion, compressWaitCreateCompletion); - unsigned const change = convertCompletionToChange(completion); - unsigned const boundChange = MIN(change, g_maxCLevel - prevCompressionLevel); - if (ctx->convergenceCounter >= CONVERGENCE_LOWER_BOUND && boundChange != 0) { - /* reset convergence counter, might have been a spike */ - ctx->convergenceCounter = 0; - DEBUG(2, "convergence counter reset, no change applied\n"); - } - else if (boundChange != 0) { - pthread_mutex_lock(&ctx->compressionLevel_mutex.pMutex); - ctx->compressionLevel += boundChange; - pthread_mutex_unlock(&ctx->compressionLevel_mutex.pMutex); - ctx->cooldown = 0; - ctx->convergenceCounter = 1; - - DEBUG(2, "compress waiting on write or create, tried to increase compression level by %u\n\n", boundChange); - } - - } - - pthread_mutex_lock(&ctx->compressionLevel_mutex.pMutex); - if (ctx->compressionLevel == prevCompressionLevel) { - ctx->convergenceCounter++; - } - pthread_mutex_unlock(&ctx->compressionLevel_mutex.pMutex); -} - -static size_t getUseableDictSize(unsigned compressionLevel) -{ - ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, 0); - unsigned const overlapLog = compressionLevel >= (unsigned)ZSTD_maxCLevel() ? 0 : 3; - size_t const overlapSize = 1 << (params.cParams.windowLog - overlapLog); - return overlapSize; -} - -static void* compressionThread(void* arg) -{ - adaptCCtx* const ctx = (adaptCCtx*)arg; - unsigned currJob = 0; - for ( ; ; ) { - unsigned const currJobIndex = currJob % ctx->numJobs; - jobDescription* const job = &ctx->jobs[currJobIndex]; - DEBUG(2, "starting compression for job %u\n", currJob); - - { - /* check if compression thread will have to wait */ - unsigned willWaitForCreate = 0; - unsigned willWaitForWrite = 0; - - pthread_mutex_lock(&ctx->jobReady_mutex.pMutex); - if (currJob + 1 > ctx->jobReadyID) willWaitForCreate = 1; - pthread_mutex_unlock(&ctx->jobReady_mutex.pMutex); - - pthread_mutex_lock(&ctx->jobWrite_mutex.pMutex); - if (currJob - ctx->jobWriteID >= ctx->numJobs) willWaitForWrite = 1; - pthread_mutex_unlock(&ctx->jobWrite_mutex.pMutex); - - - pthread_mutex_lock(&ctx->createCompletion_mutex.pMutex); - if (willWaitForCreate) { - DEBUG(2, "compression will wait for create on job %u\n", currJob); - ctx->compressWaitCreateCompletion = ctx->createCompletion; - DEBUG(2, "create completion %f\n", ctx->compressWaitCreateCompletion); - - } - else { - ctx->compressWaitCreateCompletion = 1; - } - pthread_mutex_unlock(&ctx->createCompletion_mutex.pMutex); - - pthread_mutex_lock(&ctx->writeCompletion_mutex.pMutex); - if (willWaitForWrite) { - DEBUG(2, "compression will wait for write on job %u\n", currJob); - ctx->compressWaitWriteCompletion = ctx->writeCompletion; - DEBUG(2, "write completion %f\n", ctx->compressWaitWriteCompletion); - } - else { - ctx->compressWaitWriteCompletion = 1; - } - pthread_mutex_unlock(&ctx->writeCompletion_mutex.pMutex); - - } - - /* wait until job is ready */ - pthread_mutex_lock(&ctx->jobReady_mutex.pMutex); - while (currJob + 1 > ctx->jobReadyID && !ctx->threadError) { - pthread_cond_wait(&ctx->jobReady_cond.pCond, &ctx->jobReady_mutex.pMutex); - } - pthread_mutex_unlock(&ctx->jobReady_mutex.pMutex); - - /* wait until job previously in this space is written */ - pthread_mutex_lock(&ctx->jobWrite_mutex.pMutex); - while (currJob - ctx->jobWriteID >= ctx->numJobs && !ctx->threadError) { - 
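
(To make the adaptation rule above concrete: the wait-completion values feed convertCompletionToChange(), whose thresholds are CHANGE_BY_TWO_THRESHOLD = 0.1 and CHANGE_BY_ONE_THRESHOLD = 0.65 per the defines at the top of this file. A standalone restatement of that mapping with a worked value, not additional functionality:

    /* completion: how far along the lagging thread was when another
     * thread had to wait on it (1.0 means no waiting occurred). */
    static unsigned change_for(double completion)
    {
        if (completion < 0.1)  return 2;   /* CHANGE_BY_TWO_THRESHOLD: badly lagging */
        if (completion < 0.65) return 1;   /* CHANGE_BY_ONE_THRESHOLD: mildly lagging */
        return 0;                          /* keeping up: leave the level alone */
    }

    /* Example: the writer observed compression only 50% done, so
     * change_for(0.5) == 1; a decrease is then clamped to the bounds:
     * boundChange = MIN(change, prevCompressionLevel - g_minCLevel). */

)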
pthread_cond_wait(&ctx->jobWrite_cond.pCond, &ctx->jobWrite_mutex.pMutex); - } - pthread_mutex_unlock(&ctx->jobWrite_mutex.pMutex); - /* reset compression completion */ - pthread_mutex_lock(&ctx->compressionCompletion_mutex.pMutex); - ctx->compressionCompletion = 0; - pthread_mutex_unlock(&ctx->compressionCompletion_mutex.pMutex); - - /* adapt compression level */ - if (currJob) adaptCompressionLevel(ctx); - - pthread_mutex_lock(&ctx->compressionLevel_mutex.pMutex); - DEBUG(2, "job %u compressed with level %u\n", currJob, ctx->compressionLevel); - pthread_mutex_unlock(&ctx->compressionLevel_mutex.pMutex); - - /* compress the data */ - { - size_t const compressionBlockSize = ZSTD_BLOCKSIZE_MAX; /* 128 KB */ - unsigned cLevel; - unsigned blockNum = 0; - size_t remaining = job->src.size; - size_t srcPos = 0; - size_t dstPos = 0; - - pthread_mutex_lock(&ctx->compressionLevel_mutex.pMutex); - cLevel = ctx->compressionLevel; - pthread_mutex_unlock(&ctx->compressionLevel_mutex.pMutex); - - /* reset compressed size */ - job->compressedSize = 0; - DEBUG(2, "calling ZSTD_compressBegin()\n"); - /* begin compression */ - { - size_t const useDictSize = MIN(getUseableDictSize(cLevel), job->dictSize); - ZSTD_parameters params = ZSTD_getParams(cLevel, 0, useDictSize); - params.cParams.windowLog = 23; - { - size_t const initError = ZSTD_compressBegin_advanced(ctx->cctx, job->src.start + job->dictSize - useDictSize, useDictSize, params, 0); - size_t const windowSizeError = ZSTD_CCtx_setParameter(ctx->cctx, ZSTD_c_forceMaxWindow, 1); - if (ZSTD_isError(initError) || ZSTD_isError(windowSizeError)) { - DISPLAY("Error: something went wrong while starting compression\n"); - signalErrorToThreads(ctx); - return arg; - } - } - } - DEBUG(2, "finished with ZSTD_compressBegin()\n"); - - do { - size_t const actualBlockSize = MIN(remaining, compressionBlockSize); - - /* continue compression */ - if (currJob != 0 || blockNum != 0) { /* not first block of first job flush/overwrite the frame header */ - size_t const hSize = ZSTD_compressContinue(ctx->cctx, job->dst.start + dstPos, job->dst.capacity - dstPos, job->src.start + job->dictSize + srcPos, 0); - if (ZSTD_isError(hSize)) { - DISPLAY("Error: something went wrong while continuing compression\n"); - job->compressedSize = hSize; - signalErrorToThreads(ctx); - return arg; - } - ZSTD_invalidateRepCodes(ctx->cctx); - } - { - size_t const ret = (job->lastJobPlusOne == currJob + 1 && remaining == actualBlockSize) ? 
- ZSTD_compressEnd (ctx->cctx, job->dst.start + dstPos, job->dst.capacity - dstPos, job->src.start + job->dictSize + srcPos, actualBlockSize) : - ZSTD_compressContinue(ctx->cctx, job->dst.start + dstPos, job->dst.capacity - dstPos, job->src.start + job->dictSize + srcPos, actualBlockSize); - if (ZSTD_isError(ret)) { - DISPLAY("Error: something went wrong during compression: %s\n", ZSTD_getErrorName(ret)); - signalErrorToThreads(ctx); - return arg; - } - job->compressedSize += ret; - remaining -= actualBlockSize; - srcPos += actualBlockSize; - dstPos += ret; - blockNum++; - - /* update completion */ - pthread_mutex_lock(&ctx->compressionCompletion_mutex.pMutex); - ctx->compressionCompletion = 1 - (double)remaining/job->src.size; - pthread_mutex_unlock(&ctx->compressionCompletion_mutex.pMutex); - } - } while (remaining != 0); - job->dst.size = job->compressedSize; - } - pthread_mutex_lock(&ctx->jobCompressed_mutex.pMutex); - ctx->jobCompressedID++; - pthread_cond_broadcast(&ctx->jobCompressed_cond.pCond); - pthread_mutex_unlock(&ctx->jobCompressed_mutex.pMutex); - if (job->lastJobPlusOne == currJob + 1 || ctx->threadError) { - /* finished compressing all jobs */ - break; - } - DEBUG(2, "finished compressing job %u\n", currJob); - currJob++; - } - return arg; -} - -static void displayProgress(unsigned cLevel, unsigned last) -{ - UTIL_time_t currTime = UTIL_getTime(); - if (!g_useProgressBar) return; - { double const timeElapsed = (double)(UTIL_getSpanTimeMicro(g_startTime, currTime) / 1000.0); - double const sizeMB = (double)g_streamedSize / (1 << 20); - double const avgCompRate = sizeMB * 1000 / timeElapsed; - fprintf(stderr, "\r| Comp. Level: %2u | Time Elapsed: %7.2f s | Data Size: %7.1f MB | Avg Comp. Rate: %6.2f MB/s |", cLevel, timeElapsed/1000.0, sizeMB, avgCompRate); - if (last) { - fprintf(stderr, "\n"); - } else { - fflush(stderr); - } } -} - -static void* outputThread(void* arg) -{ - outputThreadArg* const otArg = (outputThreadArg*)arg; - adaptCCtx* const ctx = otArg->ctx; - FILE* const dstFile = otArg->dstFile; - - unsigned currJob = 0; - for ( ; ; ) { - unsigned const currJobIndex = currJob % ctx->numJobs; - jobDescription* const job = &ctx->jobs[currJobIndex]; - unsigned willWaitForCompress = 0; - DEBUG(2, "starting write for job %u\n", currJob); - - pthread_mutex_lock(&ctx->jobCompressed_mutex.pMutex); - if (currJob + 1 > ctx->jobCompressedID) willWaitForCompress = 1; - pthread_mutex_unlock(&ctx->jobCompressed_mutex.pMutex); - - - pthread_mutex_lock(&ctx->compressionCompletion_mutex.pMutex); - if (willWaitForCompress) { - /* write thread is waiting on compression thread */ - ctx->writeWaitCompressionCompletion = ctx->compressionCompletion; - DEBUG(2, "writer thread waiting for nextJob: %u, writeWaitCompressionCompletion %f\n", currJob, ctx->writeWaitCompressionCompletion); - } - else { - ctx->writeWaitCompressionCompletion = 1; - } - pthread_mutex_unlock(&ctx->compressionCompletion_mutex.pMutex); - - pthread_mutex_lock(&ctx->jobCompressed_mutex.pMutex); - while (currJob + 1 > ctx->jobCompressedID && !ctx->threadError) { - pthread_cond_wait(&ctx->jobCompressed_cond.pCond, &ctx->jobCompressed_mutex.pMutex); - } - pthread_mutex_unlock(&ctx->jobCompressed_mutex.pMutex); - - /* reset write completion */ - pthread_mutex_lock(&ctx->writeCompletion_mutex.pMutex); - ctx->writeCompletion = 0; - pthread_mutex_unlock(&ctx->writeCompletion_mutex.pMutex); - - { - size_t const compressedSize = job->compressedSize; - size_t remaining = compressedSize; - if (ZSTD_isError(compressedSize)) { - 
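
(A quick unit check on displayProgress() above: UTIL_getSpanTimeMicro() returns microseconds, so timeElapsed holds milliseconds; sizeMB * 1000 / timeElapsed therefore yields MB/s, and timeElapsed/1000.0 prints seconds. With hypothetical readings of 512 MB streamed over 8000 ms, the bar would show 8.00 s elapsed and 512 * 1000 / 8000 = 64.00 MB/s.)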
DISPLAY("Error: an error occurred during compression\n"); - signalErrorToThreads(ctx); - return arg; - } - { - size_t const blockSize = MAX(compressedSize >> 7, 1 << 10); - size_t pos = 0; - for ( ; ; ) { - size_t const writeSize = MIN(remaining, blockSize); - size_t const ret = fwrite(job->dst.start + pos, 1, writeSize, dstFile); - if (ret != writeSize) break; - pos += ret; - remaining -= ret; - - /* update completion variable for writing */ - pthread_mutex_lock(&ctx->writeCompletion_mutex.pMutex); - ctx->writeCompletion = 1 - (double)remaining/compressedSize; - pthread_mutex_unlock(&ctx->writeCompletion_mutex.pMutex); - - if (remaining == 0) break; - } - if (pos != compressedSize) { - DISPLAY("Error: an error occurred during file write operation\n"); - signalErrorToThreads(ctx); - return arg; - } - } - } - { - unsigned cLevel; - pthread_mutex_lock(&ctx->compressionLevel_mutex.pMutex); - cLevel = ctx->compressionLevel; - pthread_mutex_unlock(&ctx->compressionLevel_mutex.pMutex); - displayProgress(cLevel, job->lastJobPlusOne == currJob + 1); - } - pthread_mutex_lock(&ctx->jobWrite_mutex.pMutex); - ctx->jobWriteID++; - pthread_cond_signal(&ctx->jobWrite_cond.pCond); - pthread_mutex_unlock(&ctx->jobWrite_mutex.pMutex); - - if (job->lastJobPlusOne == currJob + 1 || ctx->threadError) { - /* finished with all jobs */ - pthread_mutex_lock(&ctx->allJobsCompleted_mutex.pMutex); - ctx->allJobsCompleted = 1; - pthread_cond_signal(&ctx->allJobsCompleted_cond.pCond); - pthread_mutex_unlock(&ctx->allJobsCompleted_mutex.pMutex); - break; - } - DEBUG(2, "finished writing job %u\n", currJob); - currJob++; - - } - return arg; -} - -static int createCompressionJob(adaptCCtx* ctx, size_t srcSize, int last) -{ - unsigned const nextJob = ctx->nextJobID; - unsigned const nextJobIndex = nextJob % ctx->numJobs; - jobDescription* const job = &ctx->jobs[nextJobIndex]; - - - job->src.size = srcSize; - job->jobID = nextJob; - if (last) job->lastJobPlusOne = nextJob + 1; - { - /* swap buffer */ - void* const copy = job->src.start; - job->src.start = ctx->input.buffer.start; - ctx->input.buffer.start = copy; - } - job->dictSize = ctx->lastDictSize; - - ctx->nextJobID++; - /* if not on the last job, reuse data as dictionary in next job */ - if (!last) { - size_t const oldDictSize = ctx->lastDictSize; - memcpy(ctx->input.buffer.start, job->src.start + oldDictSize, srcSize); - ctx->lastDictSize = srcSize; - ctx->input.filled = srcSize; - } - - /* signal job ready */ - pthread_mutex_lock(&ctx->jobReady_mutex.pMutex); - ctx->jobReadyID++; - pthread_cond_signal(&ctx->jobReady_cond.pCond); - pthread_mutex_unlock(&ctx->jobReady_mutex.pMutex); - - return 0; -} - -static int performCompression(adaptCCtx* ctx, FILE* const srcFile, outputThreadArg* otArg) -{ - /* early error check to exit */ - if (!ctx || !srcFile || !otArg) { - return 1; - } - - /* create output thread */ - { - pthread_t out; - if (pthread_create(&out, NULL, &outputThread, otArg)) { - DISPLAY("Error: could not create output thread\n"); - signalErrorToThreads(ctx); - return 1; - } - else if (pthread_detach(out)) { - DISPLAY("Error: could not detach output thread\n"); - signalErrorToThreads(ctx); - return 1; - } - } - - /* create compression thread */ - { - pthread_t compression; - if (pthread_create(&compression, NULL, &compressionThread, ctx)) { - DISPLAY("Error: could not create compression thread\n"); - signalErrorToThreads(ctx); - return 1; - } - else if (pthread_detach(compression)) { - DISPLAY("Error: could not detach compression thread\n"); - 
signalErrorToThreads(ctx); - return 1; - } - } - { - unsigned currJob = 0; - /* creating jobs */ - for ( ; ; ) { - size_t pos = 0; - size_t const readBlockSize = 1 << 15; - size_t remaining = FILE_CHUNK_SIZE; - unsigned const nextJob = ctx->nextJobID; - unsigned willWaitForCompress = 0; - DEBUG(2, "starting creation of job %u\n", currJob); - - pthread_mutex_lock(&ctx->jobCompressed_mutex.pMutex); - if (nextJob - ctx->jobCompressedID >= ctx->numJobs) willWaitForCompress = 1; - pthread_mutex_unlock(&ctx->jobCompressed_mutex.pMutex); - - pthread_mutex_lock(&ctx->compressionCompletion_mutex.pMutex); - if (willWaitForCompress) { - /* creation thread is waiting, take measurement of completion */ - ctx->createWaitCompressionCompletion = ctx->compressionCompletion; - DEBUG(2, "create thread waiting for nextJob: %u, createWaitCompressionCompletion %f\n", nextJob, ctx->createWaitCompressionCompletion); - } - else { - ctx->createWaitCompressionCompletion = 1; - } - pthread_mutex_unlock(&ctx->compressionCompletion_mutex.pMutex); - - /* wait until the job has been compressed */ - pthread_mutex_lock(&ctx->jobCompressed_mutex.pMutex); - while (nextJob - ctx->jobCompressedID >= ctx->numJobs && !ctx->threadError) { - pthread_cond_wait(&ctx->jobCompressed_cond.pCond, &ctx->jobCompressed_mutex.pMutex); - } - pthread_mutex_unlock(&ctx->jobCompressed_mutex.pMutex); - - /* reset create completion */ - pthread_mutex_lock(&ctx->createCompletion_mutex.pMutex); - ctx->createCompletion = 0; - pthread_mutex_unlock(&ctx->createCompletion_mutex.pMutex); - - while (remaining != 0 && !feof(srcFile)) { - size_t const ret = fread(ctx->input.buffer.start + ctx->input.filled + pos, 1, readBlockSize, srcFile); - if (ret != readBlockSize && !feof(srcFile)) { - /* error could not read correct number of bytes */ - DISPLAY("Error: problem occurred during read from src file\n"); - signalErrorToThreads(ctx); - return 1; - } - pos += ret; - remaining -= ret; - pthread_mutex_lock(&ctx->createCompletion_mutex.pMutex); - ctx->createCompletion = 1 - (double)remaining/((size_t)FILE_CHUNK_SIZE); - pthread_mutex_unlock(&ctx->createCompletion_mutex.pMutex); - } - if (remaining != 0 && !feof(srcFile)) { - DISPLAY("Error: problem occurred during read from src file\n"); - signalErrorToThreads(ctx); - return 1; - } - g_streamedSize += pos; - /* reading was fine, now create the compression job */ - { - int const last = feof(srcFile); - int const error = createCompressionJob(ctx, pos, last); - if (error != 0) { - signalErrorToThreads(ctx); - return error; - } - } - DEBUG(2, "finished creating job %u\n", currJob); - currJob++; - if (feof(srcFile)) { - break; - } - } - } - /* success -- created all jobs */ - return 0; -} - -static fcResources createFileCompressionResources(const char* const srcFilename, const char* const dstFilenameOrNull) -{ - fcResources fcr; - unsigned const stdinUsed = !strcmp(srcFilename, stdinmark); - FILE* const srcFile = stdinUsed ? stdin : fopen(srcFilename, "rb"); - const char* const outFilenameIntermediate = (stdinUsed && !dstFilenameOrNull) ? 
stdoutmark : dstFilenameOrNull; - const char* outFilename = outFilenameIntermediate; - char fileAndSuffix[MAX_PATH]; - size_t const numJobs = MAX_NUM_JOBS; - - memset(&fcr, 0, sizeof(fcr)); - - if (!outFilenameIntermediate) { - if (snprintf(fileAndSuffix, MAX_PATH, "%s.zst", srcFilename) + 1 > MAX_PATH) { - DISPLAY("Error: output filename is too long\n"); - return fcr; - } - outFilename = fileAndSuffix; - } - - { - unsigned const stdoutUsed = !strcmp(outFilename, stdoutmark); - FILE* const dstFile = stdoutUsed ? stdout : fopen(outFilename, "wb"); - fcr.otArg = malloc(sizeof(outputThreadArg)); - if (!fcr.otArg) { - DISPLAY("Error: could not allocate space for output thread argument\n"); - return fcr; - } - fcr.otArg->dstFile = dstFile; - } - /* checking for errors */ - if (!fcr.otArg->dstFile || !srcFile) { - DISPLAY("Error: some file(s) could not be opened\n"); - return fcr; - } - - /* creating context */ - fcr.ctx = createCCtx(numJobs); - fcr.otArg->ctx = fcr.ctx; - fcr.srcFile = srcFile; - return fcr; -} - -static int freeFileCompressionResources(fcResources* fcr) -{ - int ret = 0; - waitUntilAllJobsCompleted(fcr->ctx); - ret |= (fcr->srcFile != NULL) ? fclose(fcr->srcFile) : 0; - ret |= (fcr->ctx != NULL) ? freeCCtx(fcr->ctx) : 0; - if (fcr->otArg) { - ret |= (fcr->otArg->dstFile != stdout) ? fclose(fcr->otArg->dstFile) : 0; - free(fcr->otArg); - /* no need to freeCCtx() on otArg->ctx because it should be the same context */ - } - return ret; -} - -static int compressFilename(const char* const srcFilename, const char* const dstFilenameOrNull) -{ - int ret = 0; - fcResources fcr = createFileCompressionResources(srcFilename, dstFilenameOrNull); - g_streamedSize = 0; - ret |= performCompression(fcr.ctx, fcr.srcFile, fcr.otArg); - ret |= freeFileCompressionResources(&fcr); - return ret; -} - -static int compressFilenames(const char** filenameTable, unsigned numFiles, unsigned forceStdout) -{ - int ret = 0; - unsigned fileNum; - for (fileNum=0; fileNum<numFiles; fileNum++) { - const char* filename = filenameTable[fileNum]; - if (!forceStdout) { - ret |= compressFilename(filename, NULL); - } - else { - ret |= compressFilename(filename, stdoutmark); - } - } - return ret; -} - -/*! readU32FromChar() : - @return : unsigned integer value read from input in `char` format - allows and interprets K, KB, KiB, M, MB and MiB suffix. - Will also modify `*stringPtr`, advancing it to position where it stopped reading. - Note : function result can overflow if digit string > MAX_UINT */ -static unsigned readU32FromChar(const char** stringPtr) -{ - unsigned result = 0; - while ((**stringPtr >='0') && (**stringPtr <='9')) - result *= 10, result += **stringPtr - '0', (*stringPtr)++ ; - if ((**stringPtr=='K') || (**stringPtr=='M')) { - result <<= 10; - if (**stringPtr=='M') result <<= 10; - (*stringPtr)++ ; - if (**stringPtr=='i') (*stringPtr)++; - if (**stringPtr=='B') (*stringPtr)++; - } - return result; -} - -static void help(const char* progPath) -{ - PRINT("Usage:\n"); - PRINT(" %s [options] [file(s)]\n", progPath); - PRINT("\n"); - PRINT("Options:\n"); - PRINT(" -oFILE : specify the output file name\n"); - PRINT(" -i# : provide initial compression level -- default %d, must be in the range [L, U] where L and U are bound values (see below for defaults)\n", DEFAULT_COMPRESSION_LEVEL); - PRINT(" -h : display help/information\n"); - PRINT(" -f : force the compression level to stay constant\n"); - PRINT(" -c : force write to stdout\n"); - PRINT(" -p : hide progress bar\n"); - PRINT(" -q : quiet mode -- do not show progress bar or other information\n"); - PRINT(" -l# : provide lower bound for compression level -- default 1\n"); - PRINT(" -u# : provide upper bound for compression level -- default %u\n", ZSTD_maxCLevel()); -} -/* return 0 if successful, else return error */ -int main(int argCount, const char* argv[]) -{ - const char* outFilename = NULL; - const char** filenameTable = (const char**)malloc(argCount*sizeof(const char*)); - unsigned filenameIdx = 0; - unsigned forceStdout = 0; - unsigned
providedInitialCLevel = 0; - int ret = 0; - int argNum; - - if (filenameTable == NULL) { - DISPLAY("Error: could not allocate space for filename table.\n"); - return 1; - } - - filenameTable[0] = stdinmark; - g_maxCLevel = ZSTD_maxCLevel(); - - for (argNum=1; argNum<argCount; argNum++) { - const char* argument = argv[argNum]; - - if (argument[0]=='-' && strlen(argument) > 1) { - switch (argument[1]) { - case 'o': - argument += 2; - outFilename = argument; - break; - case 'i': - argument += 2; - g_compressionLevel = readU32FromChar(&argument); - providedInitialCLevel = 1; - break; - case 'h': - help(argv[0]); - goto _main_exit; - case 'p': - g_useProgressBar = 0; - break; - case 'c': - forceStdout = 1; - outFilename = stdoutmark; - break; - case 'f': - g_forceCompressionLevel = 1; - break; - case 'q': - g_useProgressBar = 0; - g_displayLevel = 0; - break; - case 'l': - argument += 2; - g_minCLevel = readU32FromChar(&argument); - break; - case 'u': - argument += 2; - g_maxCLevel = readU32FromChar(&argument); - break; - default: - DISPLAY("Error: invalid argument provided\n"); - ret = 1; - goto _main_exit; - } - continue; - } - - /* regular files to be compressed */ - filenameTable[filenameIdx++] = argument; - } - - /* check initial, max, and min compression levels */ - { - unsigned const minMaxInconsistent = g_minCLevel > g_maxCLevel; - unsigned const initialNotInRange = g_minCLevel > g_compressionLevel || g_maxCLevel < g_compressionLevel; - if (minMaxInconsistent || (initialNotInRange && providedInitialCLevel)) { - DISPLAY("Error: provided compression level parameters are invalid\n"); - ret = 1; - goto _main_exit; - } - else if (initialNotInRange) { - g_compressionLevel = g_minCLevel; - } - } - - /* error checking with number of files */ - if (filenameIdx > 1 && (outFilename != NULL && strcmp(outFilename, stdoutmark))) { - DISPLAY("Error: multiple input files provided, cannot use specified output file\n"); - ret = 1; - goto _main_exit; - } - - /* compress files */ - if (filenameIdx <= 1) { - ret |= compressFilename(filenameTable[0], outFilename); - } - else { - ret |= compressFilenames(filenameTable, filenameIdx, forceStdout); - } -_main_exit: - free(filenameTable); - return ret; -} diff --git a/contrib/adaptive-compression/datagencli.c b/contrib/adaptive-compression/datagencli.c deleted file mode 100644 index 5f3c9314049..00000000000 --- a/contrib/adaptive-compression/datagencli.c +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - */ - - -/*-************************************ -* Dependencies -**************************************/ -#include "util.h" /* Compiler options */ -#include <stdio.h> /* fprintf, stderr */ -#include "datagen.h" /* RDG_generate */ - - -/*-************************************ -* Constants -**************************************/ -#define KB *(1 <<10) -#define MB *(1 <<20) -#define GB *(1U<<30) - -#define SIZE_DEFAULT ((64 KB) + 1) -#define SEED_DEFAULT 0 -#define COMPRESSIBILITY_DEFAULT 50 - - -/*-************************************ -* Macros -**************************************/ -#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) -#define DISPLAYLEVEL(l, ...)
if (displayLevel>=l) { DISPLAY(__VA_ARGS__); } -static unsigned displayLevel = 2; - - -/*-******************************************************* -* Command line -*********************************************************/ -static int usage(const char* programName) -{ - DISPLAY( "Compressible data generator\n"); - DISPLAY( "Usage :\n"); - DISPLAY( " %s [args]\n", programName); - DISPLAY( "\n"); - DISPLAY( "Arguments :\n"); - DISPLAY( " -g# : generate # data (default:%i)\n", SIZE_DEFAULT); - DISPLAY( " -s# : Select seed (default:%i)\n", SEED_DEFAULT); - DISPLAY( " -P# : Select compressibility in %% (default:%i%%)\n", - COMPRESSIBILITY_DEFAULT); - DISPLAY( " -h : display help and exit\n"); - return 0; -} - - -int main(int argc, const char** argv) -{ - unsigned probaU32 = COMPRESSIBILITY_DEFAULT; - double litProba = 0.0; - U64 size = SIZE_DEFAULT; - U32 seed = SEED_DEFAULT; - const char* const programName = argv[0]; - - int argNb; - for(argNb=1; argNb<argc; argNb++) { - const char* argument = argv[argNb]; - - if(!argument) continue; /* Protection if argument empty */ - - if (*argument=='-') { - argument++; - while (*argument!=0) { - switch(*argument) - { - case 'h': - return usage(programName); - case 'g': - argument++; - size = 0; - while ((*argument>='0') && (*argument<='9')) - size *= 10, size += *argument++ - '0'; - if (*argument=='K') { size <<= 10; argument++; } - if (*argument=='M') { size <<= 20; argument++; } - if (*argument=='G') { size <<= 30; argument++; } - if (*argument=='B') { argument++; } - break; - case 's': - argument++; - seed=0; - while ((*argument>='0') && (*argument<='9')) - seed *= 10, seed += *argument++ - '0'; - break; - case 'P': - argument++; - probaU32 = 0; - while ((*argument>='0') && (*argument<='9')) - probaU32 *= 10, probaU32 += *argument++ - '0'; - if (probaU32>100) probaU32 = 100; - break; - case 'L': /* hidden argument : Literal distribution probability */ - argument++; - litProba=0.; - while ((*argument>='0') && (*argument<='9')) - litProba *= 10, litProba += *argument++ - '0'; - if (litProba>100.) litProba=100.; - litProba /= 100.; - break; - case 'v': - displayLevel = 4; - argument++; - break; - default: - return usage(programName); - } - } } } /* for(argNb=1; argNb<argc; argNb++) */ - - DISPLAYLEVEL(3, "Seed = %u \n", (unsigned)seed); - - RDG_genStdout(size, (double)probaU32/100, litProba, seed); - DISPLAYLEVEL(1, "\n"); - - return 0; -} diff --git a/contrib/adaptive-compression/test-correctness.sh b/contrib/adaptive-compression/test-correctness.sh deleted file mode 100755 --- a/contrib/adaptive-compression/test-correctness.sh +++ /dev/null -echo -e "correctness tests -- general" -./datagen -s1 -g1GB > tmp -./adapt -otmp.zst tmp -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s2 -g500MB > tmp -./adapt -otmp.zst tmp -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s3 -g250MB > tmp -./adapt -otmp.zst tmp -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s4 -g125MB > tmp -./adapt -otmp.zst tmp -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s5 -g50MB > tmp -./adapt -otmp.zst tmp -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s6 -g25MB > tmp -./adapt -otmp.zst tmp -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s7 -g10MB > tmp -./adapt -otmp.zst tmp -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s8 -g5MB > tmp -./adapt -otmp.zst tmp -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s9 -g500KB > tmp -./adapt -otmp.zst tmp -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -echo -e "\ncorrectness tests -- streaming" -./datagen -s10 -g1GB > tmp -cat tmp | ./adapt > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s11 -g100MB > tmp -cat tmp | ./adapt > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s12 -g10MB > tmp -cat tmp | ./adapt > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s13 -g1MB > tmp -cat tmp | ./adapt > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s14 -g100KB > tmp -cat tmp | ./adapt > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s15 -g10KB > tmp -cat tmp | ./adapt >
tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -echo -e "\ncorrectness tests -- read limit" -./datagen -s16 -g1GB > tmp -pv -L 50m -q tmp | ./adapt > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s17 -g100MB > tmp -pv -L 50m -q tmp | ./adapt > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s18 -g10MB > tmp -pv -L 50m -q tmp | ./adapt > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s19 -g1MB > tmp -pv -L 50m -q tmp | ./adapt > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s20 -g100KB > tmp -pv -L 50m -q tmp | ./adapt > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s21 -g10KB > tmp -pv -L 50m -q tmp | ./adapt > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -echo -e "\ncorrectness tests -- write limit" -./datagen -s22 -g1GB > tmp -pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s23 -g100MB > tmp -pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s24 -g10MB > tmp -pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s25 -g1MB > tmp -pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s26 -g100KB > tmp -pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s27 -g10KB > tmp -pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -echo -e "\ncorrectness tests -- read and write limits" -./datagen -s28 -g1GB > tmp -pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s29 -g100MB > tmp -pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s30 -g10MB > tmp -pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s31 -g1MB > tmp -pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s32 -g100KB > tmp -pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s33 -g10KB > tmp -pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -echo -e "\ncorrectness tests -- forced compression level" -./datagen -s34 -g1GB > tmp -./adapt tmp -otmp.zst -i11 -f -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s35 -g100MB > tmp -./adapt tmp -otmp.zst -i11 -f -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s36 -g10MB > tmp -./adapt tmp -otmp.zst -i11 -f -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s37 -g1MB > tmp -./adapt tmp -otmp.zst -i11 -f -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s38 -g100KB > tmp -./adapt tmp -otmp.zst -i11 -f -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -./datagen -s39 -g10KB > tmp -./adapt tmp -otmp.zst -i11 -f -zstd -d tmp.zst -o tmp2 -diff -s -q tmp tmp2 -rm tmp* - -echo -e "\ncorrectness tests -- window size test" -./datagen -s39 -g1GB | pv -L 25m -q | ./adapt -i1 | pv -q > tmp.zst -zstd -d tmp.zst -rm tmp* - -echo -e "\ncorrectness tests -- testing bounds" -./datagen -s40 -g1GB | pv 
-L 25m -q | ./adapt -i1 -u4 | pv -q > tmp.zst -rm tmp* - -./datagen -s41 -g1GB | ./adapt -i14 -l4 > tmp.zst -rm tmp* -make clean diff --git a/contrib/adaptive-compression/test-performance.sh b/contrib/adaptive-compression/test-performance.sh deleted file mode 100755 index 958cb3cc8cb..00000000000 --- a/contrib/adaptive-compression/test-performance.sh +++ /dev/null @@ -1,59 +0,0 @@ -echo "testing time -- no limits set" -./datagen -s1 -g1GB > tmp -time ./adapt -otmp1.zst tmp -time zstd -1 -o tmp2.zst tmp -rm tmp* - -./datagen -s2 -g2GB > tmp -time ./adapt -otmp1.zst tmp -time zstd -1 -o tmp2.zst tmp -rm tmp* - -./datagen -s3 -g4GB > tmp -time ./adapt -otmp1.zst tmp -time zstd -1 -o tmp2.zst tmp -rm tmp* - -echo -e "\ntesting compression ratio -- no limits set" -./datagen -s4 -g1GB > tmp -time ./adapt -otmp1.zst tmp -time zstd -1 -o tmp2.zst tmp -ls -l tmp1.zst tmp2.zst -rm tmp* - -./datagen -s5 -g2GB > tmp -time ./adapt -otmp1.zst tmp -time zstd -1 -o tmp2.zst tmp -ls -l tmp1.zst tmp2.zst -rm tmp* - -./datagen -s6 -g4GB > tmp -time ./adapt -otmp1.zst tmp -time zstd -1 -o tmp2.zst tmp -ls -l tmp1.zst tmp2.zst -rm tmp* - -echo -e "\ntesting performance at various compression levels -- no limits set" -./datagen -s7 -g1GB > tmp -echo "adapt" -time ./adapt -i5 -f tmp -otmp1.zst -echo "zstdcli" -time zstd -5 tmp -o tmp2.zst -ls -l tmp1.zst tmp2.zst -rm tmp* - -./datagen -s8 -g1GB > tmp -echo "adapt" -time ./adapt -i10 -f tmp -otmp1.zst -echo "zstdcli" -time zstd -10 tmp -o tmp2.zst -ls -l tmp1.zst tmp2.zst -rm tmp* - -./datagen -s9 -g1GB > tmp -echo "adapt" -time ./adapt -i15 -f tmp -otmp1.zst -echo "zstdcli" -time zstd -15 tmp -o tmp2.zst -ls -l tmp1.zst tmp2.zst -rm tmp* diff --git a/contrib/diagnose_corruption/.gitignore b/contrib/diagnose_corruption/.gitignore new file mode 100644 index 00000000000..a8e92b69b81 --- /dev/null +++ b/contrib/diagnose_corruption/.gitignore @@ -0,0 +1 @@ +check_flipped_bits diff --git a/contrib/diagnose_corruption/Makefile b/contrib/diagnose_corruption/Makefile new file mode 100644 index 00000000000..ecc9e63952c --- /dev/null +++ b/contrib/diagnose_corruption/Makefile @@ -0,0 +1,35 @@ +# ################################################################ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree).
+# ################################################################ + +.PHONY: all +all: check_flipped_bits + +ZSTDLIBDIR ?= ../../lib + +CFLAGS ?= -O3 +CFLAGS += -I$(ZSTDLIBDIR) -I$(ZSTDLIBDIR)/common -I$(ZSTDLIBDIR)/compress \ + -I$(ZSTDLIBDIR)/decompress +CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ + -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \ + -Wstrict-prototypes -Wundef \ + -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ + -Wredundant-decls -Wmissing-prototypes +CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS) +FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) + +.PHONY: $(ZSTDLIBDIR)/libzstd.a +$(ZSTDLIBDIR)/libzstd.a: + $(MAKE) -C $(ZSTDLIBDIR) libzstd.a + +check_flipped_bits: check_flipped_bits.c $(ZSTDLIBDIR)/libzstd.a + $(CC) $(FLAGS) $< -o $@$(EXT) $(ZSTDLIBDIR)/libzstd.a + +.PHONY: clean +clean: + rm -f check_flipped_bits diff --git a/contrib/diagnose_corruption/check_flipped_bits.c b/contrib/diagnose_corruption/check_flipped_bits.c new file mode 100644 index 00000000000..09ddd467476 --- /dev/null +++ b/contrib/diagnose_corruption/check_flipped_bits.c @@ -0,0 +1,400 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#define ZSTD_STATIC_LINKING_ONLY +#include "zstd.h" +#include "zstd_errors.h" + +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/types.h> +#include <sys/stat.h> + +typedef struct { + char *input; + size_t input_size; + + char *perturbed; /* same size as input */ + + char *output; + size_t output_size; + + const char *dict_file_name; + const char *dict_file_dir_name; + int32_t dict_id; + char *dict; + size_t dict_size; + ZSTD_DDict* ddict; + + ZSTD_DCtx* dctx; + + int success_count; + int error_counts[ZSTD_error_maxCode]; +} stuff_t; + +static void free_stuff(stuff_t* stuff) { + free(stuff->input); + free(stuff->output); + ZSTD_freeDDict(stuff->ddict); + free(stuff->dict); + ZSTD_freeDCtx(stuff->dctx); +} + +static void usage(void) { + fprintf(stderr, "check_flipped_bits input_filename [-d dict] [-D dict_dir]\n"); + fprintf(stderr, "\n"); + fprintf(stderr, "Arguments:\n"); + fprintf(stderr, " -d file: path to a dictionary file to use.\n"); + fprintf(stderr, " -D dir : path to a directory, with files containing dictionaries, of the\n" + " form DICTID.zstd-dict, e.g., 12345.zstd-dict.\n"); + exit(1); +} + +static void print_summary(stuff_t* stuff) { + int error_code; + fprintf(stderr, "%9d successful decompressions\n", stuff->success_count); + for (error_code = 0; error_code < ZSTD_error_maxCode; error_code++) { + int count = stuff->error_counts[error_code]; + if (count) { + fprintf( + stderr, "%9d failed decompressions with message: %s\n", + count, ZSTD_getErrorString(error_code)); + } + } +} + +static char* readFile(const char* filename, size_t* size) { + struct stat statbuf; + int ret; + FILE* f; + char *buf; + size_t bytes_read; + + ret = stat(filename, &statbuf); + if (ret != 0) { + fprintf(stderr, "stat failed: %m\n"); + return NULL; + } + if ((statbuf.st_mode & S_IFREG) != S_IFREG) { + fprintf(stderr, "Input must be regular file\n"); + return NULL; + } + + *size = statbuf.st_size; + + f = fopen(filename, "r"); + if (f == NULL) { + fprintf(stderr, "fopen failed: %m\n"); + return NULL; + } + + buf = malloc(*size);
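
(readFile() stats the file up front so a single malloc() and fread() suffice. For contrast, a self-contained slurp that sizes the buffer by seeking instead of stat(); a sketch under the same regular-file assumption the S_IFREG check above enforces, not code from this tool:

    #include <stdio.h>
    #include <stdlib.h>

    static char* slurp(const char* path, size_t* size)
    {
        FILE* f = fopen(path, "rb");
        char* buf = NULL;
        long n;
        if (f == NULL) return NULL;
        if (fseek(f, 0, SEEK_END) == 0 && (n = ftell(f)) >= 0
            && fseek(f, 0, SEEK_SET) == 0) {
            buf = malloc((size_t)n);
            if (buf != NULL && fread(buf, 1, (size_t)n, f) == (size_t)n) {
                *size = (size_t)n;          /* whole file read successfully */
            } else {
                free(buf);                  /* short read or OOM: report failure */
                buf = NULL;
            }
        }
        fclose(f);
        return buf;
    }

)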
+ if (buf == NULL) { + fprintf(stderr, "malloc failed\n"); + fclose(f); + return NULL; + } + + bytes_read = fread(buf, 1, *size, f); + if (bytes_read != *size) { + fprintf(stderr, "failed to read whole file\n"); + fclose(f); + free(buf); + return NULL; + } + + ret = fclose(f); + if (ret != 0) { + fprintf(stderr, "fclose failed: %m\n"); + free(buf); + return NULL; + } + + return buf; +} + +static ZSTD_DDict* readDict(const char* filename, char **buf, size_t* size, int32_t* dict_id) { + ZSTD_DDict* ddict; + *buf = readFile(filename, size); + if (*buf == NULL) { + fprintf(stderr, "Opening dictionary file '%s' failed\n", filename); + return NULL; + } + + ddict = ZSTD_createDDict_advanced(*buf, *size, ZSTD_dlm_byRef, ZSTD_dct_auto, ZSTD_defaultCMem); + if (ddict == NULL) { + fprintf(stderr, "Failed to create ddict.\n"); + return NULL; + } + if (dict_id != NULL) { + *dict_id = ZSTD_getDictID_fromDDict(ddict); + } + return ddict; +} + +static ZSTD_DDict* readDictByID(stuff_t *stuff, int32_t dict_id, char **buf, size_t* size) { + if (stuff->dict_file_dir_name == NULL) { + return NULL; + } else { + size_t dir_name_len = strlen(stuff->dict_file_dir_name); + int dir_needs_separator = 0; + size_t dict_file_name_alloc_size = dir_name_len + 1 /* '/' */ + 10 /* max int32_t len */ + strlen(".zstd-dict") + 1 /* '\0' */; + char *dict_file_name = malloc(dict_file_name_alloc_size); + ZSTD_DDict* ddict; + int32_t read_dict_id; + if (dict_file_name == NULL) { + fprintf(stderr, "malloc failed.\n"); + return 0; + } + + if (dir_name_len > 0 && stuff->dict_file_dir_name[dir_name_len - 1] != '/') { + dir_needs_separator = 1; + } + + snprintf( + dict_file_name, + dict_file_name_alloc_size, + "%s%s%u.zstd-dict", + stuff->dict_file_dir_name, + dir_needs_separator ? "/" : "", + dict_id); + + /* fprintf(stderr, "Loading dict %u from '%s'.\n", dict_id, dict_file_name); */ + + ddict = readDict(dict_file_name, buf, size, &read_dict_id); + if (ddict == NULL) { + fprintf(stderr, "Failed to create ddict from '%s'.\n", dict_file_name); + free(dict_file_name); + return 0; + } + if (read_dict_id != dict_id) { + fprintf(stderr, "Read dictID (%u) does not match expected (%u).\n", read_dict_id, dict_id); + free(dict_file_name); + ZSTD_freeDDict(ddict); + return 0; + } + + free(dict_file_name); + return ddict; + } +} + +static int init_stuff(stuff_t* stuff, int argc, char *argv[]) { + const char* input_filename; + + if (argc < 2) { + usage(); + } + + input_filename = argv[1]; + stuff->input_size = 0; + stuff->input = readFile(input_filename, &stuff->input_size); + if (stuff->input == NULL) { + fprintf(stderr, "Failed to read input file.\n"); + return 0; + } + + stuff->perturbed = malloc(stuff->input_size); + if (stuff->perturbed == NULL) { + fprintf(stderr, "malloc failed.\n"); + return 0; + } + memcpy(stuff->perturbed, stuff->input, stuff->input_size); + + stuff->output_size = ZSTD_DStreamOutSize(); + stuff->output = malloc(stuff->output_size); + if (stuff->output == NULL) { + fprintf(stderr, "malloc failed.\n"); + return 0; + } + + stuff->dict_file_name = NULL; + stuff->dict_file_dir_name = NULL; + stuff->dict_id = 0; + stuff->dict = NULL; + stuff->dict_size = 0; + stuff->ddict = NULL; + + if (argc > 2) { + if (!strcmp(argv[2], "-d")) { + if (argc > 3) { + stuff->dict_file_name = argv[3]; + } else { + usage(); + } + } else + if (!strcmp(argv[2], "-D")) { + if (argc > 3) { + stuff->dict_file_dir_name = argv[3]; + } else { + usage(); + } + } else { + usage(); + } + } + + if (stuff->dict_file_dir_name) { + int32_t dict_id = 
ZSTD_getDictID_fromFrame(stuff->input, stuff->input_size); + if (dict_id != 0) { + stuff->ddict = readDictByID(stuff, dict_id, &stuff->dict, &stuff->dict_size); + if (stuff->ddict == NULL) { + fprintf(stderr, "Failed to create cached ddict.\n"); + return 0; + } + stuff->dict_id = dict_id; + } + } else + if (stuff->dict_file_name) { + stuff->ddict = readDict(stuff->dict_file_name, &stuff->dict, &stuff->dict_size, &stuff->dict_id); + if (stuff->ddict == NULL) { + fprintf(stderr, "Failed to create ddict from '%s'.\n", stuff->dict_file_name); + return 0; + } + } + + stuff->dctx = ZSTD_createDCtx(); + if (stuff->dctx == NULL) { + return 0; + } + + stuff->success_count = 0; + memset(stuff->error_counts, 0, sizeof(stuff->error_counts)); + + return 1; +} + +static int test_decompress(stuff_t* stuff) { + size_t ret; + ZSTD_inBuffer in = {stuff->perturbed, stuff->input_size, 0}; + ZSTD_outBuffer out = {stuff->output, stuff->output_size, 0}; + ZSTD_DCtx* dctx = stuff->dctx; + int32_t custom_dict_id = ZSTD_getDictID_fromFrame(in.src, in.size); + char *custom_dict = NULL; + size_t custom_dict_size = 0; + ZSTD_DDict* custom_ddict = NULL; + + if (custom_dict_id != 0 && custom_dict_id != stuff->dict_id) { + /* fprintf(stderr, "Instead of dict %u, this perturbed blob wants dict %u.\n", stuff->dict_id, custom_dict_id); */ + custom_ddict = readDictByID(stuff, custom_dict_id, &custom_dict, &custom_dict_size); + } + + ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only); + + if (custom_ddict != NULL) { + ZSTD_DCtx_refDDict(dctx, custom_ddict); + } else { + ZSTD_DCtx_refDDict(dctx, stuff->ddict); + } + + while (in.pos != in.size) { + out.pos = 0; + ret = ZSTD_decompressStream(dctx, &out, &in); + + if (ZSTD_isError(ret)) { + unsigned int code = ZSTD_getErrorCode(ret); + if (code >= ZSTD_error_maxCode) { + fprintf(stderr, "Received unexpected error code!\n"); + exit(1); + } + stuff->error_counts[code]++; + /* + fprintf( + stderr, "Decompression failed: %s\n", ZSTD_getErrorName(ret)); + */ + if (custom_ddict != NULL) { + ZSTD_freeDDict(custom_ddict); + free(custom_dict); + } + return 0; + } + } + + stuff->success_count++; + + if (custom_ddict != NULL) { + ZSTD_freeDDict(custom_ddict); + free(custom_dict); + } + return 1; +} + +static int perturb_bits(stuff_t* stuff) { + size_t pos; + size_t bit; + for (pos = 0; pos < stuff->input_size; pos++) { + unsigned char old_val = stuff->input[pos]; + if (pos % 1000 == 0) { + fprintf(stderr, "Perturbing byte %zu / %zu\n", pos, stuff->input_size); + } + for (bit = 0; bit < 8; bit++) { + unsigned char new_val = old_val ^ (1 << bit); + stuff->perturbed[pos] = new_val; + if (test_decompress(stuff)) { + fprintf( + stderr, + "Flipping byte %zu bit %zu (0x%02x -> 0x%02x) " + "produced a successful decompression!\n", + pos, bit, old_val, new_val); + } + } + stuff->perturbed[pos] = old_val; + } + return 1; +} + +static int perturb_bytes(stuff_t* stuff) { + size_t pos; + size_t new_val; + for (pos = 0; pos < stuff->input_size; pos++) { + unsigned char old_val = stuff->input[pos]; + if (pos % 1000 == 0) { + fprintf(stderr, "Perturbing byte %zu / %zu\n", pos, stuff->input_size); + } + for (new_val = 0; new_val < 256; new_val++) { + stuff->perturbed[pos] = new_val; + if (test_decompress(stuff)) { + fprintf( + stderr, + "Changing byte %zu (0x%02x -> 0x%02x) " + "produced a successful decompression!\n", + pos, old_val, (unsigned char)new_val); + } + } + stuff->perturbed[pos] = old_val; + } + return 1; +} + +int main(int argc, char* argv[]) { + stuff_t stuff; + + if(!init_stuff(&stuff, argc, 
argv)) { + fprintf(stderr, "Failed to init.\n"); + return 1; + } + + if (test_decompress(&stuff)) { + fprintf(stderr, "Blob already decompresses successfully!\n"); + return 1; + } + + perturb_bits(&stuff); + + perturb_bytes(&stuff); + + print_summary(&stuff); + + free_stuff(&stuff); + + return 0; +} diff --git a/contrib/docker/Dockerfile b/contrib/docker/Dockerfile index e06a32c0dac..912bf194977 100644 --- a/contrib/docker/Dockerfile +++ b/contrib/docker/Dockerfile @@ -1,13 +1,13 @@ # Dockerfile # First image to build the binary -FROM alpine as builder +FROM alpine@sha256:69665d02cb32192e52e07644d76bc6f25abeb5410edc1c7a81a10ba3f0efb90a as builder RUN apk --no-cache add make gcc libc-dev COPY . /src RUN mkdir /pkg && cd /src && make && make DESTDIR=/pkg install # Second minimal image to only keep the built binary -FROM alpine +FROM alpine@sha256:69665d02cb32192e52e07644d76bc6f25abeb5410edc1c7a81a10ba3f0efb90a # Copy the built files COPY --from=builder /pkg / diff --git a/contrib/experimental_dict_builders/benchmarkDictBuilder/Makefile b/contrib/experimental_dict_builders/benchmarkDictBuilder/Makefile deleted file mode 100644 index 72ce04f2a56..00000000000 --- a/contrib/experimental_dict_builders/benchmarkDictBuilder/Makefile +++ /dev/null @@ -1,44 +0,0 @@ -ARG := - -CC ?= gcc -CFLAGS ?= -O3 -INCLUDES := -I ../randomDictBuilder -I ../../../programs -I ../../../lib/common -I ../../../lib -I ../../../lib/dictBuilder - -RANDOM_FILE := ../randomDictBuilder/random.c -IO_FILE := ../randomDictBuilder/io.c - -all: run clean - -.PHONY: run -run: benchmark - echo "Benchmarking with $(ARG)" - ./benchmark $(ARG) - -.PHONY: test -test: benchmarkTest clean - -.PHONY: benchmarkTest -benchmarkTest: benchmark test.sh - sh test.sh - -benchmark: benchmark.o io.o random.o libzstd.a - $(CC) $(CFLAGS) benchmark.o io.o random.o libzstd.a -o benchmark - -benchmark.o: benchmark.c - $(CC) $(CFLAGS) $(INCLUDES) -c benchmark.c - -random.o: $(RANDOM_FILE) - $(CC) $(CFLAGS) $(INCLUDES) -c $(RANDOM_FILE) - -io.o: $(IO_FILE) - $(CC) $(CFLAGS) $(INCLUDES) -c $(IO_FILE) - -libzstd.a: - $(MAKE) -C ../../../lib libzstd.a - mv ../../../lib/libzstd.a . - -.PHONY: clean -clean: - rm -f *.o benchmark libzstd.a - $(MAKE) -C ../../../lib clean - echo "Cleaning is completed" diff --git a/contrib/experimental_dict_builders/benchmarkDictBuilder/README.md b/contrib/experimental_dict_builders/benchmarkDictBuilder/README.md deleted file mode 100644 index 6a6c7f1d216..00000000000 --- a/contrib/experimental_dict_builders/benchmarkDictBuilder/README.md +++ /dev/null @@ -1,849 +0,0 @@ -Benchmarking Dictionary Builder - -### Permitted Argument: -Input File/Directory (in=fileName): required; file/directory used to build dictionary; if directory, will operate recursively for files inside directory; can include multiple files/directories, each following "in=" - -### Running Test: -make test - -### Usage: -Benchmark given input files: make ARG= followed by permitted arguments - -### Examples: -make ARG="in=../../../lib/dictBuilder in=../../../lib/compress" - -### Benchmarking Result: -- First Cover is optimized cover; the second Cover uses the optimized d and k from the first. -- For every f value of fastCover, the first one is optimized fastCover and the second one uses the optimized d and k from the first. This is run for accel values from 1 to 10.
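- (Interpreting the numeric rows that follow: judging from the NODICT baseline, the second column appears to be dictionary-build time in seconds and the third the achieved compression ratio; the fourth and fifth columns are the chosen d and k, as the next point notes.)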
-- Fourth column is chosen d and fifth column is chosen k - -github: -NODICT 0.000004 2.999642 -RANDOM 0.024560 8.791189 -LEGACY 0.727109 8.173529 -COVER 40.565676 10.652243 8 1298 -COVER 3.608284 10.652243 8 1298 -FAST f=15 a=1 4.181024 10.570882 8 1154 -FAST f=15 a=1 0.040788 10.570882 8 1154 -FAST f=15 a=2 3.548352 10.574287 6 1970 -FAST f=15 a=2 0.035535 10.574287 6 1970 -FAST f=15 a=3 3.287364 10.613950 6 1010 -FAST f=15 a=3 0.032182 10.613950 6 1010 -FAST f=15 a=4 3.184976 10.573883 6 1058 -FAST f=15 a=4 0.029878 10.573883 6 1058 -FAST f=15 a=5 3.045513 10.580640 8 1154 -FAST f=15 a=5 0.022162 10.580640 8 1154 -FAST f=15 a=6 3.003296 10.583677 6 1010 -FAST f=15 a=6 0.028091 10.583677 6 1010 -FAST f=15 a=7 2.952655 10.622551 6 1106 -FAST f=15 a=7 0.02724 10.622551 6 1106 -FAST f=15 a=8 2.945674 10.614657 6 1010 -FAST f=15 a=8 0.027264 10.614657 6 1010 -FAST f=15 a=9 3.153439 10.564018 8 1154 -FAST f=15 a=9 0.020635 10.564018 8 1154 -FAST f=15 a=10 2.950416 10.511454 6 1010 -FAST f=15 a=10 0.026606 10.511454 6 1010 -FAST f=16 a=1 3.970029 10.681035 8 1154 -FAST f=16 a=1 0.038188 10.681035 8 1154 -FAST f=16 a=2 3.422892 10.484978 6 1874 -FAST f=16 a=2 0.034702 10.484978 6 1874 -FAST f=16 a=3 3.215836 10.632631 8 1154 -FAST f=16 a=3 0.026084 10.632631 8 1154 -FAST f=16 a=4 3.081353 10.626533 6 1106 -FAST f=16 a=4 0.030032 10.626533 6 1106 -FAST f=16 a=5 3.041241 10.545027 8 1922 -FAST f=16 a=5 0.022882 10.545027 8 1922 -FAST f=16 a=6 2.989390 10.638284 6 1874 -FAST f=16 a=6 0.028308 10.638284 6 1874 -FAST f=16 a=7 3.001581 10.797136 6 1106 -FAST f=16 a=7 0.027479 10.797136 6 1106 -FAST f=16 a=8 2.984107 10.658356 8 1058 -FAST f=16 a=8 0.021099 10.658356 8 1058 -FAST f=16 a=9 2.925788 10.523869 6 1010 -FAST f=16 a=9 0.026905 10.523869 6 1010 -FAST f=16 a=10 2.889605 10.745841 6 1874 -FAST f=16 a=10 0.026846 10.745841 6 1874 -FAST f=17 a=1 4.031953 10.672080 8 1202 -FAST f=17 a=1 0.040658 10.672080 8 1202 -FAST f=17 a=2 3.458107 10.589352 8 1106 -FAST f=17 a=2 0.02926 10.589352 8 1106 -FAST f=17 a=3 3.291189 10.662714 8 1154 -FAST f=17 a=3 0.026531 10.662714 8 1154 -FAST f=17 a=4 3.154950 10.549456 8 1346 -FAST f=17 a=4 0.024991 10.549456 8 1346 -FAST f=17 a=5 3.092271 10.541670 6 1202 -FAST f=17 a=5 0.038285 10.541670 6 1202 -FAST f=17 a=6 3.166146 10.729112 6 1874 -FAST f=17 a=6 0.038217 10.729112 6 1874 -FAST f=17 a=7 3.035467 10.810485 6 1106 -FAST f=17 a=7 0.036655 10.810485 6 1106 -FAST f=17 a=8 3.035668 10.530532 6 1058 -FAST f=17 a=8 0.037715 10.530532 6 1058 -FAST f=17 a=9 2.987917 10.589802 8 1922 -FAST f=17 a=9 0.02217 10.589802 8 1922 -FAST f=17 a=10 2.981647 10.722579 8 1106 -FAST f=17 a=10 0.021948 10.722579 8 1106 -FAST f=18 a=1 4.067144 10.634943 8 1154 -FAST f=18 a=1 0.041386 10.634943 8 1154 -FAST f=18 a=2 3.507377 10.546230 6 1970 -FAST f=18 a=2 0.037572 10.546230 6 1970 -FAST f=18 a=3 3.323015 10.648061 8 1154 -FAST f=18 a=3 0.028306 10.648061 8 1154 -FAST f=18 a=4 3.216735 10.705402 6 1010 -FAST f=18 a=4 0.030755 10.705402 6 1010 -FAST f=18 a=5 3.175794 10.588154 8 1874 -FAST f=18 a=5 0.025315 10.588154 8 1874 -FAST f=18 a=6 3.127459 10.751104 8 1106 -FAST f=18 a=6 0.023897 10.751104 8 1106 -FAST f=18 a=7 3.083017 10.780402 6 1106 -FAST f=18 a=7 0.029158 10.780402 6 1106 -FAST f=18 a=8 3.069700 10.547226 8 1346 -FAST f=18 a=8 0.024046 10.547226 8 1346 -FAST f=18 a=9 3.056591 10.674759 6 1010 -FAST f=18 a=9 0.028496 10.674759 6 1010 -FAST f=18 a=10 3.063588 10.737578 8 1106 -FAST f=18 a=10 0.023033 10.737578 8 1106 -FAST f=19 a=1 4.164041 10.650333 8 1154 -FAST f=19 a=1 
0.042906 10.650333 8 1154 -FAST f=19 a=2 3.585409 10.577066 6 1058 -FAST f=19 a=2 0.038994 10.577066 6 1058 -FAST f=19 a=3 3.439643 10.639403 8 1154 -FAST f=19 a=3 0.028427 10.639403 8 1154 -FAST f=19 a=4 3.268869 10.554410 8 1298 -FAST f=19 a=4 0.026866 10.554410 8 1298 -FAST f=19 a=5 3.238225 10.615109 6 1010 -FAST f=19 a=5 0.03078 10.615109 6 1010 -FAST f=19 a=6 3.199558 10.609782 6 1874 -FAST f=19 a=6 0.030099 10.609782 6 1874 -FAST f=19 a=7 3.132395 10.794753 6 1106 -FAST f=19 a=7 0.028964 10.794753 6 1106 -FAST f=19 a=8 3.148446 10.554842 8 1298 -FAST f=19 a=8 0.024277 10.554842 8 1298 -FAST f=19 a=9 3.108324 10.668763 6 1010 -FAST f=19 a=9 0.02896 10.668763 6 1010 -FAST f=19 a=10 3.159863 10.757347 8 1106 -FAST f=19 a=10 0.023351 10.757347 8 1106 -FAST f=20 a=1 4.462698 10.661788 8 1154 -FAST f=20 a=1 0.047174 10.661788 8 1154 -FAST f=20 a=2 3.820269 10.678612 6 1106 -FAST f=20 a=2 0.040807 10.678612 6 1106 -FAST f=20 a=3 3.644955 10.648424 8 1154 -FAST f=20 a=3 0.031398 10.648424 8 1154 -FAST f=20 a=4 3.546257 10.559756 8 1298 -FAST f=20 a=4 0.029856 10.559756 8 1298 -FAST f=20 a=5 3.485248 10.646637 6 1010 -FAST f=20 a=5 0.033756 10.646637 6 1010 -FAST f=20 a=6 3.490438 10.775824 8 1106 -FAST f=20 a=6 0.028338 10.775824 8 1106 -FAST f=20 a=7 3.631289 10.801795 6 1106 -FAST f=20 a=7 0.035228 10.801795 6 1106 -FAST f=20 a=8 3.758936 10.545116 8 1346 -FAST f=20 a=8 0.027495 10.545116 8 1346 -FAST f=20 a=9 3.707024 10.677454 6 1010 -FAST f=20 a=9 0.031326 10.677454 6 1010 -FAST f=20 a=10 3.586593 10.756017 8 1106 -FAST f=20 a=10 0.027122 10.756017 8 1106 -FAST f=21 a=1 5.701396 10.655398 8 1154 -FAST f=21 a=1 0.067744 10.655398 8 1154 -FAST f=21 a=2 5.270542 10.650743 6 1106 -FAST f=21 a=2 0.052999 10.650743 6 1106 -FAST f=21 a=3 4.945294 10.652380 8 1154 -FAST f=21 a=3 0.052678 10.652380 8 1154 -FAST f=21 a=4 4.894079 10.543185 8 1298 -FAST f=21 a=4 0.04997 10.543185 8 1298 -FAST f=21 a=5 4.785417 10.630321 6 1010 -FAST f=21 a=5 0.045294 10.630321 6 1010 -FAST f=21 a=6 4.789381 10.664477 6 1874 -FAST f=21 a=6 0.046578 10.664477 6 1874 -FAST f=21 a=7 4.302955 10.805179 6 1106 -FAST f=21 a=7 0.041205 10.805179 6 1106 -FAST f=21 a=8 4.034630 10.551211 8 1298 -FAST f=21 a=8 0.040121 10.551211 8 1298 -FAST f=21 a=9 4.523868 10.799114 6 1010 -FAST f=21 a=9 0.043592 10.799114 6 1010 -FAST f=21 a=10 4.760736 10.750255 8 1106 -FAST f=21 a=10 0.043483 10.750255 8 1106 -FAST f=22 a=1 6.743064 10.640537 8 1154 -FAST f=22 a=1 0.086967 10.640537 8 1154 -FAST f=22 a=2 6.121739 10.626638 6 1970 -FAST f=22 a=2 0.066337 10.626638 6 1970 -FAST f=22 a=3 5.248851 10.640688 8 1154 -FAST f=22 a=3 0.054935 10.640688 8 1154 -FAST f=22 a=4 5.436579 10.588333 8 1298 -FAST f=22 a=4 0.064113 10.588333 8 1298 -FAST f=22 a=5 5.812815 10.652653 6 1010 -FAST f=22 a=5 0.058189 10.652653 6 1010 -FAST f=22 a=6 5.745472 10.666437 6 1874 -FAST f=22 a=6 0.057188 10.666437 6 1874 -FAST f=22 a=7 5.716393 10.806911 6 1106 -FAST f=22 a=7 0.056 10.806911 6 1106 -FAST f=22 a=8 5.698799 10.530784 8 1298 -FAST f=22 a=8 0.0583 10.530784 8 1298 -FAST f=22 a=9 5.710533 10.777391 6 1010 -FAST f=22 a=9 0.054945 10.777391 6 1010 -FAST f=22 a=10 5.685395 10.745023 8 1106 -FAST f=22 a=10 0.056526 10.745023 8 1106 -FAST f=23 a=1 7.836923 10.638828 8 1154 -FAST f=23 a=1 0.099522 10.638828 8 1154 -FAST f=23 a=2 6.627834 10.631061 6 1970 -FAST f=23 a=2 0.066769 10.631061 6 1970 -FAST f=23 a=3 5.602533 10.647288 8 1154 -FAST f=23 a=3 0.064513 10.647288 8 1154 -FAST f=23 a=4 6.005580 10.568747 8 1298 -FAST f=23 a=4 0.062022 10.568747 8 1298 
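(For orientation when reading these rows: taking "FAST f=15 a=1 4.181024 10.570882 8 1154" from the github set above, fastCover ran with f=15 and acceleration a=1, took about 4.18 seconds, reached a compression ratio of about 10.57, and selected d=8 and k=1154. Each configuration appears twice: the first line seems to time the parameter search, and the following line with the much smaller time appears to be a rerun that trains directly with the chosen d and k, which is why the ratio, d, and k repeat.)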
-FAST f=23 a=5 5.481816 10.676921 6 1010 -FAST f=23 a=5 0.058959 10.676921 6 1010 -FAST f=23 a=6 5.460444 10.666194 6 1874 -FAST f=23 a=6 0.057687 10.666194 6 1874 -FAST f=23 a=7 5.659822 10.800377 6 1106 -FAST f=23 a=7 0.06783 10.800377 6 1106 -FAST f=23 a=8 6.826940 10.522167 8 1298 -FAST f=23 a=8 0.070533 10.522167 8 1298 -FAST f=23 a=9 6.804757 10.577799 8 1682 -FAST f=23 a=9 0.069949 10.577799 8 1682 -FAST f=23 a=10 6.774933 10.742093 8 1106 -FAST f=23 a=10 0.068395 10.742093 8 1106 -FAST f=24 a=1 8.444110 10.632783 8 1154 -FAST f=24 a=1 0.094357 10.632783 8 1154 -FAST f=24 a=2 7.289578 10.631061 6 1970 -FAST f=24 a=2 0.098515 10.631061 6 1970 -FAST f=24 a=3 8.619780 10.646289 8 1154 -FAST f=24 a=3 0.098041 10.646289 8 1154 -FAST f=24 a=4 8.508455 10.555199 8 1298 -FAST f=24 a=4 0.093885 10.555199 8 1298 -FAST f=24 a=5 8.471145 10.674363 6 1010 -FAST f=24 a=5 0.088676 10.674363 6 1010 -FAST f=24 a=6 8.426727 10.667228 6 1874 -FAST f=24 a=6 0.087247 10.667228 6 1874 -FAST f=24 a=7 8.356826 10.803027 6 1106 -FAST f=24 a=7 0.085835 10.803027 6 1106 -FAST f=24 a=8 6.756811 10.522049 8 1298 -FAST f=24 a=8 0.07107 10.522049 8 1298 -FAST f=24 a=9 6.548169 10.571882 8 1682 -FAST f=24 a=9 0.0713 10.571882 8 1682 -FAST f=24 a=10 8.238079 10.736453 8 1106 -FAST f=24 a=10 0.07004 10.736453 8 1106 - - -hg-commands: -NODICT 0.000005 2.425276 -RANDOM 0.046332 3.490331 -LEGACY 0.720351 3.911682 -COVER 45.507731 4.132653 8 386 -COVER 1.868810 4.132653 8 386 -FAST f=15 a=1 4.561427 3.866894 8 1202 -FAST f=15 a=1 0.048946 3.866894 8 1202 -FAST f=15 a=2 3.574462 3.892119 8 1538 -FAST f=15 a=2 0.033677 3.892119 8 1538 -FAST f=15 a=3 3.230227 3.888791 6 1346 -FAST f=15 a=3 0.034312 3.888791 6 1346 -FAST f=15 a=4 3.042388 3.899739 8 1010 -FAST f=15 a=4 0.024307 3.899739 8 1010 -FAST f=15 a=5 2.800148 3.896220 8 818 -FAST f=15 a=5 0.022331 3.896220 8 818 -FAST f=15 a=6 2.706518 3.882039 8 578 -FAST f=15 a=6 0.020955 3.882039 8 578 -FAST f=15 a=7 2.701820 3.885430 6 866 -FAST f=15 a=7 0.026074 3.885430 6 866 -FAST f=15 a=8 2.604445 3.906932 8 1826 -FAST f=15 a=8 0.021789 3.906932 8 1826 -FAST f=15 a=9 2.598568 3.870324 6 1682 -FAST f=15 a=9 0.026004 3.870324 6 1682 -FAST f=15 a=10 2.575920 3.920783 8 1442 -FAST f=15 a=10 0.020228 3.920783 8 1442 -FAST f=16 a=1 4.630623 4.001430 8 770 -FAST f=16 a=1 0.047497 4.001430 8 770 -FAST f=16 a=2 3.674721 3.974431 8 1874 -FAST f=16 a=2 0.035761 3.974431 8 1874 -FAST f=16 a=3 3.338384 3.978703 8 1010 -FAST f=16 a=3 0.029436 3.978703 8 1010 -FAST f=16 a=4 3.004412 3.983035 8 1010 -FAST f=16 a=4 0.025744 3.983035 8 1010 -FAST f=16 a=5 2.881892 3.987710 8 770 -FAST f=16 a=5 0.023211 3.987710 8 770 -FAST f=16 a=6 2.807410 3.952717 8 1298 -FAST f=16 a=6 0.023199 3.952717 8 1298 -FAST f=16 a=7 2.819623 3.994627 8 770 -FAST f=16 a=7 0.021806 3.994627 8 770 -FAST f=16 a=8 2.740092 3.954032 8 1826 -FAST f=16 a=8 0.0226 3.954032 8 1826 -FAST f=16 a=9 2.682564 3.969879 6 1442 -FAST f=16 a=9 0.026324 3.969879 6 1442 -FAST f=16 a=10 2.657959 3.969755 8 674 -FAST f=16 a=10 0.020413 3.969755 8 674 -FAST f=17 a=1 4.729228 4.046000 8 530 -FAST f=17 a=1 0.049703 4.046000 8 530 -FAST f=17 a=2 3.764510 3.991519 8 1970 -FAST f=17 a=2 0.038195 3.991519 8 1970 -FAST f=17 a=3 3.416992 4.006296 6 914 -FAST f=17 a=3 0.036244 4.006296 6 914 -FAST f=17 a=4 3.145626 3.979182 8 1970 -FAST f=17 a=4 0.028676 3.979182 8 1970 -FAST f=17 a=5 2.995070 4.050070 8 770 -FAST f=17 a=5 0.025707 4.050070 8 770 -FAST f=17 a=6 2.911833 4.040024 8 770 -FAST f=17 a=6 0.02453 4.040024 8 770 -FAST f=17 a=7 2.894796 
4.015884 8 818 -FAST f=17 a=7 0.023956 4.015884 8 818 -FAST f=17 a=8 2.789962 4.039303 8 530 -FAST f=17 a=8 0.023219 4.039303 8 530 -FAST f=17 a=9 2.787625 3.996762 8 1634 -FAST f=17 a=9 0.023651 3.996762 8 1634 -FAST f=17 a=10 2.754796 4.005059 8 1058 -FAST f=17 a=10 0.022537 4.005059 8 1058 -FAST f=18 a=1 4.779117 4.038214 8 242 -FAST f=18 a=1 0.048814 4.038214 8 242 -FAST f=18 a=2 3.829753 4.045768 8 722 -FAST f=18 a=2 0.036541 4.045768 8 722 -FAST f=18 a=3 3.495053 4.021497 8 770 -FAST f=18 a=3 0.032648 4.021497 8 770 -FAST f=18 a=4 3.221395 4.039623 8 770 -FAST f=18 a=4 0.027818 4.039623 8 770 -FAST f=18 a=5 3.059369 4.050414 8 530 -FAST f=18 a=5 0.026296 4.050414 8 530 -FAST f=18 a=6 3.019292 4.010714 6 962 -FAST f=18 a=6 0.031104 4.010714 6 962 -FAST f=18 a=7 2.949322 4.031439 6 770 -FAST f=18 a=7 0.030745 4.031439 6 770 -FAST f=18 a=8 2.876425 4.032088 6 386 -FAST f=18 a=8 0.027407 4.032088 6 386 -FAST f=18 a=9 2.850958 4.053372 8 674 -FAST f=18 a=9 0.023799 4.053372 8 674 -FAST f=18 a=10 2.884352 4.020148 8 1730 -FAST f=18 a=10 0.024401 4.020148 8 1730 -FAST f=19 a=1 4.815669 4.061203 8 674 -FAST f=19 a=1 0.051425 4.061203 8 674 -FAST f=19 a=2 3.951356 4.013822 8 1442 -FAST f=19 a=2 0.039968 4.013822 8 1442 -FAST f=19 a=3 3.554682 4.050425 8 722 -FAST f=19 a=3 0.032725 4.050425 8 722 -FAST f=19 a=4 3.242585 4.054677 8 722 -FAST f=19 a=4 0.028194 4.054677 8 722 -FAST f=19 a=5 3.105909 4.064524 8 818 -FAST f=19 a=5 0.02675 4.064524 8 818 -FAST f=19 a=6 3.059901 4.036857 8 1250 -FAST f=19 a=6 0.026396 4.036857 8 1250 -FAST f=19 a=7 3.016151 4.068234 6 770 -FAST f=19 a=7 0.031501 4.068234 6 770 -FAST f=19 a=8 2.962902 4.077509 8 530 -FAST f=19 a=8 0.023333 4.077509 8 530 -FAST f=19 a=9 2.899607 4.067328 8 530 -FAST f=19 a=9 0.024553 4.067328 8 530 -FAST f=19 a=10 2.950978 4.059901 8 434 -FAST f=19 a=10 0.023852 4.059901 8 434 -FAST f=20 a=1 5.259834 4.027579 8 1634 -FAST f=20 a=1 0.061123 4.027579 8 1634 -FAST f=20 a=2 4.382150 4.025093 8 1634 -FAST f=20 a=2 0.048009 4.025093 8 1634 -FAST f=20 a=3 4.104323 4.060842 8 530 -FAST f=20 a=3 0.040965 4.060842 8 530 -FAST f=20 a=4 3.853340 4.023504 6 914 -FAST f=20 a=4 0.041072 4.023504 6 914 -FAST f=20 a=5 3.728841 4.018089 6 1634 -FAST f=20 a=5 0.037469 4.018089 6 1634 -FAST f=20 a=6 3.683045 4.069138 8 578 -FAST f=20 a=6 0.028011 4.069138 8 578 -FAST f=20 a=7 3.726973 4.063160 8 722 -FAST f=20 a=7 0.028437 4.063160 8 722 -FAST f=20 a=8 3.555073 4.057690 8 386 -FAST f=20 a=8 0.027588 4.057690 8 386 -FAST f=20 a=9 3.551095 4.067253 8 482 -FAST f=20 a=9 0.025976 4.067253 8 482 -FAST f=20 a=10 3.490127 4.068518 8 530 -FAST f=20 a=10 0.025971 4.068518 8 530 -FAST f=21 a=1 7.343816 4.064945 8 770 -FAST f=21 a=1 0.085035 4.064945 8 770 -FAST f=21 a=2 5.930894 4.048206 8 386 -FAST f=21 a=2 0.067349 4.048206 8 386 -FAST f=21 a=3 6.770775 4.063417 8 578 -FAST f=21 a=3 0.077104 4.063417 8 578 -FAST f=21 a=4 6.889409 4.066761 8 626 -FAST f=21 a=4 0.0717 4.066761 8 626 -FAST f=21 a=5 6.714896 4.051813 8 914 -FAST f=21 a=5 0.071026 4.051813 8 914 -FAST f=21 a=6 6.539890 4.047263 8 1922 -FAST f=21 a=6 0.07127 4.047263 8 1922 -FAST f=21 a=7 6.511052 4.068373 8 482 -FAST f=21 a=7 0.065467 4.068373 8 482 -FAST f=21 a=8 6.458788 4.071597 8 482 -FAST f=21 a=8 0.063817 4.071597 8 482 -FAST f=21 a=9 6.377591 4.052905 8 434 -FAST f=21 a=9 0.063112 4.052905 8 434 -FAST f=21 a=10 6.360752 4.047773 8 530 -FAST f=21 a=10 0.063606 4.047773 8 530 -FAST f=22 a=1 10.523471 4.040812 8 962 -FAST f=22 a=1 0.14214 4.040812 8 962 -FAST f=22 a=2 9.454758 4.059396 8 914 -FAST 
f=22 a=2 0.118343 4.059396 8 914 -FAST f=22 a=3 9.043197 4.043019 8 1922 -FAST f=22 a=3 0.109798 4.043019 8 1922 -FAST f=22 a=4 8.716261 4.044819 8 770 -FAST f=22 a=4 0.099687 4.044819 8 770 -FAST f=22 a=5 8.529472 4.070576 8 530 -FAST f=22 a=5 0.093127 4.070576 8 530 -FAST f=22 a=6 8.424241 4.070565 8 722 -FAST f=22 a=6 0.093703 4.070565 8 722 -FAST f=22 a=7 8.403391 4.070591 8 578 -FAST f=22 a=7 0.089763 4.070591 8 578 -FAST f=22 a=8 8.285221 4.089171 8 530 -FAST f=22 a=8 0.087716 4.089171 8 530 -FAST f=22 a=9 8.282506 4.047470 8 722 -FAST f=22 a=9 0.089773 4.047470 8 722 -FAST f=22 a=10 8.241809 4.064151 8 818 -FAST f=22 a=10 0.090413 4.064151 8 818 -FAST f=23 a=1 12.389208 4.051635 6 530 -FAST f=23 a=1 0.147796 4.051635 6 530 -FAST f=23 a=2 11.300910 4.042835 6 914 -FAST f=23 a=2 0.133178 4.042835 6 914 -FAST f=23 a=3 10.879455 4.047415 8 626 -FAST f=23 a=3 0.129571 4.047415 8 626 -FAST f=23 a=4 10.522718 4.038269 6 914 -FAST f=23 a=4 0.118121 4.038269 6 914 -FAST f=23 a=5 10.348043 4.066884 8 434 -FAST f=23 a=5 0.112098 4.066884 8 434 -FAST f=23 a=6 10.238630 4.048635 8 1010 -FAST f=23 a=6 0.120281 4.048635 8 1010 -FAST f=23 a=7 10.213255 4.061809 8 530 -FAST f=23 a=7 0.1121 4.061809 8 530 -FAST f=23 a=8 10.107879 4.074104 8 818 -FAST f=23 a=8 0.116544 4.074104 8 818 -FAST f=23 a=9 10.063424 4.064811 8 674 -FAST f=23 a=9 0.109045 4.064811 8 674 -FAST f=23 a=10 10.035801 4.054918 8 530 -FAST f=23 a=10 0.108735 4.054918 8 530 -FAST f=24 a=1 14.963878 4.073490 8 722 -FAST f=24 a=1 0.206344 4.073490 8 722 -FAST f=24 a=2 13.833472 4.036100 8 962 -FAST f=24 a=2 0.17486 4.036100 8 962 -FAST f=24 a=3 13.404631 4.026281 6 1106 -FAST f=24 a=3 0.153961 4.026281 6 1106 -FAST f=24 a=4 13.041164 4.065448 8 674 -FAST f=24 a=4 0.155509 4.065448 8 674 -FAST f=24 a=5 12.879412 4.054636 8 674 -FAST f=24 a=5 0.148282 4.054636 8 674 -FAST f=24 a=6 12.773736 4.081376 8 530 -FAST f=24 a=6 0.142563 4.081376 8 530 -FAST f=24 a=7 12.711310 4.059834 8 770 -FAST f=24 a=7 0.149321 4.059834 8 770 -FAST f=24 a=8 12.635459 4.052050 8 1298 -FAST f=24 a=8 0.15095 4.052050 8 1298 -FAST f=24 a=9 12.558104 4.076516 8 722 -FAST f=24 a=9 0.144361 4.076516 8 722 -FAST f=24 a=10 10.661348 4.062137 8 818 -FAST f=24 a=10 0.108232 4.062137 8 818 - - -hg-changelog: -NODICT 0.000017 1.377590 -RANDOM 0.186171 2.097487 -LEGACY 1.670867 2.058907 -COVER 173.561948 2.189685 8 98 -COVER 4.811180 2.189685 8 98 -FAST f=15 a=1 18.685906 2.129682 8 434 -FAST f=15 a=1 0.173376 2.129682 8 434 -FAST f=15 a=2 12.928259 2.131890 8 482 -FAST f=15 a=2 0.102582 2.131890 8 482 -FAST f=15 a=3 11.132343 2.128027 8 386 -FAST f=15 a=3 0.077122 2.128027 8 386 -FAST f=15 a=4 10.120683 2.125797 8 434 -FAST f=15 a=4 0.065175 2.125797 8 434 -FAST f=15 a=5 9.479092 2.127697 8 386 -FAST f=15 a=5 0.057905 2.127697 8 386 -FAST f=15 a=6 9.159523 2.127132 8 1682 -FAST f=15 a=6 0.058604 2.127132 8 1682 -FAST f=15 a=7 8.724003 2.129914 8 434 -FAST f=15 a=7 0.0493 2.129914 8 434 -FAST f=15 a=8 8.595001 2.127137 8 338 -FAST f=15 a=8 0.0474 2.127137 8 338 -FAST f=15 a=9 8.356405 2.125512 8 482 -FAST f=15 a=9 0.046126 2.125512 8 482 -FAST f=15 a=10 8.207111 2.126066 8 338 -FAST f=15 a=10 0.043292 2.126066 8 338 -FAST f=16 a=1 18.464436 2.144040 8 242 -FAST f=16 a=1 0.172156 2.144040 8 242 -FAST f=16 a=2 12.844825 2.148171 8 194 -FAST f=16 a=2 0.099619 2.148171 8 194 -FAST f=16 a=3 11.082568 2.140837 8 290 -FAST f=16 a=3 0.079165 2.140837 8 290 -FAST f=16 a=4 10.066749 2.144405 8 386 -FAST f=16 a=4 0.068411 2.144405 8 386 -FAST f=16 a=5 9.501121 2.140720 8 386 -FAST 
f=16 a=5 0.061316 2.140720 8 386 -FAST f=16 a=6 9.179332 2.139478 8 386 -FAST f=16 a=6 0.056322 2.139478 8 386 -FAST f=16 a=7 8.849438 2.142412 8 194 -FAST f=16 a=7 0.050493 2.142412 8 194 -FAST f=16 a=8 8.810919 2.143454 8 434 -FAST f=16 a=8 0.051304 2.143454 8 434 -FAST f=16 a=9 8.553900 2.140339 8 194 -FAST f=16 a=9 0.047285 2.140339 8 194 -FAST f=16 a=10 8.398027 2.143130 8 386 -FAST f=16 a=10 0.046386 2.143130 8 386 -FAST f=17 a=1 18.644657 2.157192 8 98 -FAST f=17 a=1 0.173884 2.157192 8 98 -FAST f=17 a=2 13.071242 2.159830 8 146 -FAST f=17 a=2 0.10388 2.159830 8 146 -FAST f=17 a=3 11.332366 2.153654 6 194 -FAST f=17 a=3 0.08983 2.153654 6 194 -FAST f=17 a=4 10.362413 2.156813 8 242 -FAST f=17 a=4 0.070389 2.156813 8 242 -FAST f=17 a=5 9.808159 2.155098 6 338 -FAST f=17 a=5 0.072661 2.155098 6 338 -FAST f=17 a=6 9.451165 2.153845 6 146 -FAST f=17 a=6 0.064959 2.153845 6 146 -FAST f=17 a=7 9.163097 2.155424 6 242 -FAST f=17 a=7 0.064323 2.155424 6 242 -FAST f=17 a=8 9.047276 2.156640 8 242 -FAST f=17 a=8 0.053382 2.156640 8 242 -FAST f=17 a=9 8.807671 2.152396 8 146 -FAST f=17 a=9 0.049617 2.152396 8 146 -FAST f=17 a=10 8.649827 2.152370 8 146 -FAST f=17 a=10 0.047849 2.152370 8 146 -FAST f=18 a=1 18.809502 2.168116 8 98 -FAST f=18 a=1 0.175226 2.168116 8 98 -FAST f=18 a=2 13.756502 2.170870 6 242 -FAST f=18 a=2 0.119507 2.170870 6 242 -FAST f=18 a=3 12.059748 2.163094 6 98 -FAST f=18 a=3 0.093912 2.163094 6 98 -FAST f=18 a=4 11.410294 2.172372 8 98 -FAST f=18 a=4 0.073048 2.172372 8 98 -FAST f=18 a=5 10.560297 2.166388 8 98 -FAST f=18 a=5 0.065136 2.166388 8 98 -FAST f=18 a=6 10.071390 2.162672 8 98 -FAST f=18 a=6 0.059402 2.162672 8 98 -FAST f=18 a=7 10.084214 2.166624 6 194 -FAST f=18 a=7 0.073276 2.166624 6 194 -FAST f=18 a=8 9.953226 2.167454 8 98 -FAST f=18 a=8 0.053659 2.167454 8 98 -FAST f=18 a=9 8.982461 2.161593 6 146 -FAST f=18 a=9 0.05955 2.161593 6 146 -FAST f=18 a=10 8.986092 2.164373 6 242 -FAST f=18 a=10 0.059135 2.164373 6 242 -FAST f=19 a=1 18.908277 2.176021 8 98 -FAST f=19 a=1 0.177316 2.176021 8 98 -FAST f=19 a=2 13.471313 2.176103 8 98 -FAST f=19 a=2 0.106344 2.176103 8 98 -FAST f=19 a=3 11.571406 2.172812 8 98 -FAST f=19 a=3 0.083293 2.172812 8 98 -FAST f=19 a=4 10.632775 2.177770 6 146 -FAST f=19 a=4 0.079864 2.177770 6 146 -FAST f=19 a=5 10.030190 2.175574 6 146 -FAST f=19 a=5 0.07223 2.175574 6 146 -FAST f=19 a=6 9.717818 2.169997 8 98 -FAST f=19 a=6 0.060049 2.169997 8 98 -FAST f=19 a=7 9.397531 2.172770 8 146 -FAST f=19 a=7 0.057188 2.172770 8 146 -FAST f=19 a=8 9.281061 2.175822 8 98 -FAST f=19 a=8 0.053711 2.175822 8 98 -FAST f=19 a=9 9.165242 2.169849 6 146 -FAST f=19 a=9 0.059898 2.169849 6 146 -FAST f=19 a=10 9.048763 2.173394 8 98 -FAST f=19 a=10 0.049757 2.173394 8 98 -FAST f=20 a=1 21.166917 2.183923 6 98 -FAST f=20 a=1 0.205425 2.183923 6 98 -FAST f=20 a=2 15.642753 2.182349 6 98 -FAST f=20 a=2 0.135957 2.182349 6 98 -FAST f=20 a=3 14.053730 2.173544 6 98 -FAST f=20 a=3 0.11266 2.173544 6 98 -FAST f=20 a=4 15.270019 2.183656 8 98 -FAST f=20 a=4 0.107892 2.183656 8 98 -FAST f=20 a=5 15.497927 2.174661 6 98 -FAST f=20 a=5 0.100305 2.174661 6 98 -FAST f=20 a=6 13.973505 2.172391 8 98 -FAST f=20 a=6 0.087565 2.172391 8 98 -FAST f=20 a=7 14.083296 2.172443 8 98 -FAST f=20 a=7 0.078062 2.172443 8 98 -FAST f=20 a=8 12.560048 2.175581 8 98 -FAST f=20 a=8 0.070282 2.175581 8 98 -FAST f=20 a=9 13.078645 2.173975 6 146 -FAST f=20 a=9 0.081041 2.173975 6 146 -FAST f=20 a=10 12.823328 2.177778 8 98 -FAST f=20 a=10 0.074522 2.177778 8 98 -FAST f=21 a=1 29.825370 
2.183057 6 98 -FAST f=21 a=1 0.334453 2.183057 6 98 -FAST f=21 a=2 29.476474 2.182752 8 98 -FAST f=21 a=2 0.286602 2.182752 8 98 -FAST f=21 a=3 25.937186 2.175867 8 98 -FAST f=21 a=3 0.17626 2.175867 8 98 -FAST f=21 a=4 20.413865 2.179780 8 98 -FAST f=21 a=4 0.206085 2.179780 8 98 -FAST f=21 a=5 20.541889 2.178328 6 146 -FAST f=21 a=5 0.199157 2.178328 6 146 -FAST f=21 a=6 21.090670 2.174443 6 146 -FAST f=21 a=6 0.190645 2.174443 6 146 -FAST f=21 a=7 20.221569 2.177384 6 146 -FAST f=21 a=7 0.184278 2.177384 6 146 -FAST f=21 a=8 20.322357 2.179456 6 98 -FAST f=21 a=8 0.178458 2.179456 6 98 -FAST f=21 a=9 20.683912 2.174396 6 146 -FAST f=21 a=9 0.190829 2.174396 6 146 -FAST f=21 a=10 20.840865 2.174905 8 98 -FAST f=21 a=10 0.172515 2.174905 8 98 -FAST f=22 a=1 36.822827 2.181612 6 98 -FAST f=22 a=1 0.437389 2.181612 6 98 -FAST f=22 a=2 30.616902 2.183142 8 98 -FAST f=22 a=2 0.324284 2.183142 8 98 -FAST f=22 a=3 28.472482 2.178130 8 98 -FAST f=22 a=3 0.236538 2.178130 8 98 -FAST f=22 a=4 25.847028 2.181878 8 98 -FAST f=22 a=4 0.263744 2.181878 8 98 -FAST f=22 a=5 27.095881 2.180775 8 98 -FAST f=22 a=5 0.24988 2.180775 8 98 -FAST f=22 a=6 25.939172 2.170916 8 98 -FAST f=22 a=6 0.240033 2.170916 8 98 -FAST f=22 a=7 27.064194 2.177849 8 98 -FAST f=22 a=7 0.242383 2.177849 8 98 -FAST f=22 a=8 25.140221 2.178216 8 98 -FAST f=22 a=8 0.237601 2.178216 8 98 -FAST f=22 a=9 25.505283 2.177455 6 146 -FAST f=22 a=9 0.223217 2.177455 6 146 -FAST f=22 a=10 24.529362 2.176705 6 98 -FAST f=22 a=10 0.222876 2.176705 6 98 -FAST f=23 a=1 39.127310 2.183006 6 98 -FAST f=23 a=1 0.417338 2.183006 6 98 -FAST f=23 a=2 32.468161 2.183524 6 98 -FAST f=23 a=2 0.351645 2.183524 6 98 -FAST f=23 a=3 31.577620 2.172604 6 98 -FAST f=23 a=3 0.319659 2.172604 6 98 -FAST f=23 a=4 30.129247 2.183932 6 98 -FAST f=23 a=4 0.307239 2.183932 6 98 -FAST f=23 a=5 29.103376 2.183529 6 146 -FAST f=23 a=5 0.285533 2.183529 6 146 -FAST f=23 a=6 29.776045 2.174367 8 98 -FAST f=23 a=6 0.276846 2.174367 8 98 -FAST f=23 a=7 28.940407 2.178022 6 146 -FAST f=23 a=7 0.274082 2.178022 6 146 -FAST f=23 a=8 29.256009 2.179462 6 98 -FAST f=23 a=8 0.26949 2.179462 6 98 -FAST f=23 a=9 29.347312 2.170407 8 98 -FAST f=23 a=9 0.265034 2.170407 8 98 -FAST f=23 a=10 29.140081 2.171762 8 98 -FAST f=23 a=10 0.259183 2.171762 8 98 -FAST f=24 a=1 44.871179 2.182115 6 98 -FAST f=24 a=1 0.509433 2.182115 6 98 -FAST f=24 a=2 38.694867 2.180549 8 98 -FAST f=24 a=2 0.406695 2.180549 8 98 -FAST f=24 a=3 38.363769 2.172821 8 98 -FAST f=24 a=3 0.359581 2.172821 8 98 -FAST f=24 a=4 36.580797 2.184142 8 98 -FAST f=24 a=4 0.340614 2.184142 8 98 -FAST f=24 a=5 33.125701 2.183301 8 98 -FAST f=24 a=5 0.324874 2.183301 8 98 -FAST f=24 a=6 34.776068 2.173019 6 146 -FAST f=24 a=6 0.340397 2.173019 6 146 -FAST f=24 a=7 34.417625 2.176561 6 146 -FAST f=24 a=7 0.308223 2.176561 6 146 -FAST f=24 a=8 35.470291 2.182161 6 98 -FAST f=24 a=8 0.307724 2.182161 6 98 -FAST f=24 a=9 34.927252 2.172682 6 146 -FAST f=24 a=9 0.300598 2.172682 6 146 -FAST f=24 a=10 33.238355 2.173395 6 98 -FAST f=24 a=10 0.249916 2.173395 6 98 - - -hg-manifest: -NODICT 0.000004 1.866377 -RANDOM 0.696346 2.309436 -LEGACY 7.064527 2.506977 -COVER 876.312865 2.582528 8 434 -COVER 35.684533 2.582528 8 434 -FAST f=15 a=1 76.618201 2.404013 8 1202 -FAST f=15 a=1 0.700722 2.404013 8 1202 -FAST f=15 a=2 49.213058 2.409248 6 1826 -FAST f=15 a=2 0.473393 2.409248 6 1826 -FAST f=15 a=3 41.753197 2.409677 8 1490 -FAST f=15 a=3 0.336848 2.409677 8 1490 -FAST f=15 a=4 38.648295 2.407996 8 1538 -FAST f=15 a=4 0.283952 
2.407996 8 1538 -FAST f=15 a=5 36.144936 2.402895 8 1874 -FAST f=15 a=5 0.270128 2.402895 8 1874 -FAST f=15 a=6 35.484675 2.394873 8 1586 -FAST f=15 a=6 0.251637 2.394873 8 1586 -FAST f=15 a=7 34.280599 2.397311 8 1778 -FAST f=15 a=7 0.23984 2.397311 8 1778 -FAST f=15 a=8 32.122572 2.396089 6 1490 -FAST f=15 a=8 0.251508 2.396089 6 1490 -FAST f=15 a=9 29.909842 2.390092 6 1970 -FAST f=15 a=9 0.251233 2.390092 6 1970 -FAST f=15 a=10 30.102938 2.400086 6 1682 -FAST f=15 a=10 0.23688 2.400086 6 1682 -FAST f=16 a=1 67.750401 2.475460 6 1346 -FAST f=16 a=1 0.796035 2.475460 6 1346 -FAST f=16 a=2 52.812027 2.480860 6 1730 -FAST f=16 a=2 0.480384 2.480860 6 1730 -FAST f=16 a=3 44.179259 2.469304 8 1970 -FAST f=16 a=3 0.332657 2.469304 8 1970 -FAST f=16 a=4 37.612728 2.478208 6 1970 -FAST f=16 a=4 0.32498 2.478208 6 1970 -FAST f=16 a=5 35.056222 2.475568 6 1298 -FAST f=16 a=5 0.302824 2.475568 6 1298 -FAST f=16 a=6 34.713012 2.486079 8 1730 -FAST f=16 a=6 0.24755 2.486079 8 1730 -FAST f=16 a=7 33.713687 2.477180 6 1682 -FAST f=16 a=7 0.280358 2.477180 6 1682 -FAST f=16 a=8 31.571412 2.475418 8 1538 -FAST f=16 a=8 0.241241 2.475418 8 1538 -FAST f=16 a=9 31.608069 2.478263 8 1922 -FAST f=16 a=9 0.241764 2.478263 8 1922 -FAST f=16 a=10 31.358002 2.472263 8 1442 -FAST f=16 a=10 0.221661 2.472263 8 1442 -FAST f=17 a=1 66.185775 2.536085 6 1346 -FAST f=17 a=1 0.713549 2.536085 6 1346 -FAST f=17 a=2 50.365000 2.546105 8 1298 -FAST f=17 a=2 0.467846 2.546105 8 1298 -FAST f=17 a=3 42.712843 2.536250 8 1298 -FAST f=17 a=3 0.34047 2.536250 8 1298 -FAST f=17 a=4 39.514227 2.535555 8 1442 -FAST f=17 a=4 0.302989 2.535555 8 1442 -FAST f=17 a=5 35.189292 2.524925 8 1202 -FAST f=17 a=5 0.273451 2.524925 8 1202 -FAST f=17 a=6 35.791683 2.523466 8 1202 -FAST f=17 a=6 0.268261 2.523466 8 1202 -FAST f=17 a=7 37.416136 2.526625 6 1010 -FAST f=17 a=7 0.277558 2.526625 6 1010 -FAST f=17 a=8 37.084707 2.533274 6 1250 -FAST f=17 a=8 0.285104 2.533274 6 1250 -FAST f=17 a=9 34.183814 2.532765 8 1298 -FAST f=17 a=9 0.235133 2.532765 8 1298 -FAST f=17 a=10 31.149235 2.528722 8 1346 -FAST f=17 a=10 0.232679 2.528722 8 1346 -FAST f=18 a=1 72.942176 2.559857 6 386 -FAST f=18 a=1 0.718618 2.559857 6 386 -FAST f=18 a=2 51.690440 2.559572 8 290 -FAST f=18 a=2 0.403978 2.559572 8 290 -FAST f=18 a=3 45.344908 2.561040 8 962 -FAST f=18 a=3 0.357205 2.561040 8 962 -FAST f=18 a=4 39.804522 2.558446 8 1010 -FAST f=18 a=4 0.310526 2.558446 8 1010 -FAST f=18 a=5 38.134888 2.561811 8 626 -FAST f=18 a=5 0.273743 2.561811 8 626 -FAST f=18 a=6 35.091890 2.555518 8 722 -FAST f=18 a=6 0.260135 2.555518 8 722 -FAST f=18 a=7 34.639523 2.562938 8 290 -FAST f=18 a=7 0.234294 2.562938 8 290 -FAST f=18 a=8 36.076431 2.563567 8 1586 -FAST f=18 a=8 0.274075 2.563567 8 1586 -FAST f=18 a=9 36.376433 2.560950 8 722 -FAST f=18 a=9 0.240106 2.560950 8 722 -FAST f=18 a=10 32.624790 2.559340 8 578 -FAST f=18 a=10 0.234704 2.559340 8 578 -FAST f=19 a=1 70.513761 2.572441 8 194 -FAST f=19 a=1 0.726112 2.572441 8 194 -FAST f=19 a=2 59.263032 2.574560 8 482 -FAST f=19 a=2 0.451554 2.574560 8 482 -FAST f=19 a=3 51.509594 2.571546 6 194 -FAST f=19 a=3 0.393014 2.571546 6 194 -FAST f=19 a=4 55.393906 2.573386 8 482 -FAST f=19 a=4 0.38819 2.573386 8 482 -FAST f=19 a=5 43.201736 2.567589 8 674 -FAST f=19 a=5 0.292155 2.567589 8 674 -FAST f=19 a=6 42.911687 2.572666 6 434 -FAST f=19 a=6 0.303988 2.572666 6 434 -FAST f=19 a=7 44.687591 2.573613 6 290 -FAST f=19 a=7 0.308721 2.573613 6 290 -FAST f=19 a=8 37.372868 2.571039 6 194 -FAST f=19 a=8 0.287137 2.571039 6 194 
-FAST f=19 a=9 36.074230 2.566473 6 482 -FAST f=19 a=9 0.280721 2.566473 6 482 -FAST f=19 a=10 33.731720 2.570306 8 194 -FAST f=19 a=10 0.224073 2.570306 8 194 -FAST f=20 a=1 79.670634 2.581146 6 290 -FAST f=20 a=1 0.899986 2.581146 6 290 -FAST f=20 a=2 58.827141 2.579782 8 386 -FAST f=20 a=2 0.602288 2.579782 8 386 -FAST f=20 a=3 51.289004 2.579627 8 722 -FAST f=20 a=3 0.446091 2.579627 8 722 -FAST f=20 a=4 47.711068 2.581508 8 722 -FAST f=20 a=4 0.473007 2.581508 8 722 -FAST f=20 a=5 47.402929 2.578062 6 434 -FAST f=20 a=5 0.497131 2.578062 6 434 -FAST f=20 a=6 54.797102 2.577365 8 482 -FAST f=20 a=6 0.515061 2.577365 8 482 -FAST f=20 a=7 51.370877 2.583050 8 386 -FAST f=20 a=7 0.402878 2.583050 8 386 -FAST f=20 a=8 51.437931 2.574875 6 242 -FAST f=20 a=8 0.453094 2.574875 6 242 -FAST f=20 a=9 44.105456 2.576700 6 242 -FAST f=20 a=9 0.456633 2.576700 6 242 -FAST f=20 a=10 44.447580 2.578305 8 338 -FAST f=20 a=10 0.409121 2.578305 8 338 -FAST f=21 a=1 113.031686 2.582449 6 242 -FAST f=21 a=1 1.456971 2.582449 6 242 -FAST f=21 a=2 97.700932 2.582124 8 194 -FAST f=21 a=2 1.072078 2.582124 8 194 -FAST f=21 a=3 96.563648 2.585479 8 434 -FAST f=21 a=3 0.949528 2.585479 8 434 -FAST f=21 a=4 90.597813 2.582366 6 386 -FAST f=21 a=4 0.76944 2.582366 6 386 -FAST f=21 a=5 86.815980 2.579043 8 434 -FAST f=21 a=5 0.858167 2.579043 8 434 -FAST f=21 a=6 91.235820 2.578378 8 530 -FAST f=21 a=6 0.684274 2.578378 8 530 -FAST f=21 a=7 84.392788 2.581243 8 386 -FAST f=21 a=7 0.814386 2.581243 8 386 -FAST f=21 a=8 82.052310 2.582547 8 338 -FAST f=21 a=8 0.822633 2.582547 8 338 -FAST f=21 a=9 74.696074 2.579319 8 194 -FAST f=21 a=9 0.811028 2.579319 8 194 -FAST f=21 a=10 76.211170 2.578766 8 290 -FAST f=21 a=10 0.809715 2.578766 8 290 -FAST f=22 a=1 138.976871 2.580478 8 194 -FAST f=22 a=1 1.748932 2.580478 8 194 -FAST f=22 a=2 120.164097 2.583633 8 386 -FAST f=22 a=2 1.333239 2.583633 8 386 -FAST f=22 a=3 111.986474 2.582566 6 194 -FAST f=22 a=3 1.305734 2.582566 6 194 -FAST f=22 a=4 108.548148 2.583068 6 194 -FAST f=22 a=4 1.314026 2.583068 6 194 -FAST f=22 a=5 103.173017 2.583495 6 290 -FAST f=22 a=5 1.228664 2.583495 6 290 -FAST f=22 a=6 108.421262 2.582349 8 530 -FAST f=22 a=6 1.076773 2.582349 8 530 -FAST f=22 a=7 103.284127 2.581022 8 386 -FAST f=22 a=7 1.112117 2.581022 8 386 -FAST f=22 a=8 96.330279 2.581073 8 290 -FAST f=22 a=8 1.109303 2.581073 8 290 -FAST f=22 a=9 97.651348 2.580075 6 194 -FAST f=22 a=9 0.933032 2.580075 6 194 -FAST f=22 a=10 101.660621 2.584886 8 194 -FAST f=22 a=10 0.796823 2.584886 8 194 -FAST f=23 a=1 159.322978 2.581474 6 242 -FAST f=23 a=1 2.015878 2.581474 6 242 -FAST f=23 a=2 134.331775 2.581619 8 194 -FAST f=23 a=2 1.545845 2.581619 8 194 -FAST f=23 a=3 127.724552 2.579888 6 338 -FAST f=23 a=3 1.444496 2.579888 6 338 -FAST f=23 a=4 126.077675 2.578137 6 242 -FAST f=23 a=4 1.364394 2.578137 6 242 -FAST f=23 a=5 124.914027 2.580843 8 338 -FAST f=23 a=5 1.116059 2.580843 8 338 -FAST f=23 a=6 122.874153 2.577637 6 338 -FAST f=23 a=6 1.164584 2.577637 6 338 -FAST f=23 a=7 123.099257 2.582715 6 386 -FAST f=23 a=7 1.354042 2.582715 6 386 -FAST f=23 a=8 122.026753 2.577681 8 194 -FAST f=23 a=8 1.210966 2.577681 8 194 -FAST f=23 a=9 121.164312 2.584599 6 290 -FAST f=23 a=9 1.174859 2.584599 6 290 -FAST f=23 a=10 117.462222 2.580358 8 194 -FAST f=23 a=10 1.075258 2.580358 8 194 -FAST f=24 a=1 169.539659 2.581642 6 194 -FAST f=24 a=1 1.916804 2.581642 6 194 -FAST f=24 a=2 160.539270 2.580421 6 290 -FAST f=24 a=2 1.71087 2.580421 6 290 -FAST f=24 a=3 155.455874 2.580449 6 242 -FAST 
f=24 a=3 1.60307 2.580449 6 242
-FAST f=24 a=4 147.630320 2.582953 6 338
-FAST f=24 a=4 1.396364 2.582953 6 338
-FAST f=24 a=5 133.767428 2.580589 6 290
-FAST f=24 a=5 1.19933 2.580589 6 290
-FAST f=24 a=6 146.437535 2.579453 8 194
-FAST f=24 a=6 1.385405 2.579453 8 194
-FAST f=24 a=7 147.227507 2.584155 8 386
-FAST f=24 a=7 1.48942 2.584155 8 386
-FAST f=24 a=8 138.005773 2.584115 8 194
-FAST f=24 a=8 1.352 2.584115 8 194
-FAST f=24 a=9 141.442625 2.582902 8 290
-FAST f=24 a=9 1.39647 2.582902 8 290
-FAST f=24 a=10 142.157446 2.582701 8 434
-FAST f=24 a=10 1.498889 2.582701 8 434
diff --git a/contrib/experimental_dict_builders/benchmarkDictBuilder/benchmark.c b/contrib/experimental_dict_builders/benchmarkDictBuilder/benchmark.c
deleted file mode 100644
index cd943797bde..00000000000
--- a/contrib/experimental_dict_builders/benchmarkDictBuilder/benchmark.c
+++ /dev/null
@@ -1,442 +0,0 @@
-#include <stdio.h>  /* fprintf */
-#include <stdlib.h> /* malloc, free, qsort */
-#include <string.h> /* strcmp, strlen */
-#include <errno.h>  /* errno */
-#include <ctype.h>
-#include <time.h>
-#include "random.h"
-#include "dictBuilder.h"
-#include "zstd_internal.h" /* includes zstd.h */
-#include "io.h"
-#include "util.h"
-#include "zdict.h"
-
-
-/*-*************************************
-* Console display
-***************************************/
-#define DISPLAY(...)         fprintf(stderr, __VA_ARGS__)
-#define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); }
-
-static const U64 g_refreshRate = SEC_TO_MICRO / 6;
-static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER;
-
-#define DISPLAYUPDATE(l, ...) { if (displayLevel>=l) { \
-            if ((UTIL_clockSpanMicro(g_displayClock) > g_refreshRate) || (displayLevel>=4)) \
-            { g_displayClock = UTIL_getTime(); DISPLAY(__VA_ARGS__); \
-            if (displayLevel>=4) fflush(stderr); } } }
-
-
-/*-*************************************
-* Exceptions
-***************************************/
-#ifndef DEBUG
-#  define DEBUG 0
-#endif
-#define DEBUGOUTPUT(...) if (DEBUG) DISPLAY(__VA_ARGS__);
-#define EXM_THROW(error, ...)                                             \
-{                                                                         \
-    DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \
-    DISPLAY("Error %i : ", error);                                        \
-    DISPLAY(__VA_ARGS__);                                                 \
-    DISPLAY("\n");                                                        \
-    exit(error);                                                          \
-}
-
-
-/*-*************************************
-* Constants
-***************************************/
-static const unsigned g_defaultMaxDictSize = 110 KB;
-#define DEFAULT_CLEVEL 3
-#define DEFAULT_DISPLAYLEVEL 2
-
-
-/*-*************************************
-* Struct
-***************************************/
-typedef struct {
-    const void* dictBuffer;
-    size_t dictSize;
-} dictInfo;
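As a side note, a minimal sketch of how the EXM_THROW macro defined above is meant to be used; the buffer and its size here are hypothetical, while the real file applies the same pattern to its dictionary buffer below:

    void* const scratch = malloc(capacity);   /* hypothetical allocation */
    if (!scratch)
        EXM_THROW(12, "not enough memory");   /* prints "Error 12 : not enough memory" and calls exit(12) */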
-
-
-/*-*************************************
-* Dictionary related operations
-***************************************/
-/** createDictFromFiles() :
- *  Based on type of param given, train dictionary using the corresponding algorithm
- *  @return dictInfo containing dictionary buffer and dictionary size
- */
-dictInfo* createDictFromFiles(sampleInfo *info, unsigned maxDictSize,
-                    ZDICT_random_params_t *randomParams, ZDICT_cover_params_t *coverParams,
-                    ZDICT_legacy_params_t *legacyParams, ZDICT_fastCover_params_t *fastParams) {
-    unsigned const displayLevel = randomParams ? randomParams->zParams.notificationLevel :
-                        coverParams ? coverParams->zParams.notificationLevel :
-                        legacyParams ? legacyParams->zParams.notificationLevel :
-                        fastParams ? fastParams->zParams.notificationLevel :
-                        DEFAULT_DISPLAYLEVEL;   /* no dict */
-    void* const dictBuffer = malloc(maxDictSize);
-
-    dictInfo* dInfo = NULL;
-
-    /* Checks */
-    if (!dictBuffer)
-        EXM_THROW(12, "not enough memory for trainFromFiles");   /* should not happen */
-
-    {   size_t dictSize;
-        if (randomParams) {
-            dictSize = ZDICT_trainFromBuffer_random(dictBuffer, maxDictSize, info->srcBuffer,
-                                                    info->samplesSizes, info->nbSamples, *randomParams);
-        } else if (coverParams) {
-            /* Run the optimize version if either k or d is not provided */
-            if (!coverParams->d || !coverParams->k) {
-                dictSize = ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, maxDictSize, info->srcBuffer,
-                                                    info->samplesSizes, info->nbSamples, coverParams);
-            } else {
-                dictSize = ZDICT_trainFromBuffer_cover(dictBuffer, maxDictSize, info->srcBuffer,
-                                                    info->samplesSizes, info->nbSamples, *coverParams);
-            }
-        } else if (legacyParams) {
-            dictSize = ZDICT_trainFromBuffer_legacy(dictBuffer, maxDictSize, info->srcBuffer,
-                                                    info->samplesSizes, info->nbSamples, *legacyParams);
-        } else if (fastParams) {
-            /* Run the optimize version if either k or d is not provided */
-            if (!fastParams->d || !fastParams->k) {
-                dictSize = ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, maxDictSize, info->srcBuffer,
-                                                    info->samplesSizes, info->nbSamples, fastParams);
-            } else {
-                dictSize = ZDICT_trainFromBuffer_fastCover(dictBuffer, maxDictSize, info->srcBuffer,
-                                                    info->samplesSizes, info->nbSamples, *fastParams);
-            }
-        } else {
-            dictSize = 0;
-        }
-        if (ZDICT_isError(dictSize)) {
-            DISPLAYLEVEL(1, "dictionary training failed : %s \n", ZDICT_getErrorName(dictSize));   /* should not happen */
-            free(dictBuffer);
-            return dInfo;
-        }
-        dInfo = (dictInfo *)malloc(sizeof(dictInfo));
-        dInfo->dictBuffer = dictBuffer;
-        dInfo->dictSize = dictSize;
-    }
-    return dInfo;
-}
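For orientation, a hedged sketch of a call into createDictFromFiles() taking the fastCover branch; because both k and d are set, the dispatch above picks ZDICT_trainFromBuffer_fastCover() rather than the optimizing variant (the sampleInfo pointer `info` is assumed to be already loaded, and freeDictInfo() is defined further down):

    ZDICT_fastCover_params_t fastParams;
    memset(&fastParams, 0, sizeof(fastParams));
    fastParams.k = 1154;                                  /* segment size */
    fastParams.d = 8;                                     /* dmer size */
    fastParams.f = 20;                                    /* log2 of the frequency-table size */
    fastParams.zParams.compressionLevel = DEFAULT_CLEVEL;
    {   dictInfo* const dInfo = createDictFromFiles(info, g_defaultMaxDictSize,
                                                    NULL, NULL, NULL, &fastParams);
        if (dInfo) {
            /* use dInfo->dictBuffer / dInfo->dictSize ... */
            freeDictInfo(dInfo);
        }
    }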
-
-
-/** compressWithDict() :
- *  Compress samples from sample buffer given dictionary stored on dictionary buffer and compression level
- *  @return compression ratio
- */
-double compressWithDict(sampleInfo *srcInfo, dictInfo* dInfo, int compressionLevel, int displayLevel) {
-    /* Local variables */
-    size_t totalCompressedSize = 0;
-    size_t totalOriginalSize = 0;
-    const unsigned hasDict = dInfo->dictSize > 0 ? 1 : 0;
-    double cRatio;
-    size_t dstCapacity;
-    int i;
-
-    /* Pointers */
-    ZSTD_CDict *cdict = NULL;
-    ZSTD_CCtx* cctx = NULL;
-    size_t *offsets = NULL;
-    void* dst = NULL;
-
-    /* Allocate dst with enough space to compress the maximum sized sample */
-    {
-        size_t maxSampleSize = 0;
-        for (i = 0; i < srcInfo->nbSamples; i++) {
-            maxSampleSize = MAX(srcInfo->samplesSizes[i], maxSampleSize);
-        }
-        dstCapacity = ZSTD_compressBound(maxSampleSize);
-        dst = malloc(dstCapacity);
-    }
-
-    /* Calculate offset for each sample */
-    offsets = (size_t *)malloc((srcInfo->nbSamples + 1) * sizeof(size_t));
-    offsets[0] = 0;
-    for (i = 1; i <= srcInfo->nbSamples; i++) {
-        offsets[i] = offsets[i - 1] + srcInfo->samplesSizes[i - 1];
-    }
-
-    /* Create the cctx */
-    cctx = ZSTD_createCCtx();
-    if (!cctx || !dst) {
-        cRatio = -1;
-        goto _cleanup;
-    }
-
-    /* Create CDict if there's a dictionary stored on buffer */
-    if (hasDict) {
-        cdict = ZSTD_createCDict(dInfo->dictBuffer, dInfo->dictSize, compressionLevel);
-        if (!cdict) {
-            cRatio = -1;
-            goto _cleanup;
-        }
-    }
-
-    /* Compress each sample and sum their sizes */
-    const BYTE *const samples = (const BYTE *)srcInfo->srcBuffer;
-    for (i = 0; i < srcInfo->nbSamples; i++) {
-        size_t compressedSize;
-        if (hasDict) {
-            compressedSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, samples + offsets[i], srcInfo->samplesSizes[i], cdict);
-        } else {
-            compressedSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, samples + offsets[i], srcInfo->samplesSizes[i], compressionLevel);
-        }
-        if (ZSTD_isError(compressedSize)) {
-            cRatio = -1;
-            goto _cleanup;
-        }
-        totalCompressedSize += compressedSize;
-    }
-
-    /* Sum original sizes */
-    for (i = 0; i < srcInfo->nbSamples; i++) {
-        totalOriginalSize += srcInfo->samplesSizes[i];
-    }
-
-    /* Calculate compression ratio */
-    DISPLAYLEVEL(2, "original size is %lu\n", totalOriginalSize);
-    DISPLAYLEVEL(2, "compressed size is %lu\n", totalCompressedSize);
-    cRatio = (double)totalOriginalSize/(double)totalCompressedSize;
-
-_cleanup:
-    free(dst);
-    free(offsets);
-    ZSTD_freeCCtx(cctx);
-    ZSTD_freeCDict(cdict);
-    return cRatio;
-}
-
-
-/** FreeDictInfo() :
- *  Free memory allocated for dictInfo
- */
-void freeDictInfo(dictInfo* info) {
-    if (!info) return;
-    if (info->dictBuffer) free((void*)(info->dictBuffer));
-    free(info);
-}
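To make the returned ratio concrete: compressWithDict() divides the summed original sizes by the summed compressed sizes, so samples totalling 1,000,000 bytes that compress to 250,000 bytes give

    double const cRatio = (double)1000000 / (double)250000;   /* == 4.0 ; higher means better compression */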
-
-
-
-/*-********************************************************
- * Benchmarking functions
-**********************************************************/
-/** benchmarkDictBuilder() :
- *  Measure how long a dictionary builder takes and compression ratio with the dictionary built
- *  @return 0 if benchmarked successfully, 1 otherwise
- */
-int benchmarkDictBuilder(sampleInfo *srcInfo, unsigned maxDictSize, ZDICT_random_params_t *randomParam,
-                        ZDICT_cover_params_t *coverParam, ZDICT_legacy_params_t *legacyParam,
-                        ZDICT_fastCover_params_t *fastParam) {
-    /* Local variables */
-    const unsigned displayLevel = randomParam ? randomParam->zParams.notificationLevel :
-                                  coverParam ? coverParam->zParams.notificationLevel :
-                                  legacyParam ? legacyParam->zParams.notificationLevel :
-                                  fastParam ? fastParam->zParams.notificationLevel :
-                                  DEFAULT_DISPLAYLEVEL;   /* no dict */
-    const char* name = randomParam ? "RANDOM" :
-                       coverParam ? "COVER" :
-                       legacyParam ? "LEGACY" :
-                       fastParam ? "FAST" :
-                       "NODICT";   /* no dict */
-    const unsigned cLevel = randomParam ? randomParam->zParams.compressionLevel :
-                            coverParam ? coverParam->zParams.compressionLevel :
-                            legacyParam ? legacyParam->zParams.compressionLevel :
-                            fastParam ? fastParam->zParams.compressionLevel :
-                            DEFAULT_CLEVEL;   /* no dict */
-    int result = 0;
-
-    /* Calculate speed */
-    const UTIL_time_t begin = UTIL_getTime();
-    dictInfo* dInfo = createDictFromFiles(srcInfo, maxDictSize, randomParam, coverParam, legacyParam, fastParam);
-    const U64 timeMicro = UTIL_clockSpanMicro(begin);
-    const double timeSec = timeMicro / (double)SEC_TO_MICRO;
-    if (!dInfo) {
-        DISPLAYLEVEL(1, "%s does not train successfully\n", name);
-        result = 1;
-        goto _cleanup;
-    }
-    DISPLAYLEVEL(1, "%s took %f seconds to execute \n", name, timeSec);
-
-    /* Calculate compression ratio */
-    const double cRatio = compressWithDict(srcInfo, dInfo, cLevel, displayLevel);
-    if (cRatio < 0) {
-        DISPLAYLEVEL(1, "Compressing with %s dictionary does not work\n", name);
-        result = 1;
-        goto _cleanup;
-    }
-    DISPLAYLEVEL(1, "Compression ratio with %s dictionary is %f\n", name, cRatio);
-
-_cleanup:
-    freeDictInfo(dInfo);
-    return result;
-}
-
-
-
-int main(int argCount, const char* argv[])
-{
-    const int displayLevel = DEFAULT_DISPLAYLEVEL;
-    const char* programName = argv[0];
-    int result = 0;
-
-    /* Initialize arguments to default values */
-    unsigned k = 200;
-    unsigned d = 8;
-    unsigned f;
-    unsigned accel;
-    unsigned i;
-    const unsigned cLevel = DEFAULT_CLEVEL;
-    const unsigned dictID = 0;
-    const unsigned maxDictSize = g_defaultMaxDictSize;
-
-    /* Initialize table to store input files */
-    const char** filenameTable = (const char**)malloc(argCount * sizeof(const char*));
-    unsigned filenameIdx = 0;
-
-    char* fileNamesBuf = NULL;
-    unsigned fileNamesNb = filenameIdx;
-    const int followLinks = 0;
-    const char** extendedFileList = NULL;
-
-    /* Parse arguments */
-    for (i = 1; i < argCount; i++) {
-        const char* argument = argv[i];
-        if (longCommandWArg(&argument, "in=")) {
-            filenameTable[filenameIdx] = argument;
-            filenameIdx++;
-            continue;
-        }
-        DISPLAYLEVEL(1, "benchmark: Incorrect parameters\n");
-        return 1;
-    }
-
-    /* Get the list of all files recursively (because followLinks==0) */
-    extendedFileList = UTIL_createFileList(filenameTable, filenameIdx, &fileNamesBuf,
-                                           &fileNamesNb, followLinks);
-    if (extendedFileList) {
-        unsigned u;
-        for (u=0; u<fileNamesNb; u++) filenameTable[u] = extendedFileList[u];
diff --git a/contrib/experimental_dict_builders/fastCover/fastCover.c b/contrib/experimental_dict_builders/fastCover/fastCover.c
deleted file mode 100644
--- a/contrib/experimental_dict_builders/fastCover/fastCover.c
+++ /dev/null
-#include <stdio.h>  /* fprintf */
-#include <stdlib.h> /* malloc, free, qsort */
-#include <string.h> /* memset */
-#include <time.h>   /* clock */
-#include "mem.h" /* read */
-#include "pool.h"
-#include "threading.h"
-#include "fastCover.h"
-#include "zstd_internal.h" /* includes zstd.h */
-#include "zdict.h"
-
-
-/*-*************************************
-* Constants
-***************************************/
-#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB))
-#define FASTCOVER_MAX_F 32
-#define DEFAULT_SPLITPOINT 1.0
-
-/*-*************************************
-* Console display
-***************************************/
-static int g_displayLevel = 2;
-#define DISPLAY(...)                             \
-  {                                              \
-    fprintf(stderr, __VA_ARGS__);                \
-    fflush(stderr);                              \
-  }
-#define LOCALDISPLAYLEVEL(displayLevel, l, ...)  \
-  if (displayLevel >= l) {                       \
-    DISPLAY(__VA_ARGS__);                        \
-  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
-#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)
-
-#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                   \
-  if (displayLevel >= l) {                                         \
-    if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) { \
-      g_time = clock();                                            \
-      DISPLAY(__VA_ARGS__);                                        \
-    }                                                              \
-  }
-#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
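A quick sizing note implied by the definitions above (the value of f is an assumed example): the training context allocated later in this file holds one U32 frequency counter and one U16 segment counter per possible hash value, i.e. (1 << f) entries each, so memory use grows exponentially in f up to FASTCOVER_MAX_F:

    unsigned const f = 20;                                       /* example value */
    size_t const freqBytes    = ((size_t)1 << f) * sizeof(U32);  /* 4 MiB */
    size_t const segFreqBytes = ((size_t)1 << f) * sizeof(U16);  /* 2 MiB */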
-static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
-static clock_t g_time = 0;
-
-
-/*-*************************************
-* Hash Functions
-***************************************/
-static const U64 prime6bytes = 227718039650203ULL;
-static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
-static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
-
-static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
-static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
-static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
-
-
-/**
- * Hash the d-byte value pointed to by p and mod 2^f
- */
-static size_t FASTCOVER_hashPtrToIndex(const void* p, U32 h, unsigned d) {
-  if (d == 6) {
-    return ZSTD_hash6Ptr(p, h) & ((1 << h) - 1);
-  }
-  return ZSTD_hash8Ptr(p, h) & ((1 << h) - 1);
-}
-
-
-/*-*************************************
-* Context
-***************************************/
-typedef struct {
-  const BYTE *samples;
-  size_t *offsets;
-  const size_t *samplesSizes;
-  size_t nbSamples;
-  size_t nbTrainSamples;
-  size_t nbTestSamples;
-  size_t nbDmers;
-  U32 *freqs;
-  U16 *segmentFreqs;
-  unsigned d;
-} FASTCOVER_ctx_t;
-
-
-/*-*************************************
-* Helper functions
-***************************************/
-/**
- * Returns the sum of the sample sizes.
- */
-static size_t FASTCOVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
-  size_t sum = 0;
-  unsigned i;
-  for (i = 0; i < nbSamples; ++i) {
-    sum += samplesSizes[i];
-  }
-  return sum;
-}
-
-
-/*-*************************************
-* fast functions
-***************************************/
-/**
- * A segment is a range in the source as well as the score of the segment.
- */
-typedef struct {
-  U32 begin;
-  U32 end;
-  U32 score;
-} FASTCOVER_segment_t;
-
-
-/**
- * Selects the best segment in an epoch.
- * Segments are scored according to the function:
- *
- * Let F(d) be the frequency of all dmers with hash value d.
- * Let S_i be the hash value of the dmer at position i of segment S which has length k.
- *
- *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
- *
- * Once the dmer with hash value d is in the dictionary we set F(d) = F(d)/2.
- */
-static FASTCOVER_segment_t FASTCOVER_selectSegment(const FASTCOVER_ctx_t *ctx,
-                                                   U32 *freqs, U32 begin, U32 end,
-                                                   ZDICT_fastCover_params_t parameters) {
-  /* Constants */
-  const U32 k = parameters.k;
-  const U32 d = parameters.d;
-  const U32 dmersInK = k - d + 1;
-  /* Try each segment (activeSegment) and save the best (bestSegment) */
-  FASTCOVER_segment_t bestSegment = {0, 0, 0};
-  FASTCOVER_segment_t activeSegment;
-  /* Reset the activeDmers in the segment */
-  /* The activeSegment starts at the beginning of the epoch. */
-  activeSegment.begin = begin;
-  activeSegment.end = begin;
-  activeSegment.score = 0;
-  {
-    /* Slide the activeSegment through the whole epoch.
-     * Save the best segment in bestSegment.
- */ - while (activeSegment.end < end) { - /* Get hash value of current dmer */ - const size_t index = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, parameters.f, ctx->d); - /* Add frequency of this index to score if this is the first occurrence of index in active segment */ - if (ctx->segmentFreqs[index] == 0) { - activeSegment.score += freqs[index]; - } - ctx->segmentFreqs[index] += 1; - /* Increment end of segment */ - activeSegment.end += 1; - /* If the window is now too large, drop the first position */ - if (activeSegment.end - activeSegment.begin == dmersInK + 1) { - /* Get hash value of the dmer to be eliminated from active segment */ - const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, parameters.f, ctx->d); - ctx->segmentFreqs[delIndex] -= 1; - /* Subtract frequency of this index from score if this is the last occurrence of this index in active segment */ - if (ctx->segmentFreqs[delIndex] == 0) { - activeSegment.score -= freqs[delIndex]; - } - /* Increment start of segment */ - activeSegment.begin += 1; - } - /* If this segment is the best so far save it */ - if (activeSegment.score > bestSegment.score) { - bestSegment = activeSegment; - } - } - /* Zero out rest of segmentFreqs array */ - while (activeSegment.begin < end) { - const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, parameters.f, ctx->d); - ctx->segmentFreqs[delIndex] -= 1; - activeSegment.begin += 1; - } - } - { - /* Trim off the zero frequency head and tail from the segment. */ - U32 newBegin = bestSegment.end; - U32 newEnd = bestSegment.begin; - U32 pos; - for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { - const size_t index = FASTCOVER_hashPtrToIndex(ctx->samples + pos, parameters.f, ctx->d); - U32 freq = freqs[index]; - if (freq != 0) { - newBegin = MIN(newBegin, pos); - newEnd = pos + 1; - } - } - bestSegment.begin = newBegin; - bestSegment.end = newEnd; - } - { - /* Zero the frequency of hash value of each dmer covered by the chosen segment. */ - U32 pos; - for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { - const size_t i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, parameters.f, ctx->d); - freqs[i] = 0; - } - } - return bestSegment; -} - -/** - * Check the validity of the parameters. - * Returns non-zero if the parameters are valid and 0 otherwise. - */ -static int FASTCOVER_checkParameters(ZDICT_fastCover_params_t parameters, - size_t maxDictSize) { - /* k, d, and f are required parameters */ - if (parameters.d == 0 || parameters.k == 0 || parameters.f == 0) { - return 0; - } - /* d has to be 6 or 8 */ - if (parameters.d != 6 && parameters.d != 8) { - return 0; - } - /* 0 < f <= FASTCOVER_MAX_F */ - if (parameters.f > FASTCOVER_MAX_F) { - return 0; - } - /* k <= maxDictSize */ - if (parameters.k > maxDictSize) { - return 0; - } - /* d <= k */ - if (parameters.d > parameters.k) { - return 0; - } - /* 0 < splitPoint <= 1 */ - if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) { - return 0; - } - return 1; -} - - -/** - * Clean up a context initialized with `FASTCOVER_ctx_init()`. 
- */
-static void FASTCOVER_ctx_destroy(FASTCOVER_ctx_t *ctx) {
-  if (!ctx) {
-    return;
-  }
-  if (ctx->segmentFreqs) {
-    free(ctx->segmentFreqs);
-    ctx->segmentFreqs = NULL;
-  }
-  if (ctx->freqs) {
-    free(ctx->freqs);
-    ctx->freqs = NULL;
-  }
-  if (ctx->offsets) {
-    free(ctx->offsets);
-    ctx->offsets = NULL;
-  }
-}
-
-/**
- * Calculate the frequency of the hash value of each dmer in ctx->samples
- */
-static void FASTCOVER_computeFrequency(U32 *freqs, unsigned f, FASTCOVER_ctx_t *ctx){
-  size_t start; /* start of current dmer */
-  for (unsigned i = 0; i < ctx->nbTrainSamples; i++) {
-    size_t currSampleStart = ctx->offsets[i];
-    size_t currSampleEnd = ctx->offsets[i+1];
-    start = currSampleStart;
-    while (start + ctx->d <= currSampleEnd) {
-      const size_t dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, ctx->d);
-      freqs[dmerIndex]++;
-      start++;
-    }
-  }
-}
-
-/**
- * Prepare a context for dictionary building.
- * The context is only dependent on the parameter `d` and can be used multiple
- * times.
- * Returns 1 on success or zero on error.
- * The context must be destroyed with `FASTCOVER_ctx_destroy()`.
- */
-static int FASTCOVER_ctx_init(FASTCOVER_ctx_t *ctx, const void *samplesBuffer,
-                              const size_t *samplesSizes, unsigned nbSamples,
-                              unsigned d, double splitPoint, unsigned f) {
-  const BYTE *const samples = (const BYTE *)samplesBuffer;
-  const size_t totalSamplesSize = FASTCOVER_sum(samplesSizes, nbSamples);
-  /* Split samples into testing and training sets */
-  const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
-  const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
-  const size_t trainingSamplesSize = splitPoint < 1.0 ? FASTCOVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
-  const size_t testSamplesSize = splitPoint < 1.0 ?
FASTCOVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize; - /* Checks */ - if (totalSamplesSize < MAX(d, sizeof(U64)) || - totalSamplesSize >= (size_t)FASTCOVER_MAX_SAMPLES_SIZE) { - DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n", - (U32)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20)); - return 0; - } - /* Check if there are at least 5 training samples */ - if (nbTrainSamples < 5) { - DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid.", nbTrainSamples); - return 0; - } - /* Check if there's testing sample */ - if (nbTestSamples < 1) { - DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.", nbTestSamples); - return 0; - } - /* Zero the context */ - memset(ctx, 0, sizeof(*ctx)); - DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples, - (U32)trainingSamplesSize); - DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples, - (U32)testSamplesSize); - - ctx->samples = samples; - ctx->samplesSizes = samplesSizes; - ctx->nbSamples = nbSamples; - ctx->nbTrainSamples = nbTrainSamples; - ctx->nbTestSamples = nbTestSamples; - ctx->nbDmers = trainingSamplesSize - d + 1; - ctx->d = d; - - /* The offsets of each file */ - ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t)); - if (!ctx->offsets) { - DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n"); - FASTCOVER_ctx_destroy(ctx); - return 0; - } - - /* Fill offsets from the samplesSizes */ - { - U32 i; - ctx->offsets[0] = 0; - for (i = 1; i <= nbSamples; ++i) { - ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1]; - } - } - - /* Initialize frequency array of size 2^f */ - ctx->freqs = (U32 *)calloc((1 << f), sizeof(U32)); - ctx->segmentFreqs = (U16 *)calloc((1 << f), sizeof(U16)); - DISPLAYLEVEL(2, "Computing frequencies\n"); - FASTCOVER_computeFrequency(ctx->freqs, f, ctx); - - return 1; -} - - -/** - * Given the prepared context build the dictionary. - */ -static size_t FASTCOVER_buildDictionary(const FASTCOVER_ctx_t *ctx, U32 *freqs, - void *dictBuffer, - size_t dictBufferCapacity, - ZDICT_fastCover_params_t parameters){ - BYTE *const dict = (BYTE *)dictBuffer; - size_t tail = dictBufferCapacity; - /* Divide the data up into epochs of equal size. - * We will select at least one segment from each epoch. - */ - const U32 epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k)); - const U32 epochSize = (U32)(ctx->nbDmers / epochs); - size_t epoch; - DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs, - epochSize); - /* Loop through the epochs until there are no more segments or the dictionary - * is full. - */ - for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs) { - const U32 epochBegin = (U32)(epoch * epochSize); - const U32 epochEnd = epochBegin + epochSize; - size_t segmentSize; - /* Select a segment */ - FASTCOVER_segment_t segment = FASTCOVER_selectSegment( - ctx, freqs, epochBegin, epochEnd, parameters); - - /* If the segment covers no dmers, then we are out of content */ - if (segment.score == 0) { - break; - } - - /* Trim the segment if necessary and if it is too small then we are done */ - segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail); - if (segmentSize < parameters.d) { - break; - } - - /* We fill the dictionary from the back to allow the best segments to be - * referenced with the smallest offsets. 
- */ - tail -= segmentSize; - memcpy(dict + tail, ctx->samples + segment.begin, segmentSize); - DISPLAYUPDATE( - 2, "\r%u%% ", - (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); - } - DISPLAYLEVEL(2, "\r%79s\r", ""); - return tail; -} - - -/** - * FASTCOVER_best_t is used for two purposes: - * 1. Synchronizing threads. - * 2. Saving the best parameters and dictionary. - * - * All of the methods except FASTCOVER_best_init() are thread safe if zstd is - * compiled with multithreaded support. - */ -typedef struct fast_best_s { - ZSTD_pthread_mutex_t mutex; - ZSTD_pthread_cond_t cond; - size_t liveJobs; - void *dict; - size_t dictSize; - ZDICT_fastCover_params_t parameters; - size_t compressedSize; -} FASTCOVER_best_t; - -/** - * Initialize the `FASTCOVER_best_t`. - */ -static void FASTCOVER_best_init(FASTCOVER_best_t *best) { - if (best==NULL) return; /* compatible with init on NULL */ - (void)ZSTD_pthread_mutex_init(&best->mutex, NULL); - (void)ZSTD_pthread_cond_init(&best->cond, NULL); - best->liveJobs = 0; - best->dict = NULL; - best->dictSize = 0; - best->compressedSize = (size_t)-1; - memset(&best->parameters, 0, sizeof(best->parameters)); -} - -/** - * Wait until liveJobs == 0. - */ -static void FASTCOVER_best_wait(FASTCOVER_best_t *best) { - if (!best) { - return; - } - ZSTD_pthread_mutex_lock(&best->mutex); - while (best->liveJobs != 0) { - ZSTD_pthread_cond_wait(&best->cond, &best->mutex); - } - ZSTD_pthread_mutex_unlock(&best->mutex); -} - -/** - * Call FASTCOVER_best_wait() and then destroy the FASTCOVER_best_t. - */ -static void FASTCOVER_best_destroy(FASTCOVER_best_t *best) { - if (!best) { - return; - } - FASTCOVER_best_wait(best); - if (best->dict) { - free(best->dict); - } - ZSTD_pthread_mutex_destroy(&best->mutex); - ZSTD_pthread_cond_destroy(&best->cond); -} - -/** - * Called when a thread is about to be launched. - * Increments liveJobs. - */ -static void FASTCOVER_best_start(FASTCOVER_best_t *best) { - if (!best) { - return; - } - ZSTD_pthread_mutex_lock(&best->mutex); - ++best->liveJobs; - ZSTD_pthread_mutex_unlock(&best->mutex); -} - -/** - * Called when a thread finishes executing, both on error or success. - * Decrements liveJobs and signals any waiting threads if liveJobs == 0. - * If this dictionary is the best so far save it and its parameters. - */ -static void FASTCOVER_best_finish(FASTCOVER_best_t *best, size_t compressedSize, - ZDICT_fastCover_params_t parameters, void *dict, - size_t dictSize) { - if (!best) { - return; - } - { - size_t liveJobs; - ZSTD_pthread_mutex_lock(&best->mutex); - --best->liveJobs; - liveJobs = best->liveJobs; - /* If the new dictionary is better */ - if (compressedSize < best->compressedSize) { - /* Allocate space if necessary */ - if (!best->dict || best->dictSize < dictSize) { - if (best->dict) { - free(best->dict); - } - best->dict = malloc(dictSize); - if (!best->dict) { - best->compressedSize = ERROR(GENERIC); - best->dictSize = 0; - return; - } - } - /* Save the dictionary, parameters, and size */ - memcpy(best->dict, dict, dictSize); - best->dictSize = dictSize; - best->parameters = parameters; - best->compressedSize = compressedSize; - } - ZSTD_pthread_mutex_unlock(&best->mutex); - if (liveJobs == 0) { - ZSTD_pthread_cond_broadcast(&best->cond); - } - } -} - -/** - * Parameters for FASTCOVER_tryParameters(). 
- */ -typedef struct FASTCOVER_tryParameters_data_s { - const FASTCOVER_ctx_t *ctx; - FASTCOVER_best_t *best; - size_t dictBufferCapacity; - ZDICT_fastCover_params_t parameters; -} FASTCOVER_tryParameters_data_t; - -/** - * Tries a set of parameters and updates the FASTCOVER_best_t with the results. - * This function is thread safe if zstd is compiled with multithreaded support. - * It takes its parameters as an *OWNING* opaque pointer to support threading. - */ -static void FASTCOVER_tryParameters(void *opaque) { - /* Save parameters as local variables */ - FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t *)opaque; - const FASTCOVER_ctx_t *const ctx = data->ctx; - const ZDICT_fastCover_params_t parameters = data->parameters; - size_t dictBufferCapacity = data->dictBufferCapacity; - size_t totalCompressedSize = ERROR(GENERIC); - /* Allocate space for hash table, dict, and freqs */ - BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity); - U32 *freqs = (U32*) malloc((1 << parameters.f) * sizeof(U32)); - if (!dict || !freqs) { - DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n"); - goto _cleanup; - } - /* Copy the frequencies because we need to modify them */ - memcpy(freqs, ctx->freqs, (1 << parameters.f) * sizeof(U32)); - /* Build the dictionary */ - { - const size_t tail = FASTCOVER_buildDictionary(ctx, freqs, dict, - dictBufferCapacity, parameters); - - dictBufferCapacity = ZDICT_finalizeDictionary( - dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, - ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, - parameters.zParams); - if (ZDICT_isError(dictBufferCapacity)) { - DISPLAYLEVEL(1, "Failed to finalize dictionary\n"); - goto _cleanup; - } - } - /* Check total compressed size */ - { - /* Pointers */ - ZSTD_CCtx *cctx; - ZSTD_CDict *cdict; - void *dst; - /* Local variables */ - size_t dstCapacity; - size_t i; - /* Allocate dst with enough space to compress the maximum sized sample */ - { - size_t maxSampleSize = 0; - i = parameters.splitPoint < 1.0 ? ctx->nbTrainSamples : 0; - for (; i < ctx->nbSamples; ++i) { - maxSampleSize = MAX(ctx->samplesSizes[i], maxSampleSize); - } - dstCapacity = ZSTD_compressBound(maxSampleSize); - dst = malloc(dstCapacity); - } - /* Create the cctx and cdict */ - cctx = ZSTD_createCCtx(); - cdict = ZSTD_createCDict(dict, dictBufferCapacity, - parameters.zParams.compressionLevel); - if (!dst || !cctx || !cdict) { - goto _compressCleanup; - } - /* Compress each sample and sum their sizes (or error) */ - totalCompressedSize = dictBufferCapacity; - i = parameters.splitPoint < 1.0 ? 
ctx->nbTrainSamples : 0; - for (; i < ctx->nbSamples; ++i) { - const size_t size = ZSTD_compress_usingCDict( - cctx, dst, dstCapacity, ctx->samples + ctx->offsets[i], - ctx->samplesSizes[i], cdict); - if (ZSTD_isError(size)) { - totalCompressedSize = ERROR(GENERIC); - goto _compressCleanup; - } - totalCompressedSize += size; - } - _compressCleanup: - ZSTD_freeCCtx(cctx); - ZSTD_freeCDict(cdict); - if (dst) { - free(dst); - } - } - -_cleanup: - FASTCOVER_best_finish(data->best, totalCompressedSize, parameters, dict, - dictBufferCapacity); - free(data); - if (dict) { - free(dict); - } - if (freqs) { - free(freqs); - } -} - -ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover( - void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, - const size_t *samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t parameters) { - BYTE* const dict = (BYTE*)dictBuffer; - FASTCOVER_ctx_t ctx; - parameters.splitPoint = 1.0; - /* Initialize global data */ - g_displayLevel = parameters.zParams.notificationLevel; - /* Checks */ - if (!FASTCOVER_checkParameters(parameters, dictBufferCapacity)) { - DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n"); - return ERROR(GENERIC); - } - if (nbSamples == 0) { - DISPLAYLEVEL(1, "FASTCOVER must have at least one input file\n"); - return ERROR(GENERIC); - } - if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { - DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", - ZDICT_DICTSIZE_MIN); - return ERROR(dstSize_tooSmall); - } - /* Initialize context */ - if (!FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, - parameters.d, parameters.splitPoint, parameters.f)) { - DISPLAYLEVEL(1, "Failed to initialize context\n"); - return ERROR(GENERIC); - } - /* Build the dictionary */ - DISPLAYLEVEL(2, "Building dictionary\n"); - { - const size_t tail = FASTCOVER_buildDictionary(&ctx, ctx.freqs, dictBuffer, - dictBufferCapacity, parameters); - - const size_t dictionarySize = ZDICT_finalizeDictionary( - dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, - samplesBuffer, samplesSizes, (unsigned)ctx.nbTrainSamples, - parameters.zParams); - if (!ZSTD_isError(dictionarySize)) { - DISPLAYLEVEL(2, "Constructed dictionary of size %u\n", - (U32)dictionarySize); - } - FASTCOVER_ctx_destroy(&ctx); - return dictionarySize; - } -} - - - -ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover( - void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, - const size_t *samplesSizes, unsigned nbSamples, - ZDICT_fastCover_params_t *parameters) { - /* constants */ - const unsigned nbThreads = parameters->nbThreads; - const double splitPoint = - parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint; - const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d; - const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d; - const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k; - const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k; - const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps; - const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1); - const unsigned kIterations = - (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize); - const unsigned f = parameters->f == 0 ? 
23 : parameters->f; - - /* Local variables */ - const int displayLevel = parameters->zParams.notificationLevel; - unsigned iteration = 1; - unsigned d; - unsigned k; - FASTCOVER_best_t best; - POOL_ctx *pool = NULL; - - /* Checks */ - if (splitPoint <= 0 || splitPoint > 1) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect splitPoint\n"); - return ERROR(GENERIC); - } - if (kMinK < kMaxD || kMaxK < kMinK) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect k\n"); - return ERROR(GENERIC); - } - if (nbSamples == 0) { - DISPLAYLEVEL(1, "FASTCOVER must have at least one input file\n"); - return ERROR(GENERIC); - } - if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { - DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", - ZDICT_DICTSIZE_MIN); - return ERROR(dstSize_tooSmall); - } - if (nbThreads > 1) { - pool = POOL_create(nbThreads, 1); - if (!pool) { - return ERROR(memory_allocation); - } - } - /* Initialization */ - FASTCOVER_best_init(&best); - /* Turn down global display level to clean up display at level 2 and below */ - g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1; - /* Loop through d first because each new value needs a new context */ - LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n", - kIterations); - for (d = kMinD; d <= kMaxD; d += 2) { - /* Initialize the context for this value of d */ - FASTCOVER_ctx_t ctx; - LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); - if (!FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f)) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); - FASTCOVER_best_destroy(&best); - POOL_free(pool); - return ERROR(GENERIC); - } - /* Loop through k reusing the same context */ - for (k = kMinK; k <= kMaxK; k += kStepSize) { - /* Prepare the arguments */ - FASTCOVER_tryParameters_data_t *data = (FASTCOVER_tryParameters_data_t *)malloc( - sizeof(FASTCOVER_tryParameters_data_t)); - LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k); - if (!data) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n"); - FASTCOVER_best_destroy(&best); - FASTCOVER_ctx_destroy(&ctx); - POOL_free(pool); - return ERROR(GENERIC); - } - data->ctx = &ctx; - data->best = &best; - data->dictBufferCapacity = dictBufferCapacity; - data->parameters = *parameters; - data->parameters.k = k; - data->parameters.d = d; - data->parameters.f = f; - data->parameters.splitPoint = splitPoint; - data->parameters.steps = kSteps; - data->parameters.zParams.notificationLevel = g_displayLevel; - /* Check the parameters */ - if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity)) { - DISPLAYLEVEL(1, "fastCover parameters incorrect\n"); - free(data); - continue; - } - /* Call the function and pass ownership of data to it */ - FASTCOVER_best_start(&best); - if (pool) { - POOL_add(pool, &FASTCOVER_tryParameters, data); - } else { - FASTCOVER_tryParameters(data); - } - /* Print status */ - LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ", - (U32)((iteration * 100) / kIterations)); - ++iteration; - } - FASTCOVER_best_wait(&best); - FASTCOVER_ctx_destroy(&ctx); - } - LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", ""); - /* Fill the output buffer and parameters with output of the best parameters */ - { - const size_t dictSize = best.dictSize; - if (ZSTD_isError(best.compressedSize)) { - const size_t compressedSize = best.compressedSize; - FASTCOVER_best_destroy(&best); - POOL_free(pool); - return compressedSize; - } - *parameters = best.parameters; - memcpy(dictBuffer, best.dict, dictSize); - 
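For a sense of scale, plugging the defaults used above (d unset gives {6, 8}, k unset gives [50, 2000], steps unset gives 40) into the `kStepSize`/`kIterations` arithmetic yields a concrete trial count (a worked example, not code from the diff):

```c
/* kStepSize = MAX((kMaxK - kMinK) / kSteps, 1) */
unsigned const kStepSize   = (2000 - 50) / 40;   /* 1950 / 40 = 48 (integer division) */
/* kIterations = (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize) */
unsigned const kIterations = (1 + (8 - 6) / 2) * (1 + (2000 - 50) / 48);
                                                 /* = 2 * (1 + 40) = 82 parameter sets tried */
```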
FASTCOVER_best_destroy(&best); - POOL_free(pool); - return dictSize; - } - -} diff --git a/contrib/experimental_dict_builders/fastCover/fastCover.h b/contrib/experimental_dict_builders/fastCover/fastCover.h deleted file mode 100644 index 958e9f42393..00000000000 --- a/contrib/experimental_dict_builders/fastCover/fastCover.h +++ /dev/null @@ -1,57 +0,0 @@ -#include <stdio.h> /* fprintf */ -#include <stdlib.h> /* malloc, free, qsort */ -#include <string.h> /* memset */ -#include <time.h> /* clock */ -#include "mem.h" /* read */ -#include "pool.h" -#include "threading.h" -#include "zstd_internal.h" /* includes zstd.h */ -#ifndef ZDICT_STATIC_LINKING_ONLY -#define ZDICT_STATIC_LINKING_ONLY -#endif -#include "zdict.h" - - -typedef struct { - unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ - unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ - unsigned f; /* log of size of frequency array */ - unsigned steps; /* Number of steps : Only used for optimization : 0 means default (32) : Higher means more parameters checked */ - unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ - double splitPoint; /* Percentage of samples used for training: the first nbSamples * splitPoint samples will be used for training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */ - ZDICT_params_t zParams; -} ZDICT_fastCover_params_t; - - -/*! ZDICT_optimizeTrainFromBuffer_fastCover(): - * Train a dictionary from an array of samples using a modified version of the COVER algorithm. - * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. - * The resulting dictionary will be saved into `dictBuffer`. - * All of the parameters except for f are optional. - * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8, 10, 12, 14, 16}. - * If steps is zero, the default number of steps is used. - * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [16, 2048]. - * - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError(). - * On success `*parameters` contains the parameters selected. - */ - ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover( - void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, - const size_t *samplesSizes, unsigned nbSamples, - ZDICT_fastCover_params_t *parameters); - - -/*! ZDICT_trainFromBuffer_fastCover(): - * Train a dictionary from an array of samples using a modified version of the COVER algorithm. - * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. - * The resulting dictionary will be saved into `dictBuffer`. - * d, k, and f are required. - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError().
- */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover( - void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, - const size_t *samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t parameters); diff --git a/contrib/experimental_dict_builders/fastCover/main.c b/contrib/experimental_dict_builders/fastCover/main.c deleted file mode 100644 index df7d91812e2..00000000000 --- a/contrib/experimental_dict_builders/fastCover/main.c +++ /dev/null @@ -1,183 +0,0 @@ -#include <stdio.h> /* fprintf */ -#include <stdlib.h> /* malloc, free, qsort */ -#include <string.h> /* strcmp, strlen */ -#include <errno.h> /* errno */ -#include -#include "fastCover.h" -#include "io.h" -#include "util.h" -#include "zdict.h" - - -/*-************************************* -* Console display -***************************************/ -#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) -#define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); } - -static const U64 g_refreshRate = SEC_TO_MICRO / 6; -static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER; - -#define DISPLAYUPDATE(l, ...) { if (displayLevel>=l) { \ - if ((UTIL_clockSpanMicro(g_displayClock) > g_refreshRate) || (displayLevel>=4)) \ - { g_displayClock = UTIL_getTime(); DISPLAY(__VA_ARGS__); \ - if (displayLevel>=4) fflush(stderr); } } } - - -/*-************************************* -* Exceptions -***************************************/ -#ifndef DEBUG -# define DEBUG 0 -#endif -#define DEBUGOUTPUT(...) if (DEBUG) DISPLAY(__VA_ARGS__); -#define EXM_THROW(error, ...) \ -{ \ - DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \ - DISPLAY("Error %i : ", error); \ - DISPLAY(__VA_ARGS__); \ - DISPLAY("\n"); \ - exit(error); \ -} - - -/*-************************************* -* Constants -***************************************/ -static const unsigned g_defaultMaxDictSize = 110 KB; -#define DEFAULT_CLEVEL 3 - - -/*-************************************* -* FASTCOVER -***************************************/ -int FASTCOVER_trainFromFiles(const char* dictFileName, sampleInfo *info, - unsigned maxDictSize, - ZDICT_fastCover_params_t *params) { - unsigned const displayLevel = params->zParams.notificationLevel; - void* const dictBuffer = malloc(maxDictSize); - - int result = 0; - - /* Checks */ - if (!dictBuffer) - EXM_THROW(12, "not enough memory for trainFromFiles"); /* should not happen */ - - { size_t dictSize; - /* Run the optimize version if either k or d is not provided */ - if (!params->d || !params->k) { - dictSize = ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, maxDictSize, info->srcBuffer, - info->samplesSizes, info->nbSamples, params); - } else { - dictSize = ZDICT_trainFromBuffer_fastCover(dictBuffer, maxDictSize, info->srcBuffer, - info->samplesSizes, info->nbSamples, *params); - } - DISPLAYLEVEL(2, "k=%u\nd=%u\nf=%u\nsteps=%u\nsplit=%u\n", params->k, params->d, params->f, params->steps, (unsigned)(params->splitPoint*100)); - if (ZDICT_isError(dictSize)) { - DISPLAYLEVEL(1, "dictionary training failed : %s \n", ZDICT_getErrorName(dictSize)); /* should not happen */ - result = 1; - goto _done; - } - /* save dict */ - DISPLAYLEVEL(2, "Save dictionary of size %u into file %s \n", (U32)dictSize, dictFileName); - saveDict(dictFileName, dictBuffer, dictSize); - } - - /* clean up */ -_done: - free(dictBuffer); - return result; -} - - - -int main(int argCount, const char* argv[]) -{ - int displayLevel = 2; - const char* programName = argv[0]; - int operationResult = 0; - - /* Initialize arguments to default values */ - unsigned k = 0; - 
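Since the header above says d, k, and f are all required for the non-optimizing entry point, a hedged usage sketch looks like this (buffer names and parameter values are illustrative only):

```c
/* Sketch: train once with fixed parameters via the deleted header's API. */
ZDICT_fastCover_params_t params;
memset(&params, 0, sizeof(params));
params.k = 200;   /* segment size */
params.d = 8;     /* dmer size */
params.f = 23;    /* log2 of the frequency array size */
{   size_t const dictSize = ZDICT_trainFromBuffer_fastCover(
            dictBuffer, dictBufferCapacity,
            samplesBuffer, samplesSizes, nbSamples, params);
    if (ZDICT_isError(dictSize)) { /* report ZDICT_getErrorName(dictSize) */ }
}
```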
unsigned d = 0; - unsigned f = 23; - unsigned steps = 32; - unsigned nbThreads = 1; - unsigned split = 100; - const char* outputFile = "fastCoverDict"; - unsigned dictID = 0; - unsigned maxDictSize = g_defaultMaxDictSize; - - /* Initialize table to store input files */ - const char** filenameTable = (const char**)malloc(argCount * sizeof(const char*)); - unsigned filenameIdx = 0; - - char* fileNamesBuf = NULL; - unsigned fileNamesNb = filenameIdx; - int followLinks = 0; /* follow directory recursively */ - const char** extendedFileList = NULL; - - /* Parse arguments */ - for (int i = 1; i < argCount; i++) { - const char* argument = argv[i]; - if (longCommandWArg(&argument, "k=")) { k = readU32FromChar(&argument); continue; } - if (longCommandWArg(&argument, "d=")) { d = readU32FromChar(&argument); continue; } - if (longCommandWArg(&argument, "f=")) { f = readU32FromChar(&argument); continue; } - if (longCommandWArg(&argument, "steps=")) { steps = readU32FromChar(&argument); continue; } - if (longCommandWArg(&argument, "split=")) { split = readU32FromChar(&argument); continue; } - if (longCommandWArg(&argument, "dictID=")) { dictID = readU32FromChar(&argument); continue; } - if (longCommandWArg(&argument, "maxdict=")) { maxDictSize = readU32FromChar(&argument); continue; } - if (longCommandWArg(&argument, "in=")) { - filenameTable[filenameIdx] = argument; - filenameIdx++; - continue; - } - if (longCommandWArg(&argument, "out=")) { - outputFile = argument; - continue; - } - DISPLAYLEVEL(1, "Incorrect parameters\n"); - operationResult = 1; - return operationResult; - } - - /* Get the list of all files recursively (because followLinks==0)*/ - extendedFileList = UTIL_createFileList(filenameTable, filenameIdx, &fileNamesBuf, - &fileNamesNb, followLinks); - if (extendedFileList) { - unsigned u; - for (u=0; u -#include <stdio.h> /* fprintf */ -#include <stdlib.h> /* malloc, free, qsort */ -#include <string.h> /* strcmp, strlen */ -#include <errno.h> /* errno */ -#include -#include "io.h" -#include "fileio.h" /* stdinmark, stdoutmark, ZSTD_EXTENSION */ -#include "platform.h" /* Large Files support */ -#include "util.h" -#include "zdict.h" - -/*-************************************* -* Console display -***************************************/ -#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) -#define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); } - -static const U64 g_refreshRate = SEC_TO_MICRO / 6; -static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER; - -#define DISPLAYUPDATE(l, ...) { if (displayLevel>=l) { \ - if ((UTIL_clockSpanMicro(g_displayClock) > g_refreshRate) || (displayLevel>=4)) \ - { g_displayClock = UTIL_getTime(); DISPLAY(__VA_ARGS__); \ - if (displayLevel>=4) fflush(stderr); } } } - -/*-************************************* -* Exceptions -***************************************/ -#ifndef DEBUG -# define DEBUG 0 -#endif -#define DEBUGOUTPUT(...) if (DEBUG) DISPLAY(__VA_ARGS__); -#define EXM_THROW(error, ...) \ -{ \ - DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \ - DISPLAY("Error %i : ", error); \ - DISPLAY(__VA_ARGS__); \ - DISPLAY("\n"); \ - exit(error); \ -} - - -/*-************************************* -* Constants -***************************************/ - -#define SAMPLESIZE_MAX (128 KB) -#define RANDOM_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB)) -#define RANDOM_MEMMULT 9 -static const size_t g_maxMemory = (sizeof(size_t) == 4) ?
- (2 GB - 64 MB) : ((size_t)(512 MB) << sizeof(size_t)); - -#define NOISELENGTH 32 - - -/*-************************************* -* Commandline related functions -***************************************/ -unsigned readU32FromChar(const char** stringPtr){ - const char errorMsg[] = "error: numeric value too large"; - unsigned result = 0; - while ((**stringPtr >='0') && (**stringPtr <='9')) { - unsigned const max = (((unsigned)(-1)) / 10) - 1; - if (result > max) exit(1); - result *= 10, result += **stringPtr - '0', (*stringPtr)++ ; - } - if ((**stringPtr=='K') || (**stringPtr=='M')) { - unsigned const maxK = ((unsigned)(-1)) >> 10; - if (result > maxK) exit(1); - result <<= 10; - if (**stringPtr=='M') { - if (result > maxK) exit(1); - result <<= 10; - } - (*stringPtr)++; /* skip `K` or `M` */ - if (**stringPtr=='i') (*stringPtr)++; - if (**stringPtr=='B') (*stringPtr)++; - } - return result; -} - -unsigned longCommandWArg(const char** stringPtr, const char* longCommand){ - size_t const comSize = strlen(longCommand); - int const result = !strncmp(*stringPtr, longCommand, comSize); - if (result) *stringPtr += comSize; - return result; -} - - -/* ******************************************************** -* File related operations -**********************************************************/ -/** loadFiles() : - * load samples from files listed in fileNamesTable into buffer. - * works even if buffer is too small to load all samples. - * Also provides the size of each sample into sampleSizes table - * which must be sized correctly, using DiB_fileStats(). - * @return : nb of samples effectively loaded into `buffer` - * *bufferSizePtr is modified, it provides the amount data loaded within buffer. - * sampleSizes is filled with the size of each sample. - */ -static unsigned loadFiles(void* buffer, size_t* bufferSizePtr, size_t* sampleSizes, - unsigned sstSize, const char** fileNamesTable, unsigned nbFiles, - size_t targetChunkSize, unsigned displayLevel) { - char* const buff = (char*)buffer; - size_t pos = 0; - unsigned nbLoadedChunks = 0, fileIndex; - - for (fileIndex=0; fileIndex *bufferSizePtr-pos) break; - { size_t const readSize = fread(buff+pos, 1, toLoad, f); - if (readSize != toLoad) EXM_THROW(11, "Pb reading %s", fileName); - pos += readSize; - sampleSizes[nbLoadedChunks++] = toLoad; - remainingToLoad -= targetChunkSize; - if (nbLoadedChunks == sstSize) { /* no more space left in sampleSizes table */ - fileIndex = nbFiles; /* stop there */ - break; - } - if (toLoad < targetChunkSize) { - fseek(f, (long)(targetChunkSize - toLoad), SEEK_CUR); - } } } - fclose(f); - } - DISPLAYLEVEL(2, "\r%79s\r", ""); - *bufferSizePtr = pos; - DISPLAYLEVEL(4, "loaded : %u KB \n", (U32)(pos >> 10)) - return nbLoadedChunks; -} - -#define rotl32(x,r) ((x << r) | (x >> (32 - r))) -static U32 getRand(U32* src) -{ - static const U32 prime1 = 2654435761U; - static const U32 prime2 = 2246822519U; - U32 rand32 = *src; - rand32 *= prime1; - rand32 ^= prime2; - rand32 = rotl32(rand32, 13); - *src = rand32; - return rand32 >> 5; -} - -/* shuffle() : - * shuffle a table of file names in a semi-random way - * It improves dictionary quality by reducing "locality" impact, so if sample set is very large, - * it will load random elements from it, instead of just the first ones. 
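A worked example of `readU32FromChar()`'s suffix handling above (inputs hypothetical): 'K' shifts the parsed value left by 10, 'M' by 20, and a trailing 'i' and/or 'B' is consumed afterwards:

```c
const char* a = "110KB";  unsigned const va = readU32FromChar(&a);  /* 110 << 10 = 112640 */
const char* b = "2MiB";   unsigned const vb = readU32FromChar(&b);  /* 2 << 20  = 2097152 */
const char* c = "500";    unsigned const vc = readU32FromChar(&c);  /* 500, no suffix */
```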
*/ -static void shuffle(const char** fileNamesTable, unsigned nbFiles) { - U32 seed = 0xFD2FB528; - unsigned i; - for (i = nbFiles - 1; i > 0; --i) { - unsigned const j = getRand(&seed) % (i + 1); - const char* const tmp = fileNamesTable[j]; - fileNamesTable[j] = fileNamesTable[i]; - fileNamesTable[i] = tmp; - } -} - - -/*-******************************************************** -* Dictionary training functions -**********************************************************/ -size_t findMaxMem(unsigned long long requiredMem) { - size_t const step = 8 MB; - void* testmem = NULL; - - requiredMem = (((requiredMem >> 23) + 1) << 23); - requiredMem += step; - if (requiredMem > g_maxMemory) requiredMem = g_maxMemory; - - while (!testmem) { - testmem = malloc((size_t)requiredMem); - requiredMem -= step; - } - - free(testmem); - return (size_t)requiredMem; -} - -void saveDict(const char* dictFileName, - const void* buff, size_t buffSize) { - FILE* const f = fopen(dictFileName, "wb"); - if (f==NULL) EXM_THROW(3, "cannot open %s ", dictFileName); - - { size_t const n = fwrite(buff, 1, buffSize, f); - if (n!=buffSize) EXM_THROW(4, "%s : write error", dictFileName) } - - { size_t const n = (size_t)fclose(f); - if (n!=0) EXM_THROW(5, "%s : flush error", dictFileName) } -} - -/*! getFileStats() : - * Given a list of files, and a chunkSize (0 == no chunk, whole files) - * provides the amount of data to be loaded and the resulting nb of samples. - * This is useful primarily for allocation purpose => sample buffer, and sample sizes table. - */ -static fileStats getFileStats(const char** fileNamesTable, unsigned nbFiles, - size_t chunkSize, unsigned displayLevel) { - fileStats fs; - unsigned n; - memset(&fs, 0, sizeof(fs)); - for (n=0; n 2*SAMPLESIZE_MAX); - fs.nbSamples += nbSamples; - } - DISPLAYLEVEL(4, "Preparing to load : %u KB \n", (U32)(fs.totalSizeToLoad >> 10)); - return fs; -} - - - - -sampleInfo* getSampleInfo(const char** fileNamesTable, unsigned nbFiles, size_t chunkSize, - unsigned maxDictSize, const unsigned displayLevel) { - fileStats const fs = getFileStats(fileNamesTable, nbFiles, chunkSize, displayLevel); - size_t* const sampleSizes = (size_t*)malloc(fs.nbSamples * sizeof(size_t)); - size_t const memMult = RANDOM_MEMMULT; - size_t const maxMem = findMaxMem(fs.totalSizeToLoad * memMult) / memMult; - size_t loadedSize = (size_t) MIN ((unsigned long long)maxMem, fs.totalSizeToLoad); - void* const srcBuffer = malloc(loadedSize+NOISELENGTH); - - /* Checks */ - if ((!sampleSizes) || (!srcBuffer)) - EXM_THROW(12, "not enough memory for trainFromFiles"); /* should not happen */ - if (fs.oneSampleTooLarge) { - DISPLAYLEVEL(2, "! Warning : some sample(s) are very large \n"); - DISPLAYLEVEL(2, "! Note that dictionary is only useful for small samples. \n"); - DISPLAYLEVEL(2, "! As a consequence, only the first %u bytes of each sample are loaded \n", SAMPLESIZE_MAX); - } - if (fs.nbSamples < 5) { - DISPLAYLEVEL(2, "! Warning : nb of samples too low for proper processing ! \n"); - DISPLAYLEVEL(2, "! Please provide _one file per sample_. \n"); - DISPLAYLEVEL(2, "! Alternatively, split files into fixed-size blocks representative of samples, with -B# \n"); - EXM_THROW(14, "nb of samples too low"); /* we now clearly forbid this case */ - } - if (fs.totalSizeToLoad < (unsigned long long)(8 * maxDictSize)) { - DISPLAYLEVEL(2, "! Warning : data size of samples too small for target dictionary size \n"); - DISPLAYLEVEL(2, "! 
Samples should be about 100x larger than target dictionary size \n"); - } - - /* init */ - if (loadedSize < fs.totalSizeToLoad) - DISPLAYLEVEL(1, "Not enough memory; training on %u MB only...\n", (unsigned)(loadedSize >> 20)); - - /* Load input buffer */ - DISPLAYLEVEL(3, "Shuffling input files\n"); - shuffle(fileNamesTable, nbFiles); - nbFiles = loadFiles(srcBuffer, &loadedSize, sampleSizes, fs.nbSamples, - fileNamesTable, nbFiles, chunkSize, displayLevel); - - sampleInfo *info = (sampleInfo *)malloc(sizeof(sampleInfo)); - - info->nbSamples = fs.nbSamples; - info->samplesSizes = sampleSizes; - info->srcBuffer = srcBuffer; - - return info; -} - - -void freeSampleInfo(sampleInfo *info) { - if (!info) return; - if (info->samplesSizes) free((void*)(info->samplesSizes)); - if (info->srcBuffer) free((void*)(info->srcBuffer)); - free(info); -} diff --git a/contrib/experimental_dict_builders/randomDictBuilder/io.h b/contrib/experimental_dict_builders/randomDictBuilder/io.h deleted file mode 100644 index 0ee24604eed..00000000000 --- a/contrib/experimental_dict_builders/randomDictBuilder/io.h +++ /dev/null @@ -1,60 +0,0 @@ -#include <stdio.h> /* fprintf */ -#include <stdlib.h> /* malloc, free, qsort */ -#include <string.h> /* strcmp, strlen */ -#include <errno.h> /* errno */ -#include -#include "zstd_internal.h" /* includes zstd.h */ -#include "fileio.h" /* stdinmark, stdoutmark, ZSTD_EXTENSION */ -#include "platform.h" /* Large Files support */ -#include "util.h" -#include "zdict.h" - - -/*-************************************* -* Structs -***************************************/ -typedef struct { - U64 totalSizeToLoad; - unsigned oneSampleTooLarge; - unsigned nbSamples; -} fileStats; - -typedef struct { - const void* srcBuffer; - const size_t *samplesSizes; - size_t nbSamples; -} sampleInfo; - - - -/*! getSampleInfo(): - * Load from input files and add samples to buffer - * @return: a sampleInfo struct containing information about buffer where samples are stored, - * size of each sample, and total number of samples - */ -sampleInfo* getSampleInfo(const char** fileNamesTable, unsigned nbFiles, size_t chunkSize, - unsigned maxDictSize, const unsigned displayLevel); - - - -/*! freeSampleInfo(): - * Free memory allocated for info - */ -void freeSampleInfo(sampleInfo *info); - - - -/*! saveDict(): - * Save data stored on buff to dictFileName - */ -void saveDict(const char* dictFileName, const void* buff, size_t buffSize); - - -unsigned readU32FromChar(const char** stringPtr); - -/** longCommandWArg() : - * check if *stringPtr is the same as longCommand. - * If yes, @return 1 and advances *stringPtr to the position which immediately follows longCommand. - * @return 0 and doesn't modify *stringPtr otherwise. - */ -unsigned longCommandWArg(const char** stringPtr, const char* longCommand); diff --git a/contrib/experimental_dict_builders/randomDictBuilder/main.c b/contrib/experimental_dict_builders/randomDictBuilder/main.c deleted file mode 100644 index 3ad88574609..00000000000 --- a/contrib/experimental_dict_builders/randomDictBuilder/main.c +++ /dev/null @@ -1,161 +0,0 @@ -#include <stdio.h> /* fprintf */ -#include <stdlib.h> /* malloc, free, qsort */ -#include <string.h> /* strcmp, strlen */ -#include <errno.h> /* errno */ -#include -#include "random.h" -#include "io.h" -#include "util.h" -#include "zdict.h" - - -/*-************************************* -* Console display -***************************************/ -#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) -#define DISPLAYLEVEL(l, ...) 
if (displayLevel>=l) { DISPLAY(__VA_ARGS__); } - -static const U64 g_refreshRate = SEC_TO_MICRO / 6; -static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER; - -#define DISPLAYUPDATE(l, ...) { if (displayLevel>=l) { \ - if ((UTIL_clockSpanMicro(g_displayClock) > g_refreshRate) || (displayLevel>=4)) \ - { g_displayClock = UTIL_getTime(); DISPLAY(__VA_ARGS__); \ - if (displayLevel>=4) fflush(stderr); } } } - - -/*-************************************* -* Exceptions -***************************************/ -#ifndef DEBUG -# define DEBUG 0 -#endif -#define DEBUGOUTPUT(...) if (DEBUG) DISPLAY(__VA_ARGS__); -#define EXM_THROW(error, ...) \ -{ \ - DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \ - DISPLAY("Error %i : ", error); \ - DISPLAY(__VA_ARGS__); \ - DISPLAY("\n"); \ - exit(error); \ -} - - -/*-************************************* -* Constants -***************************************/ -static const unsigned g_defaultMaxDictSize = 110 KB; -#define DEFAULT_CLEVEL 3 -#define DEFAULT_k 200 -#define DEFAULT_OUTPUTFILE "defaultDict" -#define DEFAULT_DICTID 0 - - - -/*-************************************* -* RANDOM -***************************************/ -int RANDOM_trainFromFiles(const char* dictFileName, sampleInfo *info, - unsigned maxDictSize, - ZDICT_random_params_t *params) { - unsigned const displayLevel = params->zParams.notificationLevel; - void* const dictBuffer = malloc(maxDictSize); - - int result = 0; - - /* Checks */ - if (!dictBuffer) - EXM_THROW(12, "not enough memory for trainFromFiles"); /* should not happen */ - - { size_t dictSize; - dictSize = ZDICT_trainFromBuffer_random(dictBuffer, maxDictSize, info->srcBuffer, - info->samplesSizes, info->nbSamples, *params); - DISPLAYLEVEL(2, "k=%u\n", params->k); - if (ZDICT_isError(dictSize)) { - DISPLAYLEVEL(1, "dictionary training failed : %s \n", ZDICT_getErrorName(dictSize)); /* should not happen */ - result = 1; - goto _done; - } - /* save dict */ - DISPLAYLEVEL(2, "Save dictionary of size %u into file %s \n", (U32)dictSize, dictFileName); - saveDict(dictFileName, dictBuffer, dictSize); - } - - /* clean up */ -_done: - free(dictBuffer); - return result; -} - - - -int main(int argCount, const char* argv[]) -{ - int displayLevel = 2; - const char* programName = argv[0]; - int operationResult = 0; - - /* Initialize arguments to default values */ - unsigned k = DEFAULT_k; - const char* outputFile = DEFAULT_OUTPUTFILE; - unsigned dictID = DEFAULT_DICTID; - unsigned maxDictSize = g_defaultMaxDictSize; - - /* Initialize table to store input files */ - const char** filenameTable = (const char**)malloc(argCount * sizeof(const char*)); - unsigned filenameIdx = 0; - - /* Parse arguments */ - for (int i = 1; i < argCount; i++) { - const char* argument = argv[i]; - if (longCommandWArg(&argument, "k=")) { k = readU32FromChar(&argument); continue; } - if (longCommandWArg(&argument, "dictID=")) { dictID = readU32FromChar(&argument); continue; } - if (longCommandWArg(&argument, "maxdict=")) { maxDictSize = readU32FromChar(&argument); continue; } - if (longCommandWArg(&argument, "in=")) { - filenameTable[filenameIdx] = argument; - filenameIdx++; - continue; - } - if (longCommandWArg(&argument, "out=")) { - outputFile = argument; - continue; - } - DISPLAYLEVEL(1, "Incorrect parameters\n"); - operationResult = 1; - return operationResult; - } - - char* fileNamesBuf = NULL; - unsigned fileNamesNb = filenameIdx; - int followLinks = 0; /* follow directory recursively */ - const char** extendedFileList = NULL; - 
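The argument loop above relies on the `longCommandWArg()` prefix convention documented in io.h; a hedged sketch of the idiom (values illustrative):

```c
/* longCommandWArg() returns non-zero and advances past the prefix on a match. */
const char* arg = "k=500";
if (longCommandWArg(&arg, "k=")) {
    unsigned const k = readU32FromChar(&arg);   /* arg pointed at "500", so k == 500 */
}
```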
extendedFileList = UTIL_createFileList(filenameTable, filenameIdx, &fileNamesBuf, - &fileNamesNb, followLinks); - if (extendedFileList) { - unsigned u; - for (u=0; u -#include <stdio.h> /* fprintf */ -#include <stdlib.h> /* malloc, free, qsort */ -#include <string.h> /* memset */ -#include <time.h> /* clock */ -#include "random.h" -#include "util.h" /* UTIL_getFileSize, UTIL_getTotalFileSize */ -#ifndef ZDICT_STATIC_LINKING_ONLY -#define ZDICT_STATIC_LINKING_ONLY -#endif -#include "zdict.h" - -/*-************************************* -* Console display -***************************************/ -#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) -#define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); } - -#define LOCALDISPLAYUPDATE(displayLevel, l, ...) \ - if (displayLevel >= l) { \ - if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) { \ - g_time = clock(); \ - DISPLAY(__VA_ARGS__); \ - } \ - } -#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(displayLevel, l, __VA_ARGS__) -static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; -static clock_t g_time = 0; - - - -/* ******************************************************** -* Random Dictionary Builder -**********************************************************/ -/** - * Returns the sum of the sample sizes. - */ -static size_t RANDOM_sum(const size_t *samplesSizes, unsigned nbSamples) { - size_t sum = 0; - unsigned i; - for (i = 0; i < nbSamples; ++i) { - sum += samplesSizes[i]; - } - return sum; -} - - -/** - * A segment is an inclusive range in the source. - */ -typedef struct { - U32 begin; - U32 end; -} RANDOM_segment_t; - - -/** - * Selects a random segment from totalSamplesSize - k + 1 possible segments - */ -static RANDOM_segment_t RANDOM_selectSegment(const size_t totalSamplesSize, - ZDICT_random_params_t parameters) { - const U32 k = parameters.k; - RANDOM_segment_t segment; - unsigned index; - - /* Randomly generate a number from 0 to sampleSizes - k */ - index = rand()%(totalSamplesSize - k + 1); - - /* inclusive */ - segment.begin = index; - segment.end = index + k - 1; - - return segment; -} - - -/** - * Check the validity of the parameters. - * Returns non-zero if the parameters are valid and 0 otherwise. - */ -static int RANDOM_checkParameters(ZDICT_random_params_t parameters, - size_t maxDictSize) { - /* k is a required parameter */ - if (parameters.k == 0) { - return 0; - } - /* k <= maxDictSize */ - if (parameters.k > maxDictSize) { - return 0; - } - return 1; -} - - -/** - * Given the prepared context build the dictionary.
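One caveat worth noting about `RANDOM_selectSegment()` above: `rand() % (totalSamplesSize - k + 1)` is slightly biased whenever the range does not evenly divide the number of values `rand()` can return, and `RAND_MAX` may be far smaller than `totalSamplesSize` on some platforms. A hedged rejection-sampling alternative (not part of the diff; assumes the range fits in `RAND_MAX`):

```c
#include <stdlib.h>

/* Uniform draw from [0, n) without modulo bias, assuming 0 < n <= RAND_MAX. */
static unsigned uniformBelow(unsigned n) {
    unsigned const limit = RAND_MAX - (RAND_MAX % n);  /* a multiple of n */
    unsigned r;
    do { r = (unsigned)rand(); } while (r >= limit);   /* reject the uneven tail */
    return r % n;
}
```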
- */ -static size_t RANDOM_buildDictionary(const size_t totalSamplesSize, const BYTE *samples, - void *dictBuffer, size_t dictBufferCapacity, - ZDICT_random_params_t parameters) { - BYTE *const dict = (BYTE *)dictBuffer; - size_t tail = dictBufferCapacity; - const int displayLevel = parameters.zParams.notificationLevel; - while (tail > 0) { - - /* Select a segment */ - RANDOM_segment_t segment = RANDOM_selectSegment(totalSamplesSize, parameters); - - size_t segmentSize; - segmentSize = MIN(segment.end - segment.begin + 1, tail); - - tail -= segmentSize; - memcpy(dict + tail, samples + segment.begin, segmentSize); - DISPLAYUPDATE( - 2, "\r%u%% ", - (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); - } - - return tail; -} - - - - -ZDICTLIB_API size_t ZDICT_trainFromBuffer_random( - void *dictBuffer, size_t dictBufferCapacity, - const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, - ZDICT_random_params_t parameters) { - const int displayLevel = parameters.zParams.notificationLevel; - BYTE* const dict = (BYTE*)dictBuffer; - /* Checks */ - if (!RANDOM_checkParameters(parameters, dictBufferCapacity)) { - DISPLAYLEVEL(1, "k is incorrect\n"); - return ERROR(GENERIC); - } - if (nbSamples == 0) { - DISPLAYLEVEL(1, "Random must have at least one input file\n"); - return ERROR(GENERIC); - } - if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { - DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", - ZDICT_DICTSIZE_MIN); - return ERROR(dstSize_tooSmall); - } - const size_t totalSamplesSize = RANDOM_sum(samplesSizes, nbSamples); - const BYTE *const samples = (const BYTE *)samplesBuffer; - - DISPLAYLEVEL(2, "Building dictionary\n"); - { - const size_t tail = RANDOM_buildDictionary(totalSamplesSize, samples, - dictBuffer, dictBufferCapacity, parameters); - const size_t dictSize = ZDICT_finalizeDictionary( - dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, - samplesBuffer, samplesSizes, nbSamples, parameters.zParams); - if (!ZSTD_isError(dictSize)) { - DISPLAYLEVEL(2, "Constructed dictionary of size %u\n", - (U32)dictSize); - } - return dictSize; - } -} diff --git a/contrib/experimental_dict_builders/randomDictBuilder/random.h b/contrib/experimental_dict_builders/randomDictBuilder/random.h deleted file mode 100644 index 352775f950c..00000000000 --- a/contrib/experimental_dict_builders/randomDictBuilder/random.h +++ /dev/null @@ -1,29 +0,0 @@ -#include <stdio.h> /* fprintf */ -#include <stdlib.h> /* malloc, free, qsort */ -#include <string.h> /* memset */ -#include <time.h> /* clock */ -#include "zstd_internal.h" /* includes zstd.h */ -#ifndef ZDICT_STATIC_LINKING_ONLY -#define ZDICT_STATIC_LINKING_ONLY -#endif -#include "zdict.h" - - - -typedef struct { - unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+]; Default to 200 */ - ZDICT_params_t zParams; -} ZDICT_random_params_t; - - -/*! ZDICT_trainFromBuffer_random(): - * Train a dictionary from an array of samples. - * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. - * The resulting dictionary will be saved into `dictBuffer`. - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError().
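For completeness, a hedged usage sketch of the random builder's single entry point, mirroring the doc comment above (buffers illustrative; only k is required):

```c
ZDICT_random_params_t rParams;
memset(&rParams, 0, sizeof(rParams));
rParams.k = 200;   /* segment size; RANDOM_checkParameters() rejects k == 0 or k > capacity */
{   size_t const dictSize = ZDICT_trainFromBuffer_random(
            dictBuffer, dictBufferCapacity,
            samplesBuffer, samplesSizes, nbSamples, rParams);
    if (ZDICT_isError(dictSize)) { /* report ZDICT_getErrorName(dictSize) */ }
}
```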
- */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_random( void *dictBuffer, size_t dictBufferCapacity, - const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, - ZDICT_random_params_t parameters); diff --git a/contrib/experimental_dict_builders/randomDictBuilder/test.sh b/contrib/experimental_dict_builders/randomDictBuilder/test.sh deleted file mode 100644 index 1eb732e52a0..00000000000 --- a/contrib/experimental_dict_builders/randomDictBuilder/test.sh +++ /dev/null @@ -1,14 +0,0 @@ -echo "Building random dictionary with in=../../lib/common k=200 out=dict1" -./main in=../../../lib/common k=200 out=dict1 -zstd -be3 -D dict1 -r ../../../lib/common -q -echo "Building random dictionary with in=../../lib/common k=500 out=dict2 dictID=100 maxdict=140000" -./main in=../../../lib/common k=500 out=dict2 dictID=100 maxdict=140000 -zstd -be3 -D dict2 -r ../../../lib/common -q -echo "Building random dictionary with 2 sample sources" -./main in=../../../lib/common in=../../../lib/compress out=dict3 -zstd -be3 -D dict3 -r ../../../lib/common -q -echo "Removing dict1 dict2 dict3" -rm -f dict1 dict2 dict3 - -echo "Testing with invalid parameters, should fail" -! ./main r=10 diff --git a/contrib/externalSequenceProducer/.gitignore b/contrib/externalSequenceProducer/.gitignore new file mode 100644 index 00000000000..147710aee78 --- /dev/null +++ b/contrib/externalSequenceProducer/.gitignore @@ -0,0 +1,2 @@ +# build artifacts +externalSequenceProducer diff --git a/contrib/externalSequenceProducer/Makefile b/contrib/externalSequenceProducer/Makefile new file mode 100644 index 00000000000..0591ae01ba5 --- /dev/null +++ b/contrib/externalSequenceProducer/Makefile @@ -0,0 +1,40 @@ +# ################################################################ +# Copyright (c) Yann Collet, Meta Platforms, Inc. +# All rights reserved. +# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). +# ################################################################ + +PROGDIR = ../../programs +LIBDIR = ../../lib + +LIBZSTD = $(LIBDIR)/libzstd.a + +CPPFLAGS+= -I$(LIBDIR) -I$(LIBDIR)/compress -I$(LIBDIR)/common + +CFLAGS ?= -O3 +CFLAGS += -std=gnu99 +DEBUGFLAGS= -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ + -Wstrict-aliasing=1 -Wswitch-enum \ + -Wstrict-prototypes -Wundef -Wpointer-arith \ + -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ + -Wredundant-decls +CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS) + +default: externalSequenceProducer + +all: externalSequenceProducer + +externalSequenceProducer: sequence_producer.c main.c $(LIBZSTD) + $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ + +.PHONY: $(LIBZSTD) +$(LIBZSTD): + $(MAKE) -C $(LIBDIR) libzstd.a CFLAGS="$(CFLAGS)" + +clean: + $(RM) *.o + $(MAKE) -C $(LIBDIR) clean > /dev/null + $(RM) externalSequenceProducer diff --git a/contrib/externalSequenceProducer/README.md b/contrib/externalSequenceProducer/README.md new file mode 100644 index 00000000000..c16a170073b --- /dev/null +++ b/contrib/externalSequenceProducer/README.md @@ -0,0 +1,14 @@ +externalSequenceProducer +===================== + +`externalSequenceProducer` is a test tool for the Block-Level Sequence Producer API. +It demonstrates how to use the API to perform a simple round-trip test. + +A sample sequence producer is provided in sequence_producer.c, but the user can swap +this out with a different one if desired. 
The sample sequence producer implements +LZ parsing with a 1KB hashtable. Dictionary-based parsing is not currently supported. + +Command line : +``` +externalSequenceProducer filename +``` diff --git a/contrib/externalSequenceProducer/main.c b/contrib/externalSequenceProducer/main.c new file mode 100644 index 00000000000..c81e9bf4097 --- /dev/null +++ b/contrib/externalSequenceProducer/main.c @@ -0,0 +1,108 @@ +/* + * Copyright (c) Yann Collet, Meta Platforms, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include +#include +#include +#include + +#define ZSTD_STATIC_LINKING_ONLY +#include "zstd.h" +#include "zstd_errors.h" +#include "sequence_producer.h" // simpleSequenceProducer + +#define CHECK(res) \ +do { \ + if (ZSTD_isError(res)) { \ + printf("ERROR: %s\n", ZSTD_getErrorName(res)); \ + return 1; \ + } \ +} while (0) \ + +int main(int argc, char *argv[]) { + int retn = 0; + if (argc != 2) { + printf("Usage: externalSequenceProducer \n"); + return 1; + } + + ZSTD_CCtx* const zc = ZSTD_createCCtx(); + + int simpleSequenceProducerState = 0xdeadbeef; + + // Here is the crucial bit of code! + ZSTD_registerSequenceProducer( + zc, + &simpleSequenceProducerState, + simpleSequenceProducer + ); + + { + size_t const res = ZSTD_CCtx_setParameter(zc, ZSTD_c_enableSeqProducerFallback, 1); + CHECK(res); + } + + FILE *f = fopen(argv[1], "rb"); + assert(f); + { + int const ret = fseek(f, 0, SEEK_END); + assert(ret == 0); + } + size_t const srcSize = ftell(f); + { + int const ret = fseek(f, 0, SEEK_SET); + assert(ret == 0); + } + + char* const src = malloc(srcSize + 1); + assert(src); + { + size_t const ret = fread(src, srcSize, 1, f); + assert(ret == 1); + int const ret2 = fclose(f); + assert(ret2 == 0); + } + + size_t const dstSize = ZSTD_compressBound(srcSize); + char* const dst = malloc(dstSize); + assert(dst); + + size_t const cSize = ZSTD_compress2(zc, dst, dstSize, src, srcSize); + CHECK(cSize); + + char* const val = malloc(srcSize); + assert(val); + + { + size_t const res = ZSTD_decompress(val, srcSize, dst, cSize); + CHECK(res); + } + + if (memcmp(src, val, srcSize) == 0) { + printf("Compression and decompression were successful!\n"); + printf("Original size: %lu\n", srcSize); + printf("Compressed size: %lu\n", cSize); + } else { + printf("ERROR: input and validation buffers don't match!\n"); + for (size_t i = 0; i < srcSize; i++) { + if (src[i] != val[i]) { + printf("First bad index: %zu\n", i); + break; + } + } + retn = 1; + } + + ZSTD_freeCCtx(zc); + free(src); + free(dst); + free(val); + return retn; +} diff --git a/contrib/externalSequenceProducer/sequence_producer.c b/contrib/externalSequenceProducer/sequence_producer.c new file mode 100644 index 00000000000..60a2f9572b0 --- /dev/null +++ b/contrib/externalSequenceProducer/sequence_producer.c @@ -0,0 +1,80 @@ +/* + * Copyright (c) Yann Collet, Meta Platforms, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#include "zstd_compress_internal.h" +#include "sequence_producer.h" + +#define HSIZE 1024 +static U32 const HLOG = 10; +static U32 const MLS = 4; +static U32 const BADIDX = 0xffffffff; + +size_t simpleSequenceProducer( + void* sequenceProducerState, + ZSTD_Sequence* outSeqs, size_t outSeqsCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + int compressionLevel, + size_t windowSize +) { + const BYTE* const istart = (const BYTE*)src; + const BYTE* const iend = istart + srcSize; + const BYTE* ip = istart; + const BYTE* anchor = istart; + size_t seqCount = 0; + U32 hashTable[HSIZE]; + + (void)sequenceProducerState; + (void)dict; + (void)dictSize; + (void)outSeqsCapacity; + (void)compressionLevel; + + { int i; + for (i=0; i < HSIZE; i++) { + hashTable[i] = BADIDX; + } } + + while (ip + MLS < iend) { + size_t const hash = ZSTD_hashPtr(ip, HLOG, MLS); + U32 const matchIndex = hashTable[hash]; + hashTable[hash] = (U32)(ip - istart); + + if (matchIndex != BADIDX) { + const BYTE* const match = istart + matchIndex; + U32 const matchLen = (U32)ZSTD_count(ip, match, iend); + if (matchLen >= ZSTD_MINMATCH_MIN) { + U32 const litLen = (U32)(ip - anchor); + U32 const offset = (U32)(ip - match); + ZSTD_Sequence const seq = { + offset, litLen, matchLen, 0 + }; + + /* Note: it's crucial to stay within the window size! */ + if (offset <= windowSize) { + outSeqs[seqCount++] = seq; + ip += matchLen; + anchor = ip; + continue; + } + } + } + + ip++; + } + + { ZSTD_Sequence const finalSeq = { + 0, (U32)(iend - anchor), 0, 0 + }; + outSeqs[seqCount++] = finalSeq; + } + + return seqCount; +} diff --git a/contrib/externalSequenceProducer/sequence_producer.h b/contrib/externalSequenceProducer/sequence_producer.h new file mode 100644 index 00000000000..19f9982ac36 --- /dev/null +++ b/contrib/externalSequenceProducer/sequence_producer.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) Yann Collet, Meta Platforms, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef MATCHFINDER_H +#define MATCHFINDER_H + +#define ZSTD_STATIC_LINKING_ONLY +#include "zstd.h" + +size_t simpleSequenceProducer( + void* sequenceProducerState, + ZSTD_Sequence* outSeqs, size_t outSeqsCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + int compressionLevel, + size_t windowSize +); + +#endif diff --git a/contrib/freestanding_lib/freestanding.py b/contrib/freestanding_lib/freestanding.py new file mode 100755 index 00000000000..df6983245d6 --- /dev/null +++ b/contrib/freestanding_lib/freestanding.py @@ -0,0 +1,774 @@ +#!/usr/bin/env python3 +# ################################################################ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). +# You may select, at your option, one of the above-listed licenses. 
+# ########################################################################## + +import argparse +import contextlib +import os +import re +import shutil +import sys +from typing import Optional + + +INCLUDED_SUBDIRS = ["common", "compress", "decompress"] + +SKIPPED_FILES = [ + "common/mem.h", + "common/zstd_deps.h", + "common/pool.c", + "common/pool.h", + "common/threading.c", + "common/threading.h", + "common/zstd_trace.h", + "compress/zstdmt_compress.h", + "compress/zstdmt_compress.c", +] + +XXHASH_FILES = [ + "common/xxhash.c", + "common/xxhash.h", +] + + +class FileLines(object): + def __init__(self, filename): + self.filename = filename + with open(self.filename, "r") as f: + self.lines = f.readlines() + + def write(self): + with open(self.filename, "w") as f: + f.write("".join(self.lines)) + + +class PartialPreprocessor(object): + """ + Looks for simple ifdefs and ifndefs and replaces them. + Handles && and ||. + Has fancy logic to handle translating elifs to ifs. + Only looks for macros in the first part of the expression with no + parens. + Does not handle multi-line macros (only looks in first line). + """ + def __init__(self, defs: [(str, Optional[str])], replaces: [(str, str)], undefs: [str]): + MACRO_GROUP = r"(?P<macro>[a-zA-Z_][a-zA-Z_0-9]*)" + ELIF_GROUP = r"(?P<elif>el)?" + OP_GROUP = r"(?P<op>&&|\|\|)?" + + self._defs = {macro:value for macro, value in defs} + self._replaces = {macro:value for macro, value in replaces} + self._defs.update(self._replaces) + self._undefs = set(undefs) + + self._define = re.compile(r"\s*#\s*define") + self._if = re.compile(r"\s*#\s*if") + self._elif = re.compile(r"\s*#\s*(?P<elif>el)if") + self._else = re.compile(r"\s*#\s*(?P<else>else)") + self._endif = re.compile(r"\s*#\s*endif") + + self._ifdef = re.compile(fr"\s*#\s*if(?P<not>n)?def {MACRO_GROUP}\s*") + self._if_defined = re.compile( + fr"\s*#\s*{ELIF_GROUP}if\s+(?P<not>!)?\s*defined\s*\(\s*{MACRO_GROUP}\s*\)\s*{OP_GROUP}" + ) + self._if_defined_value = re.compile( + fr"\s*#\s*{ELIF_GROUP}if\s+defined\s*\(\s*{MACRO_GROUP}\s*\)\s*" + fr"(?P<op>&&)\s*" + fr"(?P<openp>\()?\s*" + fr"(?P<macro2>[a-zA-Z_][a-zA-Z_0-9]*)\s*" + fr"(?P<cmp>[=><!]+)\s*" + fr"(?P<value>[0-9]*)\s*" + fr"(?P<closep>\))?\s*" + ) + self._if_true = re.compile( + fr"\s*#\s*{ELIF_GROUP}if\s+{MACRO_GROUP}\s*{OP_GROUP}" + ) + + self._c_comment = re.compile(r"/\*.*?\*/") + self._cpp_comment = re.compile(r"//") + + def _log(self, *args, **kwargs): + print(*args, **kwargs) + + def _strip_comments(self, line): + # First strip c-style comments (may include //) + while True: + m = self._c_comment.search(line) + if m is None: + break + line = line[:m.start()] + line[m.end():] + + # Then strip cpp-style comments + m = self._cpp_comment.search(line) + if m is not None: + line = line[:m.start()] + + return line + + def _fixup_indentation(self, macro, replace: [str]): + if len(replace) == 0: + return replace + if len(replace) == 1 and self._define.match(replace[0]) is None: + # If there is only one line, only replace defines + return replace + + + all_pound = True + for line in replace: + if not line.startswith('#'): + all_pound = False + if all_pound: + replace = [line[1:] for line in replace] + + min_spaces = len(replace[0]) + for line in replace: + spaces = 0 + for i, c in enumerate(line): + if c != ' ': + # Non-preprocessor line ==> skip the fixup + if not all_pound and c != '#': + return replace + spaces = i + break + min_spaces = min(min_spaces, spaces) + + replace = [line[min_spaces:] for line in replace] + + if all_pound: + replace = ["#" + line for line in replace] + + return replace + + def _handle_if_block(self, macro, 
idx, is_true, prepend): + """ + Remove the #if or #elif block starting on this line. + """ + REMOVE_ONE = 0 + KEEP_ONE = 1 + REMOVE_REST = 2 + + if is_true: + state = KEEP_ONE + else: + state = REMOVE_ONE + + line = self._inlines[idx] + is_if = self._if.match(line) is not None + assert is_if or self._elif.match(line) is not None + depth = 0 + + start_idx = idx + + idx += 1 + replace = prepend + finished = False + while idx < len(self._inlines): + line = self._inlines[idx] + # Nested if statement + if self._if.match(line): + depth += 1 + idx += 1 + continue + # We're inside a nested statement + if depth > 0: + if self._endif.match(line): + depth -= 1 + idx += 1 + continue + + # We're at the original depth + + # Looking only for an endif. + # We've found a true statement, but haven't + # completely elided the if block, so we just + # remove the remainder. + if state == REMOVE_REST: + if self._endif.match(line): + if is_if: + # Remove the endif because we took the first if + idx += 1 + finished = True + break + idx += 1 + continue + + if state == KEEP_ONE: + m = self._elif.match(line) + if self._endif.match(line): + replace += self._inlines[start_idx + 1:idx] + idx += 1 + finished = True + break + if self._elif.match(line) or self._else.match(line): + replace += self._inlines[start_idx + 1:idx] + state = REMOVE_REST + idx += 1 + continue + + if state == REMOVE_ONE: + m = self._elif.match(line) + if m is not None: + if is_if: + idx += 1 + b = m.start('elif') + e = m.end('elif') + assert e - b == 2 + replace.append(line[:b] + line[e:]) + finished = True + break + m = self._else.match(line) + if m is not None: + if is_if: + idx += 1 + while self._endif.match(self._inlines[idx]) is None: + replace.append(self._inlines[idx]) + idx += 1 + idx += 1 + finished = True + break + if self._endif.match(line): + if is_if: + # Remove the endif because no other elifs + idx += 1 + finished = True + break + idx += 1 + continue + if not finished: + raise RuntimeError("Unterminated if block!") + + replace = self._fixup_indentation(macro, replace) + + self._log(f"\tHardwiring {macro}") + if start_idx > 0: + self._log(f"\t\t {self._inlines[start_idx - 1][:-1]}") + for x in range(start_idx, idx): + self._log(f"\t\t- {self._inlines[x][:-1]}") + for line in replace: + self._log(f"\t\t+ {line[:-1]}") + if idx < len(self._inlines): + self._log(f"\t\t {self._inlines[idx][:-1]}") + + return idx, replace + + def _preprocess_once(self): + outlines = [] + idx = 0 + changed = False + while idx < len(self._inlines): + line = self._inlines[idx] + sline = self._strip_comments(line) + m = self._ifdef.fullmatch(sline) + if_true = False + if m is None: + m = self._if_defined_value.fullmatch(sline) + if m is None: + m = self._if_defined.match(sline) + if m is None: + m = self._if_true.match(sline) + if_true = (m is not None) + if m is None: + outlines.append(line) + idx += 1 + continue + + groups = m.groupdict() + macro = groups['macro'] + op = groups.get('op') + + if not (macro in self._defs or macro in self._undefs): + outlines.append(line) + idx += 1 + continue + + defined = macro in self._defs + + # Needed variables set: + # resolved: Is the statement fully resolved? + # is_true: If resolved, is the statement true? 
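To make the elision logic above concrete, here is the kind of rewrite `_handle_if_block()` performs when a macro is hardwired to 0 (hypothetical C input and output; `legacyDecode`/`modernDecode` are made-up names):

```c
/* Before, with ZSTD_LEGACY_SUPPORT hardwired to 0: */
#if defined(ZSTD_LEGACY_SUPPORT) && ZSTD_LEGACY_SUPPORT >= 1
    result = legacyDecode(dst, src);
#else
    result = modernDecode(dst, src);
#endif

/* After: the condition resolves to false, so only the else-branch survives: */
result = modernDecode(dst, src);
```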
+ ifdef = False + if if_true: + if not defined: + outlines.append(line) + idx += 1 + continue + + defined_value = self._defs[macro] + is_int = True + try: + defined_value = int(defined_value) + except TypeError: + is_int = False + except ValueError: + is_int = False + + resolved = is_int + is_true = (defined_value != 0) + + if resolved and op is not None: + if op == '&&': + resolved = not is_true + else: + assert op == '||' + resolved = is_true + + else: + ifdef = groups.get('not') is None + elseif = groups.get('elif') is not None + + macro2 = groups.get('macro2') + cmp = groups.get('cmp') + value = groups.get('value') + openp = groups.get('openp') + closep = groups.get('closep') + + is_true = (ifdef == defined) + resolved = True + if op is not None: + if op == '&&': + resolved = not is_true + else: + assert op == '||' + resolved = is_true + + if macro2 is not None and not resolved: + assert ifdef and defined and op == '&&' and cmp is not None + # If the statement is true, but we have a single value check, then + # check the value. + defined_value = self._defs[macro] + are_ints = True + try: + defined_value = int(defined_value) + value = int(value) + except TypeError: + are_ints = False + except ValueError: + are_ints = False + if ( + macro == macro2 and + ((openp is None) == (closep is None)) and + are_ints + ): + resolved = True + if cmp == '<': + is_true = defined_value < value + elif cmp == '<=': + is_true = defined_value <= value + elif cmp == '==': + is_true = defined_value == value + elif cmp == '!=': + is_true = defined_value != value + elif cmp == '>=': + is_true = defined_value >= value + elif cmp == '>': + is_true = defined_value > value + else: + resolved = False + + if op is not None and not resolved: + # Remove the first op in the line + spaces + if op == '&&': + opre = op + else: + assert op == '||' + opre = r'\|\|' + needle = re.compile(fr"(?P<if>\s*#\s*(el)?if\s+).*?(?P<op>{opre}\s*)") + match = needle.match(line) + assert match is not None + newline = line[:match.end('if')] + line[match.end('op'):] + + self._log(f"\tHardwiring partially resolved {macro}") + self._log(f"\t\t- {line[:-1]}") + self._log(f"\t\t+ {newline[:-1]}") + + outlines.append(newline) + idx += 1 + continue + + # Skip any statements we cannot fully compute + if not resolved: + outlines.append(line) + idx += 1 + continue + + prepend = [] + if macro in self._replaces: + assert not ifdef + assert op is None + value = self._replaces.pop(macro) + prepend = [f"#define {macro} {value}\n"] + + idx, replace = self._handle_if_block(macro, idx, is_true, prepend) + outlines += replace + changed = True + + return changed, outlines + + def preprocess(self, filename): + with open(filename, 'r') as f: + self._inlines = f.readlines() + changed = True + iters = 0 + while changed: + iters += 1 + changed, outlines = self._preprocess_once() + self._inlines = outlines + + with open(filename, 'w') as f: + f.write(''.join(self._inlines)) + + +class Freestanding(object): + def __init__( + self, zstd_deps: str, mem: str, source_lib: str, output_lib: str, + external_xxhash: bool, xxh64_state: Optional[str], + xxh64_prefix: Optional[str], rewritten_includes: [(str, str)], + defs: [(str, Optional[str])], replaces: [(str, str)], + undefs: [str], excludes: [str], seds: [str], spdx: bool, + ): + self._zstd_deps = zstd_deps + self._mem = mem + self._src_lib = source_lib + self._dst_lib = output_lib + self._external_xxhash = external_xxhash + self._xxh64_state = xxh64_state + self._xxh64_prefix = xxh64_prefix + self._rewritten_includes = 
+        self._defs = defs
+        self._replaces = replaces
+        self._undefs = undefs
+        self._excludes = excludes
+        self._seds = seds
+        self._spdx = spdx
+
+    def _dst_lib_file_paths(self):
+        """
+        Yields all the file paths in the dst_lib.
+        """
+        for root, dirname, filenames in os.walk(self._dst_lib):
+            for filename in filenames:
+                filepath = os.path.join(root, filename)
+                yield filepath
+
+    def _log(self, *args, **kwargs):
+        print(*args, **kwargs)
+
+    def _copy_file(self, lib_path):
+        suffixes = [".c", ".h", ".S"]
+        if not any((lib_path.endswith(suffix) for suffix in suffixes)):
+            return
+        if lib_path in SKIPPED_FILES:
+            self._log(f"\tSkipping file: {lib_path}")
+            return
+        if self._external_xxhash and lib_path in XXHASH_FILES:
+            self._log(f"\tSkipping xxhash file: {lib_path}")
+            return
+
+        src_path = os.path.join(self._src_lib, lib_path)
+        dst_path = os.path.join(self._dst_lib, lib_path)
+        self._log(f"\tCopying: {src_path} -> {dst_path}")
+        shutil.copyfile(src_path, dst_path)
+
+    def _copy_source_lib(self):
+        self._log("Copying source library into output library")
+
+        assert os.path.exists(self._src_lib)
+        os.makedirs(self._dst_lib, exist_ok=True)
+        self._copy_file("zstd.h")
+        self._copy_file("zstd_errors.h")
+        for subdir in INCLUDED_SUBDIRS:
+            src_dir = os.path.join(self._src_lib, subdir)
+            dst_dir = os.path.join(self._dst_lib, subdir)
+
+            assert os.path.exists(src_dir)
+            os.makedirs(dst_dir, exist_ok=True)
+
+            for filename in os.listdir(src_dir):
+                lib_path = os.path.join(subdir, filename)
+                self._copy_file(lib_path)
+
+    def _copy_zstd_deps(self):
+        dst_zstd_deps = os.path.join(self._dst_lib, "common", "zstd_deps.h")
+        self._log(f"Copying zstd_deps: {self._zstd_deps} -> {dst_zstd_deps}")
+        shutil.copyfile(self._zstd_deps, dst_zstd_deps)
+
+    def _copy_mem(self):
+        dst_mem = os.path.join(self._dst_lib, "common", "mem.h")
+        self._log(f"Copying mem: {self._mem} -> {dst_mem}")
+        shutil.copyfile(self._mem, dst_mem)
+ """ + assert not (undef and value is not None) + for filepath in self._dst_lib_file_paths(): + file = FileLines(filepath) + + def _hardwire_defines(self): + self._log("Hardwiring macros") + partial_preprocessor = PartialPreprocessor(self._defs, self._replaces, self._undefs) + for filepath in self._dst_lib_file_paths(): + partial_preprocessor.preprocess(filepath) + + def _remove_excludes(self): + self._log("Removing excluded sections") + for exclude in self._excludes: + self._log(f"\tRemoving excluded sections for: {exclude}") + begin_re = re.compile(f"BEGIN {exclude}") + end_re = re.compile(f"END {exclude}") + for filepath in self._dst_lib_file_paths(): + file = FileLines(filepath) + outlines = [] + skipped = [] + emit = True + for line in file.lines: + if emit and begin_re.search(line) is not None: + assert end_re.search(line) is None + emit = False + if emit: + outlines.append(line) + else: + skipped.append(line) + if end_re.search(line) is not None: + assert begin_re.search(line) is None + self._log(f"\t\tRemoving excluded section: {exclude}") + for s in skipped: + self._log(f"\t\t\t- {s}") + emit = True + skipped = [] + if not emit: + raise RuntimeError("Excluded section unfinished!") + file.lines = outlines + file.write() + + def _rewrite_include(self, original, rewritten): + self._log(f"\tRewriting include: {original} -> {rewritten}") + regex = re.compile(f"\\s*#\\s*include\\s*(?P{original})") + for filepath in self._dst_lib_file_paths(): + file = FileLines(filepath) + for i, line in enumerate(file.lines): + match = regex.match(line) + if match is None: + continue + s = match.start('include') + e = match.end('include') + file.lines[i] = line[:s] + rewritten + line[e:] + file.write() + + def _rewrite_includes(self): + self._log("Rewriting includes") + for original, rewritten in self._rewritten_includes: + self._rewrite_include(original, rewritten) + + def _replace_xxh64_prefix(self): + if self._xxh64_prefix is None: + return + self._log(f"Replacing XXH64 prefix with {self._xxh64_prefix}") + replacements = [] + if self._xxh64_state is not None: + replacements.append( + (re.compile(r"([^\w]|^)(?PXXH64_state_t)([^\w]|$)"), self._xxh64_state) + ) + if self._xxh64_prefix is not None: + replacements.append( + (re.compile(r"([^\w]|^)(?PXXH64)[\(_]"), self._xxh64_prefix) + ) + for filepath in self._dst_lib_file_paths(): + file = FileLines(filepath) + for i, line in enumerate(file.lines): + modified = False + for regex, replacement in replacements: + match = regex.search(line) + while match is not None: + modified = True + b = match.start('orig') + e = match.end('orig') + line = line[:b] + replacement + line[e:] + match = regex.search(line) + if modified: + self._log(f"\t- {file.lines[i][:-1]}") + self._log(f"\t+ {line[:-1]}") + file.lines[i] = line + file.write() + + def _parse_sed(self, sed): + assert sed[0] == 's' + delim = sed[1] + match = re.fullmatch(f's{delim}(.+){delim}(.*){delim}(.*)', sed) + assert match is not None + regex = re.compile(match.group(1)) + format_str = match.group(2) + is_global = match.group(3) == 'g' + return regex, format_str, is_global + + def _process_sed(self, sed): + self._log(f"Processing sed: {sed}") + regex, format_str, is_global = self._parse_sed(sed) + + for filepath in self._dst_lib_file_paths(): + file = FileLines(filepath) + for i, line in enumerate(file.lines): + modified = False + while True: + match = regex.search(line) + if match is None: + break + replacement = format_str.format(match.groups(''), match.groupdict('')) + b = match.start() + e = 
+
+    def _process_seds(self):
+        self._log("Processing seds")
+        for sed in self._seds:
+            self._process_sed(sed)
+
+    def _process_spdx(self):
+        if not self._spdx:
+            return
+        self._log("Processing spdx")
+        SPDX_C = "// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause\n"
+        SPDX_H_S = "/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */\n"
+        for filepath in self._dst_lib_file_paths():
+            file = FileLines(filepath)
+            if file.lines[0] == SPDX_C or file.lines[0] == SPDX_H_S:
+                continue
+            for line in file.lines:
+                if "SPDX-License-Identifier" in line:
+                    raise RuntimeError(f"Unexpected SPDX license identifier: {file.filename} {repr(line)}")
+            if file.filename.endswith(".c"):
+                file.lines.insert(0, SPDX_C)
+            elif file.filename.endswith(".h") or file.filename.endswith(".S"):
+                file.lines.insert(0, SPDX_H_S)
+            else:
+                raise RuntimeError(f"Unexpected file extension: {file.filename}")
+            file.write()
+
+    def go(self):
+        self._copy_source_lib()
+        self._copy_zstd_deps()
+        self._copy_mem()
+        self._hardwire_defines()
+        self._remove_excludes()
+        self._rewrite_includes()
+        self._replace_xxh64_prefix()
+        self._process_seds()
+        self._process_spdx()
+
+
+def parse_optional_pair(defines: [str]) -> [(str, Optional[str])]:
+    output = []
+    for define in defines:
+        parsed = define.split('=')
+        if len(parsed) == 1:
+            output.append((parsed[0], None))
+        elif len(parsed) == 2:
+            output.append((parsed[0], parsed[1]))
+        else:
+            raise RuntimeError(f"Bad define: {define}")
+    return output
+
+
+def parse_pair(rewritten_includes: [str]) -> [(str, str)]:
+    output = []
+    for rewritten_include in rewritten_includes:
+        parsed = rewritten_include.split('=')
+        if len(parsed) == 2:
+            output.append((parsed[0], parsed[1]))
+        else:
+            raise RuntimeError(f"Bad rewritten include: {rewritten_include}")
+    return output
+
+
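+# For example, parse_optional_pair(["ZSTD_TRACE=0", "ZSTD_NO_TRACE"]) returns
+# [("ZSTD_TRACE", "0"), ("ZSTD_NO_TRACE", None)]: a bare macro name is
+# recorded as defined, with no particular value.
+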
+def main(name, args):
+    parser = argparse.ArgumentParser(prog=name)
+    parser.add_argument("--zstd-deps", default="zstd_deps.h", help="Zstd dependencies file")
+    parser.add_argument("--mem", default="mem.h", help="Memory module")
+    parser.add_argument("--source-lib", default="../../lib", help="Location of the zstd library")
+    parser.add_argument("--output-lib", default="./freestanding_lib", help="Where to output the freestanding zstd library")
+    parser.add_argument("--xxhash", default=None, help="Alternate external xxhash include e.g. --xxhash='<xxhash.h>'. If set xxhash is not included.")
+    parser.add_argument("--xxh64-state", default=None, help="Alternate XXH64 state type (excluding _) e.g. --xxh64-state='struct xxh64_state'")
+    parser.add_argument("--xxh64-prefix", default=None, help="Alternate XXH64 function prefix (excluding _) e.g. --xxh64-prefix=xxh64")
+    parser.add_argument("--rewrite-include", default=[], dest="rewritten_includes", action="append", help="Rewrite an include REGEX=NEW (e.g. '<stddef\\.h>=<linux/types.h>')")
+    parser.add_argument("--sed", default=[], dest="seds", action="append", help="Apply a sed replacement. Format: `s/REGEX/FORMAT/[g]`. REGEX is a Python regex. FORMAT is a Python format string formatted by the regex dict.")
+    parser.add_argument("--spdx", action="store_true", help="Add SPDX License Identifiers")
+    parser.add_argument("-D", "--define", default=[], dest="defs", action="append", help="Pre-define this macro (can be passed multiple times)")
+    parser.add_argument("-U", "--undefine", default=[], dest="undefs", action="append", help="Pre-undefine this macro (can be passed multiple times)")
+    parser.add_argument("-R", "--replace", default=[], dest="replaces", action="append", help="Pre-define this macro and replace the first ifndef block with its definition")
+    parser.add_argument("-E", "--exclude", default=[], dest="excludes", action="append", help="Exclude all lines between 'BEGIN <MACRO>' and 'END <MACRO>'")
+    args = parser.parse_args(args)
+
+    # Always remove threading
+    if "ZSTD_MULTITHREAD" not in args.undefs:
+        args.undefs.append("ZSTD_MULTITHREAD")
+
+    args.defs = parse_optional_pair(args.defs)
+    for name, _ in args.defs:
+        if name in args.undefs:
+            raise RuntimeError(f"{name} is both defined and undefined!")
+
+    # Always set tracing to 0
+    if "ZSTD_NO_TRACE" not in (arg[0] for arg in args.defs):
+        args.defs.append(("ZSTD_NO_TRACE", None))
+        args.defs.append(("ZSTD_TRACE", "0"))
+
+    args.replaces = parse_pair(args.replaces)
+    for name, _ in args.replaces:
+        if name in args.undefs or name in args.defs:
+            raise RuntimeError(f"{name} is both replaced and (un)defined!")
+
+    args.rewritten_includes = parse_pair(args.rewritten_includes)
+
+    external_xxhash = False
+    if args.xxhash is not None:
+        external_xxhash = True
+        args.rewritten_includes.append(('"(\\.\\./common/)?xxhash.h"', args.xxhash))
+
+    if args.xxh64_prefix is not None:
+        if not external_xxhash:
+            raise RuntimeError("--xxh64-prefix may only be used with --xxhash provided")
+
+    if args.xxh64_state is not None:
+        if not external_xxhash:
+            raise RuntimeError("--xxh64-state may only be used with --xxhash provided")
+
+    Freestanding(
+        args.zstd_deps,
+        args.mem,
+        args.source_lib,
+        args.output_lib,
+        external_xxhash,
+        args.xxh64_state,
+        args.xxh64_prefix,
+        args.rewritten_includes,
+        args.defs,
+        args.replaces,
+        args.undefs,
+        args.excludes,
+        args.seds,
+        args.spdx,
+    ).go()
+
+if __name__ == "__main__":
+    main(sys.argv[0], sys.argv[1:])
diff --git a/contrib/gen_html/Makefile b/contrib/gen_html/Makefile
index 425f266c4e4..f93669e023f 100644
--- a/contrib/gen_html/Makefile
+++ b/contrib/gen_html/Makefile
@@ -1,5 +1,5 @@
 # ################################################################
-# Copyright (c) 2016-present, Facebook, Inc.
+# Copyright (c) Meta Platforms, Inc. and affiliates.
 # All rights reserved.
 #
 # This source code is licensed under both the BSD-style license (found in the
@@ -40,7 +40,7 @@ gen_html: gen_html.cpp
 
 $(ZSTDMANUAL): gen_html $(ZSTDAPI)
 	echo "Update zstd manual in /doc"
-	./gen_html $(LIBVER) $(ZSTDAPI) $(ZSTDMANUAL)
+	./gen_html$(EXT) $(LIBVER) $(ZSTDAPI) $(ZSTDMANUAL)
 
 .PHONY: manual
 manual: gen_html $(ZSTDMANUAL)
diff --git a/contrib/gen_html/gen_html.cpp b/contrib/gen_html/gen_html.cpp
index 90d5b21a3aa..adf0f41b905 100644
--- a/contrib/gen_html/gen_html.cpp
+++ b/contrib/gen_html/gen_html.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-present, Przemyslaw Skibinski, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -211,6 +211,7 @@ int main(int argc, char *argv[]) {
     ostream << "<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=ISO-8859-1\">\n<title>" << version << "</title>\n</head>\n<body>" << endl;
     ostream << "<h1>" << version << "</h1>\n";
+    ostream << "<h4>Note: the content of this file has been automatically generated by parsing \"zstd.h\" </h4>\n";
     ostream << "<hr>\n<a name=\"Contents\"></a><h2>Contents</h2>\n<ol>
    \n"; for (size_t i=0; i /* size_t */ -#include /* malloc, free, abort */ +#include /* malloc, free, abort, qsort*/ #include /* fprintf */ #include /* UINT_MAX */ #include /* assert */ @@ -39,6 +39,7 @@ #define BLOCKSIZE_DEFAULT 0 /* no slicing into blocks */ #define DICTSIZE (4 KB) #define CLEVEL_DEFAULT 3 +#define DICT_LOAD_METHOD ZSTD_dlm_byCopy #define BENCH_TIME_DEFAULT_S 6 #define RUN_TIME_DEFAULT_MS 1000 @@ -156,7 +157,6 @@ createDictionaryBuffer(const char* dictionaryName, } } - /*! BMK_loadFiles() : * Loads `buffer`, with content from files listed within `fileNamesTable`. * Fills `buffer` entirely. @@ -334,6 +334,42 @@ createBufferCollection_fromSliceCollectionSizes(slice_collection_t sc) return result; } +static buffer_collection_t +createBufferCollection_fromSliceCollection(slice_collection_t sc) +{ + size_t const bufferSize = sliceCollection_totalCapacity(sc); + + buffer_t buffer = createBuffer(bufferSize); + CONTROL(buffer.ptr != NULL); + + size_t const nbSlices = sc.nbSlices; + void** const slices = (void**)malloc(nbSlices * sizeof(*slices)); + CONTROL(slices != NULL); + + size_t* const capacities = (size_t*)malloc(nbSlices * sizeof(*capacities)); + CONTROL(capacities != NULL); + + char* const ptr = (char*)buffer.ptr; + size_t pos = 0; + for (size_t n=0; n < nbSlices; n++) { + capacities[n] = sc.capacities[n]; + slices[n] = ptr + pos; + pos += capacities[n]; + } + + for (size_t i = 0; i < nbSlices; i++) { + memcpy(slices[i], sc.slicePtrs[i], sc.capacities[i]); + capacities[i] = sc.capacities[i]; + } + + buffer_collection_t result; + result.buffer = buffer; + result.slices.nbSlices = nbSlices; + result.slices.capacities = capacities; + result.slices.slicePtrs = slices; + + return result; +} /* @return : kBuffNull if any error */ static buffer_collection_t @@ -397,6 +433,36 @@ typedef struct { size_t nbDDict; } ddict_collection_t; +typedef struct { + ZSTD_CDict** cdicts; + size_t nbCDict; +} cdict_collection_t; + +static const cdict_collection_t kNullCDictCollection = { NULL, 0 }; + +static void freeCDictCollection(cdict_collection_t cdictc) +{ + for (size_t dictNb=0; dictNb < cdictc.nbCDict; dictNb++) { + ZSTD_freeCDict(cdictc.cdicts[dictNb]); + } + free(cdictc.cdicts); +} + +/* returns .buffers=NULL if operation fails */ +static cdict_collection_t createCDictCollection(const void* dictBuffer, size_t dictSize, size_t nbCDict, ZSTD_dictContentType_e dictContentType, ZSTD_CCtx_params* cctxParams) +{ + ZSTD_CDict** const cdicts = malloc(nbCDict * sizeof(ZSTD_CDict*)); + if (cdicts==NULL) return kNullCDictCollection; + for (size_t dictNb=0; dictNb < nbCDict; dictNb++) { + cdicts[dictNb] = ZSTD_createCDict_advanced2(dictBuffer, dictSize, DICT_LOAD_METHOD, dictContentType, cctxParams, ZSTD_defaultCMem); + CONTROL(cdicts[dictNb] != NULL); + } + cdict_collection_t cdictc; + cdictc.cdicts = cdicts; + cdictc.nbCDict = nbCDict; + return cdictc; +} + static const ddict_collection_t kNullDDictCollection = { NULL, 0 }; static void freeDDictCollection(ddict_collection_t ddictc) @@ -425,18 +491,37 @@ static ddict_collection_t createDDictCollection(const void* dictBuffer, size_t d /* mess with addresses, so that linear scanning dictionaries != linear address scanning */ -void shuffleDictionaries(ddict_collection_t dicts) +void shuffleCDictionaries(cdict_collection_t dicts) +{ + size_t const nbDicts = dicts.nbCDict; + for (size_t r=0; rcctx, ci->dictionaries.cdicts[ci->dictNb]); + ZSTD_compress2(ci->cctx, + dst, srcSize, + src, srcSize); + + ci->dictNb = ci->dictNb + 1; + if (ci->dictNb >= 
ci->nbDicts) ci->dictNb = 0; + + return srcSize; +} + /* benched function */ size_t decompress(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* payload) { @@ -524,25 +651,59 @@ size_t decompress(const void* src, size_t srcSize, void* dst, size_t dstCapacity return result; } +typedef enum { + fastest = 0, + median = 1, +} metricAggregatePref_e; -static int benchMem(slice_collection_t dstBlocks, - slice_collection_t srcBlocks, - ddict_collection_t dictionaries, - int nbRounds) +/* compareFunction() : + * Sort input in decreasing order when used with qsort() */ +int compareFunction(const void *a, const void *b) +{ + double x = *(const double *)a; + double y = *(const double *)b; + if (x < y) + return 1; + else if (x > y) + return -1; + return 0; +} + +double aggregateData(double *data, size_t size, + metricAggregatePref_e metricAggregatePref) +{ + qsort(data, size, sizeof(*data), compareFunction); + if (metricAggregatePref == fastest) + return data[0]; + else /* median */ + return (data[(size - 1) / 2] + data[size / 2]) / 2; +} + +static int benchMem(slice_collection_t dstBlocks, slice_collection_t srcBlocks, + ddict_collection_t ddictionaries, + cdict_collection_t cdictionaries, unsigned nbRounds, + int benchCompression, const char *exeName, + ZSTD_CCtx_params *cctxParams, + metricAggregatePref_e metricAggregatePref) { assert(dstBlocks.nbSlices == srcBlocks.nbSlices); + if (benchCompression) assert(cctxParams); unsigned const ms_per_round = RUN_TIME_DEFAULT_MS; unsigned const total_time_ms = nbRounds * ms_per_round; - double bestSpeed = 0.; + double *const speedPerRound = (double *)malloc(nbRounds * sizeof(double)); BMK_timedFnState_t* const benchState = BMK_createTimedFnState(total_time_ms, ms_per_round); - decompressInstructions di = createDecompressInstructions(dictionaries); + + decompressInstructions di = createDecompressInstructions(ddictionaries); + compressInstructions ci = + createCompressInstructions(cdictionaries, cctxParams); + void* payload = benchCompression ? (void*)&ci : (void*)&di; BMK_benchParams_t const bp = { - .benchFn = decompress, - .benchPayload = &di, + .benchFn = benchCompression ? 
compress : decompress, + .benchPayload = payload, .initFn = NULL, .initPayload = NULL, .errorFn = ZSTD_isError, @@ -554,23 +715,72 @@ static int benchMem(slice_collection_t dstBlocks, .blockResults = NULL }; + size_t roundNb = 0; for (;;) { BMK_runOutcome_t const outcome = BMK_benchTimedFn(benchState, bp); CONTROL(BMK_isSuccessful_runOutcome(outcome)); BMK_runTime_t const result = BMK_extract_runTime(outcome); - U64 const dTime_ns = result.nanoSecPerRun; + double const dTime_ns = result.nanoSecPerRun; double const dTime_sec = (double)dTime_ns / 1000000000; size_t const srcSize = result.sumOfReturn; - double const dSpeed_MBps = (double)srcSize / dTime_sec / (1 MB); - if (dSpeed_MBps > bestSpeed) bestSpeed = dSpeed_MBps; - DISPLAY("Decompression Speed : %.1f MB/s \r", bestSpeed); + double const speed_MBps = (double)srcSize / dTime_sec / (1 MB); + speedPerRound[roundNb] = speed_MBps; + if (benchCompression) + DISPLAY("Compression Speed : %.1f MB/s \r", speed_MBps); + else + DISPLAY("Decompression Speed : %.1f MB/s \r", speed_MBps); + fflush(stdout); if (BMK_isCompleted_TimedFn(benchState)) break; + roundNb++; } DISPLAY("\n"); + /* BMK_benchTimedFn may not run exactly nbRounds iterations */ + double speedAggregated = + aggregateData(speedPerRound, roundNb + 1, metricAggregatePref); + free(speedPerRound); + + if (metricAggregatePref == fastest) + DISPLAY("Fastest Speed : %.1f MB/s \n", speedAggregated); + else + DISPLAY("Median Speed : %.1f MB/s \n", speedAggregated); + + char* csvFileName = malloc(strlen(exeName) + 5); + strcpy(csvFileName, exeName); + strcat(csvFileName, ".csv"); + FILE* csvFile = fopen(csvFileName, "r"); + if (!csvFile) { + csvFile = fopen(csvFileName, "wt"); + assert(csvFile); + fprintf(csvFile, "%s\n", exeName); + /* Print table headers */ + fprintf( + csvFile, + "Compression/Decompression,Level,nbDicts,dictAttachPref,metricAggregatePref,Speed\n"); + } else { + fclose(csvFile); + csvFile = fopen(csvFileName, "at"); + assert(csvFile); + } + + int cLevel = -1; + int dictAttachPref = -1; + if (benchCompression) { + ZSTD_CCtxParams_getParameter(cctxParams, ZSTD_c_compressionLevel, + &cLevel); + ZSTD_CCtxParams_getParameter(cctxParams, ZSTD_c_forceAttachDict, + &dictAttachPref); + } + fprintf(csvFile, "%s,%d,%ld,%d,%d,%.1f\n", + benchCompression ? "Compression" : "Decompression", cLevel, + benchCompression ? ci.nbDicts : di.nbDicts, dictAttachPref, + metricAggregatePref, speedAggregated); + fclose(csvFile); + free(csvFileName); freeDecompressInstructions(di); + freeCompressInstructions(ci); BMK_freeTimedFnState(benchState); return 0; /* success */ @@ -582,11 +792,11 @@ static int benchMem(slice_collection_t dstBlocks, * dictionary : optional (can be NULL), file to load as dictionary, * if none provided : will be calculated on the fly by the program. 
* @return : 0 is success, 1+ otherwise */ -int bench(const char** fileNameTable, unsigned nbFiles, - const char* dictionary, - size_t blockSize, int clevel, - unsigned nbDictMax, unsigned nbBlocks, - int nbRounds) +int bench(const char **fileNameTable, unsigned nbFiles, const char *dictionary, + size_t blockSize, int clevel, unsigned nbDictMax, unsigned nbBlocks, + unsigned nbRounds, int benchCompression, + ZSTD_dictContentType_e dictContentType, ZSTD_CCtx_params *cctxParams, + const char *exeName, metricAggregatePref_e metricAggregatePref) { int result = 0; @@ -637,18 +847,18 @@ int bench(const char** fileNameTable, unsigned nbFiles, /* dictionary determination */ buffer_t const dictBuffer = createDictionaryBuffer(dictionary, srcs.buffer.ptr, - srcs.slices.capacities, srcs.slices.nbSlices, + srcSlices.capacities, srcSlices.nbSlices, DICTSIZE); CONTROL(dictBuffer.ptr != NULL); - ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer.ptr, dictBuffer.size, clevel); + ZSTD_CDict* const cdict = ZSTD_createCDict_advanced2(dictBuffer.ptr, dictBuffer.size, DICT_LOAD_METHOD, dictContentType, cctxParams, ZSTD_defaultCMem); CONTROL(cdict != NULL); size_t const cTotalSizeNoDict = compressBlocks(NULL, dstSlices, srcSlices, NULL, clevel); CONTROL(cTotalSizeNoDict != 0); DISPLAYLEVEL(3, "compressing at level %u without dictionary : Ratio=%.2f (%u bytes) \n", clevel, - (double)totalSrcSlicesSize / cTotalSizeNoDict, (unsigned)cTotalSizeNoDict); + (double)totalSrcSlicesSize / (double)cTotalSizeNoDict, (unsigned)cTotalSizeNoDict); size_t* const cSizes = malloc(nbBlocks * sizeof(size_t)); CONTROL(cSizes != NULL); @@ -657,30 +867,56 @@ int bench(const char** fileNameTable, unsigned nbFiles, CONTROL(cTotalSize != 0); DISPLAYLEVEL(3, "compressed using a %u bytes dictionary : Ratio=%.2f (%u bytes) \n", (unsigned)dictBuffer.size, - (double)totalSrcSlicesSize / cTotalSize, (unsigned)cTotalSize); + (double)totalSrcSlicesSize / (double)cTotalSize, (unsigned)cTotalSize); /* now dstSlices contain the real compressed size of each block, instead of the maximum capacity */ shrinkSizes(dstSlices, cSizes); - size_t const dictMem = ZSTD_estimateDDictSize(dictBuffer.size, ZSTD_dlm_byCopy); unsigned const nbDicts = nbDictMax ? 
nbDictMax : nbBlocks; - size_t const allDictMem = dictMem * nbDicts; - DISPLAYLEVEL(3, "generating %u dictionaries, using %.1f MB of memory \n", - nbDicts, (double)allDictMem / (1 MB)); - ddict_collection_t const dictionaries = createDDictCollection(dictBuffer.ptr, dictBuffer.size, nbDicts); - CONTROL(dictionaries.ddicts != NULL); + cdict_collection_t const cdictionaries = createCDictCollection(dictBuffer.ptr, dictBuffer.size, nbDicts, dictContentType, cctxParams); + CONTROL(cdictionaries.cdicts != NULL); + + ddict_collection_t const ddictionaries = createDDictCollection(dictBuffer.ptr, dictBuffer.size, nbDicts); + CONTROL(ddictionaries.ddicts != NULL); - shuffleDictionaries(dictionaries); + if (benchCompression) { + size_t const dictMem = ZSTD_sizeof_CDict(cdictionaries.cdicts[0]); + size_t const allDictMem = dictMem * nbDicts; + DISPLAYLEVEL(3, "generating %u dictionaries, using %.1f MB of memory \n", + nbDicts, (double)allDictMem / (1 MB)); - buffer_collection_t resultCollection = createBufferCollection_fromSliceCollectionSizes(srcSlices); - CONTROL(resultCollection.buffer.ptr != NULL); + shuffleCDictionaries(cdictionaries); - result = benchMem(resultCollection.slices, dstSlices, dictionaries, nbRounds); + buffer_collection_t resultCollection = createBufferCollection_fromSliceCollection(srcSlices); + CONTROL(resultCollection.buffer.ptr != NULL); + + result = benchMem(dstSlices, resultCollection.slices, ddictionaries, + cdictionaries, nbRounds, benchCompression, exeName, + cctxParams, metricAggregatePref); + + freeBufferCollection(resultCollection); + } else { + size_t const dictMem = ZSTD_estimateDDictSize(dictBuffer.size, DICT_LOAD_METHOD); + size_t const allDictMem = dictMem * nbDicts; + DISPLAYLEVEL(3, "generating %u dictionaries, using %.1f MB of memory \n", + nbDicts, (double)allDictMem / (1 MB)); + + shuffleDDictionaries(ddictionaries); + + buffer_collection_t resultCollection = createBufferCollection_fromSliceCollectionSizes(srcSlices); + CONTROL(resultCollection.buffer.ptr != NULL); + + result = benchMem(resultCollection.slices, dstSlices, ddictionaries, + cdictionaries, nbRounds, benchCompression, exeName, + NULL, metricAggregatePref); + + freeBufferCollection(resultCollection); + } /* free all heap objects in reverse order */ - freeBufferCollection(resultCollection); - freeDDictCollection(dictionaries); + freeCDictCollection(cdictionaries); + freeDDictCollection(ddictionaries); free(cSizes); ZSTD_freeCDict(cdict); freeBuffer(dictBuffer); @@ -707,7 +943,7 @@ static unsigned readU32FromChar(const char** stringPtr) while ((**stringPtr >='0') && (**stringPtr <='9')) { unsigned const max = (((unsigned)(-1)) / 10) - 1; assert(result <= max); /* check overflow */ - result *= 10, result += **stringPtr - '0', (*stringPtr)++ ; + result *= 10, result += (unsigned)**stringPtr - '0', (*stringPtr)++ ; } if ((**stringPtr=='K') || (**stringPtr=='M')) { unsigned const maxK = ((unsigned)(-1)) >> 10; @@ -729,7 +965,7 @@ static unsigned readU32FromChar(const char** stringPtr) * If yes, @return 1 and advances *stringPtr to the position which immediately follows longCommand. * @return 0 and doesn't modify *stringPtr otherwise. 
*/ -static unsigned longCommandWArg(const char** stringPtr, const char* longCommand) +static int longCommandWArg(const char** stringPtr, const char* longCommand) { size_t const comSize = strlen(longCommand); int const result = !strncmp(*stringPtr, longCommand, comSize); @@ -744,14 +980,22 @@ int usage(const char* exeName) DISPLAY (" %s [Options] filename(s) \n", exeName); DISPLAY (" \n"); DISPLAY ("Options : \n"); + DISPLAY ("-z : benchmark compression (default) \n"); + DISPLAY ("-d : benchmark decompression \n"); DISPLAY ("-r : recursively load all files in subdirectories (default: off) \n"); DISPLAY ("-B# : split input into blocks of size # (default: no split) \n"); DISPLAY ("-# : use compression level # (default: %u) \n", CLEVEL_DEFAULT); DISPLAY ("-D # : use # as a dictionary (default: create one) \n"); DISPLAY ("-i# : nb benchmark rounds (default: %u) \n", BENCH_TIME_DEFAULT_S); + DISPLAY ("-p# : print speed for all rounds 0=fastest 1=median (default: 0) \n"); DISPLAY ("--nbBlocks=#: use # blocks for bench (default: one per file) \n"); DISPLAY ("--nbDicts=# : create # dictionaries for bench (default: one per block) \n"); DISPLAY ("-h : help (this text) \n"); + DISPLAY (" \n"); + DISPLAY ("Advanced Options (see zstd.h for documentation) : \n"); + DISPLAY ("--dedicated-dict-search\n"); + DISPLAY ("--dict-content-type=#\n"); + DISPLAY ("--dict-attach-pref=#\n"); return 0; } @@ -765,12 +1009,14 @@ int bad_usage(const char* exeName) int main (int argc, const char** argv) { int recursiveMode = 0; - int nbRounds = BENCH_TIME_DEFAULT_S; + int benchCompression = 1; + int dedicatedDictSearch = 0; + unsigned nbRounds = BENCH_TIME_DEFAULT_S; const char* const exeName = argv[0]; if (argc < 2) return bad_usage(exeName); - const char** nameTable = (const char**)malloc(argc * sizeof(const char*)); + const char** nameTable = (const char**)malloc((size_t)argc * sizeof(const char*)); assert(nameTable != NULL); unsigned nameIdx = 0; @@ -779,39 +1025,63 @@ int main (int argc, const char** argv) size_t blockSize = BLOCKSIZE_DEFAULT; unsigned nbDicts = 0; /* determine nbDicts automatically: 1 dictionary per block */ unsigned nbBlocks = 0; /* determine nbBlocks automatically, from source and blockSize */ + ZSTD_dictContentType_e dictContentType = ZSTD_dct_auto; + ZSTD_dictAttachPref_e dictAttachPref = ZSTD_dictDefaultAttach; + ZSTD_ParamSwitch_e prefetchCDictTables = ZSTD_ps_auto; + metricAggregatePref_e metricAggregatePref = fastest; for (int argNb = 1; argNb < argc ; argNb++) { const char* argument = argv[argNb]; if (!strcmp(argument, "-h")) { free(nameTable); return usage(exeName); } + if (!strcmp(argument, "-d")) { benchCompression = 0; continue; } + if (!strcmp(argument, "-z")) { benchCompression = 1; continue; } if (!strcmp(argument, "-r")) { recursiveMode = 1; continue; } if (!strcmp(argument, "-D")) { argNb++; assert(argNb < argc); dictionary = argv[argNb]; continue; } if (longCommandWArg(&argument, "-i")) { nbRounds = readU32FromChar(&argument); continue; } + if (longCommandWArg(&argument, "-p")) { metricAggregatePref = (int)readU32FromChar(&argument); continue;} if (longCommandWArg(&argument, "--dictionary=")) { dictionary = argument; continue; } if (longCommandWArg(&argument, "-B")) { blockSize = readU32FromChar(&argument); continue; } if (longCommandWArg(&argument, "--blockSize=")) { blockSize = readU32FromChar(&argument); continue; } if (longCommandWArg(&argument, "--nbDicts=")) { nbDicts = readU32FromChar(&argument); continue; } if (longCommandWArg(&argument, "--nbBlocks=")) { nbBlocks = 
readU32FromChar(&argument); continue; } - if (longCommandWArg(&argument, "--clevel=")) { cLevel = readU32FromChar(&argument); continue; } - if (longCommandWArg(&argument, "-")) { cLevel = readU32FromChar(&argument); continue; } + if (longCommandWArg(&argument, "--clevel=")) { cLevel = (int)readU32FromChar(&argument); continue; } + if (longCommandWArg(&argument, "--dedicated-dict-search")) { dedicatedDictSearch = 1; continue; } + if (longCommandWArg(&argument, "--dict-content-type=")) { dictContentType = (int)readU32FromChar(&argument); continue; } + if (longCommandWArg(&argument, "--dict-attach-pref=")) { dictAttachPref = (int)readU32FromChar(&argument); continue; } + if (longCommandWArg(&argument, "--prefetch-cdict-tables=")) { prefetchCDictTables = (int)readU32FromChar(&argument); continue; } + if (longCommandWArg(&argument, "-")) { cLevel = (int)readU32FromChar(&argument); continue; } /* anything that's not a command is a filename */ nameTable[nameIdx++] = argument; } - const char** filenameTable = nameTable; - unsigned nbFiles = nameIdx; - char* buffer_containing_filenames = NULL; + FileNamesTable* filenameTable; if (recursiveMode) { #ifndef UTIL_HAS_CREATEFILELIST assert(0); /* missing capability, do not run */ #endif - filenameTable = UTIL_createFileList(nameTable, nameIdx, &buffer_containing_filenames, &nbFiles, 1 /* follow_links */); + filenameTable = UTIL_createExpandedFNT(nameTable, nameIdx, 1 /* follow_links */); + } else { + filenameTable = UTIL_assembleFileNamesTable(nameTable, nameIdx, NULL); + nameTable = NULL; /* UTIL_createFileNamesTable() takes ownership of nameTable */ } - int result = bench(filenameTable, nbFiles, dictionary, blockSize, cLevel, nbDicts, nbBlocks, nbRounds); + ZSTD_CCtx_params* cctxParams = ZSTD_createCCtxParams(); + ZSTD_CCtxParams_init(cctxParams, cLevel); + ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_enableDedicatedDictSearch, dedicatedDictSearch); + ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_nbWorkers, 0); + ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_forceAttachDict, dictAttachPref); + ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_prefetchCDictTables, prefetchCDictTables); + + int result = + bench(filenameTable->fileNames, (unsigned)filenameTable->tableSize, + dictionary, blockSize, cLevel, nbDicts, nbBlocks, nbRounds, + benchCompression, dictContentType, cctxParams, exeName, + metricAggregatePref); - free(buffer_containing_filenames); + UTIL_freeFileNamesTable(filenameTable); free(nameTable); + ZSTD_freeCCtxParams(cctxParams); return result; } diff --git a/contrib/linux-kernel/0000-cover-letter.patch b/contrib/linux-kernel/0000-cover-letter.patch deleted file mode 100644 index d57ef27e72a..00000000000 --- a/contrib/linux-kernel/0000-cover-letter.patch +++ /dev/null @@ -1,122 +0,0 @@ -From 308795a7713ca6fcd468b60fba9a2fca99cee6a0 Mon Sep 17 00:00:00 2001 -From: Nick Terrell -Date: Tue, 8 Aug 2017 19:20:25 -0700 -Subject: [PATCH v5 0/5] Add xxhash and zstd modules - -Hi all, - -This patch set adds xxhash, zstd compression, and zstd decompression -modules. It also adds zstd support to BtrFS and SquashFS. - -Each patch has relevant summaries, benchmarks, and tests. 
- -Best, -Nick Terrell - -Changelog: - -v1 -> v2: -- Make pointer in lib/xxhash.c:394 non-const (1/5) -- Use div_u64() for division of u64s (2/5) -- Reduce stack usage of ZSTD_compressSequences(), ZSTD_buildSeqTable(), - ZSTD_decompressSequencesLong(), FSE_buildDTable(), FSE_decompress_wksp(), - HUF_writeCTable(), HUF_readStats(), HUF_readCTable(), - HUF_compressWeights(), HUF_readDTableX2(), and HUF_readDTableX4() (2/5) -- No zstd function uses more than 400 B of stack space (2/5) - -v2 -> v3: -- Work around gcc-7 bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388 - (2/5) -- Fix bug in dictionary compression from upstream commit cc1522351f (2/5) -- Port upstream BtrFS commits e1ddce71d6, 389a6cfc2a, and 6acafd1eff (3/5) -- Change default compression level for BtrFS to 3 (3/5) - -v3 -> v4: -- Fix compiler warnings (2/5) -- Add missing includes (3/5) -- Fix minor linter warnings (3/5, 4/5) -- Add crypto patch (5/5) - -v4 -> v5: -- Fix rare compression bug from upstream commit 308047eb5d (2/5) -- Fix bug introduced in v3 when working around the gcc-7 bug (2/5) -- Fix ZSTD_DStream initialization code in squashfs (4/5) -- Fix patch documentation for patches written by Sean Purcell (4/5) - -Nick Terrell (5): - lib: Add xxhash module - lib: Add zstd modules - btrfs: Add zstd support - squashfs: Add zstd support - crypto: Add zstd support - - crypto/Kconfig | 9 + - crypto/Makefile | 1 + - crypto/testmgr.c | 10 + - crypto/testmgr.h | 71 + - crypto/zstd.c | 265 ++++ - fs/btrfs/Kconfig | 2 + - fs/btrfs/Makefile | 2 +- - fs/btrfs/compression.c | 1 + - fs/btrfs/compression.h | 6 +- - fs/btrfs/ctree.h | 1 + - fs/btrfs/disk-io.c | 2 + - fs/btrfs/ioctl.c | 6 +- - fs/btrfs/props.c | 6 + - fs/btrfs/super.c | 12 +- - fs/btrfs/sysfs.c | 2 + - fs/btrfs/zstd.c | 432 ++++++ - fs/squashfs/Kconfig | 14 + - fs/squashfs/Makefile | 1 + - fs/squashfs/decompressor.c | 7 + - fs/squashfs/decompressor.h | 4 + - fs/squashfs/squashfs_fs.h | 1 + - fs/squashfs/zstd_wrapper.c | 151 ++ - include/linux/xxhash.h | 236 +++ - include/linux/zstd.h | 1157 +++++++++++++++ - include/uapi/linux/btrfs.h | 8 +- - lib/Kconfig | 11 + - lib/Makefile | 3 + - lib/xxhash.c | 500 +++++++ - lib/zstd/Makefile | 18 + - lib/zstd/bitstream.h | 374 +++++ - lib/zstd/compress.c | 3484 ++++++++++++++++++++++++++++++++++++++++++++ - lib/zstd/decompress.c | 2528 ++++++++++++++++++++++++++++++++ - lib/zstd/entropy_common.c | 243 +++ - lib/zstd/error_private.h | 53 + - lib/zstd/fse.h | 575 ++++++++ - lib/zstd/fse_compress.c | 795 ++++++++++ - lib/zstd/fse_decompress.c | 332 +++++ - lib/zstd/huf.h | 212 +++ - lib/zstd/huf_compress.c | 770 ++++++++++ - lib/zstd/huf_decompress.c | 960 ++++++++++++ - lib/zstd/mem.h | 151 ++ - lib/zstd/zstd_common.c | 75 + - lib/zstd/zstd_internal.h | 263 ++++ - lib/zstd/zstd_opt.h | 1014 +++++++++++++ - 44 files changed, 14756 insertions(+), 12 deletions(-) - create mode 100644 crypto/zstd.c - create mode 100644 fs/btrfs/zstd.c - create mode 100644 fs/squashfs/zstd_wrapper.c - create mode 100644 include/linux/xxhash.h - create mode 100644 include/linux/zstd.h - create mode 100644 lib/xxhash.c - create mode 100644 lib/zstd/Makefile - create mode 100644 lib/zstd/bitstream.h - create mode 100644 lib/zstd/compress.c - create mode 100644 lib/zstd/decompress.c - create mode 100644 lib/zstd/entropy_common.c - create mode 100644 lib/zstd/error_private.h - create mode 100644 lib/zstd/fse.h - create mode 100644 lib/zstd/fse_compress.c - create mode 100644 lib/zstd/fse_decompress.c - create mode 100644 lib/zstd/huf.h - create mode 
100644 lib/zstd/huf_compress.c - create mode 100644 lib/zstd/huf_decompress.c - create mode 100644 lib/zstd/mem.h - create mode 100644 lib/zstd/zstd_common.c - create mode 100644 lib/zstd/zstd_internal.h - create mode 100644 lib/zstd/zstd_opt.h - --- -2.9.3 diff --git a/contrib/linux-kernel/0001-lib-Add-xxhash-module.patch b/contrib/linux-kernel/0001-lib-Add-xxhash-module.patch deleted file mode 100644 index 83f09924fdb..00000000000 --- a/contrib/linux-kernel/0001-lib-Add-xxhash-module.patch +++ /dev/null @@ -1,862 +0,0 @@ -From a4b1ffb6e89bbccd519f9afa0910635668436105 Mon Sep 17 00:00:00 2001 -From: Nick Terrell -Date: Mon, 17 Jul 2017 17:07:18 -0700 -Subject: [PATCH v5 1/5] lib: Add xxhash module - -Adds xxhash kernel module with xxh32 and xxh64 hashes. xxhash is an -extremely fast non-cryptographic hash algorithm for checksumming. -The zstd compression and decompression modules added in the next patch -require xxhash. I extracted it out from zstd since it is useful on its -own. I copied the code from the upstream XXHash source repository and -translated it into kernel style. I ran benchmarks and tests in the kernel -and tests in userland. - -I benchmarked xxhash as a special character device. I ran in four modes, -no-op, xxh32, xxh64, and crc32. The no-op mode simply copies the data to -kernel space and ignores it. The xxh32, xxh64, and crc32 modes compute -hashes on the copied data. I also ran it with four different buffer sizes. -The benchmark file is located in the upstream zstd source repository under -`contrib/linux-kernel/xxhash_test.c` [1]. - -I ran the benchmarks on a Ubuntu 14.04 VM with 2 cores and 4 GiB of RAM. -The VM is running on a MacBook Pro with a 3.1 GHz Intel Core i7 processor, -16 GB of RAM, and a SSD. I benchmarked using the file `filesystem.squashfs` -from `ubuntu-16.10-desktop-amd64.iso`, which is 1,536,217,088 B large. -Run the following commands for the benchmark: - - modprobe xxhash_test - mknod xxhash_test c 245 0 - time cp filesystem.squashfs xxhash_test - -The time is reported by the time of the userland `cp`. -The GB/s is computed with - - 1,536,217,008 B / time(buffer size, hash) - -which includes the time to copy from userland. -The Normalized GB/s is computed with - - 1,536,217,088 B / (time(buffer size, hash) - time(buffer size, none)). - - -| Buffer Size (B) | Hash | Time (s) | GB/s | Adjusted GB/s | -|-----------------|-------|----------|------|---------------| -| 1024 | none | 0.408 | 3.77 | - | -| 1024 | xxh32 | 0.649 | 2.37 | 6.37 | -| 1024 | xxh64 | 0.542 | 2.83 | 11.46 | -| 1024 | crc32 | 1.290 | 1.19 | 1.74 | -| 4096 | none | 0.380 | 4.04 | - | -| 4096 | xxh32 | 0.645 | 2.38 | 5.79 | -| 4096 | xxh64 | 0.500 | 3.07 | 12.80 | -| 4096 | crc32 | 1.168 | 1.32 | 1.95 | -| 8192 | none | 0.351 | 4.38 | - | -| 8192 | xxh32 | 0.614 | 2.50 | 5.84 | -| 8192 | xxh64 | 0.464 | 3.31 | 13.60 | -| 8192 | crc32 | 1.163 | 1.32 | 1.89 | -| 16384 | none | 0.346 | 4.43 | - | -| 16384 | xxh32 | 0.590 | 2.60 | 6.30 | -| 16384 | xxh64 | 0.466 | 3.30 | 12.80 | -| 16384 | crc32 | 1.183 | 1.30 | 1.84 | - -Tested in userland using the test-suite in the zstd repo under -`contrib/linux-kernel/test/XXHashUserlandTest.cpp` [2] by mocking the -kernel functions. A line in each branch of every function in `xxhash.c` -was commented out to ensure that the test-suite fails. Additionally -tested while testing zstd and with SMHasher [3]. 
- -[1] https://phabricator.intern.facebook.com/P57526246 -[2] https://github.com/facebook/zstd/blob/dev/contrib/linux-kernel/test/XXHashUserlandTest.cpp -[3] https://github.com/aappleby/smhasher - -zstd source repository: https://github.com/facebook/zstd -XXHash source repository: https://github.com/cyan4973/xxhash - -Signed-off-by: Nick Terrell ---- -v1 -> v2: -- Make pointer in lib/xxhash.c:394 non-const - - include/linux/xxhash.h | 236 +++++++++++++++++++++++ - lib/Kconfig | 3 + - lib/Makefile | 1 + - lib/xxhash.c | 500 +++++++++++++++++++++++++++++++++++++++++++++++++ - 4 files changed, 740 insertions(+) - create mode 100644 include/linux/xxhash.h - create mode 100644 lib/xxhash.c - -diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h -new file mode 100644 -index 0000000..9e1f42c ---- /dev/null -+++ b/include/linux/xxhash.h -@@ -0,0 +1,236 @@ -+/* -+ * xxHash - Extremely Fast Hash algorithm -+ * Copyright (C) 2012-2016, Yann Collet. -+ * -+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are -+ * met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above -+ * copyright notice, this list of conditions and the following disclaimer -+ * in the documentation and/or other materials provided with the -+ * distribution. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ * -+ * This program is free software; you can redistribute it and/or modify it under -+ * the terms of the GNU General Public License version 2 as published by the -+ * Free Software Foundation. This program is dual-licensed; you may select -+ * either version 2 of the GNU General Public License ("GPL") or BSD license -+ * ("BSD"). -+ * -+ * You can contact the author at: -+ * - xxHash homepage: http://cyan4973.github.io/xxHash/ -+ * - xxHash source repository: https://github.com/Cyan4973/xxHash -+ */ -+ -+/* -+ * Notice extracted from xxHash homepage: -+ * -+ * xxHash is an extremely fast Hash algorithm, running at RAM speed limits. -+ * It also successfully passes all tests from the SMHasher suite. 
-+ * -+ * Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 -+ * Duo @3GHz) -+ * -+ * Name Speed Q.Score Author -+ * xxHash 5.4 GB/s 10 -+ * CrapWow 3.2 GB/s 2 Andrew -+ * MumurHash 3a 2.7 GB/s 10 Austin Appleby -+ * SpookyHash 2.0 GB/s 10 Bob Jenkins -+ * SBox 1.4 GB/s 9 Bret Mulvey -+ * Lookup3 1.2 GB/s 9 Bob Jenkins -+ * SuperFastHash 1.2 GB/s 1 Paul Hsieh -+ * CityHash64 1.05 GB/s 10 Pike & Alakuijala -+ * FNV 0.55 GB/s 5 Fowler, Noll, Vo -+ * CRC32 0.43 GB/s 9 -+ * MD5-32 0.33 GB/s 10 Ronald L. Rivest -+ * SHA1-32 0.28 GB/s 10 -+ * -+ * Q.Score is a measure of quality of the hash function. -+ * It depends on successfully passing SMHasher test set. -+ * 10 is a perfect score. -+ * -+ * A 64-bits version, named xxh64 offers much better speed, -+ * but for 64-bits applications only. -+ * Name Speed on 64 bits Speed on 32 bits -+ * xxh64 13.8 GB/s 1.9 GB/s -+ * xxh32 6.8 GB/s 6.0 GB/s -+ */ -+ -+#ifndef XXHASH_H -+#define XXHASH_H -+ -+#include -+ -+/*-**************************** -+ * Simple Hash Functions -+ *****************************/ -+ -+/** -+ * xxh32() - calculate the 32-bit hash of the input with a given seed. -+ * -+ * @input: The data to hash. -+ * @length: The length of the data to hash. -+ * @seed: The seed can be used to alter the result predictably. -+ * -+ * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s -+ * -+ * Return: The 32-bit hash of the data. -+ */ -+uint32_t xxh32(const void *input, size_t length, uint32_t seed); -+ -+/** -+ * xxh64() - calculate the 64-bit hash of the input with a given seed. -+ * -+ * @input: The data to hash. -+ * @length: The length of the data to hash. -+ * @seed: The seed can be used to alter the result predictably. -+ * -+ * This function runs 2x faster on 64-bit systems, but slower on 32-bit systems. -+ * -+ * Return: The 64-bit hash of the data. -+ */ -+uint64_t xxh64(const void *input, size_t length, uint64_t seed); -+ -+/*-**************************** -+ * Streaming Hash Functions -+ *****************************/ -+ -+/* -+ * These definitions are only meant to allow allocation of XXH state -+ * statically, on stack, or in a struct for example. -+ * Do not use members directly. -+ */ -+ -+/** -+ * struct xxh32_state - private xxh32 state, do not use members directly -+ */ -+struct xxh32_state { -+ uint32_t total_len_32; -+ uint32_t large_len; -+ uint32_t v1; -+ uint32_t v2; -+ uint32_t v3; -+ uint32_t v4; -+ uint32_t mem32[4]; -+ uint32_t memsize; -+}; -+ -+/** -+ * struct xxh32_state - private xxh64 state, do not use members directly -+ */ -+struct xxh64_state { -+ uint64_t total_len; -+ uint64_t v1; -+ uint64_t v2; -+ uint64_t v3; -+ uint64_t v4; -+ uint64_t mem64[4]; -+ uint32_t memsize; -+}; -+ -+/** -+ * xxh32_reset() - reset the xxh32 state to start a new hashing operation -+ * -+ * @state: The xxh32 state to reset. -+ * @seed: Initialize the hash state with this seed. -+ * -+ * Call this function on any xxh32_state to prepare for a new hashing operation. -+ */ -+void xxh32_reset(struct xxh32_state *state, uint32_t seed); -+ -+/** -+ * xxh32_update() - hash the data given and update the xxh32 state -+ * -+ * @state: The xxh32 state to update. -+ * @input: The data to hash. -+ * @length: The length of the data to hash. -+ * -+ * After calling xxh32_reset() call xxh32_update() as many times as necessary. -+ * -+ * Return: Zero on success, otherwise an error code. 
-+ */ -+int xxh32_update(struct xxh32_state *state, const void *input, size_t length); -+ -+/** -+ * xxh32_digest() - produce the current xxh32 hash -+ * -+ * @state: Produce the current xxh32 hash of this state. -+ * -+ * A hash value can be produced at any time. It is still possible to continue -+ * inserting input into the hash state after a call to xxh32_digest(), and -+ * generate new hashes later on, by calling xxh32_digest() again. -+ * -+ * Return: The xxh32 hash stored in the state. -+ */ -+uint32_t xxh32_digest(const struct xxh32_state *state); -+ -+/** -+ * xxh64_reset() - reset the xxh64 state to start a new hashing operation -+ * -+ * @state: The xxh64 state to reset. -+ * @seed: Initialize the hash state with this seed. -+ */ -+void xxh64_reset(struct xxh64_state *state, uint64_t seed); -+ -+/** -+ * xxh64_update() - hash the data given and update the xxh64 state -+ * @state: The xxh64 state to update. -+ * @input: The data to hash. -+ * @length: The length of the data to hash. -+ * -+ * After calling xxh64_reset() call xxh64_update() as many times as necessary. -+ * -+ * Return: Zero on success, otherwise an error code. -+ */ -+int xxh64_update(struct xxh64_state *state, const void *input, size_t length); -+ -+/** -+ * xxh64_digest() - produce the current xxh64 hash -+ * -+ * @state: Produce the current xxh64 hash of this state. -+ * -+ * A hash value can be produced at any time. It is still possible to continue -+ * inserting input into the hash state after a call to xxh64_digest(), and -+ * generate new hashes later on, by calling xxh64_digest() again. -+ * -+ * Return: The xxh64 hash stored in the state. -+ */ -+uint64_t xxh64_digest(const struct xxh64_state *state); -+ -+/*-************************** -+ * Utils -+ ***************************/ -+ -+/** -+ * xxh32_copy_state() - copy the source state into the destination state -+ * -+ * @src: The source xxh32 state. -+ * @dst: The destination xxh32 state. -+ */ -+void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src); -+ -+/** -+ * xxh64_copy_state() - copy the source state into the destination state -+ * -+ * @src: The source xxh64 state. -+ * @dst: The destination xxh64 state. -+ */ -+void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src); -+ -+#endif /* XXHASH_H */ -diff --git a/lib/Kconfig b/lib/Kconfig -index 6762529..5e7541f 100644 ---- a/lib/Kconfig -+++ b/lib/Kconfig -@@ -192,6 +192,9 @@ config CRC8 - when they need to do cyclic redundancy check according CRC8 - algorithm. Module will be called crc8. - -+config XXHASH -+ tristate -+ - config AUDIT_GENERIC - bool - depends on AUDIT && !AUDIT_ARCH -diff --git a/lib/Makefile b/lib/Makefile -index 40c1837..d06b68a 100644 ---- a/lib/Makefile -+++ b/lib/Makefile -@@ -102,6 +102,7 @@ obj-$(CONFIG_CRC4) += crc4.o - obj-$(CONFIG_CRC7) += crc7.o - obj-$(CONFIG_LIBCRC32C) += libcrc32c.o - obj-$(CONFIG_CRC8) += crc8.o -+obj-$(CONFIG_XXHASH) += xxhash.o - obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o - - obj-$(CONFIG_842_COMPRESS) += 842/ -diff --git a/lib/xxhash.c b/lib/xxhash.c -new file mode 100644 -index 0000000..aa61e2a ---- /dev/null -+++ b/lib/xxhash.c -@@ -0,0 +1,500 @@ -+/* -+ * xxHash - Extremely Fast Hash algorithm -+ * Copyright (C) 2012-2016, Yann Collet. 
-+ * -+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are -+ * met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above -+ * copyright notice, this list of conditions and the following disclaimer -+ * in the documentation and/or other materials provided with the -+ * distribution. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ * -+ * This program is free software; you can redistribute it and/or modify it under -+ * the terms of the GNU General Public License version 2 as published by the -+ * Free Software Foundation. This program is dual-licensed; you may select -+ * either version 2 of the GNU General Public License ("GPL") or BSD license -+ * ("BSD"). -+ * -+ * You can contact the author at: -+ * - xxHash homepage: http://cyan4973.github.io/xxHash/ -+ * - xxHash source repository: https://github.com/Cyan4973/xxHash -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/*-************************************* -+ * Macros -+ **************************************/ -+#define xxh_rotl32(x, r) ((x << r) | (x >> (32 - r))) -+#define xxh_rotl64(x, r) ((x << r) | (x >> (64 - r))) -+ -+#ifdef __LITTLE_ENDIAN -+# define XXH_CPU_LITTLE_ENDIAN 1 -+#else -+# define XXH_CPU_LITTLE_ENDIAN 0 -+#endif -+ -+/*-************************************* -+ * Constants -+ **************************************/ -+static const uint32_t PRIME32_1 = 2654435761U; -+static const uint32_t PRIME32_2 = 2246822519U; -+static const uint32_t PRIME32_3 = 3266489917U; -+static const uint32_t PRIME32_4 = 668265263U; -+static const uint32_t PRIME32_5 = 374761393U; -+ -+static const uint64_t PRIME64_1 = 11400714785074694791ULL; -+static const uint64_t PRIME64_2 = 14029467366897019727ULL; -+static const uint64_t PRIME64_3 = 1609587929392839161ULL; -+static const uint64_t PRIME64_4 = 9650029242287828579ULL; -+static const uint64_t PRIME64_5 = 2870177450012600261ULL; -+ -+/*-************************** -+ * Utils -+ ***************************/ -+void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src) -+{ -+ memcpy(dst, src, sizeof(*dst)); -+} -+EXPORT_SYMBOL(xxh32_copy_state); -+ -+void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src) -+{ -+ memcpy(dst, src, sizeof(*dst)); -+} -+EXPORT_SYMBOL(xxh64_copy_state); -+ -+/*-*************************** -+ * Simple Hash Functions -+ ****************************/ -+static uint32_t xxh32_round(uint32_t seed, 
const uint32_t input) -+{ -+ seed += input * PRIME32_2; -+ seed = xxh_rotl32(seed, 13); -+ seed *= PRIME32_1; -+ return seed; -+} -+ -+uint32_t xxh32(const void *input, const size_t len, const uint32_t seed) -+{ -+ const uint8_t *p = (const uint8_t *)input; -+ const uint8_t *b_end = p + len; -+ uint32_t h32; -+ -+ if (len >= 16) { -+ const uint8_t *const limit = b_end - 16; -+ uint32_t v1 = seed + PRIME32_1 + PRIME32_2; -+ uint32_t v2 = seed + PRIME32_2; -+ uint32_t v3 = seed + 0; -+ uint32_t v4 = seed - PRIME32_1; -+ -+ do { -+ v1 = xxh32_round(v1, get_unaligned_le32(p)); -+ p += 4; -+ v2 = xxh32_round(v2, get_unaligned_le32(p)); -+ p += 4; -+ v3 = xxh32_round(v3, get_unaligned_le32(p)); -+ p += 4; -+ v4 = xxh32_round(v4, get_unaligned_le32(p)); -+ p += 4; -+ } while (p <= limit); -+ -+ h32 = xxh_rotl32(v1, 1) + xxh_rotl32(v2, 7) + -+ xxh_rotl32(v3, 12) + xxh_rotl32(v4, 18); -+ } else { -+ h32 = seed + PRIME32_5; -+ } -+ -+ h32 += (uint32_t)len; -+ -+ while (p + 4 <= b_end) { -+ h32 += get_unaligned_le32(p) * PRIME32_3; -+ h32 = xxh_rotl32(h32, 17) * PRIME32_4; -+ p += 4; -+ } -+ -+ while (p < b_end) { -+ h32 += (*p) * PRIME32_5; -+ h32 = xxh_rotl32(h32, 11) * PRIME32_1; -+ p++; -+ } -+ -+ h32 ^= h32 >> 15; -+ h32 *= PRIME32_2; -+ h32 ^= h32 >> 13; -+ h32 *= PRIME32_3; -+ h32 ^= h32 >> 16; -+ -+ return h32; -+} -+EXPORT_SYMBOL(xxh32); -+ -+static uint64_t xxh64_round(uint64_t acc, const uint64_t input) -+{ -+ acc += input * PRIME64_2; -+ acc = xxh_rotl64(acc, 31); -+ acc *= PRIME64_1; -+ return acc; -+} -+ -+static uint64_t xxh64_merge_round(uint64_t acc, uint64_t val) -+{ -+ val = xxh64_round(0, val); -+ acc ^= val; -+ acc = acc * PRIME64_1 + PRIME64_4; -+ return acc; -+} -+ -+uint64_t xxh64(const void *input, const size_t len, const uint64_t seed) -+{ -+ const uint8_t *p = (const uint8_t *)input; -+ const uint8_t *const b_end = p + len; -+ uint64_t h64; -+ -+ if (len >= 32) { -+ const uint8_t *const limit = b_end - 32; -+ uint64_t v1 = seed + PRIME64_1 + PRIME64_2; -+ uint64_t v2 = seed + PRIME64_2; -+ uint64_t v3 = seed + 0; -+ uint64_t v4 = seed - PRIME64_1; -+ -+ do { -+ v1 = xxh64_round(v1, get_unaligned_le64(p)); -+ p += 8; -+ v2 = xxh64_round(v2, get_unaligned_le64(p)); -+ p += 8; -+ v3 = xxh64_round(v3, get_unaligned_le64(p)); -+ p += 8; -+ v4 = xxh64_round(v4, get_unaligned_le64(p)); -+ p += 8; -+ } while (p <= limit); -+ -+ h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) + -+ xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18); -+ h64 = xxh64_merge_round(h64, v1); -+ h64 = xxh64_merge_round(h64, v2); -+ h64 = xxh64_merge_round(h64, v3); -+ h64 = xxh64_merge_round(h64, v4); -+ -+ } else { -+ h64 = seed + PRIME64_5; -+ } -+ -+ h64 += (uint64_t)len; -+ -+ while (p + 8 <= b_end) { -+ const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p)); -+ -+ h64 ^= k1; -+ h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4; -+ p += 8; -+ } -+ -+ if (p + 4 <= b_end) { -+ h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1; -+ h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; -+ p += 4; -+ } -+ -+ while (p < b_end) { -+ h64 ^= (*p) * PRIME64_5; -+ h64 = xxh_rotl64(h64, 11) * PRIME64_1; -+ p++; -+ } -+ -+ h64 ^= h64 >> 33; -+ h64 *= PRIME64_2; -+ h64 ^= h64 >> 29; -+ h64 *= PRIME64_3; -+ h64 ^= h64 >> 32; -+ -+ return h64; -+} -+EXPORT_SYMBOL(xxh64); -+ -+/*-************************************************** -+ * Advanced Hash Functions -+ ***************************************************/ -+void xxh32_reset(struct xxh32_state *statePtr, const uint32_t seed) -+{ -+ /* use a local state for memcpy() to avoid 
strict-aliasing warnings */ -+ struct xxh32_state state; -+ -+ memset(&state, 0, sizeof(state)); -+ state.v1 = seed + PRIME32_1 + PRIME32_2; -+ state.v2 = seed + PRIME32_2; -+ state.v3 = seed + 0; -+ state.v4 = seed - PRIME32_1; -+ memcpy(statePtr, &state, sizeof(state)); -+} -+EXPORT_SYMBOL(xxh32_reset); -+ -+void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed) -+{ -+ /* use a local state for memcpy() to avoid strict-aliasing warnings */ -+ struct xxh64_state state; -+ -+ memset(&state, 0, sizeof(state)); -+ state.v1 = seed + PRIME64_1 + PRIME64_2; -+ state.v2 = seed + PRIME64_2; -+ state.v3 = seed + 0; -+ state.v4 = seed - PRIME64_1; -+ memcpy(statePtr, &state, sizeof(state)); -+} -+EXPORT_SYMBOL(xxh64_reset); -+ -+int xxh32_update(struct xxh32_state *state, const void *input, const size_t len) -+{ -+ const uint8_t *p = (const uint8_t *)input; -+ const uint8_t *const b_end = p + len; -+ -+ if (input == NULL) -+ return -EINVAL; -+ -+ state->total_len_32 += (uint32_t)len; -+ state->large_len |= (len >= 16) | (state->total_len_32 >= 16); -+ -+ if (state->memsize + len < 16) { /* fill in tmp buffer */ -+ memcpy((uint8_t *)(state->mem32) + state->memsize, input, len); -+ state->memsize += (uint32_t)len; -+ return 0; -+ } -+ -+ if (state->memsize) { /* some data left from previous update */ -+ const uint32_t *p32 = state->mem32; -+ -+ memcpy((uint8_t *)(state->mem32) + state->memsize, input, -+ 16 - state->memsize); -+ -+ state->v1 = xxh32_round(state->v1, get_unaligned_le32(p32)); -+ p32++; -+ state->v2 = xxh32_round(state->v2, get_unaligned_le32(p32)); -+ p32++; -+ state->v3 = xxh32_round(state->v3, get_unaligned_le32(p32)); -+ p32++; -+ state->v4 = xxh32_round(state->v4, get_unaligned_le32(p32)); -+ p32++; -+ -+ p += 16-state->memsize; -+ state->memsize = 0; -+ } -+ -+ if (p <= b_end - 16) { -+ const uint8_t *const limit = b_end - 16; -+ uint32_t v1 = state->v1; -+ uint32_t v2 = state->v2; -+ uint32_t v3 = state->v3; -+ uint32_t v4 = state->v4; -+ -+ do { -+ v1 = xxh32_round(v1, get_unaligned_le32(p)); -+ p += 4; -+ v2 = xxh32_round(v2, get_unaligned_le32(p)); -+ p += 4; -+ v3 = xxh32_round(v3, get_unaligned_le32(p)); -+ p += 4; -+ v4 = xxh32_round(v4, get_unaligned_le32(p)); -+ p += 4; -+ } while (p <= limit); -+ -+ state->v1 = v1; -+ state->v2 = v2; -+ state->v3 = v3; -+ state->v4 = v4; -+ } -+ -+ if (p < b_end) { -+ memcpy(state->mem32, p, (size_t)(b_end-p)); -+ state->memsize = (uint32_t)(b_end-p); -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL(xxh32_update); -+ -+uint32_t xxh32_digest(const struct xxh32_state *state) -+{ -+ const uint8_t *p = (const uint8_t *)state->mem32; -+ const uint8_t *const b_end = (const uint8_t *)(state->mem32) + -+ state->memsize; -+ uint32_t h32; -+ -+ if (state->large_len) { -+ h32 = xxh_rotl32(state->v1, 1) + xxh_rotl32(state->v2, 7) + -+ xxh_rotl32(state->v3, 12) + xxh_rotl32(state->v4, 18); -+ } else { -+ h32 = state->v3 /* == seed */ + PRIME32_5; -+ } -+ -+ h32 += state->total_len_32; -+ -+ while (p + 4 <= b_end) { -+ h32 += get_unaligned_le32(p) * PRIME32_3; -+ h32 = xxh_rotl32(h32, 17) * PRIME32_4; -+ p += 4; -+ } -+ -+ while (p < b_end) { -+ h32 += (*p) * PRIME32_5; -+ h32 = xxh_rotl32(h32, 11) * PRIME32_1; -+ p++; -+ } -+ -+ h32 ^= h32 >> 15; -+ h32 *= PRIME32_2; -+ h32 ^= h32 >> 13; -+ h32 *= PRIME32_3; -+ h32 ^= h32 >> 16; -+ -+ return h32; -+} -+EXPORT_SYMBOL(xxh32_digest); -+ -+int xxh64_update(struct xxh64_state *state, const void *input, const size_t len) -+{ -+ const uint8_t *p = (const uint8_t *)input; -+ const uint8_t *const b_end = 
p + len; -+ -+ if (input == NULL) -+ return -EINVAL; -+ -+ state->total_len += len; -+ -+ if (state->memsize + len < 32) { /* fill in tmp buffer */ -+ memcpy(((uint8_t *)state->mem64) + state->memsize, input, len); -+ state->memsize += (uint32_t)len; -+ return 0; -+ } -+ -+ if (state->memsize) { /* tmp buffer is full */ -+ uint64_t *p64 = state->mem64; -+ -+ memcpy(((uint8_t *)p64) + state->memsize, input, -+ 32 - state->memsize); -+ -+ state->v1 = xxh64_round(state->v1, get_unaligned_le64(p64)); -+ p64++; -+ state->v2 = xxh64_round(state->v2, get_unaligned_le64(p64)); -+ p64++; -+ state->v3 = xxh64_round(state->v3, get_unaligned_le64(p64)); -+ p64++; -+ state->v4 = xxh64_round(state->v4, get_unaligned_le64(p64)); -+ -+ p += 32 - state->memsize; -+ state->memsize = 0; -+ } -+ -+ if (p + 32 <= b_end) { -+ const uint8_t *const limit = b_end - 32; -+ uint64_t v1 = state->v1; -+ uint64_t v2 = state->v2; -+ uint64_t v3 = state->v3; -+ uint64_t v4 = state->v4; -+ -+ do { -+ v1 = xxh64_round(v1, get_unaligned_le64(p)); -+ p += 8; -+ v2 = xxh64_round(v2, get_unaligned_le64(p)); -+ p += 8; -+ v3 = xxh64_round(v3, get_unaligned_le64(p)); -+ p += 8; -+ v4 = xxh64_round(v4, get_unaligned_le64(p)); -+ p += 8; -+ } while (p <= limit); -+ -+ state->v1 = v1; -+ state->v2 = v2; -+ state->v3 = v3; -+ state->v4 = v4; -+ } -+ -+ if (p < b_end) { -+ memcpy(state->mem64, p, (size_t)(b_end-p)); -+ state->memsize = (uint32_t)(b_end - p); -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL(xxh64_update); -+ -+uint64_t xxh64_digest(const struct xxh64_state *state) -+{ -+ const uint8_t *p = (const uint8_t *)state->mem64; -+ const uint8_t *const b_end = (const uint8_t *)state->mem64 + -+ state->memsize; -+ uint64_t h64; -+ -+ if (state->total_len >= 32) { -+ const uint64_t v1 = state->v1; -+ const uint64_t v2 = state->v2; -+ const uint64_t v3 = state->v3; -+ const uint64_t v4 = state->v4; -+ -+ h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) + -+ xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18); -+ h64 = xxh64_merge_round(h64, v1); -+ h64 = xxh64_merge_round(h64, v2); -+ h64 = xxh64_merge_round(h64, v3); -+ h64 = xxh64_merge_round(h64, v4); -+ } else { -+ h64 = state->v3 + PRIME64_5; -+ } -+ -+ h64 += (uint64_t)state->total_len; -+ -+ while (p + 8 <= b_end) { -+ const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p)); -+ -+ h64 ^= k1; -+ h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4; -+ p += 8; -+ } -+ -+ if (p + 4 <= b_end) { -+ h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1; -+ h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; -+ p += 4; -+ } -+ -+ while (p < b_end) { -+ h64 ^= (*p) * PRIME64_5; -+ h64 = xxh_rotl64(h64, 11) * PRIME64_1; -+ p++; -+ } -+ -+ h64 ^= h64 >> 33; -+ h64 *= PRIME64_2; -+ h64 ^= h64 >> 29; -+ h64 *= PRIME64_3; -+ h64 ^= h64 >> 32; -+ -+ return h64; -+} -+EXPORT_SYMBOL(xxh64_digest); -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+MODULE_DESCRIPTION("xxHash"); --- -2.9.3 diff --git a/contrib/linux-kernel/0002-lib-Add-zstd-modules.patch b/contrib/linux-kernel/0002-lib-Add-zstd-modules.patch deleted file mode 100644 index 0232d2d4ab5..00000000000 --- a/contrib/linux-kernel/0002-lib-Add-zstd-modules.patch +++ /dev/null @@ -1,13285 +0,0 @@ -From 2b29ec569f8438a0307debd29873859ca6d407fc Mon Sep 17 00:00:00 2001 -From: Nick Terrell -Date: Mon, 17 Jul 2017 17:08:19 -0700 -Subject: [PATCH v5 2/5] lib: Add zstd modules - -Add zstd compression and decompression kernel modules. -zstd offers a wide variety of compression speed and quality trade-offs. -It can compress at speeds approaching lz4, and quality approaching lzma. 
-zstd decompresses at speeds more than twice as fast as zlib, and
-decompression speed remains roughly the same across all compression levels.
-
-The code was ported from the upstream zstd source repository. The
-`linux/zstd.h` header was modified to match Linux kernel style. The
-cross-platform and allocation code was stripped out. Instead, zstd
-requires the caller to pass a preallocated workspace. The source files
-were clang-formatted [1] to match the Linux kernel style as much as
-possible. Otherwise, the code was unmodified. We would like to avoid
-further manual modification of the source code as much as possible, so it
-will be easier to keep the kernel zstd up to date.
-
-I benchmarked zstd compression as a special character device. I ran zstd
-and zlib compression at several levels, as well as performing no
-compression, which measures the time spent copying the data to kernel space.
-Data is passed to the compressor 4096 B at a time. The benchmark file is
-located in the upstream zstd source repository under
-`contrib/linux-kernel/zstd_compress_test.c` [2].
-
-I ran the benchmarks on an Ubuntu 14.04 VM with 2 cores and 4 GiB of RAM.
-The VM is running on a MacBook Pro with a 3.1 GHz Intel Core i7 processor,
-16 GB of RAM, and an SSD. I benchmarked using `silesia.tar` [3], which is
-211,988,480 B large. Run the following commands for the benchmark:
-
-    sudo modprobe zstd_compress_test
-    sudo mknod zstd_compress_test c 245 0
-    sudo cp silesia.tar zstd_compress_test
-
-The time is reported by the time of the userland `cp`.
-The MB/s is computed with
-
-    211,988,480 B / time(buffer size, method)
-
-which includes the time to copy from userland.
-The Adjusted MB/s is computed with
-
-    211,988,480 B / (time(buffer size, method) - time(buffer size, none)).
-
-The memory reported is the amount of memory the compressor requests.
-
-| Method   | Size (B)  | Time (s) | Ratio | MB/s    | Adj MB/s | Mem (MB) |
-|----------|-----------|----------|-------|---------|----------|----------|
-| none     | 211988480 | 0.100    | 1     | 2119.88 | -        | -        |
-| zstd -1  | 73645762  | 1.044    | 2.878 | 203.05  | 224.56   | 1.23     |
-| zstd -3  | 66988878  | 1.761    | 3.165 | 120.38  | 127.63   | 2.47     |
-| zstd -5  | 65001259  | 2.563    | 3.261 | 82.71   | 86.07    | 2.86     |
-| zstd -10 | 60165346  | 13.242   | 3.523 | 16.01   | 16.13    | 13.22    |
-| zstd -15 | 58009756  | 47.601   | 3.654 | 4.45    | 4.46     | 21.61    |
-| zstd -19 | 54014593  | 102.835  | 3.925 | 2.06    | 2.06     | 60.15    |
-| zlib -1  | 77260026  | 2.895    | 2.744 | 73.23   | 75.85    | 0.27     |
-| zlib -3  | 72972206  | 4.116    | 2.905 | 51.50   | 52.79    | 0.27     |
-| zlib -6  | 68190360  | 9.633    | 3.109 | 22.01   | 22.24    | 0.27     |
-| zlib -9  | 67613382  | 22.554   | 3.135 | 9.40    | 9.44     | 0.27     |
-
-I benchmarked zstd decompression using the same method on the same machine.
-The benchmark file is located in the upstream zstd repo under
-`contrib/linux-kernel/zstd_decompress_test.c` [4]. The memory reported is
-the amount of memory required to decompress data compressed with the given
-compression level. If you know the maximum size of your input, you can
-reduce the memory usage of decompression irrespective of the compression
-level.
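
As a concrete illustration of that last point, here is a minimal editor's sketch against the `linux/zstd.h` API this patch adds. The 8 MiB cap (matching the interoperability minimum the header recommends), the helper name, and the use of vmalloc() are illustrative assumptions, not part of the patch:

    #include <linux/vmalloc.h>
    #include <linux/zstd.h>

    /* If no frame ever uses a window larger than 8 MiB, size the
     * streaming decompression workspace for that bound instead of
     * the per-level worst case. */
    static ZSTD_DStream *example_init_dstream(void **wksp_out)
    {
            size_t const max_window = 8 * 1024 * 1024;
            size_t const wksp_size = ZSTD_DStreamWorkspaceBound(max_window);
            void *wksp = vmalloc(wksp_size);

            if (!wksp)
                    return NULL;
            *wksp_out = wksp; /* caller frees with vfree() when done */
            return ZSTD_initDStream(max_window, wksp, wksp_size);
    }

Frames compressed with a larger window would be rejected by such a stream, so the producer has to honor the same bound.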
-
-| Method   | Time (s) | MB/s    | Adjusted MB/s | Memory (MB) |
-|----------|----------|---------|---------------|-------------|
-| none     | 0.025    | 8479.54 | -             | -           |
-| zstd -1  | 0.358    | 592.15  | 636.60        | 0.84        |
-| zstd -3  | 0.396    | 535.32  | 571.40        | 1.46        |
-| zstd -5  | 0.396    | 535.32  | 571.40        | 1.46        |
-| zstd -10 | 0.374    | 566.81  | 607.42        | 2.51        |
-| zstd -15 | 0.379    | 559.34  | 598.84        | 4.61        |
-| zstd -19 | 0.412    | 514.54  | 547.77        | 8.80        |
-| zlib -1  | 0.940    | 225.52  | 231.68        | 0.04        |
-| zlib -3  | 0.883    | 240.08  | 247.07        | 0.04        |
-| zlib -6  | 0.844    | 251.17  | 258.84        | 0.04        |
-| zlib -9  | 0.837    | 253.27  | 261.07        | 0.04        |
-
-Tested in userland using the test-suite in the zstd repo under
-`contrib/linux-kernel/test/UserlandTest.cpp` [5] by mocking the kernel
-functions. Fuzz tested using libfuzzer [6] with the fuzz harnesses under
-`contrib/linux-kernel/test/{RoundTripCrash.c,DecompressCrash.c}` [7] [8]
-with ASAN, UBSAN, and MSAN. Additionally, it was tested while testing the
-BtrFS and SquashFS patches coming next.
-
-[1] https://clang.llvm.org/docs/ClangFormat.html
-[2] https://github.com/facebook/zstd/blob/dev/contrib/linux-kernel/zstd_compress_test.c
-[3] http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia
-[4] https://github.com/facebook/zstd/blob/dev/contrib/linux-kernel/zstd_decompress_test.c
-[5] https://github.com/facebook/zstd/blob/dev/contrib/linux-kernel/test/UserlandTest.cpp
-[6] http://llvm.org/docs/LibFuzzer.html
-[7] https://github.com/facebook/zstd/blob/dev/contrib/linux-kernel/test/RoundTripCrash.c
-[8] https://github.com/facebook/zstd/blob/dev/contrib/linux-kernel/test/DecompressCrash.c
-
-zstd source repository: https://github.com/facebook/zstd
-
-Signed-off-by: Nick Terrell
---
-v1 -> v2:
-- Use div_u64() for division of u64s
-- Reduce stack usage of ZSTD_compressSequences(), ZSTD_buildSeqTable(),
-  ZSTD_decompressSequencesLong(), FSE_buildDTable(), FSE_decompress_wksp(),
-  HUF_writeCTable(), HUF_readStats(), HUF_readCTable(),
-  HUF_compressWeights(), HUF_readDTableX2(), and HUF_readDTableX4()
-- No function uses more than 400 B of stack space
-
-v2 -> v3:
-- Work around gcc-7 bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388
-- Fix bug in dictionary compression from upstream commit cc1522351f
-
-v3 -> v4:
-- Fix minor compiler warnings
-
-v4 -> v5:
-- Fix rare compression bug from upstream commit 308047eb5d
-- Fix bug introduced in v3 when working around the gcc-7 bug
-
- include/linux/zstd.h      | 1155 +++++++++++++++
- lib/Kconfig               |    8 +
- lib/Makefile              |    2 +
- lib/zstd/Makefile         |   18 +
- lib/zstd/bitstream.h      |  374 +++++
- lib/zstd/compress.c       | 3482 +++++++++++++++++++++++++++++++++++++++++++++
- lib/zstd/decompress.c     | 2526 ++++++++++++++++++++++++++++++++
- lib/zstd/entropy_common.c |  243 ++++
- lib/zstd/error_private.h  |   51 +
- lib/zstd/fse.h            |  575 ++++++++
- lib/zstd/fse_compress.c   |  795 +++++++++++
- lib/zstd/fse_decompress.c |  332 +++++
- lib/zstd/huf.h            |  212 +++
- lib/zstd/huf_compress.c   |  770 ++++++++++
- lib/zstd/huf_decompress.c |  960 +++++++++++++
- lib/zstd/mem.h            |  149 ++
- lib/zstd/zstd_common.c    |   73 +
- lib/zstd/zstd_internal.h  |  261 ++++
- lib/zstd/zstd_opt.h       | 1012 +++++++++++++
- 19 files changed, 12998 insertions(+)
- create mode 100644 include/linux/zstd.h
- create mode 100644 lib/zstd/Makefile
- create mode 100644 lib/zstd/bitstream.h
- create mode 100644 lib/zstd/compress.c
- create mode 100644 lib/zstd/decompress.c
- create mode 100644 lib/zstd/entropy_common.c
- create mode 100644 lib/zstd/error_private.h
- create mode 100644 lib/zstd/fse.h
- create mode 100644 lib/zstd/fse_compress.c
- create mode 100644 lib/zstd/fse_decompress.c
- create mode 100644 lib/zstd/huf.h
- create mode 100644 lib/zstd/huf_compress.c
- create mode 100644 lib/zstd/huf_decompress.c
- create mode 100644 lib/zstd/mem.h
- create mode 100644 lib/zstd/zstd_common.c
- create mode 100644 lib/zstd/zstd_internal.h
- create mode 100644 lib/zstd/zstd_opt.h
-
-diff --git a/include/linux/zstd.h b/include/linux/zstd.h
-new file mode 100644
-index 0000000..305efd0
---- /dev/null
-+++ b/include/linux/zstd.h
-@@ -0,0 +1,1155 @@
-+/*
-+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
-+ * All rights reserved.
-+ *
-+ * This source code is licensed under the BSD-style license found in the
-+ * LICENSE file in the root directory of https://github.com/facebook/zstd.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU General Public License version 2 as published by the
-+ * Free Software Foundation. This program is dual-licensed; you may select
-+ * either version 2 of the GNU General Public License ("GPL") or BSD license
-+ * ("BSD").
-+ */
-+
-+#ifndef ZSTD_H
-+#define ZSTD_H
-+
-+/* ======   Dependency   ======*/
-+#include <linux/types.h>   /* size_t */
-+
-+
-+/*-*****************************************************************************
-+ * Introduction
-+ *
-+ * zstd, short for Zstandard, is a fast lossless compression algorithm,
-+ * targeting real-time compression scenarios at zlib-level and better
-+ * compression ratios. The zstd compression library provides in-memory
-+ * compression and decompression functions. The library supports compression
-+ * levels from 1 up to ZSTD_maxCLevel(), which is 22. Levels >= 20, labeled
-+ * ultra, should be used with caution, as they require more memory.
-+ * Compression can be done in:
-+ *  - a single step, reusing a context (described as Explicit memory management)
-+ *  - unbounded multiple steps (described as Streaming compression)
-+ * The compression ratio achievable on small data can be greatly improved using
-+ * compression with a dictionary in:
-+ *  - a single step (described as Simple dictionary API)
-+ *  - a single step, reusing a dictionary (described as Fast dictionary API)
-+ ******************************************************************************/
-+
-+/*======  Helper functions  ======*/
-+
-+/**
-+ * enum ZSTD_ErrorCode - zstd error codes
-+ *
-+ * Functions that return size_t can be checked for errors using ZSTD_isError()
-+ * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode().
-+ */ -+typedef enum { -+ ZSTD_error_no_error, -+ ZSTD_error_GENERIC, -+ ZSTD_error_prefix_unknown, -+ ZSTD_error_version_unsupported, -+ ZSTD_error_parameter_unknown, -+ ZSTD_error_frameParameter_unsupported, -+ ZSTD_error_frameParameter_unsupportedBy32bits, -+ ZSTD_error_frameParameter_windowTooLarge, -+ ZSTD_error_compressionParameter_unsupported, -+ ZSTD_error_init_missing, -+ ZSTD_error_memory_allocation, -+ ZSTD_error_stage_wrong, -+ ZSTD_error_dstSize_tooSmall, -+ ZSTD_error_srcSize_wrong, -+ ZSTD_error_corruption_detected, -+ ZSTD_error_checksum_wrong, -+ ZSTD_error_tableLog_tooLarge, -+ ZSTD_error_maxSymbolValue_tooLarge, -+ ZSTD_error_maxSymbolValue_tooSmall, -+ ZSTD_error_dictionary_corrupted, -+ ZSTD_error_dictionary_wrong, -+ ZSTD_error_dictionaryCreation_failed, -+ ZSTD_error_maxCode -+} ZSTD_ErrorCode; -+ -+/** -+ * ZSTD_maxCLevel() - maximum compression level available -+ * -+ * Return: Maximum compression level available. -+ */ -+int ZSTD_maxCLevel(void); -+/** -+ * ZSTD_compressBound() - maximum compressed size in worst case scenario -+ * @srcSize: The size of the data to compress. -+ * -+ * Return: The maximum compressed size in the worst case scenario. -+ */ -+size_t ZSTD_compressBound(size_t srcSize); -+/** -+ * ZSTD_isError() - tells if a size_t function result is an error code -+ * @code: The function result to check for error. -+ * -+ * Return: Non-zero iff the code is an error. -+ */ -+static __attribute__((unused)) unsigned int ZSTD_isError(size_t code) -+{ -+ return code > (size_t)-ZSTD_error_maxCode; -+} -+/** -+ * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode -+ * @functionResult: The result of a function for which ZSTD_isError() is true. -+ * -+ * Return: The ZSTD_ErrorCode corresponding to the functionResult or 0 -+ * if the functionResult isn't an error. -+ */ -+static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode( -+ size_t functionResult) -+{ -+ if (!ZSTD_isError(functionResult)) -+ return (ZSTD_ErrorCode)0; -+ return (ZSTD_ErrorCode)(0 - functionResult); -+} -+ -+/** -+ * enum ZSTD_strategy - zstd compression search strategy -+ * -+ * From faster to stronger. -+ */ -+typedef enum { -+ ZSTD_fast, -+ ZSTD_dfast, -+ ZSTD_greedy, -+ ZSTD_lazy, -+ ZSTD_lazy2, -+ ZSTD_btlazy2, -+ ZSTD_btopt, -+ ZSTD_btopt2 -+} ZSTD_strategy; -+ -+/** -+ * struct ZSTD_compressionParameters - zstd compression parameters -+ * @windowLog: Log of the largest match distance. Larger means more -+ * compression, and more memory needed during decompression. -+ * @chainLog: Fully searched segment. Larger means more compression, slower, -+ * and more memory (useless for fast). -+ * @hashLog: Dispatch table. Larger means more compression, -+ * slower, and more memory. -+ * @searchLog: Number of searches. Larger means more compression and slower. -+ * @searchLength: Match length searched. Larger means faster decompression, -+ * sometimes less compression. -+ * @targetLength: Acceptable match size for optimal parser (only). Larger means -+ * more compression, and slower. -+ * @strategy: The zstd compression strategy. -+ */ -+typedef struct { -+ unsigned int windowLog; -+ unsigned int chainLog; -+ unsigned int hashLog; -+ unsigned int searchLog; -+ unsigned int searchLength; -+ unsigned int targetLength; -+ ZSTD_strategy strategy; -+} ZSTD_compressionParameters; -+ -+/** -+ * struct ZSTD_frameParameters - zstd frame parameters -+ * @contentSizeFlag: Controls whether content size will be present in the frame -+ * header (when known). 
-+ * @checksumFlag: Controls whether a 32-bit checksum is generated at the end -+ * of the frame for error detection. -+ * @noDictIDFlag: Controls whether dictID will be saved into the frame header -+ * when using dictionary compression. -+ * -+ * The default value is all fields set to 0. -+ */ -+typedef struct { -+ unsigned int contentSizeFlag; -+ unsigned int checksumFlag; -+ unsigned int noDictIDFlag; -+} ZSTD_frameParameters; -+ -+/** -+ * struct ZSTD_parameters - zstd parameters -+ * @cParams: The compression parameters. -+ * @fParams: The frame parameters. -+ */ -+typedef struct { -+ ZSTD_compressionParameters cParams; -+ ZSTD_frameParameters fParams; -+} ZSTD_parameters; -+ -+/** -+ * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level -+ * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel(). -+ * @estimatedSrcSize: The estimated source size to compress or 0 if unknown. -+ * @dictSize: The dictionary size or 0 if a dictionary isn't being used. -+ * -+ * Return: The selected ZSTD_compressionParameters. -+ */ -+ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, -+ unsigned long long estimatedSrcSize, size_t dictSize); -+ -+/** -+ * ZSTD_getParams() - returns ZSTD_parameters for selected level -+ * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel(). -+ * @estimatedSrcSize: The estimated source size to compress or 0 if unknown. -+ * @dictSize: The dictionary size or 0 if a dictionary isn't being used. -+ * -+ * The same as ZSTD_getCParams() except also selects the default frame -+ * parameters (all zero). -+ * -+ * Return: The selected ZSTD_parameters. -+ */ -+ZSTD_parameters ZSTD_getParams(int compressionLevel, -+ unsigned long long estimatedSrcSize, size_t dictSize); -+ -+/*-************************************* -+ * Explicit memory management -+ **************************************/ -+ -+/** -+ * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx -+ * @cParams: The compression parameters to be used for compression. -+ * -+ * If multiple compression parameters might be used, the caller must call -+ * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum -+ * size. -+ * -+ * Return: A lower bound on the size of the workspace that is passed to -+ * ZSTD_initCCtx(). -+ */ -+size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams); -+ -+/** -+ * struct ZSTD_CCtx - the zstd compression context -+ * -+ * When compressing many times it is recommended to allocate a context just once -+ * and reuse it for each successive compression operation. -+ */ -+typedef struct ZSTD_CCtx_s ZSTD_CCtx; -+/** -+ * ZSTD_initCCtx() - initialize a zstd compression context -+ * @workspace: The workspace to emplace the context into. It must outlive -+ * the returned context. -+ * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to -+ * determine how large the workspace must be. -+ * -+ * Return: A compression context emplaced into workspace. -+ */ -+ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize); -+ -+/** -+ * ZSTD_compressCCtx() - compress src into dst -+ * @ctx: The context. Must have been initialized with a workspace at -+ * least as large as ZSTD_CCtxWorkspaceBound(params.cParams). -+ * @dst: The buffer to compress src into. -+ * @dstCapacity: The size of the destination buffer. May be any size, but -+ * ZSTD_compressBound(srcSize) is guaranteed to be large enough. -+ * @src: The data to compress. -+ * @srcSize: The size of the data to compress. 
-+ * @params: The parameters to use for compression. See ZSTD_getParams(). -+ * -+ * Return: The compressed size or an error, which can be checked using -+ * ZSTD_isError(). -+ */ -+size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, -+ const void *src, size_t srcSize, ZSTD_parameters params); -+ -+/** -+ * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx -+ * -+ * Return: A lower bound on the size of the workspace that is passed to -+ * ZSTD_initDCtx(). -+ */ -+size_t ZSTD_DCtxWorkspaceBound(void); -+ -+/** -+ * struct ZSTD_DCtx - the zstd decompression context -+ * -+ * When decompressing many times it is recommended to allocate a context just -+ * once and reuse it for each successive decompression operation. -+ */ -+typedef struct ZSTD_DCtx_s ZSTD_DCtx; -+/** -+ * ZSTD_initDCtx() - initialize a zstd decompression context -+ * @workspace: The workspace to emplace the context into. It must outlive -+ * the returned context. -+ * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to -+ * determine how large the workspace must be. -+ * -+ * Return: A decompression context emplaced into workspace. -+ */ -+ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize); -+ -+/** -+ * ZSTD_decompressDCtx() - decompress zstd compressed src into dst -+ * @ctx: The decompression context. -+ * @dst: The buffer to decompress src into. -+ * @dstCapacity: The size of the destination buffer. Must be at least as large -+ * as the decompressed size. If the caller cannot upper bound the -+ * decompressed size, then it's better to use the streaming API. -+ * @src: The zstd compressed data to decompress. Multiple concatenated -+ * frames and skippable frames are allowed. -+ * @srcSize: The exact size of the data to decompress. -+ * -+ * Return: The decompressed size or an error, which can be checked using -+ * ZSTD_isError(). -+ */ -+size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity, -+ const void *src, size_t srcSize); -+ -+/*-************************ -+ * Simple dictionary API -+ **************************/ -+ -+/** -+ * ZSTD_compress_usingDict() - compress src into dst using a dictionary -+ * @ctx: The context. Must have been initialized with a workspace at -+ * least as large as ZSTD_CCtxWorkspaceBound(params.cParams). -+ * @dst: The buffer to compress src into. -+ * @dstCapacity: The size of the destination buffer. May be any size, but -+ * ZSTD_compressBound(srcSize) is guaranteed to be large enough. -+ * @src: The data to compress. -+ * @srcSize: The size of the data to compress. -+ * @dict: The dictionary to use for compression. -+ * @dictSize: The size of the dictionary. -+ * @params: The parameters to use for compression. See ZSTD_getParams(). -+ * -+ * Compression using a predefined dictionary. The same dictionary must be used -+ * during decompression. -+ * -+ * Return: The compressed size or an error, which can be checked using -+ * ZSTD_isError(). -+ */ -+size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, -+ const void *src, size_t srcSize, const void *dict, size_t dictSize, -+ ZSTD_parameters params); -+ -+/** -+ * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary -+ * @ctx: The decompression context. -+ * @dst: The buffer to decompress src into. -+ * @dstCapacity: The size of the destination buffer. Must be at least as large -+ * as the decompressed size. 
If the caller cannot upper bound the -+ * decompressed size, then it's better to use the streaming API. -+ * @src: The zstd compressed data to decompress. Multiple concatenated -+ * frames and skippable frames are allowed. -+ * @srcSize: The exact size of the data to decompress. -+ * @dict: The dictionary to use for decompression. The same dictionary -+ * must've been used to compress the data. -+ * @dictSize: The size of the dictionary. -+ * -+ * Return: The decompressed size or an error, which can be checked using -+ * ZSTD_isError(). -+ */ -+size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity, -+ const void *src, size_t srcSize, const void *dict, size_t dictSize); -+ -+/*-************************** -+ * Fast dictionary API -+ ***************************/ -+ -+/** -+ * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict -+ * @cParams: The compression parameters to be used for compression. -+ * -+ * Return: A lower bound on the size of the workspace that is passed to -+ * ZSTD_initCDict(). -+ */ -+size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams); -+ -+/** -+ * struct ZSTD_CDict - a digested dictionary to be used for compression -+ */ -+typedef struct ZSTD_CDict_s ZSTD_CDict; -+ -+/** -+ * ZSTD_initCDict() - initialize a digested dictionary for compression -+ * @dictBuffer: The dictionary to digest. The buffer is referenced by the -+ * ZSTD_CDict so it must outlive the returned ZSTD_CDict. -+ * @dictSize: The size of the dictionary. -+ * @params: The parameters to use for compression. See ZSTD_getParams(). -+ * @workspace: The workspace. It must outlive the returned ZSTD_CDict. -+ * @workspaceSize: The workspace size. Must be at least -+ * ZSTD_CDictWorkspaceBound(params.cParams). -+ * -+ * When compressing multiple messages / blocks with the same dictionary it is -+ * recommended to load it just once. The ZSTD_CDict merely references the -+ * dictBuffer, so it must outlive the returned ZSTD_CDict. -+ * -+ * Return: The digested dictionary emplaced into workspace. -+ */ -+ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize, -+ ZSTD_parameters params, void *workspace, size_t workspaceSize); -+ -+/** -+ * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict -+ * @ctx: The context. Must have been initialized with a workspace at -+ * least as large as ZSTD_CCtxWorkspaceBound(cParams) where -+ * cParams are the compression parameters used to initialize the -+ * cdict. -+ * @dst: The buffer to compress src into. -+ * @dstCapacity: The size of the destination buffer. May be any size, but -+ * ZSTD_compressBound(srcSize) is guaranteed to be large enough. -+ * @src: The data to compress. -+ * @srcSize: The size of the data to compress. -+ * @cdict: The digested dictionary to use for compression. -+ * @params: The parameters to use for compression. See ZSTD_getParams(). -+ * -+ * Compression using a digested dictionary. The same dictionary must be used -+ * during decompression. -+ * -+ * Return: The compressed size or an error, which can be checked using -+ * ZSTD_isError(). -+ */ -+size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, -+ const void *src, size_t srcSize, const ZSTD_CDict *cdict); -+ -+ -+/** -+ * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict -+ * -+ * Return: A lower bound on the size of the workspace that is passed to -+ * ZSTD_initDDict(). 
-+ */ -+size_t ZSTD_DDictWorkspaceBound(void); -+ -+/** -+ * struct ZSTD_DDict - a digested dictionary to be used for decompression -+ */ -+typedef struct ZSTD_DDict_s ZSTD_DDict; -+ -+/** -+ * ZSTD_initDDict() - initialize a digested dictionary for decompression -+ * @dictBuffer: The dictionary to digest. The buffer is referenced by the -+ * ZSTD_DDict so it must outlive the returned ZSTD_DDict. -+ * @dictSize: The size of the dictionary. -+ * @workspace: The workspace. It must outlive the returned ZSTD_DDict. -+ * @workspaceSize: The workspace size. Must be at least -+ * ZSTD_DDictWorkspaceBound(). -+ * -+ * When decompressing multiple messages / blocks with the same dictionary it is -+ * recommended to load it just once. The ZSTD_DDict merely references the -+ * dictBuffer, so it must outlive the returned ZSTD_DDict. -+ * -+ * Return: The digested dictionary emplaced into workspace. -+ */ -+ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize, -+ void *workspace, size_t workspaceSize); -+ -+/** -+ * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict -+ * @ctx: The decompression context. -+ * @dst: The buffer to decompress src into. -+ * @dstCapacity: The size of the destination buffer. Must be at least as large -+ * as the decompressed size. If the caller cannot upper bound the -+ * decompressed size, then it's better to use the streaming API. -+ * @src: The zstd compressed data to decompress. Multiple concatenated -+ * frames and skippable frames are allowed. -+ * @srcSize: The exact size of the data to decompress. -+ * @ddict: The digested dictionary to use for decompression. The same -+ * dictionary must've been used to compress the data. -+ * -+ * Return: The decompressed size or an error, which can be checked using -+ * ZSTD_isError(). -+ */ -+size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst, -+ size_t dstCapacity, const void *src, size_t srcSize, -+ const ZSTD_DDict *ddict); -+ -+ -+/*-************************** -+ * Streaming -+ ***************************/ -+ -+/** -+ * struct ZSTD_inBuffer - input buffer for streaming -+ * @src: Start of the input buffer. -+ * @size: Size of the input buffer. -+ * @pos: Position where reading stopped. Will be updated. -+ * Necessarily 0 <= pos <= size. -+ */ -+typedef struct ZSTD_inBuffer_s { -+ const void *src; -+ size_t size; -+ size_t pos; -+} ZSTD_inBuffer; -+ -+/** -+ * struct ZSTD_outBuffer - output buffer for streaming -+ * @dst: Start of the output buffer. -+ * @size: Size of the output buffer. -+ * @pos: Position where writing stopped. Will be updated. -+ * Necessarily 0 <= pos <= size. -+ */ -+typedef struct ZSTD_outBuffer_s { -+ void *dst; -+ size_t size; -+ size_t pos; -+} ZSTD_outBuffer; -+ -+ -+ -+/*-***************************************************************************** -+ * Streaming compression - HowTo -+ * -+ * A ZSTD_CStream object is required to track streaming operation. -+ * Use ZSTD_initCStream() to initialize a ZSTD_CStream object. -+ * ZSTD_CStream objects can be reused multiple times on consecutive compression -+ * operations. It is recommended to re-use ZSTD_CStream in situations where many -+ * streaming operations will be achieved consecutively. Use one separate -+ * ZSTD_CStream per thread for parallel execution. -+ * -+ * Use ZSTD_compressStream() repetitively to consume input stream. -+ * The function will automatically update both `pos` fields. 
-+ * Note that it may not consume the entire input, in which case `pos < size`, -+ * and it's up to the caller to present again remaining data. -+ * It returns a hint for the preferred number of bytes to use as an input for -+ * the next function call. -+ * -+ * At any moment, it's possible to flush whatever data remains within internal -+ * buffer, using ZSTD_flushStream(). `output->pos` will be updated. There might -+ * still be some content left within the internal buffer if `output->size` is -+ * too small. It returns the number of bytes left in the internal buffer and -+ * must be called until it returns 0. -+ * -+ * ZSTD_endStream() instructs to finish a frame. It will perform a flush and -+ * write frame epilogue. The epilogue is required for decoders to consider a -+ * frame completed. Similar to ZSTD_flushStream(), it may not be able to flush -+ * the full content if `output->size` is too small. In which case, call again -+ * ZSTD_endStream() to complete the flush. It returns the number of bytes left -+ * in the internal buffer and must be called until it returns 0. -+ ******************************************************************************/ -+ -+/** -+ * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream -+ * @cParams: The compression parameters to be used for compression. -+ * -+ * Return: A lower bound on the size of the workspace that is passed to -+ * ZSTD_initCStream() and ZSTD_initCStream_usingCDict(). -+ */ -+size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams); -+ -+/** -+ * struct ZSTD_CStream - the zstd streaming compression context -+ */ -+typedef struct ZSTD_CStream_s ZSTD_CStream; -+ -+/*===== ZSTD_CStream management functions =====*/ -+/** -+ * ZSTD_initCStream() - initialize a zstd streaming compression context -+ * @params: The zstd compression parameters. -+ * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must -+ * pass the source size (zero means empty source). Otherwise, -+ * the caller may optionally pass the source size, or zero if -+ * unknown. -+ * @workspace: The workspace to emplace the context into. It must outlive -+ * the returned context. -+ * @workspaceSize: The size of workspace. -+ * Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine -+ * how large the workspace must be. -+ * -+ * Return: The zstd streaming compression context. -+ */ -+ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, -+ unsigned long long pledgedSrcSize, void *workspace, -+ size_t workspaceSize); -+ -+/** -+ * ZSTD_initCStream_usingCDict() - initialize a streaming compression context -+ * @cdict: The digested dictionary to use for compression. -+ * @pledgedSrcSize: Optionally the source size, or zero if unknown. -+ * @workspace: The workspace to emplace the context into. It must outlive -+ * the returned context. -+ * @workspaceSize: The size of workspace. Call ZSTD_CStreamWorkspaceBound() -+ * with the cParams used to initialize the cdict to determine -+ * how large the workspace must be. -+ * -+ * Return: The zstd streaming compression context. -+ */ -+ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, -+ unsigned long long pledgedSrcSize, void *workspace, -+ size_t workspaceSize); -+ -+/*===== Streaming compression functions =====*/ -+/** -+ * ZSTD_resetCStream() - reset the context using parameters from creation -+ * @zcs: The zstd streaming compression context to reset. -+ * @pledgedSrcSize: Optionally the source size, or zero if unknown. 
-+ * -+ * Resets the context using the parameters from creation. Skips dictionary -+ * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame -+ * content size is always written into the frame header. -+ * -+ * Return: Zero or an error, which can be checked using ZSTD_isError(). -+ */ -+size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize); -+/** -+ * ZSTD_compressStream() - streaming compress some of input into output -+ * @zcs: The zstd streaming compression context. -+ * @output: Destination buffer. `output->pos` is updated to indicate how much -+ * compressed data was written. -+ * @input: Source buffer. `input->pos` is updated to indicate how much data was -+ * read. Note that it may not consume the entire input, in which case -+ * `input->pos < input->size`, and it's up to the caller to present -+ * remaining data again. -+ * -+ * The `input` and `output` buffers may be any size. Guaranteed to make some -+ * forward progress if `input` and `output` are not empty. -+ * -+ * Return: A hint for the number of bytes to use as the input for the next -+ * function call or an error, which can be checked using -+ * ZSTD_isError(). -+ */ -+size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, -+ ZSTD_inBuffer *input); -+/** -+ * ZSTD_flushStream() - flush internal buffers into output -+ * @zcs: The zstd streaming compression context. -+ * @output: Destination buffer. `output->pos` is updated to indicate how much -+ * compressed data was written. -+ * -+ * ZSTD_flushStream() must be called until it returns 0, meaning all the data -+ * has been flushed. Since ZSTD_flushStream() causes a block to be ended, -+ * calling it too often will degrade the compression ratio. -+ * -+ * Return: The number of bytes still present within internal buffers or an -+ * error, which can be checked using ZSTD_isError(). -+ */ -+size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output); -+/** -+ * ZSTD_endStream() - flush internal buffers into output and end the frame -+ * @zcs: The zstd streaming compression context. -+ * @output: Destination buffer. `output->pos` is updated to indicate how much -+ * compressed data was written. -+ * -+ * ZSTD_endStream() must be called until it returns 0, meaning all the data has -+ * been flushed and the frame epilogue has been written. -+ * -+ * Return: The number of bytes still present within internal buffers or an -+ * error, which can be checked using ZSTD_isError(). -+ */ -+size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output); -+ -+/** -+ * ZSTD_CStreamInSize() - recommended size for the input buffer -+ * -+ * Return: The recommended size for the input buffer. -+ */ -+size_t ZSTD_CStreamInSize(void); -+/** -+ * ZSTD_CStreamOutSize() - recommended size for the output buffer -+ * -+ * When the output buffer is at least this large, it is guaranteed to be large -+ * enough to flush at least one complete compressed block. -+ * -+ * Return: The recommended size for the output buffer. -+ */ -+size_t ZSTD_CStreamOutSize(void); -+ -+ -+ -+/*-***************************************************************************** -+ * Streaming decompression - HowTo -+ * -+ * A ZSTD_DStream object is required to track streaming operations. -+ * Use ZSTD_initDStream() to initialize a ZSTD_DStream object. -+ * ZSTD_DStream objects can be re-used multiple times. -+ * -+ * Use ZSTD_decompressStream() repetitively to consume your input. -+ * The function will update both `pos` fields. 
-+ * If `input->pos < input->size`, some input has not been consumed. -+ * It's up to the caller to present again remaining data. -+ * If `output->pos < output->size`, decoder has flushed everything it could. -+ * Returns 0 iff a frame is completely decoded and fully flushed. -+ * Otherwise it returns a suggested next input size that will never load more -+ * than the current frame. -+ ******************************************************************************/ -+ -+/** -+ * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream -+ * @maxWindowSize: The maximum window size allowed for compressed frames. -+ * -+ * Return: A lower bound on the size of the workspace that is passed to -+ * ZSTD_initDStream() and ZSTD_initDStream_usingDDict(). -+ */ -+size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize); -+ -+/** -+ * struct ZSTD_DStream - the zstd streaming decompression context -+ */ -+typedef struct ZSTD_DStream_s ZSTD_DStream; -+/*===== ZSTD_DStream management functions =====*/ -+/** -+ * ZSTD_initDStream() - initialize a zstd streaming decompression context -+ * @maxWindowSize: The maximum window size allowed for compressed frames. -+ * @workspace: The workspace to emplace the context into. It must outlive -+ * the returned context. -+ * @workspaceSize: The size of workspace. -+ * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine -+ * how large the workspace must be. -+ * -+ * Return: The zstd streaming decompression context. -+ */ -+ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace, -+ size_t workspaceSize); -+/** -+ * ZSTD_initDStream_usingDDict() - initialize streaming decompression context -+ * @maxWindowSize: The maximum window size allowed for compressed frames. -+ * @ddict: The digested dictionary to use for decompression. -+ * @workspace: The workspace to emplace the context into. It must outlive -+ * the returned context. -+ * @workspaceSize: The size of workspace. -+ * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine -+ * how large the workspace must be. -+ * -+ * Return: The zstd streaming decompression context. -+ */ -+ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize, -+ const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize); -+ -+/*===== Streaming decompression functions =====*/ -+/** -+ * ZSTD_resetDStream() - reset the context using parameters from creation -+ * @zds: The zstd streaming decompression context to reset. -+ * -+ * Resets the context using the parameters from creation. Skips dictionary -+ * loading, since it can be reused. -+ * -+ * Return: Zero or an error, which can be checked using ZSTD_isError(). -+ */ -+size_t ZSTD_resetDStream(ZSTD_DStream *zds); -+/** -+ * ZSTD_decompressStream() - streaming decompress some of input into output -+ * @zds: The zstd streaming decompression context. -+ * @output: Destination buffer. `output.pos` is updated to indicate how much -+ * decompressed data was written. -+ * @input: Source buffer. `input.pos` is updated to indicate how much data was -+ * read. Note that it may not consume the entire input, in which case -+ * `input.pos < input.size`, and it's up to the caller to present -+ * remaining data again. -+ * -+ * The `input` and `output` buffers may be any size. Guaranteed to make some -+ * forward progress if `input` and `output` are not empty. -+ * ZSTD_decompressStream() will not consume the last byte of the frame until -+ * the entire frame is flushed. -+ * -+ * Return: Returns 0 iff a frame is completely decoded and fully flushed. 
-+ * Otherwise returns a hint for the number of bytes to use as the input -+ * for the next function call or an error, which can be checked using -+ * ZSTD_isError(). The size hint will never load more than the frame. -+ */ -+size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, -+ ZSTD_inBuffer *input); -+ -+/** -+ * ZSTD_DStreamInSize() - recommended size for the input buffer -+ * -+ * Return: The recommended size for the input buffer. -+ */ -+size_t ZSTD_DStreamInSize(void); -+/** -+ * ZSTD_DStreamOutSize() - recommended size for the output buffer -+ * -+ * When the output buffer is at least this large, it is guaranteed to be large -+ * enough to flush at least one complete decompressed block. -+ * -+ * Return: The recommended size for the output buffer. -+ */ -+size_t ZSTD_DStreamOutSize(void); -+ -+ -+/* --- Constants ---*/ -+#define ZSTD_MAGICNUMBER 0xFD2FB528 /* >= v0.8.0 */ -+#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U -+ -+#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1) -+#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2) -+ -+#define ZSTD_WINDOWLOG_MAX_32 27 -+#define ZSTD_WINDOWLOG_MAX_64 27 -+#define ZSTD_WINDOWLOG_MAX \ -+ ((unsigned int)(sizeof(size_t) == 4 \ -+ ? ZSTD_WINDOWLOG_MAX_32 \ -+ : ZSTD_WINDOWLOG_MAX_64)) -+#define ZSTD_WINDOWLOG_MIN 10 -+#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX -+#define ZSTD_HASHLOG_MIN 6 -+#define ZSTD_CHAINLOG_MAX (ZSTD_WINDOWLOG_MAX+1) -+#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN -+#define ZSTD_HASHLOG3_MAX 17 -+#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1) -+#define ZSTD_SEARCHLOG_MIN 1 -+/* only for ZSTD_fast, other strategies are limited to 6 */ -+#define ZSTD_SEARCHLENGTH_MAX 7 -+/* only for ZSTD_btopt, other strategies are limited to 4 */ -+#define ZSTD_SEARCHLENGTH_MIN 3 -+#define ZSTD_TARGETLENGTH_MIN 4 -+#define ZSTD_TARGETLENGTH_MAX 999 -+ -+/* for static allocation */ -+#define ZSTD_FRAMEHEADERSIZE_MAX 18 -+#define ZSTD_FRAMEHEADERSIZE_MIN 6 -+static const size_t ZSTD_frameHeaderSize_prefix = 5; -+static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN; -+static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX; -+/* magic number + skippable frame length */ -+static const size_t ZSTD_skippableHeaderSize = 8; -+ -+ -+/*-************************************* -+ * Compressed size functions -+ **************************************/ -+ -+/** -+ * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame -+ * @src: Source buffer. It should point to the start of a zstd encoded frame -+ * or a skippable frame. -+ * @srcSize: The size of the source buffer. It must be at least as large as the -+ * size of the frame. -+ * -+ * Return: The compressed size of the frame pointed to by `src` or an error, -+ * which can be check with ZSTD_isError(). -+ * Suitable to pass to ZSTD_decompress() or similar functions. -+ */ -+size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize); -+ -+/*-************************************* -+ * Decompressed size functions -+ **************************************/ -+/** -+ * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header -+ * @src: It should point to the start of a zstd encoded frame. -+ * @srcSize: The size of the source buffer. It must be at least as large as the -+ * frame header. `ZSTD_frameHeaderSize_max` is always large enough. -+ * -+ * Return: The frame content size stored in the frame header if known. -+ * `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the -+ * frame header. 
`ZSTD_CONTENTSIZE_ERROR` on invalid input. -+ */ -+unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize); -+ -+/** -+ * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames -+ * @src: It should point to the start of a series of zstd encoded and/or -+ * skippable frames. -+ * @srcSize: The exact size of the series of frames. -+ * -+ * If any zstd encoded frame in the series doesn't have the frame content size -+ * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned. But frame content size is always -+ * set when using ZSTD_compress(). The decompressed size can be very large. -+ * If the source is untrusted, the decompressed size could be wrong or -+ * intentionally modified. Always ensure the result fits within the -+ * application's authorized limits. ZSTD_findDecompressedSize() handles multiple -+ * frames, and so it must traverse the input to read each frame header. This is -+ * efficient as most of the data is skipped, however it does mean that all frame -+ * data must be present and valid. -+ * -+ * Return: Decompressed size of all the data contained in the frames if known. -+ * `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown. -+ * `ZSTD_CONTENTSIZE_ERROR` if an error occurred. -+ */ -+unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize); -+ -+/*-************************************* -+ * Advanced compression functions -+ **************************************/ -+/** -+ * ZSTD_checkCParams() - ensure parameter values remain within authorized range -+ * @cParams: The zstd compression parameters. -+ * -+ * Return: Zero or an error, which can be checked using ZSTD_isError(). -+ */ -+size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams); -+ -+/** -+ * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize -+ * @srcSize: Optionally the estimated source size, or zero if unknown. -+ * @dictSize: Optionally the estimated dictionary size, or zero if unknown. -+ * -+ * Return: The optimized parameters. -+ */ -+ZSTD_compressionParameters ZSTD_adjustCParams( -+ ZSTD_compressionParameters cParams, unsigned long long srcSize, -+ size_t dictSize); -+ -+/*--- Advanced decompression functions ---*/ -+ -+/** -+ * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame -+ * @buffer: The source buffer to check. -+ * @size: The size of the source buffer, must be at least 4 bytes. -+ * -+ * Return: True iff the buffer starts with a zstd or skippable frame identifier. -+ */ -+unsigned int ZSTD_isFrame(const void *buffer, size_t size); -+ -+/** -+ * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary -+ * @dict: The dictionary buffer. -+ * @dictSize: The size of the dictionary buffer. -+ * -+ * Return: The dictionary id stored within the dictionary or 0 if the -+ * dictionary is not a zstd dictionary. If it returns 0 the -+ * dictionary can still be loaded as a content-only dictionary. -+ */ -+unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize); -+ -+/** -+ * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict -+ * @ddict: The ddict to find the id of. -+ * -+ * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not -+ * a zstd dictionary. If it returns 0 `ddict` will be loaded as a -+ * content-only dictionary. 
-+ */
-+unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict);
-+
-+/**
-+ * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame
-+ * @src:     Source buffer. It must be a zstd encoded frame.
-+ * @srcSize: The size of the source buffer. It must be at least as large as the
-+ *           frame header. `ZSTD_frameHeaderSize_max` is always large enough.
-+ *
-+ * Return:   The dictionary id required to decompress the frame stored within
-+ *           `src` or 0 if the dictionary id could not be decoded. It can return
-+ *           0 if the frame does not require a dictionary, the dictionary id
-+ *           wasn't stored in the frame, `src` is not a zstd frame, or `srcSize`
-+ *           is too small.
-+ */
-+unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize);
-+
-+/**
-+ * struct ZSTD_frameParams - zstd frame parameters stored in the frame header
-+ * @frameContentSize: The frame content size, or 0 if not present.
-+ * @windowSize:       The window size, or 0 if the frame is a skippable frame.
-+ * @dictID:           The dictionary id, or 0 if not present.
-+ * @checksumFlag:     Whether a checksum was used.
-+ */
-+typedef struct {
-+	unsigned long long frameContentSize;
-+	unsigned int windowSize;
-+	unsigned int dictID;
-+	unsigned int checksumFlag;
-+} ZSTD_frameParams;
-+
-+/**
-+ * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame
-+ * @fparamsPtr: On success the frame parameters are written here.
-+ * @src:        The source buffer. It must point to a zstd or skippable frame.
-+ * @srcSize:    The size of the source buffer. `ZSTD_frameHeaderSize_max` is
-+ *              always large enough to succeed.
-+ *
-+ * Return:      0 on success. If more data is required it returns how many bytes
-+ *              must be provided to make forward progress. Otherwise it returns
-+ *              an error, which can be checked using ZSTD_isError().
-+ */
-+size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src,
-+	size_t srcSize);
-+
-+/*-*****************************************************************************
-+ * Buffer-less and synchronous inner streaming functions
-+ *
-+ * This is an advanced API, giving full control over buffer management, for
-+ * users who need direct control over memory.
-+ * But it's also a complex one, with many restrictions (documented below).
-+ * Prefer using the normal streaming API for an easier experience.
-+ ******************************************************************************/
-+
-+/*-*****************************************************************************
-+ * Buffer-less streaming compression (synchronous mode)
-+ *
-+ * A ZSTD_CCtx object is required to track streaming operations.
-+ * Use ZSTD_initCCtx() to initialize a context.
-+ * A ZSTD_CCtx object can be re-used multiple times within successive
-+ * compression operations.
-+ *
-+ * Start by initializing a context.
-+ * Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary
-+ * compression, or ZSTD_compressBegin_advanced() for finer parameter control.
-+ * It's also possible to duplicate a reference context which has already been
-+ * initialized, using ZSTD_copyCCtx().
-+ *
-+ * Then, consume your input using ZSTD_compressContinue().
-+ * There are some important considerations to keep in mind when using this
-+ * advanced function:
-+ * - ZSTD_compressContinue() has no internal buffer. It uses externally provided
-+ *   buffers only.
-+ * - The interface is synchronous: input is consumed entirely and produces one
-+ *   (or more) compressed blocks.
-+ * - The caller must ensure there is enough space in `dst` to store compressed
-+ *   data under the worst case scenario. Worst case evaluation is provided by
-+ *   ZSTD_compressBound().
-+ *   ZSTD_compressContinue() doesn't guarantee recovery after a failed
-+ *   compression.
-+ * - ZSTD_compressContinue() presumes prior input ***is still accessible and
-+ *   unmodified*** (up to maximum distance size, see WindowLog).
-+ *   It remembers all previous contiguous blocks, plus one separated memory
-+ *   segment (which can itself consist of multiple contiguous blocks).
-+ * - ZSTD_compressContinue() detects that prior input has been overwritten when
-+ *   the `src` buffer overlaps. In that case, it will "discard" the relevant
-+ *   memory section from its history.
-+ *
-+ * Finish a frame with ZSTD_compressEnd(), which will write the last block(s)
-+ * and optional checksum. It's possible to use srcSize==0, in which case it
-+ * will write a final empty block to end the frame. Without the last block
-+ * mark, frames will be considered unfinished (corrupted) by decoders.
-+ *
-+ * A `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress a new
-+ * frame.
-+ ******************************************************************************/
-+
-+/*===== Buffer-less streaming compression functions =====*/
-+size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel);
-+size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict,
-+	size_t dictSize, int compressionLevel);
-+size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict,
-+	size_t dictSize, ZSTD_parameters params,
-+	unsigned long long pledgedSrcSize);
-+size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx,
-+	unsigned long long pledgedSrcSize);
-+size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict,
-+	unsigned long long pledgedSrcSize);
-+size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
-+	const void *src, size_t srcSize);
-+size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
-+	const void *src, size_t srcSize);
-+
-+
-+
-+/*-*****************************************************************************
-+ * Buffer-less streaming decompression (synchronous mode)
-+ *
-+ * A ZSTD_DCtx object is required to track streaming operations.
-+ * Use ZSTD_initDCtx() to initialize a context.
-+ * A ZSTD_DCtx object can be re-used multiple times.
-+ *
-+ * The first typical operation is to retrieve frame parameters, using
-+ * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provides
-+ * important information to correctly decode the frame, such as the minimum
-+ * rolling buffer size to allocate to decompress data (`windowSize`), and the
-+ * dictionary ID used.
-+ * Note: the content size is optional; it may not be present. 0 means unknown.
-+ * Note that these values could be wrong, either because of data malformation,
-+ * or because an attacker is deliberately spoofing false information. As a
-+ * consequence, check that values remain within a valid application range,
-+ * especially `windowSize`, before allocation. Each application can set its own
-+ * limit, depending on local restrictions. For extended interoperability, it is
-+ * recommended to support at least 8 MB.
-+ * Frame parameters are extracted from the beginning of the compressed frame.
-+ * The data fragment must be large enough to ensure successful decoding,
-+ * typically `ZSTD_frameHeaderSize_max` bytes.
-+ * Result:  0: successful decoding; the `ZSTD_frameParams` structure is filled.
-+ *         >0: `srcSize` is too small; provide at least this many bytes.
-+ *             Otherwise: an error code, which can be tested using
-+ *             ZSTD_isError().
-+ *
-+ * Start decompression with ZSTD_decompressBegin() or
-+ * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared
-+ * context, using ZSTD_copyDCtx().
-+ *
-+ * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue()
-+ * alternately.
-+ * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize'
-+ * to ZSTD_decompressContinue().
-+ * ZSTD_decompressContinue() requires this _exact_ number of bytes, or it will
-+ * fail.
-+ *
-+ * The result of ZSTD_decompressContinue() is the number of bytes regenerated
-+ * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an
-+ * error; it just means ZSTD_decompressContinue() has decoded some metadata
-+ * item. It can also be an error code, which can be tested with ZSTD_isError().
-+ *
-+ * ZSTD_decompressContinue() needs previous data blocks during decompression,
-+ * up to `windowSize`. They should preferably be located contiguously, prior to
-+ * the current block. Alternatively, a round buffer of sufficient size is also
-+ * possible. Sufficient size is determined by frame parameters.
-+ * ZSTD_decompressContinue() is very sensitive to contiguity: if 2 blocks don't
-+ * follow each other, make sure that either the compressor breaks contiguity at
-+ * the same place, or that the previous contiguous segment is large enough to
-+ * properly handle the maximum back-reference.
-+ *
-+ * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
-+ * The context can then be reset to start a new decompression.
-+ *
-+ * Note: it is possible to know whether the next input to present is a header
-+ * or a block, using ZSTD_nextInputType(). This information is not required to
-+ * properly decode a frame.
-+ *
-+ * == Special case: skippable frames ==
-+ *
-+ * Skippable frames allow the integration of user-defined data into a flow of
-+ * concatenated frames. Skippable frames will be ignored (skipped) by a
-+ * decompressor. The format of skippable frames is as follows:
-+ * a) Skippable frame ID - 4 Bytes, little-endian format, any value from
-+ *    0x184D2A50 to 0x184D2A5F
-+ * b) Frame Size - 4 Bytes, little-endian format, unsigned 32 bits
-+ * c) Frame Content - any content (User Data) of length equal to Frame Size
-+ * For skippable frames ZSTD_decompressContinue() always returns 0.
-+ * For skippable frames ZSTD_getFrameParams() returns
-+ * fparamsPtr->windowSize==0, which means that the frame is skippable. It also
-+ * returns the frame size in fparamsPtr->frameContentSize.
-+ * Note: if fparamsPtr->frameContentSize==0, it is ambiguous: the frame might
-+ * actually be a zstd encoded frame with no content. For purposes of
-+ * decompression, it is valid in both cases to skip the frame using
-+ * ZSTD_findFrameCompressedSize() to find its size in bytes.
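Putting the pieces above together, a minimal sketch of decoding one frame held contiguously in memory. It is illustrative only, not part of the patch: the 8 MB `windowSize` check stands in for whatever limit the application enforces, and `dst` is assumed large enough for the whole regenerated content.

/* Illustrative sketch: read the frame header, validate windowSize, then
 * run the nextSrcSizeToDecompress()/decompressContinue() loop. Assumes
 * `dctx` was set up via ZSTD_initDCtx(). */
static size_t stream_decompress_sketch(ZSTD_DCtx *dctx,
        void *dst, size_t dstCapacity,
        const void *src, size_t srcSize)
{
    ZSTD_frameParams params;
    const char *ip = (const char *)src;
    char *op = (char *)dst;
    size_t ret = ZSTD_getFrameParams(&params, src, srcSize);

    if (ret != 0)                   /* error, or more header bytes needed */
        return ret;
    if (params.windowSize > 8 * 1024 * 1024)
        return (size_t)-1;          /* application-chosen 8 MB limit */
    ret = ZSTD_decompressBegin(dctx);
    if (ZSTD_isError(ret))
        return ret;
    while (1) {
        size_t const need = ZSTD_nextSrcSizeToDecompress(dctx);
        size_t out;

        if (need == 0)
            break;                  /* frame fully decoded */
        if (need > srcSize)
            return (size_t)-1;      /* truncated input */
        /* Exactly `need` bytes must be provided, no more, no less. */
        out = ZSTD_decompressContinue(dctx, op, dstCapacity, ip, need);
        if (ZSTD_isError(out))
            return out;
        ip += need;
        srcSize -= need;
        op += out;                  /* `out` can be 0 (metadata item) */
        dstCapacity -= out;
    }
    return (size_t)(op - (char *)dst);
}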
-+ ******************************************************************************/
-+
-+/*===== Buffer-less streaming decompression functions =====*/
-+size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx);
-+size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict,
-+		size_t dictSize);
-+void ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx);
-+size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx);
-+size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
-+		const void *src, size_t srcSize);
-+typedef enum {
-+	ZSTDnit_frameHeader,
-+	ZSTDnit_blockHeader,
-+	ZSTDnit_block,
-+	ZSTDnit_lastBlock,
-+	ZSTDnit_checksum,
-+	ZSTDnit_skippableFrame
-+} ZSTD_nextInputType_e;
-+ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx);
-+
-+/*-*****************************************************************************
-+ * Block functions
-+ *
-+ * Block functions produce and decode raw zstd blocks, without frame metadata.
-+ * Frame metadata cost is typically ~18 bytes, which can be non-negligible for
-+ * very small blocks (< 100 bytes). The user is responsible for saving the
-+ * information required to regenerate the data, such as the compressed and
-+ * content sizes.
-+ *
-+ * A few rules to respect:
-+ * - Compressing and decompressing require a context structure
-+ *   + Use ZSTD_initCCtx() and ZSTD_initDCtx()
-+ * - It is necessary to initialize the context before starting
-+ *   + compression : ZSTD_compressBegin()
-+ *   + decompression : ZSTD_decompressBegin()
-+ *   + variants _usingDict() are also allowed
-+ *   + copyCCtx() and copyDCtx() work too
-+ * - Block size is limited; it must be <= ZSTD_getBlockSizeMax()
-+ *   + If you need to compress more, cut data into multiple blocks
-+ *   + Consider using the regular ZSTD_compress() instead, as frame metadata
-+ *     costs become negligible when the source size is large.
-+ * - When a block is considered not compressible enough, the result of
-+ *   ZSTD_compressBlock() will be zero, in which case nothing is produced
-+ *   into `dst`.
-+ *   + The user must test for such an outcome and deal directly with the
-+ *     uncompressed data
-+ *   + ZSTD_decompressBlock() does not accept uncompressed data as input!
-+ *   + In the case of multiple successive blocks, the decoder must be informed
-+ *     of the uncompressed block's existence to maintain proper history. Use
-+ *     ZSTD_insertBlock() in such a case (a sketch follows below).
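The sketch referenced above handles the incompressible case on the compress side. It is illustrative only, not part of the patch: it assumes the context was already initialized with ZSTD_compressBegin(), that memcpy() is available (e.g. via <linux/string.h> in the kernel), and it leaves the "is this block raw?" flag to the caller's own container format.

/* Illustrative sketch: compress one raw block, storing the data
 * uncompressed when ZSTD_compressBlock() returns 0. */
static size_t block_compress_sketch(ZSTD_CCtx *cctx,
        void *dst, size_t dstCapacity,
        const void *src, size_t srcSize)
{
    size_t cSize;

    if (srcSize > ZSTD_getBlockSizeMax(cctx))
        return (size_t)-1;      /* caller must split the input */
    cSize = ZSTD_compressBlock(cctx, dst, dstCapacity, src, srcSize);
    if (ZSTD_isError(cSize))
        return cSize;
    if (cSize == 0) {           /* not compressible: store raw */
        if (srcSize > dstCapacity)
            return (size_t)-1;
        memcpy(dst, src, srcSize);
        /* The decoder must be told about this raw block via
         * ZSTD_insertBlock() so its history stays consistent. */
        return srcSize;
    }
    return cSize;
}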
-+ ******************************************************************************/ -+ -+/* Define for static allocation */ -+#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024) -+/*===== Raw zstd block functions =====*/ -+size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx); -+size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, -+ const void *src, size_t srcSize); -+size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, -+ const void *src, size_t srcSize); -+size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart, -+ size_t blockSize); -+ -+#endif /* ZSTD_H */ -diff --git a/lib/Kconfig b/lib/Kconfig -index 5e7541f..0d49ed0 100644 ---- a/lib/Kconfig -+++ b/lib/Kconfig -@@ -249,6 +249,14 @@ config LZ4HC_COMPRESS - config LZ4_DECOMPRESS - tristate - -+config ZSTD_COMPRESS -+ select XXHASH -+ tristate -+ -+config ZSTD_DECOMPRESS -+ select XXHASH -+ tristate -+ - source "lib/xz/Kconfig" - - # -diff --git a/lib/Makefile b/lib/Makefile -index d06b68a..d5c8a4f 100644 ---- a/lib/Makefile -+++ b/lib/Makefile -@@ -116,6 +116,8 @@ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ - obj-$(CONFIG_LZ4_COMPRESS) += lz4/ - obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/ - obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/ -+obj-$(CONFIG_ZSTD_COMPRESS) += zstd/ -+obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd/ - obj-$(CONFIG_XZ_DEC) += xz/ - obj-$(CONFIG_RAID6_PQ) += raid6/ - -diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile -new file mode 100644 -index 0000000..dd0a359 ---- /dev/null -+++ b/lib/zstd/Makefile -@@ -0,0 +1,18 @@ -+obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o -+obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o -+ -+ccflags-y += -O3 -+ -+# Object files unique to zstd_compress and zstd_decompress -+zstd_compress-y := fse_compress.o huf_compress.o compress.o -+zstd_decompress-y := huf_decompress.o decompress.o -+ -+# These object files are shared between the modules. -+# Always add them to zstd_compress. -+# Unless both zstd_compress and zstd_decompress are built in -+# then also add them to zstd_decompress. -+zstd_compress-y += entropy_common.o fse_decompress.o zstd_common.o -+ -+ifneq ($(CONFIG_ZSTD_COMPRESS)$(CONFIG_ZSTD_DECOMPRESS),yy) -+ zstd_decompress-y += entropy_common.o fse_decompress.o zstd_common.o -+endif -diff --git a/lib/zstd/bitstream.h b/lib/zstd/bitstream.h -new file mode 100644 -index 0000000..a826b99 ---- /dev/null -+++ b/lib/zstd/bitstream.h -@@ -0,0 +1,374 @@ -+/* -+ * bitstream -+ * Part of FSE library -+ * header file (to include) -+ * Copyright (C) 2013-2016, Yann Collet. -+ * -+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are -+ * met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above -+ * copyright notice, this list of conditions and the following disclaimer -+ * in the documentation and/or other materials provided with the -+ * distribution. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ * -+ * This program is free software; you can redistribute it and/or modify it under -+ * the terms of the GNU General Public License version 2 as published by the -+ * Free Software Foundation. This program is dual-licensed; you may select -+ * either version 2 of the GNU General Public License ("GPL") or BSD license -+ * ("BSD"). -+ * -+ * You can contact the author at : -+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy -+ */ -+#ifndef BITSTREAM_H_MODULE -+#define BITSTREAM_H_MODULE -+ -+/* -+* This API consists of small unitary functions, which must be inlined for best performance. -+* Since link-time-optimization is not available for all compilers, -+* these functions are defined into a .h to be included. -+*/ -+ -+/*-**************************************** -+* Dependencies -+******************************************/ -+#include "error_private.h" /* error codes and messages */ -+#include "mem.h" /* unaligned access routines */ -+ -+/*========================================= -+* Target specific -+=========================================*/ -+#define STREAM_ACCUMULATOR_MIN_32 25 -+#define STREAM_ACCUMULATOR_MIN_64 57 -+#define STREAM_ACCUMULATOR_MIN ((U32)(ZSTD_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64)) -+ -+/*-****************************************** -+* bitStream encoding API (write forward) -+********************************************/ -+/* bitStream can mix input from multiple sources. -+* A critical property of these streams is that they encode and decode in **reverse** direction. -+* So the first bit sequence you add will be the last to be read, like a LIFO stack. -+*/ -+typedef struct { -+ size_t bitContainer; -+ int bitPos; -+ char *startPtr; -+ char *ptr; -+ char *endPtr; -+} BIT_CStream_t; -+ -+ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *dstBuffer, size_t dstCapacity); -+ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits); -+ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC); -+ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC); -+ -+/* Start with initCStream, providing the size of buffer to write into. -+* bitStream will never write outside of this buffer. -+* `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code. -+* -+* bits are first added to a local register. -+* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems. -+* Writing data into memory is an explicit operation, performed by the flushBits function. -+* Hence keep track how many bits are potentially stored into local register to avoid register overflow. -+* After a flushBits, a maximum of 7 bits might still be stored into local register. -+* -+* Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers. -+* -+* Last operation is to close the bitStream. -+* The function returns the final size of CStream in bytes. 
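As a concrete illustration of this write-side convention, a minimal sketch (not part of the patch; the field widths and values are arbitrary):

/* Illustrative sketch: write two fields and close the stream. Fields
 * come back out in reverse (LIFO) order on the read side. */
static size_t bit_write_sketch(void *dst, size_t dstCapacity)
{
    BIT_CStream_t bitC;
    size_t const ret = BIT_initCStream(&bitC, dst, dstCapacity);

    if (ERR_isError(ret))
        return ret;                 /* dstCapacity too small */
    BIT_addBits(&bitC, 5, 3);       /* value 5, 3 bits wide */
    BIT_addBits(&bitC, 1, 1);       /* value 1, 1 bit wide */
    BIT_flushBits(&bitC);           /* commit the local register */
    return BIT_closeCStream(&bitC); /* 0 means "did not fit" */
}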
-+* If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable) -+*/ -+ -+/*-******************************************** -+* bitStream decoding API (read backward) -+**********************************************/ -+typedef struct { -+ size_t bitContainer; -+ unsigned bitsConsumed; -+ const char *ptr; -+ const char *start; -+} BIT_DStream_t; -+ -+typedef enum { -+ BIT_DStream_unfinished = 0, -+ BIT_DStream_endOfBuffer = 1, -+ BIT_DStream_completed = 2, -+ BIT_DStream_overflow = 3 -+} BIT_DStream_status; /* result of BIT_reloadDStream() */ -+/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ -+ -+ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize); -+ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, unsigned nbBits); -+ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD); -+ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *bitD); -+ -+/* Start by invoking BIT_initDStream(). -+* A chunk of the bitStream is then stored into a local register. -+* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t). -+* You can then retrieve bitFields stored into the local register, **in reverse order**. -+* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method. -+* A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished. -+* Otherwise, it can be less than that, so proceed accordingly. -+* Checking if DStream has reached its end can be performed with BIT_endOfDStream(). -+*/ -+ -+/*-**************************************** -+* unsafe API -+******************************************/ -+ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits); -+/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */ -+ -+ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC); -+/* unsafe version; does not check buffer overflow */ -+ -+ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, unsigned nbBits); -+/* faster, but works only if nbBits >= 1 */ -+ -+/*-************************************************************** -+* Internal functions -+****************************************************************/ -+ZSTD_STATIC unsigned BIT_highbit32(register U32 val) { return 31 - __builtin_clz(val); } -+ -+/*===== Local Constants =====*/ -+static const unsigned BIT_mask[] = {0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, -+ 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, -+ 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF}; /* up to 26 bits */ -+ -+/*-************************************************************** -+* bitStream encoding -+****************************************************************/ -+/*! BIT_initCStream() : -+ * `dstCapacity` must be > sizeof(void*) -+ * @return : 0 if success, -+ otherwise an error code (can be tested using ERR_isError() ) */ -+ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *startPtr, size_t dstCapacity) -+{ -+ bitC->bitContainer = 0; -+ bitC->bitPos = 0; -+ bitC->startPtr = (char *)startPtr; -+ bitC->ptr = bitC->startPtr; -+ bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->ptr); -+ if (dstCapacity <= sizeof(bitC->ptr)) -+ return ERROR(dstSize_tooSmall); -+ return 0; -+} -+ -+/*! BIT_addBits() : -+ can add up to 26 bits into `bitC`. -+ Does not check for register overflow ! 
*/ -+ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits) -+{ -+ bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos; -+ bitC->bitPos += nbBits; -+} -+ -+/*! BIT_addBitsFast() : -+ * works only if `value` is _clean_, meaning all high bits above nbBits are 0 */ -+ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits) -+{ -+ bitC->bitContainer |= value << bitC->bitPos; -+ bitC->bitPos += nbBits; -+} -+ -+/*! BIT_flushBitsFast() : -+ * unsafe version; does not check buffer overflow */ -+ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC) -+{ -+ size_t const nbBytes = bitC->bitPos >> 3; -+ ZSTD_writeLEST(bitC->ptr, bitC->bitContainer); -+ bitC->ptr += nbBytes; -+ bitC->bitPos &= 7; -+ bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */ -+} -+ -+/*! BIT_flushBits() : -+ * safe version; check for buffer overflow, and prevents it. -+ * note : does not signal buffer overflow. This will be revealed later on using BIT_closeCStream() */ -+ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC) -+{ -+ size_t const nbBytes = bitC->bitPos >> 3; -+ ZSTD_writeLEST(bitC->ptr, bitC->bitContainer); -+ bitC->ptr += nbBytes; -+ if (bitC->ptr > bitC->endPtr) -+ bitC->ptr = bitC->endPtr; -+ bitC->bitPos &= 7; -+ bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */ -+} -+ -+/*! BIT_closeCStream() : -+ * @return : size of CStream, in bytes, -+ or 0 if it could not fit into dstBuffer */ -+ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC) -+{ -+ BIT_addBitsFast(bitC, 1, 1); /* endMark */ -+ BIT_flushBits(bitC); -+ -+ if (bitC->ptr >= bitC->endPtr) -+ return 0; /* doesn't fit within authorized budget : cancel */ -+ -+ return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); -+} -+ -+/*-******************************************************** -+* bitStream decoding -+**********************************************************/ -+/*! BIT_initDStream() : -+* Initialize a BIT_DStream_t. -+* `bitD` : a pointer to an already allocated BIT_DStream_t structure. -+* `srcSize` must be the *exact* size of the bitStream, in bytes. -+* @return : size of stream (== srcSize) or an errorCode if a problem is detected -+*/ -+ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize) -+{ -+ if (srcSize < 1) { -+ memset(bitD, 0, sizeof(*bitD)); -+ return ERROR(srcSize_wrong); -+ } -+ -+ if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */ -+ bitD->start = (const char *)srcBuffer; -+ bitD->ptr = (const char *)srcBuffer + srcSize - sizeof(bitD->bitContainer); -+ bitD->bitContainer = ZSTD_readLEST(bitD->ptr); -+ { -+ BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1]; -+ bitD->bitsConsumed = lastByte ? 
8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
-+			if (lastByte == 0)
-+				return ERROR(GENERIC); /* endMark not present */
-+		}
-+	} else {
-+		bitD->start = (const char *)srcBuffer;
-+		bitD->ptr = bitD->start;
-+		bitD->bitContainer = *(const BYTE *)(bitD->start);
-+		switch (srcSize) {
-+		case 7: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[6]) << (sizeof(bitD->bitContainer) * 8 - 16);
-+		case 6: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[5]) << (sizeof(bitD->bitContainer) * 8 - 24);
-+		case 5: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[4]) << (sizeof(bitD->bitContainer) * 8 - 32);
-+		case 4: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[3]) << 24;
-+		case 3: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[2]) << 16;
-+		case 2: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[1]) << 8;
-+		default:;
-+		}
-+		{
-+			BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1];
-+			bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
-+			if (lastByte == 0)
-+				return ERROR(GENERIC); /* endMark not present */
-+		}
-+		bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize) * 8;
-+	}
-+
-+	return srcSize;
-+}
-+
-+ZSTD_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) { return bitContainer >> start; }
-+
-+ZSTD_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) { return (bitContainer >> start) & BIT_mask[nbBits]; }
-+
-+ZSTD_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) { return bitContainer & BIT_mask[nbBits]; }
-+
-+/*! BIT_lookBits() :
-+ * Provides the next n bits from the local register.
-+ * The local register is not modified.
-+ * On 32-bits, maxNbBits==24.
-+ * On 64-bits, maxNbBits==56.
-+ * @return : value extracted
-+ */
-+ZSTD_STATIC size_t BIT_lookBits(const BIT_DStream_t *bitD, U32 nbBits)
-+{
-+	U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1;
-+	return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask - nbBits) & bitMask);
-+}
-+
-+/*! BIT_lookBitsFast() :
-+ * unsafe version; works only if nbBits >= 1 */
-+ZSTD_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t *bitD, U32 nbBits)
-+{
-+	U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1;
-+	return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask + 1) - nbBits) & bitMask);
-+}
-+
-+ZSTD_STATIC void BIT_skipBits(BIT_DStream_t *bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; }
-+
-+/*! BIT_readBits() :
-+ * Read (consume) the next n bits from the local register and update it.
-+ * Take care not to read more bits than the local register contains.
-+ * @return : extracted value.
-+ */
-+ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, U32 nbBits)
-+{
-+	size_t const value = BIT_lookBits(bitD, nbBits);
-+	BIT_skipBits(bitD, nbBits);
-+	return value;
-+}
-+
-+/*! BIT_readBitsFast() :
-+ * unsafe version; works only if nbBits >= 1 */
-+ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, U32 nbBits)
-+{
-+	size_t const value = BIT_lookBitsFast(bitD, nbBits);
-+	BIT_skipBits(bitD, nbBits);
-+	return value;
-+}
-+
-+/*! BIT_reloadDStream() :
-+ * Refill `bitD` from the buffer previously set in BIT_initDStream().
-+ * This function is safe: it guarantees it will not read beyond the src buffer.
-+ * @return : status of the `BIT_DStream_t` internal register.
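To mirror the write-side sketch shown earlier, a read-side sketch (illustrative only, not part of the patch) retrieves the same two fields in reverse order:

/* Illustrative sketch: decode the two fields written by the earlier
 * write sketch; the last field written is the first one read. */
static size_t bit_read_sketch(const void *src, size_t srcSize)
{
    BIT_DStream_t bitD;
    size_t const ret = BIT_initDStream(&bitD, src, srcSize);
    size_t flag, value;

    if (ERR_isError(ret))
        return ret;
    flag = BIT_readBits(&bitD, 1);  /* written last, read first */
    value = BIT_readBits(&bitD, 3);
    /* Longer streams call BIT_reloadDStream() in a loop until it
     * returns BIT_DStream_completed. */
    if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow)
        return (size_t)-1;          /* corruption detected */
    return BIT_endOfDStream(&bitD) ? ((value << 1) | flag) : (size_t)-1;
}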
-+ if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */ -+ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD) -+{ -+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer) * 8)) /* should not happen => corruption detected */ -+ return BIT_DStream_overflow; -+ -+ if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) { -+ bitD->ptr -= bitD->bitsConsumed >> 3; -+ bitD->bitsConsumed &= 7; -+ bitD->bitContainer = ZSTD_readLEST(bitD->ptr); -+ return BIT_DStream_unfinished; -+ } -+ if (bitD->ptr == bitD->start) { -+ if (bitD->bitsConsumed < sizeof(bitD->bitContainer) * 8) -+ return BIT_DStream_endOfBuffer; -+ return BIT_DStream_completed; -+ } -+ { -+ U32 nbBytes = bitD->bitsConsumed >> 3; -+ BIT_DStream_status result = BIT_DStream_unfinished; -+ if (bitD->ptr - nbBytes < bitD->start) { -+ nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ -+ result = BIT_DStream_endOfBuffer; -+ } -+ bitD->ptr -= nbBytes; -+ bitD->bitsConsumed -= nbBytes * 8; -+ bitD->bitContainer = ZSTD_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ -+ return result; -+ } -+} -+ -+/*! BIT_endOfDStream() : -+* @return Tells if DStream has exactly reached its end (all bits consumed). -+*/ -+ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *DStream) -+{ -+ return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer) * 8)); -+} -+ -+#endif /* BITSTREAM_H_MODULE */ -diff --git a/lib/zstd/compress.c b/lib/zstd/compress.c -new file mode 100644 -index 0000000..ff18ae6 ---- /dev/null -+++ b/lib/zstd/compress.c -@@ -0,0 +1,3482 @@ -+/** -+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. -+ * All rights reserved. -+ * -+ * This source code is licensed under the BSD-style license found in the -+ * LICENSE file in the root directory of https://github.com/facebook/zstd. -+ * -+ * This program is free software; you can redistribute it and/or modify it under -+ * the terms of the GNU General Public License version 2 as published by the -+ * Free Software Foundation. This program is dual-licensed; you may select -+ * either version 2 of the GNU General Public License ("GPL") or BSD license -+ * ("BSD"). 
-+ */ -+ -+/*-************************************* -+* Dependencies -+***************************************/ -+#include "fse.h" -+#include "huf.h" -+#include "mem.h" -+#include "zstd_internal.h" /* includes zstd.h */ -+#include -+#include -+#include /* memset */ -+ -+/*-************************************* -+* Constants -+***************************************/ -+static const U32 g_searchStrength = 8; /* control skip over incompressible data */ -+#define HASH_READ_SIZE 8 -+typedef enum { ZSTDcs_created = 0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e; -+ -+/*-************************************* -+* Helper functions -+***************************************/ -+size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; } -+ -+/*-************************************* -+* Sequence storage -+***************************************/ -+static void ZSTD_resetSeqStore(seqStore_t *ssPtr) -+{ -+ ssPtr->lit = ssPtr->litStart; -+ ssPtr->sequences = ssPtr->sequencesStart; -+ ssPtr->longLengthID = 0; -+} -+ -+/*-************************************* -+* Context memory management -+***************************************/ -+struct ZSTD_CCtx_s { -+ const BYTE *nextSrc; /* next block here to continue on curr prefix */ -+ const BYTE *base; /* All regular indexes relative to this position */ -+ const BYTE *dictBase; /* extDict indexes relative to this position */ -+ U32 dictLimit; /* below that point, need extDict */ -+ U32 lowLimit; /* below that point, no more data */ -+ U32 nextToUpdate; /* index from which to continue dictionary update */ -+ U32 nextToUpdate3; /* index from which to continue dictionary update */ -+ U32 hashLog3; /* dispatch table : larger == faster, more memory */ -+ U32 loadedDictEnd; /* index of end of dictionary */ -+ U32 forceWindow; /* force back-references to respect limit of 1< 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog); -+ size_t const h3Size = ((size_t)1) << hashLog3; -+ size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); -+ size_t const optSpace = -+ ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) + (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t)); -+ size_t const workspaceSize = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace + -+ (((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btopt2)) ? 
optSpace : 0); -+ -+ return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_CCtx)) + ZSTD_ALIGN(workspaceSize); -+} -+ -+static ZSTD_CCtx *ZSTD_createCCtx_advanced(ZSTD_customMem customMem) -+{ -+ ZSTD_CCtx *cctx; -+ if (!customMem.customAlloc || !customMem.customFree) -+ return NULL; -+ cctx = (ZSTD_CCtx *)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem); -+ if (!cctx) -+ return NULL; -+ memset(cctx, 0, sizeof(ZSTD_CCtx)); -+ cctx->customMem = customMem; -+ return cctx; -+} -+ -+ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize) -+{ -+ ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); -+ ZSTD_CCtx *cctx = ZSTD_createCCtx_advanced(stackMem); -+ if (cctx) { -+ cctx->workSpace = ZSTD_stackAllocAll(cctx->customMem.opaque, &cctx->workSpaceSize); -+ } -+ return cctx; -+} -+ -+size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx) -+{ -+ if (cctx == NULL) -+ return 0; /* support free on NULL */ -+ ZSTD_free(cctx->workSpace, cctx->customMem); -+ ZSTD_free(cctx, cctx->customMem); -+ return 0; /* reserved as a potential error code in the future */ -+} -+ -+const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx) /* hidden interface */ { return &(ctx->seqStore); } -+ -+static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx *cctx) { return cctx->params; } -+ -+/** ZSTD_checkParams() : -+ ensure param values remain within authorized range. -+ @return : 0, or an error code if one value is beyond authorized range */ -+size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) -+{ -+#define CLAMPCHECK(val, min, max) \ -+ { \ -+ if ((val < min) | (val > max)) \ -+ return ERROR(compressionParameter_unsupported); \ -+ } -+ CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX); -+ CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX); -+ CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); -+ CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX); -+ CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX); -+ CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX); -+ if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2) -+ return ERROR(compressionParameter_unsupported); -+ return 0; -+} -+ -+/** ZSTD_cycleLog() : -+ * condition for correct operation : hashLog > 1 */ -+static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat) -+{ -+ U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2); -+ return hashLog - btScale; -+} -+ -+/** ZSTD_adjustCParams() : -+ optimize `cPar` for a given input (`srcSize` and `dictSize`). -+ mostly downsizing to reduce memory consumption and initialization. -+ Both `srcSize` and `dictSize` are optional (use 0 if unknown), -+ but if both are 0, no optimization can be done. -+ Note : cPar is considered validated at this stage. Use ZSTD_checkParams() to ensure that. */ -+ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize) -+{ -+ if (srcSize + dictSize == 0) -+ return cPar; /* no size information available : no adjustment */ -+ -+ /* resize params, to use less memory when necessary */ -+ { -+ U32 const minSrcSize = (srcSize == 0) ? 
500 : 0; -+ U64 const rSize = srcSize + dictSize + minSrcSize; -+ if (rSize < ((U64)1 << ZSTD_WINDOWLOG_MAX)) { -+ U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1); -+ if (cPar.windowLog > srcLog) -+ cPar.windowLog = srcLog; -+ } -+ } -+ if (cPar.hashLog > cPar.windowLog) -+ cPar.hashLog = cPar.windowLog; -+ { -+ U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); -+ if (cycleLog > cPar.windowLog) -+ cPar.chainLog -= (cycleLog - cPar.windowLog); -+ } -+ -+ if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) -+ cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */ -+ -+ return cPar; -+} -+ -+static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2) -+{ -+ return (param1.cParams.hashLog == param2.cParams.hashLog) & (param1.cParams.chainLog == param2.cParams.chainLog) & -+ (param1.cParams.strategy == param2.cParams.strategy) & ((param1.cParams.searchLength == 3) == (param2.cParams.searchLength == 3)); -+} -+ -+/*! ZSTD_continueCCtx() : -+ reuse CCtx without reset (note : requires no dictionary) */ -+static size_t ZSTD_continueCCtx(ZSTD_CCtx *cctx, ZSTD_parameters params, U64 frameContentSize) -+{ -+ U32 const end = (U32)(cctx->nextSrc - cctx->base); -+ cctx->params = params; -+ cctx->frameContentSize = frameContentSize; -+ cctx->lowLimit = end; -+ cctx->dictLimit = end; -+ cctx->nextToUpdate = end + 1; -+ cctx->stage = ZSTDcs_init; -+ cctx->dictID = 0; -+ cctx->loadedDictEnd = 0; -+ { -+ int i; -+ for (i = 0; i < ZSTD_REP_NUM; i++) -+ cctx->rep[i] = repStartValue[i]; -+ } -+ cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */ -+ xxh64_reset(&cctx->xxhState, 0); -+ return 0; -+} -+ -+typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e; -+ -+/*! ZSTD_resetCCtx_advanced() : -+ note : `params` must be validated */ -+static size_t ZSTD_resetCCtx_advanced(ZSTD_CCtx *zc, ZSTD_parameters params, U64 frameContentSize, ZSTD_compResetPolicy_e const crp) -+{ -+ if (crp == ZSTDcrp_continue) -+ if (ZSTD_equivalentParams(params, zc->params)) { -+ zc->flagStaticTables = 0; -+ zc->flagStaticHufTable = HUF_repeat_none; -+ return ZSTD_continueCCtx(zc, params, frameContentSize); -+ } -+ -+ { -+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog); -+ U32 const divider = (params.cParams.searchLength == 3) ? 3 : 4; -+ size_t const maxNbSeq = blockSize / divider; -+ size_t const tokenSpace = blockSize + 11 * maxNbSeq; -+ size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog); -+ size_t const hSize = ((size_t)1) << params.cParams.hashLog; -+ U32 const hashLog3 = (params.cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog); -+ size_t const h3Size = ((size_t)1) << hashLog3; -+ size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); -+ void *ptr; -+ -+ /* Check if workSpace is large enough, alloc a new one if needed */ -+ { -+ size_t const optSpace = ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) + -+ (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t)); -+ size_t const neededSpace = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace + -+ (((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? 
optSpace : 0); -+ if (zc->workSpaceSize < neededSpace) { -+ ZSTD_free(zc->workSpace, zc->customMem); -+ zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem); -+ if (zc->workSpace == NULL) -+ return ERROR(memory_allocation); -+ zc->workSpaceSize = neededSpace; -+ } -+ } -+ -+ if (crp != ZSTDcrp_noMemset) -+ memset(zc->workSpace, 0, tableSpace); /* reset tables only */ -+ xxh64_reset(&zc->xxhState, 0); -+ zc->hashLog3 = hashLog3; -+ zc->hashTable = (U32 *)(zc->workSpace); -+ zc->chainTable = zc->hashTable + hSize; -+ zc->hashTable3 = zc->chainTable + chainSize; -+ ptr = zc->hashTable3 + h3Size; -+ zc->hufTable = (HUF_CElt *)ptr; -+ zc->flagStaticTables = 0; -+ zc->flagStaticHufTable = HUF_repeat_none; -+ ptr = ((U32 *)ptr) + 256; /* note : HUF_CElt* is incomplete type, size is simulated using U32 */ -+ -+ zc->nextToUpdate = 1; -+ zc->nextSrc = NULL; -+ zc->base = NULL; -+ zc->dictBase = NULL; -+ zc->dictLimit = 0; -+ zc->lowLimit = 0; -+ zc->params = params; -+ zc->blockSize = blockSize; -+ zc->frameContentSize = frameContentSize; -+ { -+ int i; -+ for (i = 0; i < ZSTD_REP_NUM; i++) -+ zc->rep[i] = repStartValue[i]; -+ } -+ -+ if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) { -+ zc->seqStore.litFreq = (U32 *)ptr; -+ zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1 << Litbits); -+ zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL + 1); -+ zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML + 1); -+ ptr = zc->seqStore.offCodeFreq + (MaxOff + 1); -+ zc->seqStore.matchTable = (ZSTD_match_t *)ptr; -+ ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM + 1; -+ zc->seqStore.priceTable = (ZSTD_optimal_t *)ptr; -+ ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM + 1; -+ zc->seqStore.litLengthSum = 0; -+ } -+ zc->seqStore.sequencesStart = (seqDef *)ptr; -+ ptr = zc->seqStore.sequencesStart + maxNbSeq; -+ zc->seqStore.llCode = (BYTE *)ptr; -+ zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq; -+ zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq; -+ zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq; -+ -+ zc->stage = ZSTDcs_init; -+ zc->dictID = 0; -+ zc->loadedDictEnd = 0; -+ -+ return 0; -+ } -+} -+ -+/* ZSTD_invalidateRepCodes() : -+ * ensures next compression will not use repcodes from previous block. -+ * Note : only works with regular variant; -+ * do not use with extDict variant ! */ -+void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx) -+{ -+ int i; -+ for (i = 0; i < ZSTD_REP_NUM; i++) -+ cctx->rep[i] = 0; -+} -+ -+/*! ZSTD_copyCCtx() : -+* Duplicate an existing context `srcCCtx` into another one `dstCCtx`. -+* Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). -+* @return : 0, or an error code */ -+size_t ZSTD_copyCCtx(ZSTD_CCtx *dstCCtx, const ZSTD_CCtx *srcCCtx, unsigned long long pledgedSrcSize) -+{ -+ if (srcCCtx->stage != ZSTDcs_init) -+ return ERROR(stage_wrong); -+ -+ memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem)); -+ { -+ ZSTD_parameters params = srcCCtx->params; -+ params.fParams.contentSizeFlag = (pledgedSrcSize > 0); -+ ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset); -+ } -+ -+ /* copy tables */ -+ { -+ size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 
0 : (1 << srcCCtx->params.cParams.chainLog); -+ size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog; -+ size_t const h3Size = (size_t)1 << srcCCtx->hashLog3; -+ size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); -+ memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace); -+ } -+ -+ /* copy dictionary offsets */ -+ dstCCtx->nextToUpdate = srcCCtx->nextToUpdate; -+ dstCCtx->nextToUpdate3 = srcCCtx->nextToUpdate3; -+ dstCCtx->nextSrc = srcCCtx->nextSrc; -+ dstCCtx->base = srcCCtx->base; -+ dstCCtx->dictBase = srcCCtx->dictBase; -+ dstCCtx->dictLimit = srcCCtx->dictLimit; -+ dstCCtx->lowLimit = srcCCtx->lowLimit; -+ dstCCtx->loadedDictEnd = srcCCtx->loadedDictEnd; -+ dstCCtx->dictID = srcCCtx->dictID; -+ -+ /* copy entropy tables */ -+ dstCCtx->flagStaticTables = srcCCtx->flagStaticTables; -+ dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable; -+ if (srcCCtx->flagStaticTables) { -+ memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable)); -+ memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable)); -+ memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable)); -+ } -+ if (srcCCtx->flagStaticHufTable) { -+ memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256 * 4); -+ } -+ -+ return 0; -+} -+ -+/*! ZSTD_reduceTable() : -+* reduce table indexes by `reducerValue` */ -+static void ZSTD_reduceTable(U32 *const table, U32 const size, U32 const reducerValue) -+{ -+ U32 u; -+ for (u = 0; u < size; u++) { -+ if (table[u] < reducerValue) -+ table[u] = 0; -+ else -+ table[u] -= reducerValue; -+ } -+} -+ -+/*! ZSTD_reduceIndex() : -+* rescale all indexes to avoid future overflow (indexes are U32) */ -+static void ZSTD_reduceIndex(ZSTD_CCtx *zc, const U32 reducerValue) -+{ -+ { -+ U32 const hSize = 1 << zc->params.cParams.hashLog; -+ ZSTD_reduceTable(zc->hashTable, hSize, reducerValue); -+ } -+ -+ { -+ U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog); -+ ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue); -+ } -+ -+ { -+ U32 const h3Size = (zc->hashLog3) ? 
1 << zc->hashLog3 : 0; -+ ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue); -+ } -+} -+ -+/*-******************************************************* -+* Block entropic compression -+*********************************************************/ -+ -+/* See doc/zstd_compression_format.md for detailed format description */ -+ -+size_t ZSTD_noCompressBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize) -+{ -+ if (srcSize + ZSTD_blockHeaderSize > dstCapacity) -+ return ERROR(dstSize_tooSmall); -+ memcpy((BYTE *)dst + ZSTD_blockHeaderSize, src, srcSize); -+ ZSTD_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw); -+ return ZSTD_blockHeaderSize + srcSize; -+} -+ -+static size_t ZSTD_noCompressLiterals(void *dst, size_t dstCapacity, const void *src, size_t srcSize) -+{ -+ BYTE *const ostart = (BYTE * const)dst; -+ U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095); -+ -+ if (srcSize + flSize > dstCapacity) -+ return ERROR(dstSize_tooSmall); -+ -+ switch (flSize) { -+ case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_basic + (srcSize << 3)); break; -+ case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_basic + (1 << 2) + (srcSize << 4))); break; -+ default: /*note : should not be necessary : flSize is within {1,2,3} */ -+ case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_basic + (3 << 2) + (srcSize << 4))); break; -+ } -+ -+ memcpy(ostart + flSize, src, srcSize); -+ return srcSize + flSize; -+} -+ -+static size_t ZSTD_compressRleLiteralsBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize) -+{ -+ BYTE *const ostart = (BYTE * const)dst; -+ U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095); -+ -+ (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */ -+ -+ switch (flSize) { -+ case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_rle + (srcSize << 3)); break; -+ case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_rle + (1 << 2) + (srcSize << 4))); break; -+ default: /*note : should not be necessary : flSize is necessarily within {1,2,3} */ -+ case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_rle + (3 << 2) + (srcSize << 4))); break; -+ } -+ -+ ostart[flSize] = *(const BYTE *)src; -+ return flSize + 1; -+} -+ -+static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; } -+ -+static size_t ZSTD_compressLiterals(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -+{ -+ size_t const minGain = ZSTD_minGain(srcSize); -+ size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); -+ BYTE *const ostart = (BYTE *)dst; -+ U32 singleStream = srcSize < 256; -+ symbolEncodingType_e hType = set_compressed; -+ size_t cLitSize; -+ -+/* small ? don't even attempt compression (speed opt) */ -+#define LITERAL_NOENTROPY 63 -+ { -+ size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY; -+ if (srcSize <= minLitSize) -+ return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); -+ } -+ -+ if (dstCapacity < lhSize + 1) -+ return ERROR(dstSize_tooSmall); /* not enough space for compression */ -+ { -+ HUF_repeat repeat = zc->flagStaticHufTable; -+ int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0; -+ if (repeat == HUF_repeat_valid && lhSize == 3) -+ singleStream = 1; -+ cLitSize = singleStream ? 
HUF_compress1X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters, -+ sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat) -+ : HUF_compress4X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters, -+ sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat); -+ if (repeat != HUF_repeat_none) { -+ hType = set_repeat; -+ } /* reused the existing table */ -+ else { -+ zc->flagStaticHufTable = HUF_repeat_check; -+ } /* now have a table to reuse */ -+ } -+ -+ if ((cLitSize == 0) | (cLitSize >= srcSize - minGain)) { -+ zc->flagStaticHufTable = HUF_repeat_none; -+ return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); -+ } -+ if (cLitSize == 1) { -+ zc->flagStaticHufTable = HUF_repeat_none; -+ return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); -+ } -+ -+ /* Build header */ -+ switch (lhSize) { -+ case 3: /* 2 - 2 - 10 - 10 */ -+ { -+ U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 14); -+ ZSTD_writeLE24(ostart, lhc); -+ break; -+ } -+ case 4: /* 2 - 2 - 14 - 14 */ -+ { -+ U32 const lhc = hType + (2 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 18); -+ ZSTD_writeLE32(ostart, lhc); -+ break; -+ } -+ default: /* should not be necessary, lhSize is only {3,4,5} */ -+ case 5: /* 2 - 2 - 18 - 18 */ -+ { -+ U32 const lhc = hType + (3 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 22); -+ ZSTD_writeLE32(ostart, lhc); -+ ostart[4] = (BYTE)(cLitSize >> 10); -+ break; -+ } -+ } -+ return lhSize + cLitSize; -+} -+ -+static const BYTE LL_Code[64] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18, -+ 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, -+ 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24}; -+ -+static const BYTE ML_Code[128] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -+ 26, 27, 28, 29, 30, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38, -+ 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, -+ 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42, -+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42}; -+ -+void ZSTD_seqToCodes(const seqStore_t *seqStorePtr) -+{ -+ BYTE const LL_deltaCode = 19; -+ BYTE const ML_deltaCode = 36; -+ const seqDef *const sequences = seqStorePtr->sequencesStart; -+ BYTE *const llCodeTable = seqStorePtr->llCode; -+ BYTE *const ofCodeTable = seqStorePtr->ofCode; -+ BYTE *const mlCodeTable = seqStorePtr->mlCode; -+ U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); -+ U32 u; -+ for (u = 0; u < nbSeq; u++) { -+ U32 const llv = sequences[u].litLength; -+ U32 const mlv = sequences[u].matchLength; -+ llCodeTable[u] = (llv > 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv]; -+ ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset); -+ mlCodeTable[u] = (mlv > 127) ? 
(BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv]; -+ } -+ if (seqStorePtr->longLengthID == 1) -+ llCodeTable[seqStorePtr->longLengthPos] = MaxLL; -+ if (seqStorePtr->longLengthID == 2) -+ mlCodeTable[seqStorePtr->longLengthPos] = MaxML; -+} -+ -+ZSTD_STATIC size_t ZSTD_compressSequences_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity) -+{ -+ const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN; -+ const seqStore_t *seqStorePtr = &(zc->seqStore); -+ FSE_CTable *CTable_LitLength = zc->litlengthCTable; -+ FSE_CTable *CTable_OffsetBits = zc->offcodeCTable; -+ FSE_CTable *CTable_MatchLength = zc->matchlengthCTable; -+ U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */ -+ const seqDef *const sequences = seqStorePtr->sequencesStart; -+ const BYTE *const ofCodeTable = seqStorePtr->ofCode; -+ const BYTE *const llCodeTable = seqStorePtr->llCode; -+ const BYTE *const mlCodeTable = seqStorePtr->mlCode; -+ BYTE *const ostart = (BYTE *)dst; -+ BYTE *const oend = ostart + dstCapacity; -+ BYTE *op = ostart; -+ size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; -+ BYTE *seqHead; -+ -+ U32 *count; -+ S16 *norm; -+ U32 *workspace; -+ size_t workspaceSize = sizeof(zc->tmpCounters); -+ { -+ size_t spaceUsed32 = 0; -+ count = (U32 *)zc->tmpCounters + spaceUsed32; -+ spaceUsed32 += MaxSeq + 1; -+ norm = (S16 *)((U32 *)zc->tmpCounters + spaceUsed32); -+ spaceUsed32 += ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2; -+ -+ workspace = (U32 *)zc->tmpCounters + spaceUsed32; -+ workspaceSize -= (spaceUsed32 << 2); -+ } -+ -+ /* Compress literals */ -+ { -+ const BYTE *const literals = seqStorePtr->litStart; -+ size_t const litSize = seqStorePtr->lit - literals; -+ size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize); -+ if (ZSTD_isError(cSize)) -+ return cSize; -+ op += cSize; -+ } -+ -+ /* Sequences Header */ -+ if ((oend - op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */) -+ return ERROR(dstSize_tooSmall); -+ if (nbSeq < 0x7F) -+ *op++ = (BYTE)nbSeq; -+ else if (nbSeq < LONGNBSEQ) -+ op[0] = (BYTE)((nbSeq >> 8) + 0x80), op[1] = (BYTE)nbSeq, op += 2; -+ else -+ op[0] = 0xFF, ZSTD_writeLE16(op + 1, (U16)(nbSeq - LONGNBSEQ)), op += 3; -+ if (nbSeq == 0) -+ return op - ostart; -+ -+ /* seqHead : flags for FSE encoding type */ -+ seqHead = op++; -+ -+#define MIN_SEQ_FOR_DYNAMIC_FSE 64 -+#define MAX_SEQ_FOR_STATIC_FSE 1000 -+ -+ /* convert length/distances into codes */ -+ ZSTD_seqToCodes(seqStorePtr); -+ -+ /* CTable for Literal Lengths */ -+ { -+ U32 max = MaxLL; -+ size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace); -+ if ((mostFrequent == nbSeq) && (nbSeq > 2)) { -+ *op++ = llCodeTable[0]; -+ FSE_buildCTable_rle(CTable_LitLength, (BYTE)max); -+ LLtype = set_rle; -+ } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { -+ LLtype = set_repeat; -+ } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog - 1)))) { -+ FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, workspace, workspaceSize); -+ LLtype = set_basic; -+ } else { -+ size_t nbSeq_1 = nbSeq; -+ const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max); -+ if (count[llCodeTable[nbSeq - 1]] > 1) { -+ count[llCodeTable[nbSeq - 1]]--; -+ nbSeq_1--; -+ } -+ FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); -+ { -+ size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */ -+ if 
(FSE_isError(NCountSize)) -+ return NCountSize; -+ op += NCountSize; -+ } -+ FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, workspace, workspaceSize); -+ LLtype = set_compressed; -+ } -+ } -+ -+ /* CTable for Offsets */ -+ { -+ U32 max = MaxOff; -+ size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace); -+ if ((mostFrequent == nbSeq) && (nbSeq > 2)) { -+ *op++ = ofCodeTable[0]; -+ FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max); -+ Offtype = set_rle; -+ } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { -+ Offtype = set_repeat; -+ } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog - 1)))) { -+ FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, workspace, workspaceSize); -+ Offtype = set_basic; -+ } else { -+ size_t nbSeq_1 = nbSeq; -+ const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max); -+ if (count[ofCodeTable[nbSeq - 1]] > 1) { -+ count[ofCodeTable[nbSeq - 1]]--; -+ nbSeq_1--; -+ } -+ FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); -+ { -+ size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */ -+ if (FSE_isError(NCountSize)) -+ return NCountSize; -+ op += NCountSize; -+ } -+ FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, workspace, workspaceSize); -+ Offtype = set_compressed; -+ } -+ } -+ -+ /* CTable for MatchLengths */ -+ { -+ U32 max = MaxML; -+ size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace); -+ if ((mostFrequent == nbSeq) && (nbSeq > 2)) { -+ *op++ = *mlCodeTable; -+ FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max); -+ MLtype = set_rle; -+ } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { -+ MLtype = set_repeat; -+ } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog - 1)))) { -+ FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, workspace, workspaceSize); -+ MLtype = set_basic; -+ } else { -+ size_t nbSeq_1 = nbSeq; -+ const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max); -+ if (count[mlCodeTable[nbSeq - 1]] > 1) { -+ count[mlCodeTable[nbSeq - 1]]--; -+ nbSeq_1--; -+ } -+ FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); -+ { -+ size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */ -+ if (FSE_isError(NCountSize)) -+ return NCountSize; -+ op += NCountSize; -+ } -+ FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, workspace, workspaceSize); -+ MLtype = set_compressed; -+ } -+ } -+ -+ *seqHead = (BYTE)((LLtype << 6) + (Offtype << 4) + (MLtype << 2)); -+ zc->flagStaticTables = 0; -+ -+ /* Encoding Sequences */ -+ { -+ BIT_CStream_t blockStream; -+ FSE_CState_t stateMatchLength; -+ FSE_CState_t stateOffsetBits; -+ FSE_CState_t stateLitLength; -+ -+ CHECK_E(BIT_initCStream(&blockStream, op, oend - op), dstSize_tooSmall); /* not enough space remaining */ -+ -+ /* first symbols */ -+ FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]); -+ FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq - 1]); -+ FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]); -+ BIT_addBits(&blockStream, sequences[nbSeq - 1].litLength, LL_bits[llCodeTable[nbSeq - 1]]); -+ if (ZSTD_32bits()) -+ BIT_flushBits(&blockStream); -+ BIT_addBits(&blockStream, sequences[nbSeq - 1].matchLength, 
ML_bits[mlCodeTable[nbSeq - 1]]); -+ if (ZSTD_32bits()) -+ BIT_flushBits(&blockStream); -+ if (longOffsets) { -+ U32 const ofBits = ofCodeTable[nbSeq - 1]; -+ int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1); -+ if (extraBits) { -+ BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, extraBits); -+ BIT_flushBits(&blockStream); -+ } -+ BIT_addBits(&blockStream, sequences[nbSeq - 1].offset >> extraBits, ofBits - extraBits); -+ } else { -+ BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, ofCodeTable[nbSeq - 1]); -+ } -+ BIT_flushBits(&blockStream); -+ -+ { -+ size_t n; -+ for (n = nbSeq - 2; n < nbSeq; n--) { /* intentional underflow */ -+ BYTE const llCode = llCodeTable[n]; -+ BYTE const ofCode = ofCodeTable[n]; -+ BYTE const mlCode = mlCodeTable[n]; -+ U32 const llBits = LL_bits[llCode]; -+ U32 const ofBits = ofCode; /* 32b*/ /* 64b*/ -+ U32 const mlBits = ML_bits[mlCode]; -+ /* (7)*/ /* (7)*/ -+ FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */ -+ FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */ -+ if (ZSTD_32bits()) -+ BIT_flushBits(&blockStream); /* (7)*/ -+ FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */ -+ if (ZSTD_32bits() || (ofBits + mlBits + llBits >= 64 - 7 - (LLFSELog + MLFSELog + OffFSELog))) -+ BIT_flushBits(&blockStream); /* (7)*/ -+ BIT_addBits(&blockStream, sequences[n].litLength, llBits); -+ if (ZSTD_32bits() && ((llBits + mlBits) > 24)) -+ BIT_flushBits(&blockStream); -+ BIT_addBits(&blockStream, sequences[n].matchLength, mlBits); -+ if (ZSTD_32bits()) -+ BIT_flushBits(&blockStream); /* (7)*/ -+ if (longOffsets) { -+ int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1); -+ if (extraBits) { -+ BIT_addBits(&blockStream, sequences[n].offset, extraBits); -+ BIT_flushBits(&blockStream); /* (7)*/ -+ } -+ BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */ -+ } else { -+ BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */ -+ } -+ BIT_flushBits(&blockStream); /* (7)*/ -+ } -+ } -+ -+ FSE_flushCState(&blockStream, &stateMatchLength); -+ FSE_flushCState(&blockStream, &stateOffsetBits); -+ FSE_flushCState(&blockStream, &stateLitLength); -+ -+ { -+ size_t const streamSize = BIT_closeCStream(&blockStream); -+ if (streamSize == 0) -+ return ERROR(dstSize_tooSmall); /* not enough space */ -+ op += streamSize; -+ } -+ } -+ return op - ostart; -+} -+ -+ZSTD_STATIC size_t ZSTD_compressSequences(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, size_t srcSize) -+{ -+ size_t const cSize = ZSTD_compressSequences_internal(zc, dst, dstCapacity); -+ size_t const minGain = ZSTD_minGain(srcSize); -+ size_t const maxCSize = srcSize - minGain; -+ /* If the srcSize <= dstCapacity, then there is enough space to write a -+ * raw uncompressed block. Since we ran out of space, the block must not -+ * be compressible, so fall back to a raw uncompressed block. -+ */ -+ int const uncompressibleError = cSize == ERROR(dstSize_tooSmall) && srcSize <= dstCapacity; -+ int i; -+ -+ if (ZSTD_isError(cSize) && !uncompressibleError) -+ return cSize; -+ if (cSize >= maxCSize || uncompressibleError) { -+ zc->flagStaticHufTable = HUF_repeat_none; -+ return 0; -+ } -+ /* confirm repcodes */ -+ for (i = 0; i < ZSTD_REP_NUM; i++) -+ zc->rep[i] = zc->repToConfirm[i]; -+ return cSize; -+} -+ -+/*! ZSTD_storeSeq() : -+ Store a sequence (literal length, literals, offset code and match length code) into seqStore_t. 
-+ `offsetCode` : distance to match, or 0 == repCode. -+ `matchCode` : matchLength - MINMATCH -+*/ -+ZSTD_STATIC void ZSTD_storeSeq(seqStore_t *seqStorePtr, size_t litLength, const void *literals, U32 offsetCode, size_t matchCode) -+{ -+ /* copy Literals */ -+ ZSTD_wildcopy(seqStorePtr->lit, literals, litLength); -+ seqStorePtr->lit += litLength; -+ -+ /* literal Length */ -+ if (litLength > 0xFFFF) { -+ seqStorePtr->longLengthID = 1; -+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); -+ } -+ seqStorePtr->sequences[0].litLength = (U16)litLength; -+ -+ /* match offset */ -+ seqStorePtr->sequences[0].offset = offsetCode + 1; -+ -+ /* match Length */ -+ if (matchCode > 0xFFFF) { -+ seqStorePtr->longLengthID = 2; -+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); -+ } -+ seqStorePtr->sequences[0].matchLength = (U16)matchCode; -+ -+ seqStorePtr->sequences++; -+} -+ -+/*-************************************* -+* Match length counter -+***************************************/ -+static unsigned ZSTD_NbCommonBytes(register size_t val) -+{ -+ if (ZSTD_isLittleEndian()) { -+ if (ZSTD_64bits()) { -+ return (__builtin_ctzll((U64)val) >> 3); -+ } else { /* 32 bits */ -+ return (__builtin_ctz((U32)val) >> 3); -+ } -+ } else { /* Big Endian CPU */ -+ if (ZSTD_64bits()) { -+ return (__builtin_clzll(val) >> 3); -+ } else { /* 32 bits */ -+ return (__builtin_clz((U32)val) >> 3); -+ } -+ } -+} -+ -+static size_t ZSTD_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *const pInLimit) -+{ -+ const BYTE *const pStart = pIn; -+ const BYTE *const pInLoopLimit = pInLimit - (sizeof(size_t) - 1); -+ -+ while (pIn < pInLoopLimit) { -+ size_t const diff = ZSTD_readST(pMatch) ^ ZSTD_readST(pIn); -+ if (!diff) { -+ pIn += sizeof(size_t); -+ pMatch += sizeof(size_t); -+ continue; -+ } -+ pIn += ZSTD_NbCommonBytes(diff); -+ return (size_t)(pIn - pStart); -+ } -+ if (ZSTD_64bits()) -+ if ((pIn < (pInLimit - 3)) && (ZSTD_read32(pMatch) == ZSTD_read32(pIn))) { -+ pIn += 4; -+ pMatch += 4; -+ } -+ if ((pIn < (pInLimit - 1)) && (ZSTD_read16(pMatch) == ZSTD_read16(pIn))) { -+ pIn += 2; -+ pMatch += 2; -+ } -+ if ((pIn < pInLimit) && (*pMatch == *pIn)) -+ pIn++; -+ return (size_t)(pIn - pStart); -+} -+ -+/** ZSTD_count_2segments() : -+* can count match length with `ip` & `match` in 2 different segments. 
-+* convention : on reaching mEnd, match count continues starting from iStart -+*/ -+static size_t ZSTD_count_2segments(const BYTE *ip, const BYTE *match, const BYTE *iEnd, const BYTE *mEnd, const BYTE *iStart) -+{ -+ const BYTE *const vEnd = MIN(ip + (mEnd - match), iEnd); -+ size_t const matchLength = ZSTD_count(ip, match, vEnd); -+ if (match + matchLength != mEnd) -+ return matchLength; -+ return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd); -+} -+ -+/*-************************************* -+* Hashes -+***************************************/ -+static const U32 prime3bytes = 506832829U; -+static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32 - 24)) * prime3bytes) >> (32 - h); } -+ZSTD_STATIC size_t ZSTD_hash3Ptr(const void *ptr, U32 h) { return ZSTD_hash3(ZSTD_readLE32(ptr), h); } /* only in zstd_opt.h */ -+ -+static const U32 prime4bytes = 2654435761U; -+static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32 - h); } -+static size_t ZSTD_hash4Ptr(const void *ptr, U32 h) { return ZSTD_hash4(ZSTD_read32(ptr), h); } -+ -+static const U64 prime5bytes = 889523592379ULL; -+static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64 - 40)) * prime5bytes) >> (64 - h)); } -+static size_t ZSTD_hash5Ptr(const void *p, U32 h) { return ZSTD_hash5(ZSTD_readLE64(p), h); } -+ -+static const U64 prime6bytes = 227718039650203ULL; -+static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64 - 48)) * prime6bytes) >> (64 - h)); } -+static size_t ZSTD_hash6Ptr(const void *p, U32 h) { return ZSTD_hash6(ZSTD_readLE64(p), h); } -+ -+static const U64 prime7bytes = 58295818150454627ULL; -+static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64 - 56)) * prime7bytes) >> (64 - h)); } -+static size_t ZSTD_hash7Ptr(const void *p, U32 h) { return ZSTD_hash7(ZSTD_readLE64(p), h); } -+ -+static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; -+static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u)*prime8bytes) >> (64 - h)); } -+static size_t ZSTD_hash8Ptr(const void *p, U32 h) { return ZSTD_hash8(ZSTD_readLE64(p), h); } -+ -+static size_t ZSTD_hashPtr(const void *p, U32 hBits, U32 mls) -+{ -+ switch (mls) { -+ /* case 3: return ZSTD_hash3Ptr(p, hBits); */ -+ default: -+ case 4: return ZSTD_hash4Ptr(p, hBits); -+ case 5: return ZSTD_hash5Ptr(p, hBits); -+ case 6: return ZSTD_hash6Ptr(p, hBits); -+ case 7: return ZSTD_hash7Ptr(p, hBits); -+ case 8: return ZSTD_hash8Ptr(p, hBits); -+ } -+} -+ -+/*-************************************* -+* Fast Scan -+***************************************/ -+static void ZSTD_fillHashTable(ZSTD_CCtx *zc, const void *end, const U32 mls) -+{ -+ U32 *const hashTable = zc->hashTable; -+ U32 const hBits = zc->params.cParams.hashLog; -+ const BYTE *const base = zc->base; -+ const BYTE *ip = base + zc->nextToUpdate; -+ const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE; -+ const size_t fastHashFillStep = 3; -+ -+ while (ip <= iend) { -+ hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base); -+ ip += fastHashFillStep; -+ } -+} -+ -+FORCE_INLINE -+void ZSTD_compressBlock_fast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls) -+{ -+ U32 *const hashTable = cctx->hashTable; -+ U32 const hBits = cctx->params.cParams.hashLog; -+ seqStore_t *seqStorePtr = &(cctx->seqStore); -+ const BYTE *const base = cctx->base; -+ const BYTE *const istart = (const BYTE *)src; -+ const BYTE *ip = istart; -+ const BYTE *anchor = istart; -+ const U32 lowestIndex = cctx->dictLimit; -+ const BYTE *const lowest = base + 
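Each ZSTD_hashN above is a multiplicative hash: left-align the n input bytes in a word, multiply by a large odd constant, keep the top h bits of the product. A standalone illustration of the 5-byte variant; the prime is copied from the code, while hash5 and the 17-bit table width are my choices. Note how the pre-shift makes the hash ignore bytes 5 to 7:

#include <stdint.h>
#include <stdio.h>

static const uint64_t prime5bytes = 889523592379ULL;

/* Hash the low 5 bytes of v into an h-bit table index. */
static size_t hash5(uint64_t v, unsigned h)
{
    return (size_t)(((v << (64 - 40)) * prime5bytes) >> (64 - h));
}

int main(void)
{
    /* a and b differ only in byte 5, which the shift discards: same index twice */
    uint64_t a = 0x0000AABBCCDDEEFFULL;
    uint64_t b = 0x000011BBCCDDEEFFULL;
    printf("%zu %zu\n", hash5(a, 17), hash5(b, 17));
    return 0;
}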
lowestIndex; -+ const BYTE *const iend = istart + srcSize; -+ const BYTE *const ilimit = iend - HASH_READ_SIZE; -+ U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1]; -+ U32 offsetSaved = 0; -+ -+ /* init */ -+ ip += (ip == lowest); -+ { -+ U32 const maxRep = (U32)(ip - lowest); -+ if (offset_2 > maxRep) -+ offsetSaved = offset_2, offset_2 = 0; -+ if (offset_1 > maxRep) -+ offsetSaved = offset_1, offset_1 = 0; -+ } -+ -+ /* Main Search Loop */ -+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ -+ size_t mLength; -+ size_t const h = ZSTD_hashPtr(ip, hBits, mls); -+ U32 const curr = (U32)(ip - base); -+ U32 const matchIndex = hashTable[h]; -+ const BYTE *match = base + matchIndex; -+ hashTable[h] = curr; /* update hash table */ -+ -+ if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { -+ mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4; -+ ip++; -+ ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH); -+ } else { -+ U32 offset; -+ if ((matchIndex <= lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) { -+ ip += ((ip - anchor) >> g_searchStrength) + 1; -+ continue; -+ } -+ mLength = ZSTD_count(ip + 4, match + 4, iend) + 4; -+ offset = (U32)(ip - match); -+ while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) { -+ ip--; -+ match--; -+ mLength++; -+ } /* catch up */ -+ offset_2 = offset_1; -+ offset_1 = offset; -+ -+ ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH); -+ } -+ -+ /* match found */ -+ ip += mLength; -+ anchor = ip; -+ -+ if (ip <= ilimit) { -+ /* Fill Table */ -+ hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; /* here because curr+2 could be > iend-8 */ -+ hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base); -+ /* check immediate repcode */ -+ while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) { -+ /* store sequence */ -+ size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4; -+ { -+ U32 const tmpOff = offset_2; -+ offset_2 = offset_1; -+ offset_1 = tmpOff; -+ } /* swap offset_2 <=> offset_1 */ -+ hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base); -+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH); -+ ip += rLength; -+ anchor = ip; -+ continue; /* faster when present ... (?) */ -+ } -+ } -+ } -+ -+ /* save reps for next block */ -+ cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; -+ cctx->repToConfirm[1] = offset_2 ? 
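The loop above is organized around zstd's repeat-offset history: offset_1 and offset_2 cache the last two match distances, a candidate at ip is probed at ip - offset_1 before any hash lookup, repeats are stored with offset code 0 (hence the offsetCode + 1 bias in ZSTD_storeSeq, and ZSTD_REP_MOVE for real offsets), and consuming the second slot swaps it to the front. A toy model of just that bookkeeping, with invented names:

#include <stdint.h>
#include <stdio.h>

/* Toy repeat-offset history: not zstd's API, only the update rule. */
struct rep_history { uint32_t rep1, rep2; };

/* A newly found "real" offset pushes the history down one slot. */
static void rep_push(struct rep_history *h, uint32_t offset)
{
    h->rep2 = h->rep1;
    h->rep1 = offset;
}

/* Using rep2 swaps it to the front; using rep1 changes nothing. */
static void rep_use_second(struct rep_history *h)
{
    uint32_t tmp = h->rep2;
    h->rep2 = h->rep1;
    h->rep1 = tmp;
}

int main(void)
{
    struct rep_history h = { 1, 4 };
    rep_push(&h, 8);      /* history becomes {8, 1} */
    rep_use_second(&h);   /* history becomes {1, 8} */
    printf("%u %u\n", h.rep1, h.rep2);
    return 0;
}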
offset_2 : offsetSaved; -+ -+ /* Last Literals */ -+ { -+ size_t const lastLLSize = iend - anchor; -+ memcpy(seqStorePtr->lit, anchor, lastLLSize); -+ seqStorePtr->lit += lastLLSize; -+ } -+} -+ -+static void ZSTD_compressBlock_fast(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -+{ -+ const U32 mls = ctx->params.cParams.searchLength; -+ switch (mls) { -+ default: /* includes case 3 */ -+ case 4: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return; -+ case 5: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return; -+ case 6: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return; -+ case 7: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return; -+ } -+} -+ -+static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls) -+{ -+ U32 *hashTable = ctx->hashTable; -+ const U32 hBits = ctx->params.cParams.hashLog; -+ seqStore_t *seqStorePtr = &(ctx->seqStore); -+ const BYTE *const base = ctx->base; -+ const BYTE *const dictBase = ctx->dictBase; -+ const BYTE *const istart = (const BYTE *)src; -+ const BYTE *ip = istart; -+ const BYTE *anchor = istart; -+ const U32 lowestIndex = ctx->lowLimit; -+ const BYTE *const dictStart = dictBase + lowestIndex; -+ const U32 dictLimit = ctx->dictLimit; -+ const BYTE *const lowPrefixPtr = base + dictLimit; -+ const BYTE *const dictEnd = dictBase + dictLimit; -+ const BYTE *const iend = istart + srcSize; -+ const BYTE *const ilimit = iend - 8; -+ U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1]; -+ -+ /* Search Loop */ -+ while (ip < ilimit) { /* < instead of <=, because (ip+1) */ -+ const size_t h = ZSTD_hashPtr(ip, hBits, mls); -+ const U32 matchIndex = hashTable[h]; -+ const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base; -+ const BYTE *match = matchBase + matchIndex; -+ const U32 curr = (U32)(ip - base); -+ const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */ -+ const BYTE *repBase = repIndex < dictLimit ? dictBase : base; -+ const BYTE *repMatch = repBase + repIndex; -+ size_t mLength; -+ hashTable[h] = curr; /* update hash table */ -+ -+ if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) && -+ (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) { -+ const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend; -+ mLength = ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32; -+ ip++; -+ ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH); -+ } else { -+ if ((matchIndex < lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) { -+ ip += ((ip - anchor) >> g_searchStrength) + 1; -+ continue; -+ } -+ { -+ const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend; -+ const BYTE *lowMatchPtr = matchIndex < dictLimit ? 
dictStart : lowPrefixPtr; -+ U32 offset; -+ mLength = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32; -+ while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) { -+ ip--; -+ match--; -+ mLength++; -+ } /* catch up */ -+ offset = curr - matchIndex; -+ offset_2 = offset_1; -+ offset_1 = offset; -+ ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH); -+ } -+ } -+ -+ /* found a match : store it */ -+ ip += mLength; -+ anchor = ip; -+ -+ if (ip <= ilimit) { -+ /* Fill Table */ -+ hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; -+ hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base); -+ /* check immediate repcode */ -+ while (ip <= ilimit) { -+ U32 const curr2 = (U32)(ip - base); -+ U32 const repIndex2 = curr2 - offset_2; -+ const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2; -+ if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ -+ && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) { -+ const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend; -+ size_t repLength2 = -+ ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32; -+ U32 tmpOffset = offset_2; -+ offset_2 = offset_1; -+ offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ -+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH); -+ hashTable[ZSTD_hashPtr(ip, hBits, mls)] = curr2; -+ ip += repLength2; -+ anchor = ip; -+ continue; -+ } -+ break; -+ } -+ } -+ } -+ -+ /* save reps for next block */ -+ ctx->repToConfirm[0] = offset_1; -+ ctx->repToConfirm[1] = offset_2; -+ -+ /* Last Literals */ -+ { -+ size_t const lastLLSize = iend - anchor; -+ memcpy(seqStorePtr->lit, anchor, lastLLSize); -+ seqStorePtr->lit += lastLLSize; -+ } -+} -+ -+static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -+{ -+ U32 const mls = ctx->params.cParams.searchLength; -+ switch (mls) { -+ default: /* includes case 3 */ -+ case 4: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return; -+ case 5: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return; -+ case 6: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return; -+ case 7: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return; -+ } -+} -+ -+/*-************************************* -+* Double Fast -+***************************************/ -+static void ZSTD_fillDoubleHashTable(ZSTD_CCtx *cctx, const void *end, const U32 mls) -+{ -+ U32 *const hashLarge = cctx->hashTable; -+ U32 const hBitsL = cctx->params.cParams.hashLog; -+ U32 *const hashSmall = cctx->chainTable; -+ U32 const hBitsS = cctx->params.cParams.chainLog; -+ const BYTE *const base = cctx->base; -+ const BYTE *ip = base + cctx->nextToUpdate; -+ const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE; -+ const size_t fastHashFillStep = 3; -+ -+ while (ip <= iend) { -+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base); -+ hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base); -+ ip += fastHashFillStep; -+ } -+} -+ -+FORCE_INLINE -+void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls) -+{ -+ U32 *const hashLong = cctx->hashTable; -+ const U32 hBitsL = cctx->params.cParams.hashLog; -+ U32 *const hashSmall = cctx->chainTable; -+ const U32 hBitsS = 
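The double-fast strategy entered above keeps two tables over the same positions: hashSmall indexes mls-byte prefixes (good recall for short matches) while hashLong indexes 8-byte prefixes (high precision for long ones); the search probes the long table first and falls back to the short one. A sketch of the dual insertion step only; hashN, the table sizes, the little-endian assumption, and reusing one prime for every width are my simplifications:

#include <stdint.h>
#include <string.h>

#define H_LOG_LONG  17  /* illustrative table sizes, not the real parameters */
#define H_LOG_SMALL 16

static uint32_t tab_long[1u << H_LOG_LONG];
static uint32_t tab_small[1u << H_LOG_SMALL];

/* Multiplicative hash of the first nbytes bytes at p (little-endian host,
 * at least 8 readable bytes assumed). */
static size_t hashN(const uint8_t *p, unsigned nbytes, unsigned hlog)
{
    uint64_t v = 0;
    memcpy(&v, p, 8);
    v <<= (64 - 8 * nbytes);            /* keep only the first nbytes bytes */
    return (size_t)((v * 0xCF1BBCDCB7A56463ULL) >> (64 - hlog));
}

/* Record position pos (byte pointer p) in both tables, as the
 * double-fast strategy does for every inserted position. */
static void insert_both(const uint8_t *p, uint32_t pos, unsigned mls)
{
    tab_small[hashN(p, mls, H_LOG_SMALL)] = pos;
    tab_long[hashN(p, 8, H_LOG_LONG)] = pos;
}

int main(void)
{
    const uint8_t buf[] = "exampleexample!!";
    insert_both(buf, 0, 5);
    insert_both(buf + 7, 7, 5);
    return 0;
}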
cctx->params.cParams.chainLog; -+ seqStore_t *seqStorePtr = &(cctx->seqStore); -+ const BYTE *const base = cctx->base; -+ const BYTE *const istart = (const BYTE *)src; -+ const BYTE *ip = istart; -+ const BYTE *anchor = istart; -+ const U32 lowestIndex = cctx->dictLimit; -+ const BYTE *const lowest = base + lowestIndex; -+ const BYTE *const iend = istart + srcSize; -+ const BYTE *const ilimit = iend - HASH_READ_SIZE; -+ U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1]; -+ U32 offsetSaved = 0; -+ -+ /* init */ -+ ip += (ip == lowest); -+ { -+ U32 const maxRep = (U32)(ip - lowest); -+ if (offset_2 > maxRep) -+ offsetSaved = offset_2, offset_2 = 0; -+ if (offset_1 > maxRep) -+ offsetSaved = offset_1, offset_1 = 0; -+ } -+ -+ /* Main Search Loop */ -+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ -+ size_t mLength; -+ size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); -+ size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); -+ U32 const curr = (U32)(ip - base); -+ U32 const matchIndexL = hashLong[h2]; -+ U32 const matchIndexS = hashSmall[h]; -+ const BYTE *matchLong = base + matchIndexL; -+ const BYTE *match = base + matchIndexS; -+ hashLong[h2] = hashSmall[h] = curr; /* update hash tables */ -+ -+ if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { /* note : by construction, offset_1 <= curr */ -+ mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4; -+ ip++; -+ ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH); -+ } else { -+ U32 offset; -+ if ((matchIndexL > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) { -+ mLength = ZSTD_count(ip + 8, matchLong + 8, iend) + 8; -+ offset = (U32)(ip - matchLong); -+ while (((ip > anchor) & (matchLong > lowest)) && (ip[-1] == matchLong[-1])) { -+ ip--; -+ matchLong--; -+ mLength++; -+ } /* catch up */ -+ } else if ((matchIndexS > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) { -+ size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8); -+ U32 const matchIndex3 = hashLong[h3]; -+ const BYTE *match3 = base + matchIndex3; -+ hashLong[h3] = curr + 1; -+ if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) { -+ mLength = ZSTD_count(ip + 9, match3 + 8, iend) + 8; -+ ip++; -+ offset = (U32)(ip - match3); -+ while (((ip > anchor) & (match3 > lowest)) && (ip[-1] == match3[-1])) { -+ ip--; -+ match3--; -+ mLength++; -+ } /* catch up */ -+ } else { -+ mLength = ZSTD_count(ip + 4, match + 4, iend) + 4; -+ offset = (U32)(ip - match); -+ while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) { -+ ip--; -+ match--; -+ mLength++; -+ } /* catch up */ -+ } -+ } else { -+ ip += ((ip - anchor) >> g_searchStrength) + 1; -+ continue; -+ } -+ -+ offset_2 = offset_1; -+ offset_1 = offset; -+ -+ ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH); -+ } -+ -+ /* match found */ -+ ip += mLength; -+ anchor = ip; -+ -+ if (ip <= ilimit) { -+ /* Fill Table */ -+ hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = -+ curr + 2; /* here because curr+2 could be > iend-8 */ -+ hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base); -+ -+ /* check immediate repcode */ -+ while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) { -+ /* store sequence */ -+ size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4; -+ { -+ U32 const tmpOff = offset_2; 
-+ offset_2 = offset_1; -+ offset_1 = tmpOff; -+ } /* swap offset_2 <=> offset_1 */ -+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base); -+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base); -+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH); -+ ip += rLength; -+ anchor = ip; -+ continue; /* faster when present ... (?) */ -+ } -+ } -+ } -+ -+ /* save reps for next block */ -+ cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; -+ cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved; -+ -+ /* Last Literals */ -+ { -+ size_t const lastLLSize = iend - anchor; -+ memcpy(seqStorePtr->lit, anchor, lastLLSize); -+ seqStorePtr->lit += lastLLSize; -+ } -+} -+ -+static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -+{ -+ const U32 mls = ctx->params.cParams.searchLength; -+ switch (mls) { -+ default: /* includes case 3 */ -+ case 4: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return; -+ case 5: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return; -+ case 6: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return; -+ case 7: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return; -+ } -+} -+ -+static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls) -+{ -+ U32 *const hashLong = ctx->hashTable; -+ U32 const hBitsL = ctx->params.cParams.hashLog; -+ U32 *const hashSmall = ctx->chainTable; -+ U32 const hBitsS = ctx->params.cParams.chainLog; -+ seqStore_t *seqStorePtr = &(ctx->seqStore); -+ const BYTE *const base = ctx->base; -+ const BYTE *const dictBase = ctx->dictBase; -+ const BYTE *const istart = (const BYTE *)src; -+ const BYTE *ip = istart; -+ const BYTE *anchor = istart; -+ const U32 lowestIndex = ctx->lowLimit; -+ const BYTE *const dictStart = dictBase + lowestIndex; -+ const U32 dictLimit = ctx->dictLimit; -+ const BYTE *const lowPrefixPtr = base + dictLimit; -+ const BYTE *const dictEnd = dictBase + dictLimit; -+ const BYTE *const iend = istart + srcSize; -+ const BYTE *const ilimit = iend - 8; -+ U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1]; -+ -+ /* Search Loop */ -+ while (ip < ilimit) { /* < instead of <=, because (ip+1) */ -+ const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls); -+ const U32 matchIndex = hashSmall[hSmall]; -+ const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base; -+ const BYTE *match = matchBase + matchIndex; -+ -+ const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8); -+ const U32 matchLongIndex = hashLong[hLong]; -+ const BYTE *matchLongBase = matchLongIndex < dictLimit ? dictBase : base; -+ const BYTE *matchLong = matchLongBase + matchLongIndex; -+ -+ const U32 curr = (U32)(ip - base); -+ const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */ -+ const BYTE *repBase = repIndex < dictLimit ? dictBase : base; -+ const BYTE *repMatch = repBase + repIndex; -+ size_t mLength; -+ hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */ -+ -+ if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) && -+ (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) { -+ const BYTE *repMatchEnd = repIndex < dictLimit ? 
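All the _extDict variants share one convention, visible in the repBase and matchBase selections above: old (dictionary) data and the current window live in a single index space, and an index below dictLimit is materialized through dictBase while higher indices go through base; matches may then straddle the boundary, which is what ZSTD_count_2segments handles. A schematic of the index-to-pointer rule with invented names; the out-of-range base arithmetic in the demo mirrors what the patch itself does:

#include <stdint.h>

/* One contiguous index space, two backing buffers (schematic only). */
struct window {
    const uint8_t *base;      /* indices >= dictLimit live here            */
    const uint8_t *dictBase;  /* indices in [lowLimit, dictLimit) live here */
    uint32_t lowLimit;        /* smallest valid index                       */
    uint32_t dictLimit;       /* first index of the current segment         */
};

static const uint8_t *index_to_ptr(const struct window *w, uint32_t idx)
{
    return (idx < w->dictLimit ? w->dictBase : w->base) + idx;
}

int main(void)
{
    static const uint8_t dict[16] = "0123456789abcdef"; /* indices 0..15  */
    static const uint8_t cur[8] = "ghijklmn";           /* indices 16..23 */
    /* base is adjusted so base + 16 lands on cur, like zc->base in the patch */
    struct window w = { cur - 16, dict, 0, 16 };
    return index_to_ptr(&w, 3)[0] == '3' && index_to_ptr(&w, 17)[0] == 'h' ? 0 : 1;
}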
dictEnd : iend; -+ mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, lowPrefixPtr) + 4; -+ ip++; -+ ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH); -+ } else { -+ if ((matchLongIndex > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) { -+ const BYTE *matchEnd = matchLongIndex < dictLimit ? dictEnd : iend; -+ const BYTE *lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr; -+ U32 offset; -+ mLength = ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, lowPrefixPtr) + 8; -+ offset = curr - matchLongIndex; -+ while (((ip > anchor) & (matchLong > lowMatchPtr)) && (ip[-1] == matchLong[-1])) { -+ ip--; -+ matchLong--; -+ mLength++; -+ } /* catch up */ -+ offset_2 = offset_1; -+ offset_1 = offset; -+ ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH); -+ -+ } else if ((matchIndex > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) { -+ size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8); -+ U32 const matchIndex3 = hashLong[h3]; -+ const BYTE *const match3Base = matchIndex3 < dictLimit ? dictBase : base; -+ const BYTE *match3 = match3Base + matchIndex3; -+ U32 offset; -+ hashLong[h3] = curr + 1; -+ if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) { -+ const BYTE *matchEnd = matchIndex3 < dictLimit ? dictEnd : iend; -+ const BYTE *lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr; -+ mLength = ZSTD_count_2segments(ip + 9, match3 + 8, iend, matchEnd, lowPrefixPtr) + 8; -+ ip++; -+ offset = curr + 1 - matchIndex3; -+ while (((ip > anchor) & (match3 > lowMatchPtr)) && (ip[-1] == match3[-1])) { -+ ip--; -+ match3--; -+ mLength++; -+ } /* catch up */ -+ } else { -+ const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend; -+ const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr; -+ mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, lowPrefixPtr) + 4; -+ offset = curr - matchIndex; -+ while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) { -+ ip--; -+ match--; -+ mLength++; -+ } /* catch up */ -+ } -+ offset_2 = offset_1; -+ offset_1 = offset; -+ ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH); -+ -+ } else { -+ ip += ((ip - anchor) >> g_searchStrength) + 1; -+ continue; -+ } -+ } -+ -+ /* found a match : store it */ -+ ip += mLength; -+ anchor = ip; -+ -+ if (ip <= ilimit) { -+ /* Fill Table */ -+ hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = curr + 2; -+ hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = curr + 2; -+ hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base); -+ hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (U32)(ip - 2 - base); -+ /* check immediate repcode */ -+ while (ip <= ilimit) { -+ U32 const curr2 = (U32)(ip - base); -+ U32 const repIndex2 = curr2 - offset_2; -+ const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2; -+ if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ -+ && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) { -+ const BYTE *const repEnd2 = repIndex2 < dictLimit ? 
dictEnd : iend; -+ size_t const repLength2 = -+ ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32; -+ U32 tmpOffset = offset_2; -+ offset_2 = offset_1; -+ offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ -+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH); -+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = curr2; -+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = curr2; -+ ip += repLength2; -+ anchor = ip; -+ continue; -+ } -+ break; -+ } -+ } -+ } -+ -+ /* save reps for next block */ -+ ctx->repToConfirm[0] = offset_1; -+ ctx->repToConfirm[1] = offset_2; -+ -+ /* Last Literals */ -+ { -+ size_t const lastLLSize = iend - anchor; -+ memcpy(seqStorePtr->lit, anchor, lastLLSize); -+ seqStorePtr->lit += lastLLSize; -+ } -+} -+ -+static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -+{ -+ U32 const mls = ctx->params.cParams.searchLength; -+ switch (mls) { -+ default: /* includes case 3 */ -+ case 4: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return; -+ case 5: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return; -+ case 6: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return; -+ case 7: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return; -+ } -+} -+ -+/*-************************************* -+* Binary Tree search -+***************************************/ -+/** ZSTD_insertBt1() : add one or multiple positions to tree. -+* ip : assumed <= iend-8 . -+* @return : nb of positions added */ -+static U32 ZSTD_insertBt1(ZSTD_CCtx *zc, const BYTE *const ip, const U32 mls, const BYTE *const iend, U32 nbCompares, U32 extDict) -+{ -+ U32 *const hashTable = zc->hashTable; -+ U32 const hashLog = zc->params.cParams.hashLog; -+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls); -+ U32 *const bt = zc->chainTable; -+ U32 const btLog = zc->params.cParams.chainLog - 1; -+ U32 const btMask = (1 << btLog) - 1; -+ U32 matchIndex = hashTable[h]; -+ size_t commonLengthSmaller = 0, commonLengthLarger = 0; -+ const BYTE *const base = zc->base; -+ const BYTE *const dictBase = zc->dictBase; -+ const U32 dictLimit = zc->dictLimit; -+ const BYTE *const dictEnd = dictBase + dictLimit; -+ const BYTE *const prefixStart = base + dictLimit; -+ const BYTE *match; -+ const U32 curr = (U32)(ip - base); -+ const U32 btLow = btMask >= curr ? 
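ZSTD_insertBt1 stores its binary search tree inside the chainTable array: position idx owns the adjacent pair bt[2*(idx & btMask)] and bt[2*(idx & btMask) + 1], the links to its lexicographically smaller and larger subtrees, so no separate nodes are allocated and stale entries are simply overwritten as the window slides (the dummy32 variable gives evicted links a harmless write target). The layout in miniature, names mine:

#include <stdint.h>
#include <stdio.h>

/* Each position idx owns one pair of child links inside bt:
 * bt[2*(idx & btMask)]     -> subtree of lexicographically smaller matches
 * bt[2*(idx & btMask) + 1] -> subtree of lexicographically larger matches */
static uint32_t *bt_children(uint32_t *bt, uint32_t idx, uint32_t btMask)
{
    return bt + 2 * (idx & btMask);
}

int main(void)
{
    enum { BT_LOG = 4, BT_MASK = (1 << BT_LOG) - 1 };
    static uint32_t bt[2 * (1 << BT_LOG)];
    uint32_t *node = bt_children(bt, 37, BT_MASK); /* 37 & 15 == 5: pair at bt[10] */
    node[0] = 12;  /* smaller child */
    node[1] = 30;  /* larger child  */
    printf("pair index: %u\n", (unsigned)(node - bt)); /* prints 10 */
    return 0;
}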
0 : curr - btMask; -+ U32 *smallerPtr = bt + 2 * (curr & btMask); -+ U32 *largerPtr = smallerPtr + 1; -+ U32 dummy32; /* to be nullified at the end */ -+ U32 const windowLow = zc->lowLimit; -+ U32 matchEndIdx = curr + 8; -+ size_t bestLength = 8; -+ -+ hashTable[h] = curr; /* Update Hash Table */ -+ -+ while (nbCompares-- && (matchIndex > windowLow)) { -+ U32 *const nextPtr = bt + 2 * (matchIndex & btMask); -+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ -+ -+ if ((!extDict) || (matchIndex + matchLength >= dictLimit)) { -+ match = base + matchIndex; -+ if (match[matchLength] == ip[matchLength]) -+ matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1; -+ } else { -+ match = dictBase + matchIndex; -+ matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart); -+ if (matchIndex + matchLength >= dictLimit) -+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ -+ } -+ -+ if (matchLength > bestLength) { -+ bestLength = matchLength; -+ if (matchLength > matchEndIdx - matchIndex) -+ matchEndIdx = matchIndex + (U32)matchLength; -+ } -+ -+ if (ip + matchLength == iend) /* equal : no way to know if inf or sup */ -+ break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt the tree */ -+ -+ if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */ -+ /* match is smaller than curr */ -+ *smallerPtr = matchIndex; /* update smaller idx */ -+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ -+ if (matchIndex <= btLow) { -+ smallerPtr = &dummy32; -+ break; -+ } /* beyond tree size, stop the search */ -+ smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */ -+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */ -+ } else { -+ /* match is larger than curr */ -+ *largerPtr = matchIndex; -+ commonLengthLarger = matchLength; -+ if (matchIndex <= btLow) { -+ largerPtr = &dummy32; -+ break; -+ } /* beyond tree size, stop the search */ -+ largerPtr = nextPtr; -+ matchIndex = nextPtr[0]; -+ } -+ } -+ -+ *smallerPtr = *largerPtr = 0; -+ if (bestLength > 384) -+ return MIN(192, (U32)(bestLength - 384)); /* speed optimization */ -+ if (matchEndIdx > curr + 8) -+ return matchEndIdx - curr - 8; -+ return 1; -+} -+ -+static size_t ZSTD_insertBtAndFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, size_t *offsetPtr, U32 nbCompares, const U32 mls, -+ U32 extDict) -+{ -+ U32 *const hashTable = zc->hashTable; -+ U32 const hashLog = zc->params.cParams.hashLog; -+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls); -+ U32 *const bt = zc->chainTable; -+ U32 const btLog = zc->params.cParams.chainLog - 1; -+ U32 const btMask = (1 << btLog) - 1; -+ U32 matchIndex = hashTable[h]; -+ size_t commonLengthSmaller = 0, commonLengthLarger = 0; -+ const BYTE *const base = zc->base; -+ const BYTE *const dictBase = zc->dictBase; -+ const U32 dictLimit = zc->dictLimit; -+ const BYTE *const dictEnd = dictBase + dictLimit; -+ const BYTE *const prefixStart = base + dictLimit; -+ const U32 curr = (U32)(ip - base); -+ const U32 btLow = btMask >= curr ? 
0 : curr - btMask; -+ const U32 windowLow = zc->lowLimit; -+ U32 *smallerPtr = bt + 2 * (curr & btMask); -+ U32 *largerPtr = bt + 2 * (curr & btMask) + 1; -+ U32 matchEndIdx = curr + 8; -+ U32 dummy32; /* to be nullified at the end */ -+ size_t bestLength = 0; -+ -+ hashTable[h] = curr; /* Update Hash Table */ -+ -+ while (nbCompares-- && (matchIndex > windowLow)) { -+ U32 *const nextPtr = bt + 2 * (matchIndex & btMask); -+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ -+ const BYTE *match; -+ -+ if ((!extDict) || (matchIndex + matchLength >= dictLimit)) { -+ match = base + matchIndex; -+ if (match[matchLength] == ip[matchLength]) -+ matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1; -+ } else { -+ match = dictBase + matchIndex; -+ matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart); -+ if (matchIndex + matchLength >= dictLimit) -+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ -+ } -+ -+ if (matchLength > bestLength) { -+ if (matchLength > matchEndIdx - matchIndex) -+ matchEndIdx = matchIndex + (U32)matchLength; -+ if ((4 * (int)(matchLength - bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)offsetPtr[0] + 1))) -+ bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex; -+ if (ip + matchLength == iend) /* equal : no way to know if inf or sup */ -+ break; /* drop, to guarantee consistency (miss a little bit of compression) */ -+ } -+ -+ if (match[matchLength] < ip[matchLength]) { -+ /* match is smaller than curr */ -+ *smallerPtr = matchIndex; /* update smaller idx */ -+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ -+ if (matchIndex <= btLow) { -+ smallerPtr = &dummy32; -+ break; -+ } /* beyond tree size, stop the search */ -+ smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */ -+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */ -+ } else { -+ /* match is larger than curr */ -+ *largerPtr = matchIndex; -+ commonLengthLarger = matchLength; -+ if (matchIndex <= btLow) { -+ largerPtr = &dummy32; -+ break; -+ } /* beyond tree size, stop the search */ -+ largerPtr = nextPtr; -+ matchIndex = nextPtr[0]; -+ } -+ } -+ -+ *smallerPtr = *largerPtr = 0; -+ -+ zc->nextToUpdate = (matchEndIdx > curr + 8) ? 
matchEndIdx - 8 : curr + 1; -+ return bestLength; -+} -+ -+static void ZSTD_updateTree(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls) -+{ -+ const BYTE *const base = zc->base; -+ const U32 target = (U32)(ip - base); -+ U32 idx = zc->nextToUpdate; -+ -+ while (idx < target) -+ idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 0); -+} -+ -+/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ -+static size_t ZSTD_BtFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls) -+{ -+ if (ip < zc->base + zc->nextToUpdate) -+ return 0; /* skipped area */ -+ ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls); -+ return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0); -+} -+ -+static size_t ZSTD_BtFindBestMatch_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */ -+ const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 matchLengthSearch) -+{ -+ switch (matchLengthSearch) { -+ default: /* includes case 3 */ -+ case 4: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4); -+ case 5: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5); -+ case 7: -+ case 6: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6); -+ } -+} -+ -+static void ZSTD_updateTree_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls) -+{ -+ const BYTE *const base = zc->base; -+ const U32 target = (U32)(ip - base); -+ U32 idx = zc->nextToUpdate; -+ -+ while (idx < target) -+ idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 1); -+} -+ -+/** Tree updater, providing best match */ -+static size_t ZSTD_BtFindBestMatch_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, -+ const U32 mls) -+{ -+ if (ip < zc->base + zc->nextToUpdate) -+ return 0; /* skipped area */ -+ ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls); -+ return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1); -+} -+ -+static size_t ZSTD_BtFindBestMatch_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */ -+ const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, -+ const U32 matchLengthSearch) -+{ -+ switch (matchLengthSearch) { -+ default: /* includes case 3 */ -+ case 4: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4); -+ case 5: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5); -+ case 7: -+ case 6: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6); -+ } -+} -+ -+/* ********************************* -+* Hash Chain -+***********************************/ -+#define NEXT_IN_CHAIN(d, mask) chainTable[(d)&mask] -+ -+/* Update chains up to ip (excluded) -+ Assumption : always within prefix (i.e. 
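ZSTD_updateTree and ZSTD_insertAndFindFirstIndex below both follow the same lazy-maintenance pattern: nothing is inserted while matches are being consumed; nextToUpdate records how far the structure is synchronized, and the next search first replays every skipped position up to the current one. The skeleton of that catch-up loop, abstracted with invented names:

#include <stdint.h>

/* Catch-up pattern: bring an index structure up to `target`.
 * insert_one() may cover several positions and returns how many. */
typedef uint32_t (*insert_fn)(void *table, uint32_t idx);

static void catch_up(void *table, uint32_t *nextToUpdate, uint32_t target,
                     insert_fn insert_one)
{
    uint32_t idx = *nextToUpdate;
    while (idx < target)
        idx += insert_one(table, idx);
    *nextToUpdate = target;
}

static uint32_t step3(void *t, uint32_t idx) { (void)t; (void)idx; return 3; }

int main(void)
{
    uint32_t next = 10;
    catch_up(0, &next, 100, step3);
    return next == 100 ? 0 : 1;
}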
not within extDict) */ -+FORCE_INLINE -+U32 ZSTD_insertAndFindFirstIndex(ZSTD_CCtx *zc, const BYTE *ip, U32 mls) -+{ -+ U32 *const hashTable = zc->hashTable; -+ const U32 hashLog = zc->params.cParams.hashLog; -+ U32 *const chainTable = zc->chainTable; -+ const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1; -+ const BYTE *const base = zc->base; -+ const U32 target = (U32)(ip - base); -+ U32 idx = zc->nextToUpdate; -+ -+ while (idx < target) { /* catch up */ -+ size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); -+ NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; -+ hashTable[h] = idx; -+ idx++; -+ } -+ -+ zc->nextToUpdate = target; -+ return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; -+} -+ -+/* inlining is important to hardwire a hot branch (template emulation) */ -+FORCE_INLINE -+size_t ZSTD_HcFindBestMatch_generic(ZSTD_CCtx *zc, /* Index table will be updated */ -+ const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls, -+ const U32 extDict) -+{ -+ U32 *const chainTable = zc->chainTable; -+ const U32 chainSize = (1 << zc->params.cParams.chainLog); -+ const U32 chainMask = chainSize - 1; -+ const BYTE *const base = zc->base; -+ const BYTE *const dictBase = zc->dictBase; -+ const U32 dictLimit = zc->dictLimit; -+ const BYTE *const prefixStart = base + dictLimit; -+ const BYTE *const dictEnd = dictBase + dictLimit; -+ const U32 lowLimit = zc->lowLimit; -+ const U32 curr = (U32)(ip - base); -+ const U32 minChain = curr > chainSize ? curr - chainSize : 0; -+ int nbAttempts = maxNbAttempts; -+ size_t ml = EQUAL_READ32 - 1; -+ -+ /* HC4 match finder */ -+ U32 matchIndex = ZSTD_insertAndFindFirstIndex(zc, ip, mls); -+ -+ for (; (matchIndex > lowLimit) & (nbAttempts > 0); nbAttempts--) { -+ const BYTE *match; -+ size_t currMl = 0; -+ if ((!extDict) || matchIndex >= dictLimit) { -+ match = base + matchIndex; -+ if (match[ml] == ip[ml]) /* potentially better */ -+ currMl = ZSTD_count(ip, match, iLimit); -+ } else { -+ match = dictBase + matchIndex; -+ if (ZSTD_read32(match) == ZSTD_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ -+ currMl = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32; -+ } -+ -+ /* save best solution */ -+ if (currMl > ml) { -+ ml = currMl; -+ *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE; -+ if (ip + currMl == iLimit) -+ break; /* best possible, and avoid read overflow*/ -+ } -+ -+ if (matchIndex <= minChain) -+ break; -+ matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); -+ } -+ -+ return ml; -+} -+ -+FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, -+ const U32 matchLengthSearch) -+{ -+ switch (matchLengthSearch) { -+ default: /* includes case 3 */ -+ case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0); -+ case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0); -+ case 7: -+ case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0); -+ } -+} -+ -+FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, -+ const U32 matchLengthSearch) -+{ -+ switch (matchLengthSearch) { -+ default: /* includes case 3 */ -+ case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1); -+ case 5: 
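The hash-chain finder above threads all positions sharing a hash into a singly linked list: hashTable[h] holds the newest position and chainTable[pos & chainMask] the previous one, which is exactly what the NEXT_IN_CHAIN macro hides. Insert-and-walk in isolation; the sizes and names here are illustrative, not the patch's:

#include <stdint.h>

#define CHAIN_LOG 16
#define CHAIN_MASK ((1u << CHAIN_LOG) - 1)

static uint32_t head[1u << 15];          /* hashTable: newest position per hash */
static uint32_t chain[1u << CHAIN_LOG];  /* chainTable: position -> previous    */

static void hc_insert(uint32_t h, uint32_t pos)
{
    chain[pos & CHAIN_MASK] = head[h];   /* remember the previous head */
    head[h] = pos;                       /* new head of the chain      */
}

/* Visit up to maxAttempts earlier positions with the same hash. */
static uint32_t hc_walk(uint32_t h, uint32_t lowLimit, int maxAttempts)
{
    uint32_t m = head[h];
    while (m > lowLimit && maxAttempts-- > 0)
        m = chain[m & CHAIN_MASK];       /* candidate match at index m */
    return m;
}

int main(void)
{
    hc_insert(42, 100);
    hc_insert(42, 200);
    hc_insert(42, 300);
    /* one step back from the newest position yields 200 */
    return hc_walk(42, 0, 1) == 200 ? 0 : 1;
}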
return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1); -+ case 7: -+ case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1); -+ } -+} -+ -+/* ******************************* -+* Common parser - lazy strategy -+*********************************/ -+FORCE_INLINE -+void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth) -+{ -+ seqStore_t *seqStorePtr = &(ctx->seqStore); -+ const BYTE *const istart = (const BYTE *)src; -+ const BYTE *ip = istart; -+ const BYTE *anchor = istart; -+ const BYTE *const iend = istart + srcSize; -+ const BYTE *const ilimit = iend - 8; -+ const BYTE *const base = ctx->base + ctx->dictLimit; -+ -+ U32 const maxSearches = 1 << ctx->params.cParams.searchLog; -+ U32 const mls = ctx->params.cParams.searchLength; -+ -+ typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch); -+ searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS; -+ U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset = 0; -+ -+ /* init */ -+ ip += (ip == base); -+ ctx->nextToUpdate3 = ctx->nextToUpdate; -+ { -+ U32 const maxRep = (U32)(ip - base); -+ if (offset_2 > maxRep) -+ savedOffset = offset_2, offset_2 = 0; -+ if (offset_1 > maxRep) -+ savedOffset = offset_1, offset_1 = 0; -+ } -+ -+ /* Match Loop */ -+ while (ip < ilimit) { -+ size_t matchLength = 0; -+ size_t offset = 0; -+ const BYTE *start = ip + 1; -+ -+ /* check repCode */ -+ if ((offset_1 > 0) & (ZSTD_read32(ip + 1) == ZSTD_read32(ip + 1 - offset_1))) { -+ /* repcode : we take it */ -+ matchLength = ZSTD_count(ip + 1 + EQUAL_READ32, ip + 1 + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32; -+ if (depth == 0) -+ goto _storeSequence; -+ } -+ -+ /* first search (depth 0) */ -+ { -+ size_t offsetFound = 99999999; -+ size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); -+ if (ml2 > matchLength) -+ matchLength = ml2, start = ip, offset = offsetFound; -+ } -+ -+ if (matchLength < EQUAL_READ32) { -+ ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */ -+ continue; -+ } -+ -+ /* let's try to find a better solution */ -+ if (depth >= 1) -+ while (ip < ilimit) { -+ ip++; -+ if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) { -+ size_t const mlRep = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32; -+ int const gain2 = (int)(mlRep * 3); -+ int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1); -+ if ((mlRep >= EQUAL_READ32) && (gain2 > gain1)) -+ matchLength = mlRep, offset = 0, start = ip; -+ } -+ { -+ size_t offset2 = 99999999; -+ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); -+ int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */ -+ int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4); -+ if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { -+ matchLength = ml2, offset = offset2, start = ip; -+ continue; /* search a better one */ -+ } -+ } -+ -+ /* let's find an even better one */ -+ if ((depth == 2) && (ip < ilimit)) { -+ ip++; -+ if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) { -+ size_t const ml2 = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32; -+ int const 
gain2 = (int)(ml2 * 4); -+ int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1); -+ if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) -+ matchLength = ml2, offset = 0, start = ip; -+ } -+ { -+ size_t offset2 = 99999999; -+ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); -+ int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */ -+ int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7); -+ if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { -+ matchLength = ml2, offset = offset2, start = ip; -+ continue; -+ } -+ } -+ } -+ break; /* nothing found : store previous solution */ -+ } -+ -+ /* NOTE: -+ * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior. -+ * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which -+ * overflows the pointer, which is undefined behavior. -+ */ -+ /* catch up */ -+ if (offset) { -+ while ((start > anchor) && (start > base + offset - ZSTD_REP_MOVE) && -+ (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1])) /* only search for offset within prefix */ -+ { -+ start--; -+ matchLength++; -+ } -+ offset_2 = offset_1; -+ offset_1 = (U32)(offset - ZSTD_REP_MOVE); -+ } -+ -+ /* store sequence */ -+_storeSequence: -+ { -+ size_t const litLength = start - anchor; -+ ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH); -+ anchor = ip = start + matchLength; -+ } -+ -+ /* check immediate repcode */ -+ while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) { -+ /* store sequence */ -+ matchLength = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_2, iend) + EQUAL_READ32; -+ offset = offset_2; -+ offset_2 = offset_1; -+ offset_1 = (U32)offset; /* swap repcodes */ -+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH); -+ ip += matchLength; -+ anchor = ip; -+ continue; /* faster when present ... (?) */ -+ } -+ } -+ -+ /* Save reps for next block */ -+ ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset; -+ ctx->repToConfirm[1] = offset_2 ? 
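The depth-1 and depth-2 probes above arbitrate between candidates with a cheap cost model: roughly four points per matched byte minus the bit-length of the offset, plus a small constant bonus protecting the incumbent (repcode candidates use a *3 weighting, since their offsets cost almost nothing to encode). One of those scores in isolation; match_gain is my name and a GCC/Clang highbit equivalent is assumed:

#include <stdint.h>
#include <stdio.h>

static unsigned highbit32(uint32_t v) /* position of highest set bit, v != 0 */
{
    return 31 - __builtin_clz(v);
}

/* Approximate "gain" of a match: reward length, charge for offset bits. */
static int match_gain(size_t matchLength, uint32_t offset)
{
    return (int)(matchLength * 4) - (int)highbit32(offset + 1);
}

int main(void)
{
    /* a slightly shorter match at a much closer distance can win */
    printf("%d vs %d\n", match_gain(18, 1 << 20), match_gain(17, 64)); /* 52 vs 62 */
    return 0;
}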
offset_2 : savedOffset; -+ -+ /* Last Literals */ -+ { -+ size_t const lastLLSize = iend - anchor; -+ memcpy(seqStorePtr->lit, anchor, lastLLSize); -+ seqStorePtr->lit += lastLLSize; -+ } -+} -+ -+static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); } -+ -+static void ZSTD_compressBlock_lazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); } -+ -+static void ZSTD_compressBlock_lazy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); } -+ -+static void ZSTD_compressBlock_greedy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); } -+ -+FORCE_INLINE -+void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth) -+{ -+ seqStore_t *seqStorePtr = &(ctx->seqStore); -+ const BYTE *const istart = (const BYTE *)src; -+ const BYTE *ip = istart; -+ const BYTE *anchor = istart; -+ const BYTE *const iend = istart + srcSize; -+ const BYTE *const ilimit = iend - 8; -+ const BYTE *const base = ctx->base; -+ const U32 dictLimit = ctx->dictLimit; -+ const U32 lowestIndex = ctx->lowLimit; -+ const BYTE *const prefixStart = base + dictLimit; -+ const BYTE *const dictBase = ctx->dictBase; -+ const BYTE *const dictEnd = dictBase + dictLimit; -+ const BYTE *const dictStart = dictBase + ctx->lowLimit; -+ -+ const U32 maxSearches = 1 << ctx->params.cParams.searchLog; -+ const U32 mls = ctx->params.cParams.searchLength; -+ -+ typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch); -+ searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS; -+ -+ U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1]; -+ -+ /* init */ -+ ctx->nextToUpdate3 = ctx->nextToUpdate; -+ ip += (ip == prefixStart); -+ -+ /* Match Loop */ -+ while (ip < ilimit) { -+ size_t matchLength = 0; -+ size_t offset = 0; -+ const BYTE *start = ip + 1; -+ U32 curr = (U32)(ip - base); -+ -+ /* check repCode */ -+ { -+ const U32 repIndex = (U32)(curr + 1 - offset_1); -+ const BYTE *const repBase = repIndex < dictLimit ? dictBase : base; -+ const BYTE *const repMatch = repBase + repIndex; -+ if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ -+ if (ZSTD_read32(ip + 1) == ZSTD_read32(repMatch)) { -+ /* repcode detected we should take it */ -+ const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend; -+ matchLength = -+ ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32; -+ if (depth == 0) -+ goto _storeSequence; -+ } -+ } -+ -+ /* first search (depth 0) */ -+ { -+ size_t offsetFound = 99999999; -+ size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); -+ if (ml2 > matchLength) -+ matchLength = ml2, start = ip, offset = offsetFound; -+ } -+ -+ if (matchLength < EQUAL_READ32) { -+ ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */ -+ continue; -+ } -+ -+ /* let's try to find a better solution */ -+ if (depth >= 1) -+ while (ip < ilimit) { -+ ip++; -+ curr++; -+ /* check repCode */ -+ if (offset) { -+ const U32 repIndex = (U32)(curr - offset_1); -+ const BYTE *const repBase = repIndex < dictLimit ? 
dictBase : base; -+ const BYTE *const repMatch = repBase + repIndex; -+ if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ -+ if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) { -+ /* repcode detected */ -+ const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend; -+ size_t const repLength = -+ ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + -+ EQUAL_READ32; -+ int const gain2 = (int)(repLength * 3); -+ int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1); -+ if ((repLength >= EQUAL_READ32) && (gain2 > gain1)) -+ matchLength = repLength, offset = 0, start = ip; -+ } -+ } -+ -+ /* search match, depth 1 */ -+ { -+ size_t offset2 = 99999999; -+ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); -+ int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */ -+ int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4); -+ if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { -+ matchLength = ml2, offset = offset2, start = ip; -+ continue; /* search a better one */ -+ } -+ } -+ -+ /* let's find an even better one */ -+ if ((depth == 2) && (ip < ilimit)) { -+ ip++; -+ curr++; -+ /* check repCode */ -+ if (offset) { -+ const U32 repIndex = (U32)(curr - offset_1); -+ const BYTE *const repBase = repIndex < dictLimit ? dictBase : base; -+ const BYTE *const repMatch = repBase + repIndex; -+ if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ -+ if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) { -+ /* repcode detected */ -+ const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend; -+ size_t repLength = ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, -+ repEnd, prefixStart) + -+ EQUAL_READ32; -+ int gain2 = (int)(repLength * 4); -+ int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1); -+ if ((repLength >= EQUAL_READ32) && (gain2 > gain1)) -+ matchLength = repLength, offset = 0, start = ip; -+ } -+ } -+ -+ /* search match, depth 2 */ -+ { -+ size_t offset2 = 99999999; -+ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); -+ int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */ -+ int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7); -+ if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { -+ matchLength = ml2, offset = offset2, start = ip; -+ continue; -+ } -+ } -+ } -+ break; /* nothing found : store previous solution */ -+ } -+ -+ /* catch up */ -+ if (offset) { -+ U32 const matchIndex = (U32)((start - base) - (offset - ZSTD_REP_MOVE)); -+ const BYTE *match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex; -+ const BYTE *const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart; -+ while ((start > anchor) && (match > mStart) && (start[-1] == match[-1])) { -+ start--; -+ match--; -+ matchLength++; -+ } /* catch up */ -+ offset_2 = offset_1; -+ offset_1 = (U32)(offset - ZSTD_REP_MOVE); -+ } -+ -+ /* store sequence */ -+ _storeSequence : { -+ size_t const litLength = start - anchor; -+ ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH); -+ anchor = ip = start + matchLength; -+ } -+ -+ /* check immediate repcode */ -+ while (ip <= ilimit) { -+ const U32 repIndex = (U32)((ip - base) - offset_2); -+ const BYTE *const repBase = repIndex < dictLimit ? 
dictBase : base; -+ const BYTE *const repMatch = repBase + repIndex; -+ if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ -+ if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) { -+ /* repcode detected we should take it */ -+ const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend; -+ matchLength = -+ ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32; -+ offset = offset_2; -+ offset_2 = offset_1; -+ offset_1 = (U32)offset; /* swap offset history */ -+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH); -+ ip += matchLength; -+ anchor = ip; -+ continue; /* faster when present ... (?) */ -+ } -+ break; -+ } -+ } -+ -+ /* Save reps for next block */ -+ ctx->repToConfirm[0] = offset_1; -+ ctx->repToConfirm[1] = offset_2; -+ -+ /* Last Literals */ -+ { -+ size_t const lastLLSize = iend - anchor; -+ memcpy(seqStorePtr->lit, anchor, lastLLSize); -+ seqStorePtr->lit += lastLLSize; -+ } -+} -+ -+void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); } -+ -+static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -+{ -+ ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1); -+} -+ -+static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -+{ -+ ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2); -+} -+ -+static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -+{ -+ ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2); -+} -+ -+/* The optimal parser */ -+#include "zstd_opt.h" -+ -+static void ZSTD_compressBlock_btopt(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -+{ -+#ifdef ZSTD_OPT_H_91842398743 -+ ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0); -+#else -+ (void)ctx; -+ (void)src; -+ (void)srcSize; -+ return; -+#endif -+} -+ -+static void ZSTD_compressBlock_btopt2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -+{ -+#ifdef ZSTD_OPT_H_91842398743 -+ ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1); -+#else -+ (void)ctx; -+ (void)src; -+ (void)srcSize; -+ return; -+#endif -+} -+ -+static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -+{ -+#ifdef ZSTD_OPT_H_91842398743 -+ ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0); -+#else -+ (void)ctx; -+ (void)src; -+ (void)srcSize; -+ return; -+#endif -+} -+ -+static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -+{ -+#ifdef ZSTD_OPT_H_91842398743 -+ ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1); -+#else -+ (void)ctx; -+ (void)src; -+ (void)srcSize; -+ return; -+#endif -+} -+ -+typedef void (*ZSTD_blockCompressor)(ZSTD_CCtx *ctx, const void *src, size_t srcSize); -+ -+static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict) -+{ -+ static const ZSTD_blockCompressor blockCompressor[2][8] = { -+ {ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, -+ ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2}, -+ {ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict, -+ ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, 
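ZSTD_selectBlockCompressor, whose 2 x 8 table spans the lines around this point, reduces strategy dispatch to an array lookup: row 0 holds the contiguous-prefix compressors, row 1 their extDict twins, and the column index is the ZSTD_strategy value. The same shape in miniature (toy functions, a single strategy column, names mine):

#include <stdio.h>

typedef void (*block_fn)(const void *src, size_t n);

static void fast_prefix(const void *s, size_t n)  { (void)s; (void)n; puts("fast/prefix");  }
static void fast_extdict(const void *s, size_t n) { (void)s; (void)n; puts("fast/extDict"); }

/* rows: 0 = prefix only, 1 = extDict; columns: strategy enum */
static const block_fn table[2][1] = { { fast_prefix }, { fast_extdict } };

int main(void)
{
    int extDict = 1;
    table[extDict][0]("data", 4);
    return 0;
}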
ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict}}; -+ -+ return blockCompressor[extDict][(U32)strat]; -+} -+ -+static size_t ZSTD_compressBlock_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -+{ -+ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit); -+ const BYTE *const base = zc->base; -+ const BYTE *const istart = (const BYTE *)src; -+ const U32 curr = (U32)(istart - base); -+ if (srcSize < MIN_CBLOCK_SIZE + ZSTD_blockHeaderSize + 1) -+ return 0; /* don't even attempt compression below a certain srcSize */ -+ ZSTD_resetSeqStore(&(zc->seqStore)); -+ if (curr > zc->nextToUpdate + 384) -+ zc->nextToUpdate = curr - MIN(192, (U32)(curr - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */ -+ blockCompressor(zc, src, srcSize); -+ return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize); -+} -+ -+/*! ZSTD_compress_generic() : -+* Compress a chunk of data into one or multiple blocks. -+* All blocks will be terminated, all input will be consumed. -+* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. -+* Frame is supposed already started (header already produced) -+* @return : compressed size, or an error code -+*/ -+static size_t ZSTD_compress_generic(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastFrameChunk) -+{ -+ size_t blockSize = cctx->blockSize; -+ size_t remaining = srcSize; -+ const BYTE *ip = (const BYTE *)src; -+ BYTE *const ostart = (BYTE *)dst; -+ BYTE *op = ostart; -+ U32 const maxDist = 1 << cctx->params.cParams.windowLog; -+ -+ if (cctx->params.fParams.checksumFlag && srcSize) -+ xxh64_update(&cctx->xxhState, src, srcSize); -+ -+ while (remaining) { -+ U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); -+ size_t cSize; -+ -+ if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE) -+ return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */ -+ if (remaining < blockSize) -+ blockSize = remaining; -+ -+ /* preemptive overflow correction */ -+ if (cctx->lowLimit > (3U << 29)) { -+ U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1; -+ U32 const curr = (U32)(ip - cctx->base); -+ U32 const newCurr = (curr & cycleMask) + (1 << cctx->params.cParams.windowLog); -+ U32 const correction = curr - newCurr; -+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30); -+ ZSTD_reduceIndex(cctx, correction); -+ cctx->base += correction; -+ cctx->dictBase += correction; -+ cctx->lowLimit -= correction; -+ cctx->dictLimit -= correction; -+ if (cctx->nextToUpdate < correction) -+ cctx->nextToUpdate = 0; -+ else -+ cctx->nextToUpdate -= correction; -+ } -+ -+ if ((U32)(ip + blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) { -+ /* enforce maxDist */ -+ U32 const newLowLimit = (U32)(ip + blockSize - cctx->base) - maxDist; -+ if (cctx->lowLimit < newLowLimit) -+ cctx->lowLimit = newLowLimit; -+ if (cctx->dictLimit < cctx->lowLimit) -+ cctx->dictLimit = cctx->lowLimit; -+ } -+ -+ cSize = ZSTD_compressBlock_internal(cctx, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, ip, blockSize); -+ if (ZSTD_isError(cSize)) -+ return cSize; -+ -+ if (cSize == 0) { /* block is not compressible */ -+ U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw) << 1) + (U32)(blockSize << 3); -+ if (blockSize + ZSTD_blockHeaderSize > dstCapacity) -+ 
return ERROR(dstSize_tooSmall); -+ ZSTD_writeLE32(op, cBlockHeader24); /* no pb, 4th byte will be overwritten */ -+ memcpy(op + ZSTD_blockHeaderSize, ip, blockSize); -+ cSize = ZSTD_blockHeaderSize + blockSize; -+ } else { -+ U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed) << 1) + (U32)(cSize << 3); -+ ZSTD_writeLE24(op, cBlockHeader24); -+ cSize += ZSTD_blockHeaderSize; -+ } -+ -+ remaining -= blockSize; -+ dstCapacity -= cSize; -+ ip += blockSize; -+ op += cSize; -+ } -+ -+ if (lastFrameChunk && (op > ostart)) -+ cctx->stage = ZSTDcs_ending; -+ return op - ostart; -+} -+ -+static size_t ZSTD_writeFrameHeader(void *dst, size_t dstCapacity, ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID) -+{ -+ BYTE *const op = (BYTE *)dst; -+ U32 const dictIDSizeCode = (dictID > 0) + (dictID >= 256) + (dictID >= 65536); /* 0-3 */ -+ U32 const checksumFlag = params.fParams.checksumFlag > 0; -+ U32 const windowSize = 1U << params.cParams.windowLog; -+ U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize); -+ BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3); -+ U32 const fcsCode = -+ params.fParams.contentSizeFlag ? (pledgedSrcSize >= 256) + (pledgedSrcSize >= 65536 + 256) + (pledgedSrcSize >= 0xFFFFFFFFU) : 0; /* 0-3 */ -+ BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6)); -+ size_t pos; -+ -+ if (dstCapacity < ZSTD_frameHeaderSize_max) -+ return ERROR(dstSize_tooSmall); -+ -+ ZSTD_writeLE32(dst, ZSTD_MAGICNUMBER); -+ op[4] = frameHeaderDescriptionByte; -+ pos = 5; -+ if (!singleSegment) -+ op[pos++] = windowLogByte; -+ switch (dictIDSizeCode) { -+ default: /* impossible */ -+ case 0: break; -+ case 1: -+ op[pos] = (BYTE)(dictID); -+ pos++; -+ break; -+ case 2: -+ ZSTD_writeLE16(op + pos, (U16)dictID); -+ pos += 2; -+ break; -+ case 3: -+ ZSTD_writeLE32(op + pos, dictID); -+ pos += 4; -+ break; -+ } -+ switch (fcsCode) { -+ default: /* impossible */ -+ case 0: -+ if (singleSegment) -+ op[pos++] = (BYTE)(pledgedSrcSize); -+ break; -+ case 1: -+ ZSTD_writeLE16(op + pos, (U16)(pledgedSrcSize - 256)); -+ pos += 2; -+ break; -+ case 2: -+ ZSTD_writeLE32(op + pos, (U32)(pledgedSrcSize)); -+ pos += 4; -+ break; -+ case 3: -+ ZSTD_writeLE64(op + pos, (U64)(pledgedSrcSize)); -+ pos += 8; -+ break; -+ } -+ return pos; -+} -+ -+static size_t ZSTD_compressContinue_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 frame, U32 lastFrameChunk) -+{ -+ const BYTE *const ip = (const BYTE *)src; -+ size_t fhSize = 0; -+ -+ if (cctx->stage == ZSTDcs_created) -+ return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */ -+ -+ if (frame && (cctx->stage == ZSTDcs_init)) { -+ fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID); -+ if (ZSTD_isError(fhSize)) -+ return fhSize; -+ dstCapacity -= fhSize; -+ dst = (char *)dst + fhSize; -+ cctx->stage = ZSTDcs_ongoing; -+ } -+ -+ /* Check if blocks follow each other */ -+ if (src != cctx->nextSrc) { -+ /* not contiguous */ -+ ptrdiff_t const delta = cctx->nextSrc - ip; -+ cctx->lowLimit = cctx->dictLimit; -+ cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base); -+ cctx->dictBase = cctx->base; -+ cctx->base -= delta; -+ cctx->nextToUpdate = cctx->dictLimit; -+ if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE) -+ cctx->lowLimit = cctx->dictLimit; /* too small extDict */ -+ } -+ -+ /* if input and dictionary 
overlap : reduce dictionary (area presumed modified by input) */ -+ if ((ip + srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) { -+ ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase; -+ U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx; -+ cctx->lowLimit = lowLimitMax; -+ } -+ -+ cctx->nextSrc = ip + srcSize; -+ -+ if (srcSize) { -+ size_t const cSize = frame ? ZSTD_compress_generic(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) -+ : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize); -+ if (ZSTD_isError(cSize)) -+ return cSize; -+ return cSize + fhSize; -+ } else -+ return fhSize; -+} -+ -+size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -+{ -+ return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0); -+} -+ -+size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx) { return MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog); } -+ -+size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -+{ -+ size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx); -+ if (srcSize > blockSizeMax) -+ return ERROR(srcSize_wrong); -+ return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0); -+} -+ -+/*! ZSTD_loadDictionaryContent() : -+ * @return : 0, or an error code -+ */ -+static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx *zc, const void *src, size_t srcSize) -+{ -+ const BYTE *const ip = (const BYTE *)src; -+ const BYTE *const iend = ip + srcSize; -+ -+ /* input becomes curr prefix */ -+ zc->lowLimit = zc->dictLimit; -+ zc->dictLimit = (U32)(zc->nextSrc - zc->base); -+ zc->dictBase = zc->base; -+ zc->base += ip - zc->nextSrc; -+ zc->nextToUpdate = zc->dictLimit; -+ zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base); -+ -+ zc->nextSrc = iend; -+ if (srcSize <= HASH_READ_SIZE) -+ return 0; -+ -+ switch (zc->params.cParams.strategy) { -+ case ZSTD_fast: ZSTD_fillHashTable(zc, iend, zc->params.cParams.searchLength); break; -+ -+ case ZSTD_dfast: ZSTD_fillDoubleHashTable(zc, iend, zc->params.cParams.searchLength); break; -+ -+ case ZSTD_greedy: -+ case ZSTD_lazy: -+ case ZSTD_lazy2: -+ if (srcSize >= HASH_READ_SIZE) -+ ZSTD_insertAndFindFirstIndex(zc, iend - HASH_READ_SIZE, zc->params.cParams.searchLength); -+ break; -+ -+ case ZSTD_btlazy2: -+ case ZSTD_btopt: -+ case ZSTD_btopt2: -+ if (srcSize >= HASH_READ_SIZE) -+ ZSTD_updateTree(zc, iend - HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength); -+ break; -+ -+ default: -+ return ERROR(GENERIC); /* strategy doesn't exist; impossible */ -+ } -+ -+ zc->nextToUpdate = (U32)(iend - zc->base); -+ return 0; -+}
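A dictionary reaches the loaders in this file through ZSTD_compress_insertDictionary(), defined a little further down: a buffer that does not start with ZSTD_DICT_MAGIC is loaded as raw content, everything else goes through ZSTD_loadZstdDictionary(). As a hedged sketch of a kernel-side caller of the one-shot dictionary API from this file (the helper name and level 3 are illustrative; results are zstd-style size_t codes and must be tested with ZSTD_isError()):

/* illustrative sketch, not part of the patch; written as if inside this file */
static size_t example_compress_with_dict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
                                         const void *src, size_t srcSize,
                                         const void *dict, size_t dictSize)
{
        /* the srcSize/dictSize hints select the per-size parameter tables below */
        ZSTD_parameters const params = ZSTD_getParams(3, srcSize, dictSize);

        /* re-digests `dict` on every call; see the ZSTD_CDict API below for reuse */
        return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize,
                                       dict, dictSize, params);
}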
-+ -+/* Dictionaries that assign zero probability to symbols that show up cause problems -+ when FSE encoding. Refuse dictionaries that assign zero probability to symbols -+ that we may encounter during compression. -+ NOTE: This behavior is not standard and could be improved in the future. -+ */ -+static size_t ZSTD_checkDictNCount(short *normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) -+{ -+ U32 s; -+ if (dictMaxSymbolValue < maxSymbolValue) -+ return ERROR(dictionary_corrupted); -+ for (s = 0; s <= maxSymbolValue; ++s) { -+ if (normalizedCounter[s] == 0) -+ return ERROR(dictionary_corrupted); -+ } -+ return 0; -+} -+ -+/* Dictionary format : -+ * See : -+ * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format -+ */ -+/*! ZSTD_loadZstdDictionary() : -+ * @return : 0, or an error code -+ * assumptions : magic number supposed already checked -+ * dictSize supposed > 8 -+ */ -+static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize) -+{ -+ const BYTE *dictPtr = (const BYTE *)dict; -+ const BYTE *const dictEnd = dictPtr + dictSize; -+ short offcodeNCount[MaxOff + 1]; -+ unsigned offcodeMaxValue = MaxOff; -+ -+ dictPtr += 4; /* skip magic number */ -+ cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 : ZSTD_readLE32(dictPtr); -+ dictPtr += 4; -+ -+ { -+ size_t const hufHeaderSize = HUF_readCTable_wksp(cctx->hufTable, 255, dictPtr, dictEnd - dictPtr, cctx->tmpCounters, sizeof(cctx->tmpCounters)); -+ if (HUF_isError(hufHeaderSize)) -+ return ERROR(dictionary_corrupted); -+ dictPtr += hufHeaderSize; -+ } -+ -+ { -+ unsigned offcodeLog; -+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr); -+ if (FSE_isError(offcodeHeaderSize)) -+ return ERROR(dictionary_corrupted); -+ if (offcodeLog > OffFSELog) -+ return ERROR(dictionary_corrupted); -+ /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */ -+ CHECK_E(FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)), -+ dictionary_corrupted); -+ dictPtr += offcodeHeaderSize; -+ } -+ -+ { -+ short matchlengthNCount[MaxML + 1]; -+ unsigned matchlengthMaxValue = MaxML, matchlengthLog; -+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr); -+ if (FSE_isError(matchlengthHeaderSize)) -+ return ERROR(dictionary_corrupted); -+ if (matchlengthLog > MLFSELog) -+ return ERROR(dictionary_corrupted); -+ /* Every match length code must have non-zero probability */ -+ CHECK_F(ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML)); -+ CHECK_E( -+ FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)), -+ dictionary_corrupted); -+ dictPtr += matchlengthHeaderSize; -+ } -+ -+ { -+ short litlengthNCount[MaxLL + 1]; -+ unsigned litlengthMaxValue = MaxLL, litlengthLog; -+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr); -+ if (FSE_isError(litlengthHeaderSize)) -+ return ERROR(dictionary_corrupted); -+ if (litlengthLog > LLFSELog) -+ return ERROR(dictionary_corrupted); -+ /* Every literal length code must have non-zero probability */ -+ CHECK_F(ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL)); -+ CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)), -+ dictionary_corrupted); -+ dictPtr += litlengthHeaderSize; -+ } -+ -+ if (dictPtr + 12 > dictEnd) -+ return ERROR(dictionary_corrupted);
-+ cctx->rep[0] = ZSTD_readLE32(dictPtr + 0); -+ cctx->rep[1] = ZSTD_readLE32(dictPtr + 4); -+ cctx->rep[2] = ZSTD_readLE32(dictPtr + 8); -+ dictPtr += 12; -+ -+ { -+ size_t const dictContentSize = (size_t)(dictEnd - dictPtr); -+ U32 offcodeMax = MaxOff; -+ if (dictContentSize <= ((U32)-1) - 128 KB) { -+ U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ -+ offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */ -+ } -+ /* All offset values <= dictContentSize + 128 KB must be representable */ -+ CHECK_F(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff))); -+ /* All repCodes must be <= dictContentSize and != 0 */ -+ { -+ U32 u; -+ for (u = 0; u < 3; u++) { -+ if (cctx->rep[u] == 0) -+ return ERROR(dictionary_corrupted); -+ if (cctx->rep[u] > dictContentSize) -+ return ERROR(dictionary_corrupted); -+ } -+ } -+ -+ cctx->flagStaticTables = 1; -+ cctx->flagStaticHufTable = HUF_repeat_valid; -+ return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize); -+ } -+} -+ -+/** ZSTD_compress_insertDictionary() : -+* @return : 0, or an error code */ -+static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize) -+{ -+ if ((dict == NULL) || (dictSize <= 8)) -+ return 0; -+ -+ /* dict as pure content */ -+ if ((ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict)) -+ return ZSTD_loadDictionaryContent(cctx, dict, dictSize); -+ -+ /* dict as zstd dictionary */ -+ return ZSTD_loadZstdDictionary(cctx, dict, dictSize); -+} -+ -+/*! ZSTD_compressBegin_internal() : -+* @return : 0, or an error code */ -+static size_t ZSTD_compressBegin_internal(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, U64 pledgedSrcSize) -+{ -+ ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue; -+ CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp)); -+ return ZSTD_compress_insertDictionary(cctx, dict, dictSize); -+} -+ -+/*! ZSTD_compressBegin_advanced() : -+* @return : 0, or an error code */ -+size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize) -+{ -+ /* compression parameters verification and optimization */ -+ CHECK_F(ZSTD_checkCParams(params.cParams)); -+ return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize); -+} -+ -+size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, int compressionLevel) -+{ -+ ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize); -+ return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0); -+} -+ -+size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); }
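ZSTD_compressBegin(), ZSTD_compressContinue() and ZSTD_compressEnd() (the latter defined just below) form the buffer-less frame API that the one-shot helpers wrap. A minimal sketch of the calling sequence, written as if inside this file and using its own types; the helper name and the 50/50 split are illustrative:

/* illustrative sketch, not part of the patch */
static size_t example_two_chunk_frame(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
                                      const void *src, size_t srcSize)
{
        const BYTE *ip = (const BYTE *)src;
        BYTE *op = (BYTE *)dst;
        size_t const half = srcSize / 2;
        size_t ret;

        ret = ZSTD_compressBegin(cctx, 3 /* illustrative level */);
        if (ZSTD_isError(ret))
                return ret;
        /* chunks are fed in order; the context tracks the window across calls */
        ret = ZSTD_compressContinue(cctx, op, dstCapacity, ip, half);
        if (ZSTD_isError(ret))
                return ret;
        op += ret;
        dstCapacity -= ret;
        /* the last call also writes the epilogue: final empty block plus optional checksum */
        ret = ZSTD_compressEnd(cctx, op, dstCapacity, ip + half, srcSize - half);
        if (ZSTD_isError(ret))
                return ret;
        return (size_t)((op + ret) - (BYTE *)dst);
}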
-+ -+/*! ZSTD_writeEpilogue() : -+* Ends a frame. -+* @return : nb of bytes written into dst (or an error code) */ -+static size_t ZSTD_writeEpilogue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity) -+{ -+ BYTE *const ostart = (BYTE *)dst; -+ BYTE *op = ostart; -+ size_t fhSize = 0; -+ -+ if (cctx->stage == ZSTDcs_created) -+ return ERROR(stage_wrong); /* init missing */ -+ -+ /* special case : empty frame */ -+ if (cctx->stage == ZSTDcs_init) { -+ fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0); -+ if (ZSTD_isError(fhSize)) -+ return fhSize; -+ dstCapacity -= fhSize; -+ op += fhSize; -+ cctx->stage = ZSTDcs_ongoing; -+ } -+ -+ if (cctx->stage != ZSTDcs_ending) { -+ /* write one last empty block, make it the "last" block */ -+ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw) << 1) + 0; -+ if (dstCapacity < 4) -+ return ERROR(dstSize_tooSmall); -+ ZSTD_writeLE32(op, cBlockHeader24); -+ op += ZSTD_blockHeaderSize; -+ dstCapacity -= ZSTD_blockHeaderSize; -+ } -+ -+ if (cctx->params.fParams.checksumFlag) { -+ U32 const checksum = (U32)xxh64_digest(&cctx->xxhState); -+ if (dstCapacity < 4) -+ return ERROR(dstSize_tooSmall); -+ ZSTD_writeLE32(op, checksum); -+ op += 4; -+ } -+ -+ cctx->stage = ZSTDcs_created; /* return to "created but no init" status */ -+ return op - ostart; -+} -+ -+size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -+{ -+ size_t endResult; -+ size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1); -+ if (ZSTD_isError(cSize)) -+ return cSize; -+ endResult = ZSTD_writeEpilogue(cctx, (char *)dst + cSize, dstCapacity - cSize); -+ if (ZSTD_isError(endResult)) -+ return endResult; -+ return cSize + endResult; -+} -+ -+static size_t ZSTD_compress_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, -+ ZSTD_parameters params) -+{ -+ CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize)); -+ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); -+} -+ -+size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, -+ ZSTD_parameters params) -+{ -+ return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params); -+} -+ -+size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, ZSTD_parameters params) -+{ -+ return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, NULL, 0, params); -+} -+ -+/* ===== Dictionary API ===== */ -+ -+struct ZSTD_CDict_s { -+ void *dictBuffer; -+ const void *dictContent; -+ size_t dictContentSize; -+ ZSTD_CCtx *refContext; -+}; /* typedef'd to ZSTD_CDict within "zstd.h" */ -+ -+size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams) { return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CDict)); } -+ -+static ZSTD_CDict *ZSTD_createCDict_advanced(const void *dictBuffer, size_t dictSize, unsigned byReference, ZSTD_parameters params, ZSTD_customMem customMem) -+{ -+ if (!customMem.customAlloc || !customMem.customFree) -+ return NULL; -+ -+ { -+ ZSTD_CDict *const cdict = (ZSTD_CDict *)ZSTD_malloc(sizeof(ZSTD_CDict), customMem); -+ ZSTD_CCtx *const cctx = ZSTD_createCCtx_advanced(customMem); -+ -+ if (!cdict || !cctx) { -+ ZSTD_free(cdict, customMem); -+ ZSTD_freeCCtx(cctx); -+ return NULL; -+ } -+ -+ if ((byReference) || (!dictBuffer) || (!dictSize)) { -+ cdict->dictBuffer = NULL; -+
cdict->dictContent = dictBuffer; -+ } else { -+ void *const internalBuffer = ZSTD_malloc(dictSize, customMem); -+ if (!internalBuffer) { -+ ZSTD_free(cctx, customMem); -+ ZSTD_free(cdict, customMem); -+ return NULL; -+ } -+ memcpy(internalBuffer, dictBuffer, dictSize); -+ cdict->dictBuffer = internalBuffer; -+ cdict->dictContent = internalBuffer; -+ } -+ -+ { -+ size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0); -+ if (ZSTD_isError(errorCode)) { -+ ZSTD_free(cdict->dictBuffer, customMem); -+ ZSTD_free(cdict, customMem); -+ ZSTD_freeCCtx(cctx); -+ return NULL; -+ } -+ } -+ -+ cdict->refContext = cctx; -+ cdict->dictContentSize = dictSize; -+ return cdict; -+ } -+} -+ -+ZSTD_CDict *ZSTD_initCDict(const void *dict, size_t dictSize, ZSTD_parameters params, void *workspace, size_t workspaceSize) -+{ -+ ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); -+ return ZSTD_createCDict_advanced(dict, dictSize, 1, params, stackMem); -+} -+ -+size_t ZSTD_freeCDict(ZSTD_CDict *cdict) -+{ -+ if (cdict == NULL) -+ return 0; /* support free on NULL */ -+ { -+ ZSTD_customMem const cMem = cdict->refContext->customMem; -+ ZSTD_freeCCtx(cdict->refContext); -+ ZSTD_free(cdict->dictBuffer, cMem); -+ ZSTD_free(cdict, cMem); -+ return 0; -+ } -+} -+ -+static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict *cdict) { return ZSTD_getParamsFromCCtx(cdict->refContext); } -+ -+size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize) -+{ -+ if (cdict->dictContentSize) -+ CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize)) -+ else { -+ ZSTD_parameters params = cdict->refContext->params; -+ params.fParams.contentSizeFlag = (pledgedSrcSize > 0); -+ CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize)); -+ } -+ return 0; -+} -+ -+/*! ZSTD_compress_usingCDict() : -+* Compression using a digested Dictionary. -+* Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. 
-+* Note that compression level is decided during dictionary creation */ -+size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict) -+{ -+ CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize)); -+ -+ if (cdict->refContext->params.fParams.contentSizeFlag == 1) { -+ cctx->params.fParams.contentSizeFlag = 1; -+ cctx->frameContentSize = srcSize; -+ } else { -+ cctx->params.fParams.contentSizeFlag = 0; -+ } -+ -+ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); -+} -+ -+/* ****************************************************************** -+* Streaming -+********************************************************************/ -+ -+typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage; -+ -+struct ZSTD_CStream_s { -+ ZSTD_CCtx *cctx; -+ ZSTD_CDict *cdictLocal; -+ const ZSTD_CDict *cdict; -+ char *inBuff; -+ size_t inBuffSize; -+ size_t inToCompress; -+ size_t inBuffPos; -+ size_t inBuffTarget; -+ size_t blockSize; -+ char *outBuff; -+ size_t outBuffSize; -+ size_t outBuffContentSize; -+ size_t outBuffFlushedSize; -+ ZSTD_cStreamStage stage; -+ U32 checksum; -+ U32 frameEnded; -+ U64 pledgedSrcSize; -+ U64 inputProcessed; -+ ZSTD_parameters params; -+ ZSTD_customMem customMem; -+}; /* typedef'd to ZSTD_CStream within "zstd.h" */ -+ -+size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams) -+{ -+ size_t const inBuffSize = (size_t)1 << cParams.windowLog; -+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, inBuffSize); -+ size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1; -+ -+ return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize); -+} -+ -+ZSTD_CStream *ZSTD_createCStream_advanced(ZSTD_customMem customMem) -+{ -+ ZSTD_CStream *zcs; -+ -+ if (!customMem.customAlloc || !customMem.customFree) -+ return NULL; -+ -+ zcs = (ZSTD_CStream *)ZSTD_malloc(sizeof(ZSTD_CStream), customMem); -+ if (zcs == NULL) -+ return NULL; -+ memset(zcs, 0, sizeof(ZSTD_CStream)); -+ memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem)); -+ zcs->cctx = ZSTD_createCCtx_advanced(customMem); -+ if (zcs->cctx == NULL) { -+ ZSTD_freeCStream(zcs); -+ return NULL; -+ } -+ return zcs; -+} -+ -+size_t ZSTD_freeCStream(ZSTD_CStream *zcs) -+{ -+ if (zcs == NULL) -+ return 0; /* support free on NULL */ -+ { -+ ZSTD_customMem const cMem = zcs->customMem; -+ ZSTD_freeCCtx(zcs->cctx); -+ zcs->cctx = NULL; -+ ZSTD_freeCDict(zcs->cdictLocal); -+ zcs->cdictLocal = NULL; -+ ZSTD_free(zcs->inBuff, cMem); -+ zcs->inBuff = NULL; -+ ZSTD_free(zcs->outBuff, cMem); -+ zcs->outBuff = NULL; -+ ZSTD_free(zcs, cMem); -+ return 0; -+ } -+} -+ -+/*====== Initialization ======*/ -+ -+size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; } -+size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */; } -+ -+static size_t ZSTD_resetCStream_internal(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize) -+{ -+ if (zcs->inBuffSize == 0) -+ return ERROR(stage_wrong); /* zcs has not been init at least once => can't reset */ -+ -+ if (zcs->cdict) -+ CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize)) -+ else -+ CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize)); -+ -+ zcs->inToCompress = 0; -+ zcs->inBuffPos = 0; -+ zcs->inBuffTarget = zcs->blockSize; -+ zcs->outBuffContentSize = 
zcs->outBuffFlushedSize = 0; -+ zcs->stage = zcss_load; -+ zcs->frameEnded = 0; -+ zcs->pledgedSrcSize = pledgedSrcSize; -+ zcs->inputProcessed = 0; -+ return 0; /* ready to go */ -+} -+ -+size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize) -+{ -+ -+ zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0); -+ -+ return ZSTD_resetCStream_internal(zcs, pledgedSrcSize); -+} -+ -+static size_t ZSTD_initCStream_advanced(ZSTD_CStream *zcs, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize) -+{ -+ /* allocate buffers */ -+ { -+ size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog; -+ if (zcs->inBuffSize < neededInBuffSize) { -+ zcs->inBuffSize = neededInBuffSize; -+ ZSTD_free(zcs->inBuff, zcs->customMem); -+ zcs->inBuff = (char *)ZSTD_malloc(neededInBuffSize, zcs->customMem); -+ if (zcs->inBuff == NULL) -+ return ERROR(memory_allocation); -+ } -+ zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize); -+ } -+ if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize) + 1) { -+ zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize) + 1; -+ ZSTD_free(zcs->outBuff, zcs->customMem); -+ zcs->outBuff = (char *)ZSTD_malloc(zcs->outBuffSize, zcs->customMem); -+ if (zcs->outBuff == NULL) -+ return ERROR(memory_allocation); -+ } -+ -+ if (dict && dictSize >= 8) { -+ ZSTD_freeCDict(zcs->cdictLocal); -+ zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem); -+ if (zcs->cdictLocal == NULL) -+ return ERROR(memory_allocation); -+ zcs->cdict = zcs->cdictLocal; -+ } else -+ zcs->cdict = NULL; -+ -+ zcs->checksum = params.fParams.checksumFlag > 0; -+ zcs->params = params; -+ -+ return ZSTD_resetCStream_internal(zcs, pledgedSrcSize); -+} -+ -+ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize) -+{ -+ ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); -+ ZSTD_CStream *const zcs = ZSTD_createCStream_advanced(stackMem); -+ if (zcs) { -+ size_t const code = ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize); -+ if (ZSTD_isError(code)) { -+ return NULL; -+ } -+ } -+ return zcs; -+} -+ -+ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize) -+{ -+ ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict); -+ ZSTD_CStream *const zcs = ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize); -+ if (zcs) { -+ zcs->cdict = cdict; -+ if (ZSTD_isError(ZSTD_resetCStream_internal(zcs, pledgedSrcSize))) { -+ return NULL; -+ } -+ } -+ return zcs; -+} -+ -+/*====== Compression ======*/ -+ -+typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e; -+ -+ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize) -+{ -+ size_t const length = MIN(dstCapacity, srcSize); -+ memcpy(dst, src, length); -+ return length; -+} -+ -+static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t *dstCapacityPtr, const void *src, size_t *srcSizePtr, ZSTD_flush_e const flush) -+{ -+ U32 someMoreWork = 1; -+ const char *const istart = (const char *)src; -+ const char *const iend = istart + *srcSizePtr; -+ const char *ip = istart; -+ char *const ostart = (char *)dst; -+ char *const oend = ostart + *dstCapacityPtr; -+ char *op = ostart; -+ -+ while (someMoreWork) { -+ switch (zcs->stage) { -+ case zcss_init: -+ return ERROR(init_missing); /* call 
ZBUFF_compressInit() first ! */ -+ -+ case zcss_load: -+ /* complete inBuffer */ -+ { -+ size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; -+ size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend - ip); -+ zcs->inBuffPos += loaded; -+ ip += loaded; -+ if ((zcs->inBuffPos == zcs->inToCompress) || (!flush && (toLoad != loaded))) { -+ someMoreWork = 0; -+ break; /* not enough input to get a full block : stop there, wait for more */ -+ } -+ } -+ /* compress curr block (note : this stage cannot be stopped in the middle) */ -+ { -+ void *cDst; -+ size_t cSize; -+ size_t const iSize = zcs->inBuffPos - zcs->inToCompress; -+ size_t oSize = oend - op; -+ if (oSize >= ZSTD_compressBound(iSize)) -+ cDst = op; /* compress directly into output buffer (avoid flush stage) */ -+ else -+ cDst = zcs->outBuff, oSize = zcs->outBuffSize; -+ cSize = (flush == zsf_end) ? ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) -+ : ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize); -+ if (ZSTD_isError(cSize)) -+ return cSize; -+ if (flush == zsf_end) -+ zcs->frameEnded = 1; -+ /* prepare next block */ -+ zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize; -+ if (zcs->inBuffTarget > zcs->inBuffSize) -+ zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; /* note : inBuffSize >= blockSize */ -+ zcs->inToCompress = zcs->inBuffPos; -+ if (cDst == op) { -+ op += cSize; -+ break; -+ } /* no need to flush */ -+ zcs->outBuffContentSize = cSize; -+ zcs->outBuffFlushedSize = 0; -+ zcs->stage = zcss_flush; /* pass-through to flush stage */ -+ } -+ -+ case zcss_flush: { -+ size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; -+ size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); -+ op += flushed; -+ zcs->outBuffFlushedSize += flushed; -+ if (toFlush != flushed) { -+ someMoreWork = 0; -+ break; -+ } /* dst too small to store flushed data : stop there */ -+ zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; -+ zcs->stage = zcss_load; -+ break; -+ } -+ -+ case zcss_final: -+ someMoreWork = 0; /* do nothing */ -+ break; -+ -+ default: -+ return ERROR(GENERIC); /* impossible */ -+ } -+ } -+ -+ *srcSizePtr = ip - istart; -+ *dstCapacityPtr = op - ostart; -+ zcs->inputProcessed += *srcSizePtr; -+ if (zcs->frameEnded) -+ return 0; -+ { -+ size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos; -+ if (hintInSize == 0) -+ hintInSize = zcs->blockSize; -+ return hintInSize; -+ } -+} -+ -+size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, ZSTD_inBuffer *input) -+{ -+ size_t sizeRead = input->size - input->pos; -+ size_t sizeWritten = output->size - output->pos; -+ size_t const result = -+ ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, (const char *)(input->src) + input->pos, &sizeRead, zsf_gather); -+ input->pos += sizeRead; -+ output->pos += sizeWritten; -+ return result; -+}
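Putting the buffered streaming pieces together, a kernel user might look roughly as follows. This is a sketch only: the helper name and errno mapping are illustrative, the ZSTD_inBuffer/ZSTD_outBuffer field names follow the usage visible in ZSTD_compressStream() above, and the workspace sizing via ZSTD_CStreamWorkspaceBound() plus the declarations are assumed to come from this patch's include/linux/zstd.h.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>

static ssize_t example_stream_compress(void *dst, size_t dstCapacity,
                                       const void *src, size_t srcSize)
{
        ZSTD_parameters const params = ZSTD_getParams(3, srcSize, 0);
        size_t const wkspSize = ZSTD_CStreamWorkspaceBound(params.cParams);
        void *wksp = vmalloc(wkspSize);
        ZSTD_CStream *zcs;
        ZSTD_inBuffer in = { .src = src, .size = srcSize, .pos = 0 };
        ZSTD_outBuffer out = { .dst = dst, .size = dstCapacity, .pos = 0 };
        ssize_t err = -EIO;

        if (!wksp)
                return -ENOMEM;
        /* a non-zero pledgedSrcSize goes into the frame header and is
         * checked again by ZSTD_endStream() */
        zcs = ZSTD_initCStream(params, srcSize, wksp, wkspSize);
        if (!zcs)
                goto out;
        while (in.pos < in.size) {
                size_t const ret = ZSTD_compressStream(zcs, &out, &in);

                if (ZSTD_isError(ret))
                        goto out;
                if (out.pos == out.size)
                        goto out; /* dst full; a real caller would drain it and continue */
        }
        {
                size_t const remaining = ZSTD_endStream(zcs, &out);

                if (ZSTD_isError(remaining) || remaining != 0)
                        goto out; /* error, or dst too small for the epilogue */
        }
        err = out.pos;
out:
        vfree(wksp);
        return err;
}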
-+ -+/*====== Finalize ======*/ -+ -+/*! ZSTD_flushStream() : -+* @return : amount of data remaining to flush */ -+size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output) -+{ -+ size_t srcSize = 0; -+ size_t sizeWritten = output->size - output->pos; -+ size_t const result = ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, &srcSize, -+ &srcSize, /* use a valid src address instead of NULL */ -+ zsf_flush); -+ output->pos += sizeWritten; -+ if (ZSTD_isError(result)) -+ return result; -+ return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */ -+} -+ -+size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output) -+{ -+ BYTE *const ostart = (BYTE *)(output->dst) + output->pos; -+ BYTE *const oend = (BYTE *)(output->dst) + output->size; -+ BYTE *op = ostart; -+ -+ if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize)) -+ return ERROR(srcSize_wrong); /* pledgedSrcSize not respected */ -+ -+ if (zcs->stage != zcss_final) { -+ /* flush whatever remains */ -+ size_t srcSize = 0; -+ size_t sizeWritten = output->size - output->pos; -+ size_t const notEnded = -+ ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end); /* use a valid src address instead of NULL */ -+ size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; -+ op += sizeWritten; -+ if (remainingToFlush) { -+ output->pos += sizeWritten; -+ return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4); -+ } -+ /* create epilogue */ -+ zcs->stage = zcss_final; -+ zcs->outBuffContentSize = !notEnded ? 0 : ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL, -+ 0); /* write epilogue, including final empty block, into outBuff */ -+ } -+ -+ /* flush epilogue */ -+ { -+ size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; -+ size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); -+ op += flushed; -+ zcs->outBuffFlushedSize += flushed; -+ output->pos += op - ostart; -+ if (toFlush == flushed) -+ zcs->stage = zcss_init; /* end reached */ -+ return toFlush - flushed; -+ } -+} -+ -+/*-===== Pre-defined compression levels =====-*/ -+ -+#define ZSTD_DEFAULT_CLEVEL 1 -+#define ZSTD_MAX_CLEVEL 22 -+int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } -+ -+static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL + 1] = { -+ { -+ /* "default" */ -+ /* W, C, H, S, L, TL, strat */ -+ {18, 12, 12, 1, 7, 16, ZSTD_fast}, /* level 0 - never used */ -+ {19, 13, 14, 1, 7, 16, ZSTD_fast}, /* level 1 */ -+ {19, 15, 16, 1, 6, 16, ZSTD_fast}, /* level 2 */ -+ {20, 16, 17, 1, 5, 16, ZSTD_dfast}, /* level 3.*/ -+ {20, 18, 18, 1, 5, 16, ZSTD_dfast}, /* level 4.*/ -+ {20, 15, 18, 3, 5, 16, ZSTD_greedy}, /* level 5 */ -+ {21, 16, 19, 2, 5, 16, ZSTD_lazy}, /* level 6 */ -+ {21, 17, 20, 3, 5, 16, ZSTD_lazy}, /* level 7 */ -+ {21, 18, 20, 3, 5, 16, ZSTD_lazy2}, /* level 8 */ -+ {21, 20, 20, 3, 5, 16, ZSTD_lazy2}, /* level 9 */ -+ {21, 19, 21, 4, 5, 16, ZSTD_lazy2}, /* level 10 */ -+ {22, 20, 22, 4, 5, 16, ZSTD_lazy2}, /* level 11 */ -+ {22, 20, 22, 5, 5, 16, ZSTD_lazy2}, /* level 12 */ -+ {22, 21, 22, 5, 5, 16, ZSTD_lazy2}, /* level 13 */ -+ {22, 21, 22, 6, 5, 16, ZSTD_lazy2}, /* level 14 */ -+ {22, 21, 21, 5, 5, 16, ZSTD_btlazy2}, /* level 15 */ -+ {23, 22, 22, 5, 5, 16, ZSTD_btlazy2}, /* level 16 */ -+ {23, 21, 22, 4, 5, 24, ZSTD_btopt}, /* level 17 */ -+ {23, 23, 22, 6, 5, 32, ZSTD_btopt}, /* level 18 */ -+ {23, 23, 22, 6, 3,
48, ZSTD_btopt}, /* level 19 */ -+ {25, 25, 23, 7, 3, 64, ZSTD_btopt2}, /* level 20 */ -+ {26, 26, 23, 7, 3, 256, ZSTD_btopt2}, /* level 21 */ -+ {27, 27, 25, 9, 3, 512, ZSTD_btopt2}, /* level 22 */ -+ }, -+ { -+ /* for srcSize <= 256 KB */ -+ /* W, C, H, S, L, T, strat */ -+ {0, 0, 0, 0, 0, 0, ZSTD_fast}, /* level 0 - not used */ -+ {18, 13, 14, 1, 6, 8, ZSTD_fast}, /* level 1 */ -+ {18, 14, 13, 1, 5, 8, ZSTD_dfast}, /* level 2 */ -+ {18, 16, 15, 1, 5, 8, ZSTD_dfast}, /* level 3 */ -+ {18, 15, 17, 1, 5, 8, ZSTD_greedy}, /* level 4.*/ -+ {18, 16, 17, 4, 5, 8, ZSTD_greedy}, /* level 5.*/ -+ {18, 16, 17, 3, 5, 8, ZSTD_lazy}, /* level 6.*/ -+ {18, 17, 17, 4, 4, 8, ZSTD_lazy}, /* level 7 */ -+ {18, 17, 17, 4, 4, 8, ZSTD_lazy2}, /* level 8 */ -+ {18, 17, 17, 5, 4, 8, ZSTD_lazy2}, /* level 9 */ -+ {18, 17, 17, 6, 4, 8, ZSTD_lazy2}, /* level 10 */ -+ {18, 18, 17, 6, 4, 8, ZSTD_lazy2}, /* level 11.*/ -+ {18, 18, 17, 7, 4, 8, ZSTD_lazy2}, /* level 12.*/ -+ {18, 19, 17, 6, 4, 8, ZSTD_btlazy2}, /* level 13 */ -+ {18, 18, 18, 4, 4, 16, ZSTD_btopt}, /* level 14.*/ -+ {18, 18, 18, 4, 3, 16, ZSTD_btopt}, /* level 15.*/ -+ {18, 19, 18, 6, 3, 32, ZSTD_btopt}, /* level 16.*/ -+ {18, 19, 18, 8, 3, 64, ZSTD_btopt}, /* level 17.*/ -+ {18, 19, 18, 9, 3, 128, ZSTD_btopt}, /* level 18.*/ -+ {18, 19, 18, 10, 3, 256, ZSTD_btopt}, /* level 19.*/ -+ {18, 19, 18, 11, 3, 512, ZSTD_btopt2}, /* level 20.*/ -+ {18, 19, 18, 12, 3, 512, ZSTD_btopt2}, /* level 21.*/ -+ {18, 19, 18, 13, 3, 512, ZSTD_btopt2}, /* level 22.*/ -+ }, -+ { -+ /* for srcSize <= 128 KB */ -+ /* W, C, H, S, L, T, strat */ -+ {17, 12, 12, 1, 7, 8, ZSTD_fast}, /* level 0 - not used */ -+ {17, 12, 13, 1, 6, 8, ZSTD_fast}, /* level 1 */ -+ {17, 13, 16, 1, 5, 8, ZSTD_fast}, /* level 2 */ -+ {17, 16, 16, 2, 5, 8, ZSTD_dfast}, /* level 3 */ -+ {17, 13, 15, 3, 4, 8, ZSTD_greedy}, /* level 4 */ -+ {17, 15, 17, 4, 4, 8, ZSTD_greedy}, /* level 5 */ -+ {17, 16, 17, 3, 4, 8, ZSTD_lazy}, /* level 6 */ -+ {17, 15, 17, 4, 4, 8, ZSTD_lazy2}, /* level 7 */ -+ {17, 17, 17, 4, 4, 8, ZSTD_lazy2}, /* level 8 */ -+ {17, 17, 17, 5, 4, 8, ZSTD_lazy2}, /* level 9 */ -+ {17, 17, 17, 6, 4, 8, ZSTD_lazy2}, /* level 10 */ -+ {17, 17, 17, 7, 4, 8, ZSTD_lazy2}, /* level 11 */ -+ {17, 17, 17, 8, 4, 8, ZSTD_lazy2}, /* level 12 */ -+ {17, 18, 17, 6, 4, 8, ZSTD_btlazy2}, /* level 13.*/ -+ {17, 17, 17, 7, 3, 8, ZSTD_btopt}, /* level 14.*/ -+ {17, 17, 17, 7, 3, 16, ZSTD_btopt}, /* level 15.*/ -+ {17, 18, 17, 7, 3, 32, ZSTD_btopt}, /* level 16.*/ -+ {17, 18, 17, 7, 3, 64, ZSTD_btopt}, /* level 17.*/ -+ {17, 18, 17, 7, 3, 256, ZSTD_btopt}, /* level 18.*/ -+ {17, 18, 17, 8, 3, 256, ZSTD_btopt}, /* level 19.*/ -+ {17, 18, 17, 9, 3, 256, ZSTD_btopt2}, /* level 20.*/ -+ {17, 18, 17, 10, 3, 256, ZSTD_btopt2}, /* level 21.*/ -+ {17, 18, 17, 11, 3, 512, ZSTD_btopt2}, /* level 22.*/ -+ }, -+ { -+ /* for srcSize <= 16 KB */ -+ /* W, C, H, S, L, T, strat */ -+ {14, 12, 12, 1, 7, 6, ZSTD_fast}, /* level 0 - not used */ -+ {14, 14, 14, 1, 6, 6, ZSTD_fast}, /* level 1 */ -+ {14, 14, 14, 1, 4, 6, ZSTD_fast}, /* level 2 */ -+ {14, 14, 14, 1, 4, 6, ZSTD_dfast}, /* level 3.*/ -+ {14, 14, 14, 4, 4, 6, ZSTD_greedy}, /* level 4.*/ -+ {14, 14, 14, 3, 4, 6, ZSTD_lazy}, /* level 5.*/ -+ {14, 14, 14, 4, 4, 6, ZSTD_lazy2}, /* level 6 */ -+ {14, 14, 14, 5, 4, 6, ZSTD_lazy2}, /* level 7 */ -+ {14, 14, 14, 6, 4, 6, ZSTD_lazy2}, /* level 8.*/ -+ {14, 15, 14, 6, 4, 6, ZSTD_btlazy2}, /* level 9.*/ -+ {14, 15, 14, 3, 3, 6, ZSTD_btopt}, /* level 10.*/ -+ {14, 15, 14, 6, 3, 8, ZSTD_btopt}, /* level 11.*/ -+ {14, 15, 14, 6, 
3, 16, ZSTD_btopt}, /* level 12.*/ -+ {14, 15, 14, 6, 3, 24, ZSTD_btopt}, /* level 13.*/ -+ {14, 15, 15, 6, 3, 48, ZSTD_btopt}, /* level 14.*/ -+ {14, 15, 15, 6, 3, 64, ZSTD_btopt}, /* level 15.*/ -+ {14, 15, 15, 6, 3, 96, ZSTD_btopt}, /* level 16.*/ -+ {14, 15, 15, 6, 3, 128, ZSTD_btopt}, /* level 17.*/ -+ {14, 15, 15, 6, 3, 256, ZSTD_btopt}, /* level 18.*/ -+ {14, 15, 15, 7, 3, 256, ZSTD_btopt}, /* level 19.*/ -+ {14, 15, 15, 8, 3, 256, ZSTD_btopt2}, /* level 20.*/ -+ {14, 15, 15, 9, 3, 256, ZSTD_btopt2}, /* level 21.*/ -+ {14, 15, 15, 10, 3, 256, ZSTD_btopt2}, /* level 22.*/ -+ }, -+}; -+ -+/*! ZSTD_getCParams() : -+* @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`. -+* Size values are optional, provide 0 if not known or unused */ -+ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize) -+{ -+ ZSTD_compressionParameters cp; -+ size_t const addedSize = srcSize ? 0 : 500; -+ U64 const rSize = srcSize + dictSize ? srcSize + dictSize + addedSize : (U64)-1; -+ U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */ -+ if (compressionLevel <= 0) -+ compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default; no negative compressionLevel yet */ -+ if (compressionLevel > ZSTD_MAX_CLEVEL) -+ compressionLevel = ZSTD_MAX_CLEVEL; -+ cp = ZSTD_defaultCParameters[tableID][compressionLevel]; -+ if (ZSTD_32bits()) { /* auto-correction, for 32-bits mode */ -+ if (cp.windowLog > ZSTD_WINDOWLOG_MAX) -+ cp.windowLog = ZSTD_WINDOWLOG_MAX; -+ if (cp.chainLog > ZSTD_CHAINLOG_MAX) -+ cp.chainLog = ZSTD_CHAINLOG_MAX; -+ if (cp.hashLog > ZSTD_HASHLOG_MAX) -+ cp.hashLog = ZSTD_HASHLOG_MAX; -+ } -+ cp = ZSTD_adjustCParams(cp, srcSize, dictSize); -+ return cp; -+} -+ -+/*! ZSTD_getParams() : -+* same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`). -+* All fields of `ZSTD_frameParameters` are set to default (0) */ -+ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize) -+{ -+ ZSTD_parameters params; -+ ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize); -+ memset(&params, 0, sizeof(params)); -+ params.cParams = cParams; -+ return params; -+} -+ -+EXPORT_SYMBOL(ZSTD_maxCLevel); -+EXPORT_SYMBOL(ZSTD_compressBound); -+ -+EXPORT_SYMBOL(ZSTD_CCtxWorkspaceBound); -+EXPORT_SYMBOL(ZSTD_initCCtx); -+EXPORT_SYMBOL(ZSTD_compressCCtx); -+EXPORT_SYMBOL(ZSTD_compress_usingDict); -+ -+EXPORT_SYMBOL(ZSTD_CDictWorkspaceBound); -+EXPORT_SYMBOL(ZSTD_initCDict); -+EXPORT_SYMBOL(ZSTD_compress_usingCDict); -+ -+EXPORT_SYMBOL(ZSTD_CStreamWorkspaceBound); -+EXPORT_SYMBOL(ZSTD_initCStream); -+EXPORT_SYMBOL(ZSTD_initCStream_usingCDict); -+EXPORT_SYMBOL(ZSTD_resetCStream); -+EXPORT_SYMBOL(ZSTD_compressStream); -+EXPORT_SYMBOL(ZSTD_flushStream); -+EXPORT_SYMBOL(ZSTD_endStream); -+EXPORT_SYMBOL(ZSTD_CStreamInSize); -+EXPORT_SYMBOL(ZSTD_CStreamOutSize); -+ -+EXPORT_SYMBOL(ZSTD_getCParams); -+EXPORT_SYMBOL(ZSTD_getParams); -+EXPORT_SYMBOL(ZSTD_checkCParams); -+EXPORT_SYMBOL(ZSTD_adjustCParams); -+ -+EXPORT_SYMBOL(ZSTD_compressBegin); -+EXPORT_SYMBOL(ZSTD_compressBegin_usingDict); -+EXPORT_SYMBOL(ZSTD_compressBegin_advanced); -+EXPORT_SYMBOL(ZSTD_copyCCtx); -+EXPORT_SYMBOL(ZSTD_compressBegin_usingCDict); -+EXPORT_SYMBOL(ZSTD_compressContinue); -+EXPORT_SYMBOL(ZSTD_compressEnd); -+ -+EXPORT_SYMBOL(ZSTD_getBlockSizeMax); -+EXPORT_SYMBOL(ZSTD_compressBlock); -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+MODULE_DESCRIPTION("Zstd Compressor");
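Before the decompressor file begins, one last compression-side sketch: when the same dictionary is reused across many frames, digesting it once into a ZSTD_CDict avoids re-running the dictionary loader on every call. Note that ZSTD_initCDict() above references the dictionary by pointer rather than copying it. The helper below is illustrative only, with workspace handling assumed from include/linux/zstd.h and linux/vmalloc.h:

#include <linux/vmalloc.h>
#include <linux/zstd.h>

/* digest a dictionary once; the CDict references `dict`, so the caller must
 * keep both `dict` and the workspace alive for as long as the CDict is used */
static ZSTD_CDict *example_make_cdict(const void *dict, size_t dictSize,
                                      ZSTD_parameters params, void **wkspOut)
{
        size_t const wkspSize = ZSTD_CDictWorkspaceBound(params.cParams);
        void *wksp = vmalloc(wkspSize);

        if (!wksp)
                return NULL;
        *wkspOut = wksp;
        /* on failure the caller still owns and frees *wkspOut */
        return ZSTD_initCDict(dict, dictSize, params, wksp, wkspSize);
}

Each frame is then compressed with ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict); per the doc comment above, this starts up faster than ZSTD_compress_usingDict() because the tables are already built, and a single vfree() of the workspace releases everything afterwards.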
-diff --git a/lib/zstd/decompress.c b/lib/zstd/decompress.c -new file mode 100644 -index 0000000..72df4828 ---- /dev/null -+++ b/lib/zstd/decompress.c -@@ -0,0 +1,2526 @@ -+/** -+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. -+ * All rights reserved. -+ * -+ * This source code is licensed under the BSD-style license found in the -+ * LICENSE file in the root directory of https://github.com/facebook/zstd. -+ * -+ * This program is free software; you can redistribute it and/or modify it under -+ * the terms of the GNU General Public License version 2 as published by the -+ * Free Software Foundation. This program is dual-licensed; you may select -+ * either version 2 of the GNU General Public License ("GPL") or BSD license -+ * ("BSD"). -+ */ -+ -+/* *************************************************************** -+* Tuning parameters -+*****************************************************************/ -+/*! -+* MAXWINDOWSIZE_DEFAULT : -+* maximum window size accepted by DStream, by default. -+* Frames requiring more memory will be rejected.
-+*/ -+#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT -+#define ZSTD_MAXWINDOWSIZE_DEFAULT ((1 << ZSTD_WINDOWLOG_MAX) + 1) /* defined within zstd.h */ -+#endif -+ -+/*-******************************************************* -+* Dependencies -+*********************************************************/ -+#include "fse.h" -+#include "huf.h" -+#include "mem.h" /* low level memory routines */ -+#include "zstd_internal.h" -+#include <linux/kernel.h> -+#include <linux/module.h> -+#include <linux/string.h> /* memcpy, memmove, memset */ -+ -+#define ZSTD_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0) -+ -+/*-************************************* -+* Macros -+***************************************/ -+#define ZSTD_isError ERR_isError /* for inlining */ -+#define FSE_isError ERR_isError -+#define HUF_isError ERR_isError -+ -+/*_******************************************************* -+* Memory operations -+**********************************************************/ -+static void ZSTD_copy4(void *dst, const void *src) { memcpy(dst, src, 4); } -+ -+/*-************************************************************* -+* Context management -+***************************************************************/ -+typedef enum { -+ ZSTDds_getFrameHeaderSize, -+ ZSTDds_decodeFrameHeader, -+ ZSTDds_decodeBlockHeader, -+ ZSTDds_decompressBlock, -+ ZSTDds_decompressLastBlock, -+ ZSTDds_checkChecksum, -+ ZSTDds_decodeSkippableHeader, -+ ZSTDds_skipFrame -+} ZSTD_dStage; -+ -+typedef struct { -+ FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)]; -+ FSE_DTable OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)]; -+ FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)]; -+ HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */ -+ U64 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32 / 2]; -+ U32 rep[ZSTD_REP_NUM]; -+} ZSTD_entropyTables_t; -+ -+struct ZSTD_DCtx_s { -+ const FSE_DTable *LLTptr; -+ const FSE_DTable *MLTptr; -+ const FSE_DTable *OFTptr; -+ const HUF_DTable *HUFptr; -+ ZSTD_entropyTables_t entropy; -+ const void *previousDstEnd; /* detect continuity */ -+ const void *base; /* start of curr segment */ -+ const void *vBase; /* virtual start of previous segment if it was just before curr one */ -+ const void *dictEnd; /* end of previous segment */ -+ size_t expected; -+ ZSTD_frameParams fParams; -+ blockType_e bType; /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */ -+ ZSTD_dStage stage; -+ U32 litEntropy; -+ U32 fseEntropy; -+ struct xxh64_state xxhState; -+ size_t headerSize; -+ U32 dictID; -+ const BYTE *litPtr; -+ ZSTD_customMem customMem; -+ size_t litSize; -+ size_t rleSize; -+ BYTE litBuffer[ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH]; -+ BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; -+}; /* typedef'd to ZSTD_DCtx within "zstd.h" */ -+ -+size_t ZSTD_DCtxWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DCtx)); } -+ -+size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx) -+{ -+ dctx->expected = ZSTD_frameHeaderSize_prefix; -+ dctx->stage = ZSTDds_getFrameHeaderSize; -+ dctx->previousDstEnd = NULL; -+ dctx->base = NULL; -+ dctx->vBase = NULL; -+ dctx->dictEnd = NULL; -+ dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ -+ dctx->litEntropy = dctx->fseEntropy = 0; -+ dctx->dictID = 0; -+ ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue)); -+ memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ -+ dctx->LLTptr = dctx->entropy.LLTable; -+ dctx->MLTptr =
dctx->entropy.MLTable; -+ dctx->OFTptr = dctx->entropy.OFTable; -+ dctx->HUFptr = dctx->entropy.hufTable; -+ return 0; -+} -+ -+ZSTD_DCtx *ZSTD_createDCtx_advanced(ZSTD_customMem customMem) -+{ -+ ZSTD_DCtx *dctx; -+ -+ if (!customMem.customAlloc || !customMem.customFree) -+ return NULL; -+ -+ dctx = (ZSTD_DCtx *)ZSTD_malloc(sizeof(ZSTD_DCtx), customMem); -+ if (!dctx) -+ return NULL; -+ memcpy(&dctx->customMem, &customMem, sizeof(customMem)); -+ ZSTD_decompressBegin(dctx); -+ return dctx; -+} -+ -+ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize) -+{ -+ ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); -+ return ZSTD_createDCtx_advanced(stackMem); -+} -+ -+size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx) -+{ -+ if (dctx == NULL) -+ return 0; /* support free on NULL */ -+ ZSTD_free(dctx, dctx->customMem); -+ return 0; /* reserved as a potential error code in the future */ -+} -+ -+void ZSTD_copyDCtx(ZSTD_DCtx *dstDCtx, const ZSTD_DCtx *srcDCtx) -+{ -+ size_t const workSpaceSize = (ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH) + ZSTD_frameHeaderSize_max; -+ memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize); /* no need to copy workspace */ -+} -+ -+static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict); -+ -+/*-************************************************************* -+* Decompression section -+***************************************************************/ -+ -+/*! ZSTD_isFrame() : -+ * Tells if the content of `buffer` starts with a valid Frame Identifier. -+ * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. -+ * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. -+ * Note 3 : Skippable Frame Identifiers are considered valid. */ -+unsigned ZSTD_isFrame(const void *buffer, size_t size) -+{ -+ if (size < 4) -+ return 0; -+ { -+ U32 const magic = ZSTD_readLE32(buffer); -+ if (magic == ZSTD_MAGICNUMBER) -+ return 1; -+ if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) -+ return 1; -+ } -+ return 0; -+} -+ -+/** ZSTD_frameHeaderSize() : -+* srcSize must be >= ZSTD_frameHeaderSize_prefix. -+* @return : size of the Frame Header */ -+static size_t ZSTD_frameHeaderSize(const void *src, size_t srcSize) -+{ -+ if (srcSize < ZSTD_frameHeaderSize_prefix) -+ return ERROR(srcSize_wrong); -+ { -+ BYTE const fhd = ((const BYTE *)src)[4]; -+ U32 const dictID = fhd & 3; -+ U32 const singleSegment = (fhd >> 5) & 1; -+ U32 const fcsId = fhd >> 6; -+ return ZSTD_frameHeaderSize_prefix + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + (singleSegment && !fcsId); -+ } -+} -+ -+/** ZSTD_getFrameParams() : -+* decode Frame Header, or require larger `srcSize`. 
-+* @return : 0, `fparamsPtr` is correctly filled, -+* >0, `srcSize` is too small, result is expected `srcSize`, -+* or an error code, which can be tested using ZSTD_isError() */ -+size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src, size_t srcSize) -+{ -+ const BYTE *ip = (const BYTE *)src; -+ -+ if (srcSize < ZSTD_frameHeaderSize_prefix) -+ return ZSTD_frameHeaderSize_prefix; -+ if (ZSTD_readLE32(src) != ZSTD_MAGICNUMBER) { -+ if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { -+ if (srcSize < ZSTD_skippableHeaderSize) -+ return ZSTD_skippableHeaderSize; /* magic number + skippable frame length */ -+ memset(fparamsPtr, 0, sizeof(*fparamsPtr)); -+ fparamsPtr->frameContentSize = ZSTD_readLE32((const char *)src + 4); -+ fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */ -+ return 0; -+ } -+ return ERROR(prefix_unknown); -+ } -+ -+ /* ensure there is enough `srcSize` to fully read/decode frame header */ -+ { -+ size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize); -+ if (srcSize < fhsize) -+ return fhsize; -+ } -+ -+ { -+ BYTE const fhdByte = ip[4]; -+ size_t pos = 5; -+ U32 const dictIDSizeCode = fhdByte & 3; -+ U32 const checksumFlag = (fhdByte >> 2) & 1; -+ U32 const singleSegment = (fhdByte >> 5) & 1; -+ U32 const fcsID = fhdByte >> 6; -+ U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; -+ U32 windowSize = 0; -+ U32 dictID = 0; -+ U64 frameContentSize = 0; -+ if ((fhdByte & 0x08) != 0) -+ return ERROR(frameParameter_unsupported); /* reserved bits, which must be zero */ -+ if (!singleSegment) { -+ BYTE const wlByte = ip[pos++]; -+ U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN; -+ if (windowLog > ZSTD_WINDOWLOG_MAX) -+ return ERROR(frameParameter_windowTooLarge); /* avoids issue with 1 << windowLog */ -+ windowSize = (1U << windowLog); -+ windowSize += (windowSize >> 3) * (wlByte & 7); -+ } -+ -+ switch (dictIDSizeCode) { -+ default: /* impossible */ -+ case 0: break; -+ case 1: -+ dictID = ip[pos]; -+ pos++; -+ break; -+ case 2: -+ dictID = ZSTD_readLE16(ip + pos); -+ pos += 2; -+ break; -+ case 3: -+ dictID = ZSTD_readLE32(ip + pos); -+ pos += 4; -+ break; -+ } -+ switch (fcsID) { -+ default: /* impossible */ -+ case 0: -+ if (singleSegment) -+ frameContentSize = ip[pos]; -+ break; -+ case 1: frameContentSize = ZSTD_readLE16(ip + pos) + 256; break; -+ case 2: frameContentSize = ZSTD_readLE32(ip + pos); break; -+ case 3: frameContentSize = ZSTD_readLE64(ip + pos); break; -+ } -+ if (!windowSize) -+ windowSize = (U32)frameContentSize; -+ if (windowSize > windowSizeMax) -+ return ERROR(frameParameter_windowTooLarge); -+ fparamsPtr->frameContentSize = frameContentSize; -+ fparamsPtr->windowSize = windowSize; -+ fparamsPtr->dictID = dictID; -+ fparamsPtr->checksumFlag = checksumFlag; -+ } -+ return 0; -+}
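The three-way return convention above matters in practice: 0 means the header was decoded into `fparamsPtr`, a positive value is the total number of source bytes needed before the header can be decoded, and anything else is an error code. A minimal caller sketch (the helper name and errno mapping are illustrative; the field types follow the ZSTD_frameParams usage in this file, and declarations are assumed from include/linux/zstd.h):

#include <linux/errno.h>
#include <linux/zstd.h>

static int example_peek_frame(const void *src, size_t srcSize,
                              unsigned long long *contentSize,
                              unsigned int *windowSize)
{
        ZSTD_frameParams fParams;
        size_t const ret = ZSTD_getFrameParams(&fParams, src, srcSize);

        if (ZSTD_isError(ret))
                return -EINVAL; /* not a (supported) zstd frame */
        if (ret > 0)
                return -EAGAIN; /* header incomplete: need `ret` bytes in total */
        *contentSize = fParams.frameContentSize; /* 0 when unknown, empty, or skippable */
        *windowSize = fParams.windowSize;        /* 0 marks a skippable frame */
        return 0;
}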
-+ -+/** ZSTD_getFrameContentSize() : -+* compatible with legacy mode -+* @return : decompressed size of the single frame pointed to by `src` if known, otherwise -+* - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined -+* - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */ -+unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize) -+{ -+ { -+ ZSTD_frameParams fParams; -+ if (ZSTD_getFrameParams(&fParams, src, srcSize) != 0) -+ return ZSTD_CONTENTSIZE_ERROR; -+ if (fParams.windowSize == 0) { -+ /* Either skippable or empty frame, size == 0 either way */ -+ return 0; -+ } else if (fParams.frameContentSize != 0) { -+ return fParams.frameContentSize; -+ } else { -+ return ZSTD_CONTENTSIZE_UNKNOWN; -+ } -+ } -+} -+ -+/** ZSTD_findDecompressedSize() : -+ * compatible with legacy mode -+ * `srcSize` must be the exact length of some number of ZSTD compressed and/or -+ * skippable frames -+ * @return : decompressed size of the frames contained */ -+unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize) -+{ -+ { -+ unsigned long long totalDstSize = 0; -+ while (srcSize >= ZSTD_frameHeaderSize_prefix) { -+ const U32 magicNumber = ZSTD_readLE32(src); -+ -+ if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { -+ size_t skippableSize; -+ if (srcSize < ZSTD_skippableHeaderSize) -+ return ERROR(srcSize_wrong); -+ skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize; -+ if (srcSize < skippableSize) { -+ return ZSTD_CONTENTSIZE_ERROR; -+ } -+ -+ src = (const BYTE *)src + skippableSize; -+ srcSize -= skippableSize; -+ continue; -+ } -+ -+ { -+ unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); -+ if (ret >= ZSTD_CONTENTSIZE_ERROR) -+ return ret; -+ -+ /* check for overflow */ -+ if (totalDstSize + ret < totalDstSize) -+ return ZSTD_CONTENTSIZE_ERROR; -+ totalDstSize += ret; -+ } -+ { -+ size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); -+ if (ZSTD_isError(frameSrcSize)) { -+ return ZSTD_CONTENTSIZE_ERROR; -+ } -+ -+ src = (const BYTE *)src + frameSrcSize; -+ srcSize -= frameSrcSize; -+ } -+ } -+ -+ if (srcSize) { -+ return ZSTD_CONTENTSIZE_ERROR; -+ } -+ -+ return totalDstSize; -+ } -+} -+ -+/** ZSTD_decodeFrameHeader() : -+* `headerSize` must be the size provided by ZSTD_frameHeaderSize(). -+* @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ -+static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx *dctx, const void *src, size_t headerSize) -+{ -+ size_t const result = ZSTD_getFrameParams(&(dctx->fParams), src, headerSize); -+ if (ZSTD_isError(result)) -+ return result; /* invalid header */ -+ if (result > 0) -+ return ERROR(srcSize_wrong); /* headerSize too small */ -+ if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) -+ return ERROR(dictionary_wrong); -+ if (dctx->fParams.checksumFlag) -+ xxh64_reset(&dctx->xxhState, 0); -+ return 0; -+} -+ -+typedef struct { -+ blockType_e blockType; -+ U32 lastBlock; -+ U32 origSize; -+} blockProperties_t; -+ -+/*!
ZSTD_getcBlockSize() : -+* Provides the size of compressed block from block header `src` */ -+size_t ZSTD_getcBlockSize(const void *src, size_t srcSize, blockProperties_t *bpPtr) -+{ -+ if (srcSize < ZSTD_blockHeaderSize) -+ return ERROR(srcSize_wrong); -+ { -+ U32 const cBlockHeader = ZSTD_readLE24(src); -+ U32 const cSize = cBlockHeader >> 3; -+ bpPtr->lastBlock = cBlockHeader & 1; -+ bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3); -+ bpPtr->origSize = cSize; /* only useful for RLE */ -+ if (bpPtr->blockType == bt_rle) -+ return 1; -+ if (bpPtr->blockType == bt_reserved) -+ return ERROR(corruption_detected); -+ return cSize; -+ } -+} -+ -+static size_t ZSTD_copyRawBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize) -+{ -+ if (srcSize > dstCapacity) -+ return ERROR(dstSize_tooSmall); -+ memcpy(dst, src, srcSize); -+ return srcSize; -+} -+ -+static size_t ZSTD_setRleBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize, size_t regenSize) -+{ -+ if (srcSize != 1) -+ return ERROR(srcSize_wrong); -+ if (regenSize > dstCapacity) -+ return ERROR(dstSize_tooSmall); -+ memset(dst, *(const BYTE *)src, regenSize); -+ return regenSize; -+} -+ -+/*! ZSTD_decodeLiteralsBlock() : -+ @return : nb of bytes read from src (< srcSize ) */ -+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx *dctx, const void *src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ -+{ -+ if (srcSize < MIN_CBLOCK_SIZE) -+ return ERROR(corruption_detected); -+ -+ { -+ const BYTE *const istart = (const BYTE *)src; -+ symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); -+ -+ switch (litEncType) { -+ case set_repeat: -+ if (dctx->litEntropy == 0) -+ return ERROR(dictionary_corrupted); -+ /* fall-through */ -+ case set_compressed: -+ if (srcSize < 5) -+ return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */ -+ { -+ size_t lhSize, litSize, litCSize; -+ U32 singleStream = 0; -+ U32 const lhlCode = (istart[0] >> 2) & 3; -+ U32 const lhc = ZSTD_readLE32(istart); -+ switch (lhlCode) { -+ case 0: -+ case 1: -+ default: /* note : default is impossible, since lhlCode into [0..3] */ -+ /* 2 - 2 - 10 - 10 */ -+ singleStream = !lhlCode; -+ lhSize = 3; -+ litSize = (lhc >> 4) & 0x3FF; -+ litCSize = (lhc >> 14) & 0x3FF; -+ break; -+ case 2: -+ /* 2 - 2 - 14 - 14 */ -+ lhSize = 4; -+ litSize = (lhc >> 4) & 0x3FFF; -+ litCSize = lhc >> 18; -+ break; -+ case 3: -+ /* 2 - 2 - 18 - 18 */ -+ lhSize = 5; -+ litSize = (lhc >> 4) & 0x3FFFF; -+ litCSize = (lhc >> 22) + (istart[4] << 10); -+ break; -+ } -+ if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX) -+ return ERROR(corruption_detected); -+ if (litCSize + lhSize > srcSize) -+ return ERROR(corruption_detected); -+ -+ if (HUF_isError( -+ (litEncType == set_repeat) -+ ? (singleStream ? HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr) -+ : HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr)) -+ : (singleStream -+ ? 
HUF_decompress1X2_DCtx_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize, -+ dctx->entropy.workspace, sizeof(dctx->entropy.workspace)) -+ : HUF_decompress4X_hufOnly_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize, -+ dctx->entropy.workspace, sizeof(dctx->entropy.workspace))))) -+ return ERROR(corruption_detected); -+ -+ dctx->litPtr = dctx->litBuffer; -+ dctx->litSize = litSize; -+ dctx->litEntropy = 1; -+ if (litEncType == set_compressed) -+ dctx->HUFptr = dctx->entropy.hufTable; -+ memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); -+ return litCSize + lhSize; -+ } -+ -+ case set_basic: { -+ size_t litSize, lhSize; -+ U32 const lhlCode = ((istart[0]) >> 2) & 3; -+ switch (lhlCode) { -+ case 0: -+ case 2: -+ default: /* note : default is impossible, since lhlCode into [0..3] */ -+ lhSize = 1; -+ litSize = istart[0] >> 3; -+ break; -+ case 1: -+ lhSize = 2; -+ litSize = ZSTD_readLE16(istart) >> 4; -+ break; -+ case 3: -+ lhSize = 3; -+ litSize = ZSTD_readLE24(istart) >> 4; -+ break; -+ } -+ -+ if (lhSize + litSize + WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ -+ if (litSize + lhSize > srcSize) -+ return ERROR(corruption_detected); -+ memcpy(dctx->litBuffer, istart + lhSize, litSize); -+ dctx->litPtr = dctx->litBuffer; -+ dctx->litSize = litSize; -+ memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); -+ return lhSize + litSize; -+ } -+ /* direct reference into compressed stream */ -+ dctx->litPtr = istart + lhSize; -+ dctx->litSize = litSize; -+ return lhSize + litSize; -+ } -+ -+ case set_rle: { -+ U32 const lhlCode = ((istart[0]) >> 2) & 3; -+ size_t litSize, lhSize; -+ switch (lhlCode) { -+ case 0: -+ case 2: -+ default: /* note : default is impossible, since lhlCode into [0..3] */ -+ lhSize = 1; -+ litSize = istart[0] >> 3; -+ break; -+ case 1: -+ lhSize = 2; -+ litSize = ZSTD_readLE16(istart) >> 4; -+ break; -+ case 3: -+ lhSize = 3; -+ litSize = ZSTD_readLE24(istart) >> 4; -+ if (srcSize < 4) -+ return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */ -+ break; -+ } -+ if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX) -+ return ERROR(corruption_detected); -+ memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH); -+ dctx->litPtr = dctx->litBuffer; -+ dctx->litSize = litSize; -+ return lhSize + 1; -+ } -+ default: -+ return ERROR(corruption_detected); /* impossible */ -+ } -+ } -+} -+ -+typedef union { -+ FSE_decode_t realData; -+ U32 alignedBy4; -+} FSE_decode_t4; -+ -+static const FSE_decode_t4 LL_defaultDTable[(1 << LL_DEFAULTNORMLOG) + 1] = { -+ {{LL_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */ -+ {{0, 0, 4}}, /* 0 : base, symbol, bits */ -+ {{16, 0, 4}}, -+ {{32, 1, 5}}, -+ {{0, 3, 5}}, -+ {{0, 4, 5}}, -+ {{0, 6, 5}}, -+ {{0, 7, 5}}, -+ {{0, 9, 5}}, -+ {{0, 10, 5}}, -+ {{0, 12, 5}}, -+ {{0, 14, 6}}, -+ {{0, 16, 5}}, -+ {{0, 18, 5}}, -+ {{0, 19, 5}}, -+ {{0, 21, 5}}, -+ {{0, 22, 5}}, -+ {{0, 24, 5}}, -+ {{32, 25, 5}}, -+ {{0, 26, 5}}, -+ {{0, 27, 6}}, -+ {{0, 29, 6}}, -+ {{0, 31, 6}}, -+ {{32, 0, 4}}, -+ {{0, 1, 4}}, -+ {{0, 2, 5}}, -+ {{32, 4, 5}}, -+ {{0, 5, 5}}, -+ {{32, 7, 5}}, -+ {{0, 8, 5}}, -+ {{32, 10, 5}}, -+ {{0, 11, 5}}, -+ {{0, 13, 6}}, -+ {{32, 16, 5}}, -+ {{0, 17, 5}}, -+ {{32, 19, 5}}, -+ {{0, 20, 5}}, -+ {{32, 22, 5}}, -+ {{0, 23, 5}}, -+ {{0, 25, 4}}, -+ {{16, 25, 4}}, -+ {{32, 26, 5}}, -+ {{0, 28, 6}}, -+ {{0, 30, 6}}, -+ {{48, 0, 4}}, -+ {{16, 1, 4}}, -+ {{32, 
2, 5}}, -+ {{32, 3, 5}}, -+ {{32, 5, 5}}, -+ {{32, 6, 5}}, -+ {{32, 8, 5}}, -+ {{32, 9, 5}}, -+ {{32, 11, 5}}, -+ {{32, 12, 5}}, -+ {{0, 15, 6}}, -+ {{32, 17, 5}}, -+ {{32, 18, 5}}, -+ {{32, 20, 5}}, -+ {{32, 21, 5}}, -+ {{32, 23, 5}}, -+ {{32, 24, 5}}, -+ {{0, 35, 6}}, -+ {{0, 34, 6}}, -+ {{0, 33, 6}}, -+ {{0, 32, 6}}, -+}; /* LL_defaultDTable */ -+ -+static const FSE_decode_t4 ML_defaultDTable[(1 << ML_DEFAULTNORMLOG) + 1] = { -+ {{ML_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */ -+ {{0, 0, 6}}, /* 0 : base, symbol, bits */ -+ {{0, 1, 4}}, -+ {{32, 2, 5}}, -+ {{0, 3, 5}}, -+ {{0, 5, 5}}, -+ {{0, 6, 5}}, -+ {{0, 8, 5}}, -+ {{0, 10, 6}}, -+ {{0, 13, 6}}, -+ {{0, 16, 6}}, -+ {{0, 19, 6}}, -+ {{0, 22, 6}}, -+ {{0, 25, 6}}, -+ {{0, 28, 6}}, -+ {{0, 31, 6}}, -+ {{0, 33, 6}}, -+ {{0, 35, 6}}, -+ {{0, 37, 6}}, -+ {{0, 39, 6}}, -+ {{0, 41, 6}}, -+ {{0, 43, 6}}, -+ {{0, 45, 6}}, -+ {{16, 1, 4}}, -+ {{0, 2, 4}}, -+ {{32, 3, 5}}, -+ {{0, 4, 5}}, -+ {{32, 6, 5}}, -+ {{0, 7, 5}}, -+ {{0, 9, 6}}, -+ {{0, 12, 6}}, -+ {{0, 15, 6}}, -+ {{0, 18, 6}}, -+ {{0, 21, 6}}, -+ {{0, 24, 6}}, -+ {{0, 27, 6}}, -+ {{0, 30, 6}}, -+ {{0, 32, 6}}, -+ {{0, 34, 6}}, -+ {{0, 36, 6}}, -+ {{0, 38, 6}}, -+ {{0, 40, 6}}, -+ {{0, 42, 6}}, -+ {{0, 44, 6}}, -+ {{32, 1, 4}}, -+ {{48, 1, 4}}, -+ {{16, 2, 4}}, -+ {{32, 4, 5}}, -+ {{32, 5, 5}}, -+ {{32, 7, 5}}, -+ {{32, 8, 5}}, -+ {{0, 11, 6}}, -+ {{0, 14, 6}}, -+ {{0, 17, 6}}, -+ {{0, 20, 6}}, -+ {{0, 23, 6}}, -+ {{0, 26, 6}}, -+ {{0, 29, 6}}, -+ {{0, 52, 6}}, -+ {{0, 51, 6}}, -+ {{0, 50, 6}}, -+ {{0, 49, 6}}, -+ {{0, 48, 6}}, -+ {{0, 47, 6}}, -+ {{0, 46, 6}}, -+}; /* ML_defaultDTable */ -+ -+static const FSE_decode_t4 OF_defaultDTable[(1 << OF_DEFAULTNORMLOG) + 1] = { -+ {{OF_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */ -+ {{0, 0, 5}}, /* 0 : base, symbol, bits */ -+ {{0, 6, 4}}, -+ {{0, 9, 5}}, -+ {{0, 15, 5}}, -+ {{0, 21, 5}}, -+ {{0, 3, 5}}, -+ {{0, 7, 4}}, -+ {{0, 12, 5}}, -+ {{0, 18, 5}}, -+ {{0, 23, 5}}, -+ {{0, 5, 5}}, -+ {{0, 8, 4}}, -+ {{0, 14, 5}}, -+ {{0, 20, 5}}, -+ {{0, 2, 5}}, -+ {{16, 7, 4}}, -+ {{0, 11, 5}}, -+ {{0, 17, 5}}, -+ {{0, 22, 5}}, -+ {{0, 4, 5}}, -+ {{16, 8, 4}}, -+ {{0, 13, 5}}, -+ {{0, 19, 5}}, -+ {{0, 1, 5}}, -+ {{16, 6, 4}}, -+ {{0, 10, 5}}, -+ {{0, 16, 5}}, -+ {{0, 28, 5}}, -+ {{0, 27, 5}}, -+ {{0, 26, 5}}, -+ {{0, 25, 5}}, -+ {{0, 24, 5}}, -+}; /* OF_defaultDTable */ -+ -+/*! 
ZSTD_buildSeqTable() : -+ @return : nb bytes read from src, -+ or an error code if it fails, testable with ZSTD_isError() -+*/ -+static size_t ZSTD_buildSeqTable(FSE_DTable *DTableSpace, const FSE_DTable **DTablePtr, symbolEncodingType_e type, U32 max, U32 maxLog, const void *src, -+ size_t srcSize, const FSE_decode_t4 *defaultTable, U32 flagRepeatTable, void *workspace, size_t workspaceSize) -+{ -+ const void *const tmpPtr = defaultTable; /* bypass strict aliasing */ -+ switch (type) { -+ case set_rle: -+ if (!srcSize) -+ return ERROR(srcSize_wrong); -+ if ((*(const BYTE *)src) > max) -+ return ERROR(corruption_detected); -+ FSE_buildDTable_rle(DTableSpace, *(const BYTE *)src); -+ *DTablePtr = DTableSpace; -+ return 1; -+ case set_basic: *DTablePtr = (const FSE_DTable *)tmpPtr; return 0; -+ case set_repeat: -+ if (!flagRepeatTable) -+ return ERROR(corruption_detected); -+ return 0; -+ default: /* impossible */ -+ case set_compressed: { -+ U32 tableLog; -+ S16 *norm = (S16 *)workspace; -+ size_t const spaceUsed32 = ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2; -+ -+ if ((spaceUsed32 << 2) > workspaceSize) -+ return ERROR(GENERIC); -+ workspace = (U32 *)workspace + spaceUsed32; -+ workspaceSize -= (spaceUsed32 << 2); -+ { -+ size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); -+ if (FSE_isError(headerSize)) -+ return ERROR(corruption_detected); -+ if (tableLog > maxLog) -+ return ERROR(corruption_detected); -+ FSE_buildDTable_wksp(DTableSpace, norm, max, tableLog, workspace, workspaceSize); -+ *DTablePtr = DTableSpace; -+ return headerSize; -+ } -+ } -+ } -+} -+ -+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx *dctx, int *nbSeqPtr, const void *src, size_t srcSize) -+{ -+ const BYTE *const istart = (const BYTE *const)src; -+ const BYTE *const iend = istart + srcSize; -+ const BYTE *ip = istart; -+ -+ /* check */ -+ if (srcSize < MIN_SEQUENCES_SIZE) -+ return ERROR(srcSize_wrong); -+ -+ /* SeqHead */ -+ { -+ int nbSeq = *ip++; -+ if (!nbSeq) { -+ *nbSeqPtr = 0; -+ return 1; -+ } -+ if (nbSeq > 0x7F) { -+ if (nbSeq == 0xFF) { -+ if (ip + 2 > iend) -+ return ERROR(srcSize_wrong); -+ nbSeq = ZSTD_readLE16(ip) + LONGNBSEQ, ip += 2; -+ } else { -+ if (ip >= iend) -+ return ERROR(srcSize_wrong); -+ nbSeq = ((nbSeq - 0x80) << 8) + *ip++; -+ } -+ } -+ *nbSeqPtr = nbSeq; -+ } -+ -+ /* FSE table descriptors */ -+ if (ip + 4 > iend) -+ return ERROR(srcSize_wrong); /* minimum possible size */ -+ { -+ symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); -+ symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); -+ symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); -+ ip++; -+ -+ /* Build DTables */ -+ { -+ size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, LLtype, MaxLL, LLFSELog, ip, iend - ip, -+ LL_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace)); -+ if (ZSTD_isError(llhSize)) -+ return ERROR(corruption_detected); -+ ip += llhSize; -+ } -+ { -+ size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, OFtype, MaxOff, OffFSELog, ip, iend - ip, -+ OF_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace)); -+ if (ZSTD_isError(ofhSize)) -+ return ERROR(corruption_detected); -+ ip += ofhSize; -+ } -+ { -+ size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, MLtype, MaxML, MLFSELog, ip, iend - ip, -+ ML_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, 
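ZSTD_decodeSeqHeaders() above parses the sequence count with a short variable-length code: one byte for 0..127, two bytes (high byte biased by 0x80) up to 0x7EFF, and a 0xFF marker followed by a little-endian 16-bit value plus LONGNBSEQ (0x7F00 upstream) beyond that. A standalone sketch of the same decode:

static int readNbSeq_sketch(const unsigned char *ip, size_t srcSize, size_t *consumed)
{
	int nbSeq;
	if (srcSize < 1)
		return -1; /* caller maps negative to srcSize_wrong */
	nbSeq = ip[0];
	if (nbSeq <= 0x7F) {
		*consumed = 1;
		return nbSeq;
	}
	if (nbSeq == 0xFF) { /* 3-byte form : marker + LE16 + LONGNBSEQ */
		if (srcSize < 3)
			return -1;
		*consumed = 3;
		return ((int)ip[1] | ((int)ip[2] << 8)) + 0x7F00;
	}
	if (srcSize < 2) /* 2-byte form */
		return -1;
	*consumed = 2;
	return ((nbSeq - 0x80) << 8) + ip[1];
}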
sizeof(dctx->entropy.workspace)); -+ if (ZSTD_isError(mlhSize)) -+ return ERROR(corruption_detected); -+ ip += mlhSize; -+ } -+ } -+ -+ return ip - istart; -+} -+ -+typedef struct { -+ size_t litLength; -+ size_t matchLength; -+ size_t offset; -+ const BYTE *match; -+} seq_t; -+ -+typedef struct { -+ BIT_DStream_t DStream; -+ FSE_DState_t stateLL; -+ FSE_DState_t stateOffb; -+ FSE_DState_t stateML; -+ size_t prevOffset[ZSTD_REP_NUM]; -+ const BYTE *base; -+ size_t pos; -+ uPtrDiff gotoDict; -+} seqState_t; -+ -+FORCE_NOINLINE -+size_t ZSTD_execSequenceLast7(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base, -+ const BYTE *const vBase, const BYTE *const dictEnd) -+{ -+ BYTE *const oLitEnd = op + sequence.litLength; -+ size_t const sequenceLength = sequence.litLength + sequence.matchLength; -+ BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ -+ BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH; -+ const BYTE *const iLitEnd = *litPtr + sequence.litLength; -+ const BYTE *match = oLitEnd - sequence.offset; -+ -+ /* check */ -+ if (oMatchEnd > oend) -+ return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ -+ if (iLitEnd > litLimit) -+ return ERROR(corruption_detected); /* over-read beyond lit buffer */ -+ if (oLitEnd <= oend_w) -+ return ERROR(GENERIC); /* Precondition */ -+ -+ /* copy literals */ -+ if (op < oend_w) { -+ ZSTD_wildcopy(op, *litPtr, oend_w - op); -+ *litPtr += oend_w - op; -+ op = oend_w; -+ } -+ while (op < oLitEnd) -+ *op++ = *(*litPtr)++; -+ -+ /* copy Match */ -+ if (sequence.offset > (size_t)(oLitEnd - base)) { -+ /* offset beyond prefix */ -+ if (sequence.offset > (size_t)(oLitEnd - vBase)) -+ return ERROR(corruption_detected); -+ match = dictEnd - (base - match); -+ if (match + sequence.matchLength <= dictEnd) { -+ memmove(oLitEnd, match, sequence.matchLength); -+ return sequenceLength; -+ } -+ /* span extDict & currPrefixSegment */ -+ { -+ size_t const length1 = dictEnd - match; -+ memmove(oLitEnd, match, length1); -+ op = oLitEnd + length1; -+ sequence.matchLength -= length1; -+ match = base; -+ } -+ } -+ while (op < oMatchEnd) -+ *op++ = *match++; -+ return sequenceLength; -+} -+ -+static seq_t ZSTD_decodeSequence(seqState_t *seqState) -+{ -+ seq_t seq; -+ -+ U32 const llCode = FSE_peekSymbol(&seqState->stateLL); -+ U32 const mlCode = FSE_peekSymbol(&seqState->stateML); -+ U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */ -+ -+ U32 const llBits = LL_bits[llCode]; -+ U32 const mlBits = ML_bits[mlCode]; -+ U32 const ofBits = ofCode; -+ U32 const totalBits = llBits + mlBits + ofBits; -+ -+ static const U32 LL_base[MaxLL + 1] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, -+ 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000}; -+ -+ static const U32 ML_base[MaxML + 1] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, -+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41, -+ 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003}; -+ -+ static const U32 OF_base[MaxOff + 1] = {0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD, -+ 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, -+ 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD}; -+ -+ /* 
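The LL_base/ML_base/OF_base tables above all follow one scheme: the FSE symbol selects a base value plus a count of raw extra bits, and the decoded value is base + bits (e.g. llCode 16 has base 16 with 1 extra bit, so it yields 16 or 17). A sketch with the bit reader abstracted behind a callback, since the real BIT_DStream_t is stateful:

static size_t baseTableDecode_sketch(const unsigned *baseTable,
				     const unsigned *bitsTable, unsigned code,
				     unsigned (*readBits)(void *, unsigned),
				     void *bitSrc)
{
	unsigned const nbBits = bitsTable[code];
	return (size_t)baseTable[code] + (nbBits ? readBits(bitSrc, nbBits) : 0);
}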
sequence */ -+ { -+ size_t offset; -+ if (!ofCode) -+ offset = 0; -+ else { -+ offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ -+ if (ZSTD_32bits()) -+ BIT_reloadDStream(&seqState->DStream); -+ } -+ -+ if (ofCode <= 1) { -+ offset += (llCode == 0); -+ if (offset) { -+ size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; -+ temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ -+ if (offset != 1) -+ seqState->prevOffset[2] = seqState->prevOffset[1]; -+ seqState->prevOffset[1] = seqState->prevOffset[0]; -+ seqState->prevOffset[0] = offset = temp; -+ } else { -+ offset = seqState->prevOffset[0]; -+ } -+ } else { -+ seqState->prevOffset[2] = seqState->prevOffset[1]; -+ seqState->prevOffset[1] = seqState->prevOffset[0]; -+ seqState->prevOffset[0] = offset; -+ } -+ seq.offset = offset; -+ } -+ -+ seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */ -+ if (ZSTD_32bits() && (mlBits + llBits > 24)) -+ BIT_reloadDStream(&seqState->DStream); -+ -+ seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */ -+ if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog))) -+ BIT_reloadDStream(&seqState->DStream); -+ -+ /* ANS state update */ -+ FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ -+ FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ -+ if (ZSTD_32bits()) -+ BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ -+ FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ -+ -+ seq.match = NULL; -+ -+ return seq; -+} -+ -+FORCE_INLINE -+size_t ZSTD_execSequence(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base, -+ const BYTE *const vBase, const BYTE *const dictEnd) -+{ -+ BYTE *const oLitEnd = op + sequence.litLength; -+ size_t const sequenceLength = sequence.litLength + sequence.matchLength; -+ BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ -+ BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH; -+ const BYTE *const iLitEnd = *litPtr + sequence.litLength; -+ const BYTE *match = oLitEnd - sequence.offset; -+ -+ /* check */ -+ if (oMatchEnd > oend) -+ return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ -+ if (iLitEnd > litLimit) -+ return ERROR(corruption_detected); /* over-read beyond lit buffer */ -+ if (oLitEnd > oend_w) -+ return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd); -+ -+ /* copy Literals */ -+ ZSTD_copy8(op, *litPtr); -+ if (sequence.litLength > 8) -+ ZSTD_wildcopy(op + 8, (*litPtr) + 8, -+ sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ -+ op = oLitEnd; -+ *litPtr = iLitEnd; /* update for next sequence */ -+ -+ /* copy Match */ -+ if (sequence.offset > (size_t)(oLitEnd - base)) { -+ /* offset beyond prefix */ -+ if (sequence.offset > (size_t)(oLitEnd - vBase)) -+ return ERROR(corruption_detected); -+ match = dictEnd + (match - base); -+ if (match + sequence.matchLength <= dictEnd) { -+ memmove(oLitEnd, match, sequence.matchLength); -+ return sequenceLength; -+ } -+ /* span extDict & currPrefixSegment */ -+ { -+ size_t const length1 = dictEnd - match; -+ memmove(oLitEnd, match, length1); 
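The repeat-offset logic above keeps the three most recent offsets. After the `offset += (llCode == 0)` adjustment, index 0 reuses prevOffset[0] unchanged, indexes 1-2 pick an older entry, index 3 means prevOffset[0] - 1, and the chosen value is rotated to the front. A sketch of that update, where idx is the adjusted index in 1..3:

static size_t resolveRepOffset_sketch(size_t prev[3], size_t idx)
{
	size_t off = (idx == 3) ? prev[0] - 1 : prev[idx];
	if (off == 0)
		off = 1; /* 0 is invalid : corrupted input is forced to 1 */
	if (idx != 1)
		prev[2] = prev[1]; /* full rotate unless this is the rep0/rep1 swap */
	prev[1] = prev[0];
	prev[0] = off;
	return off;
}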
-+ op = oLitEnd + length1; -+ sequence.matchLength -= length1; -+ match = base; -+ if (op > oend_w || sequence.matchLength < MINMATCH) { -+ U32 i; -+ for (i = 0; i < sequence.matchLength; ++i) -+ op[i] = match[i]; -+ return sequenceLength; -+ } -+ } -+ } -+ /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */ -+ -+ /* match within prefix */ -+ if (sequence.offset < 8) { -+ /* close range match, overlap */ -+ static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ -+ static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */ -+ int const sub2 = dec64table[sequence.offset]; -+ op[0] = match[0]; -+ op[1] = match[1]; -+ op[2] = match[2]; -+ op[3] = match[3]; -+ match += dec32table[sequence.offset]; -+ ZSTD_copy4(op + 4, match); -+ match -= sub2; -+ } else { -+ ZSTD_copy8(op, match); -+ } -+ op += 8; -+ match += 8; -+ -+ if (oMatchEnd > oend - (16 - MINMATCH)) { -+ if (op < oend_w) { -+ ZSTD_wildcopy(op, match, oend_w - op); -+ match += oend_w - op; -+ op = oend_w; -+ } -+ while (op < oMatchEnd) -+ *op++ = *match++; -+ } else { -+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */ -+ } -+ return sequenceLength; -+} -+ -+static size_t ZSTD_decompressSequences(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize) -+{ -+ const BYTE *ip = (const BYTE *)seqStart; -+ const BYTE *const iend = ip + seqSize; -+ BYTE *const ostart = (BYTE * const)dst; -+ BYTE *const oend = ostart + maxDstSize; -+ BYTE *op = ostart; -+ const BYTE *litPtr = dctx->litPtr; -+ const BYTE *const litEnd = litPtr + dctx->litSize; -+ const BYTE *const base = (const BYTE *)(dctx->base); -+ const BYTE *const vBase = (const BYTE *)(dctx->vBase); -+ const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd); -+ int nbSeq; -+ -+ /* Build Decoding Tables */ -+ { -+ size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); -+ if (ZSTD_isError(seqHSize)) -+ return seqHSize; -+ ip += seqHSize; -+ } -+ -+ /* Regen sequences */ -+ if (nbSeq) { -+ seqState_t seqState; -+ dctx->fseEntropy = 1; -+ { -+ U32 i; -+ for (i = 0; i < ZSTD_REP_NUM; i++) -+ seqState.prevOffset[i] = dctx->entropy.rep[i]; -+ } -+ CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected); -+ FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); -+ FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); -+ FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); -+ -+ for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq;) { -+ nbSeq--; -+ { -+ seq_t const sequence = ZSTD_decodeSequence(&seqState); -+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd); -+ if (ZSTD_isError(oneSeqSize)) -+ return oneSeqSize; -+ op += oneSeqSize; -+ } -+ } -+ -+ /* check if reached exact end */ -+ if (nbSeq) -+ return ERROR(corruption_detected); -+ /* save reps for next block */ -+ { -+ U32 i; -+ for (i = 0; i < ZSTD_REP_NUM; i++) -+ dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); -+ } -+ } -+ -+ /* last literal segment */ -+ { -+ size_t const lastLLSize = litEnd - litPtr; -+ if (lastLLSize > (size_t)(oend - op)) -+ return ERROR(dstSize_tooSmall); -+ memcpy(op, litPtr, lastLLSize); -+ op += lastLLSize; -+ } -+ -+ return op - ostart; -+} -+ -+FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t *seqState, int const longOffsets) -+{ -+ seq_t seq; -+ -+ U32 const llCode = 
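dec32table/dec64table above handle matches that overlap their own output (offset < 8): the first four bytes are copied one at a time, dec32table then advances the match pointer so a 4-byte copy stays period-correct, and dec64table rewinds it before the 8-byte wide copies take over. The reference behaviour all of that must reproduce is the plain ascending byte loop:

static void overlapCopy_sketch(unsigned char *op, size_t offset, size_t length)
{
	const unsigned char *match = op - offset; /* may overlap [op, op+length) */
	size_t i;
	for (i = 0; i < length; i++)
		op[i] = match[i]; /* strictly ascending : repeats the last `offset` bytes */
}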
FSE_peekSymbol(&seqState->stateLL); -+ U32 const mlCode = FSE_peekSymbol(&seqState->stateML); -+ U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */ -+ -+ U32 const llBits = LL_bits[llCode]; -+ U32 const mlBits = ML_bits[mlCode]; -+ U32 const ofBits = ofCode; -+ U32 const totalBits = llBits + mlBits + ofBits; -+ -+ static const U32 LL_base[MaxLL + 1] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, -+ 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000}; -+ -+ static const U32 ML_base[MaxML + 1] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, -+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41, -+ 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003}; -+ -+ static const U32 OF_base[MaxOff + 1] = {0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD, -+ 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, -+ 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD}; -+ -+ /* sequence */ -+ { -+ size_t offset; -+ if (!ofCode) -+ offset = 0; -+ else { -+ if (longOffsets) { -+ int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN); -+ offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); -+ if (ZSTD_32bits() || extraBits) -+ BIT_reloadDStream(&seqState->DStream); -+ if (extraBits) -+ offset += BIT_readBitsFast(&seqState->DStream, extraBits); -+ } else { -+ offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ -+ if (ZSTD_32bits()) -+ BIT_reloadDStream(&seqState->DStream); -+ } -+ } -+ -+ if (ofCode <= 1) { -+ offset += (llCode == 0); -+ if (offset) { -+ size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; -+ temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ -+ if (offset != 1) -+ seqState->prevOffset[2] = seqState->prevOffset[1]; -+ seqState->prevOffset[1] = seqState->prevOffset[0]; -+ seqState->prevOffset[0] = offset = temp; -+ } else { -+ offset = seqState->prevOffset[0]; -+ } -+ } else { -+ seqState->prevOffset[2] = seqState->prevOffset[1]; -+ seqState->prevOffset[1] = seqState->prevOffset[0]; -+ seqState->prevOffset[0] = offset; -+ } -+ seq.offset = offset; -+ } -+ -+ seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */ -+ if (ZSTD_32bits() && (mlBits + llBits > 24)) -+ BIT_reloadDStream(&seqState->DStream); -+ -+ seq.litLength = LL_base[llCode] + ((llCode > 15) ? 
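The longOffsets path above exists because the bit container only guarantees STREAM_ACCUMULATOR_MIN valid bits (25 on 32-bit builds upstream); an offset code wider than that is read as high bits, a stream reload, then the remaining low bits. A sketch with the stream abstracted behind callbacks:

static size_t readLongOffset_sketch(const unsigned *ofBase, unsigned ofCode,
				    unsigned ofBits, unsigned accMin,
				    unsigned (*readBits)(void *, unsigned),
				    void (*reload)(void *), void *bitSrc)
{
	unsigned const hiBits = (ofBits < accMin) ? ofBits : accMin;
	unsigned const extraBits = ofBits - hiBits;
	size_t offset = (size_t)ofBase[ofCode] +
			((size_t)readBits(bitSrc, hiBits) << extraBits);
	if (extraBits) {
		reload(bitSrc); /* refill before the second read */
		offset += readBits(bitSrc, extraBits);
	}
	return offset;
}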
BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */ -+ if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog))) -+ BIT_reloadDStream(&seqState->DStream); -+ -+ { -+ size_t const pos = seqState->pos + seq.litLength; -+ seq.match = seqState->base + pos - seq.offset; /* single memory segment */ -+ if (seq.offset > pos) -+ seq.match += seqState->gotoDict; /* separate memory segment */ -+ seqState->pos = pos + seq.matchLength; -+ } -+ -+ /* ANS state update */ -+ FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ -+ FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ -+ if (ZSTD_32bits()) -+ BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ -+ FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ -+ -+ return seq; -+} -+ -+static seq_t ZSTD_decodeSequenceLong(seqState_t *seqState, unsigned const windowSize) -+{ -+ if (ZSTD_highbit32(windowSize) > STREAM_ACCUMULATOR_MIN) { -+ return ZSTD_decodeSequenceLong_generic(seqState, 1); -+ } else { -+ return ZSTD_decodeSequenceLong_generic(seqState, 0); -+ } -+} -+ -+FORCE_INLINE -+size_t ZSTD_execSequenceLong(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base, -+ const BYTE *const vBase, const BYTE *const dictEnd) -+{ -+ BYTE *const oLitEnd = op + sequence.litLength; -+ size_t const sequenceLength = sequence.litLength + sequence.matchLength; -+ BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ -+ BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH; -+ const BYTE *const iLitEnd = *litPtr + sequence.litLength; -+ const BYTE *match = sequence.match; -+ -+ /* check */ -+ if (oMatchEnd > oend) -+ return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ -+ if (iLitEnd > litLimit) -+ return ERROR(corruption_detected); /* over-read beyond lit buffer */ -+ if (oLitEnd > oend_w) -+ return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd); -+ -+ /* copy Literals */ -+ ZSTD_copy8(op, *litPtr); -+ if (sequence.litLength > 8) -+ ZSTD_wildcopy(op + 8, (*litPtr) + 8, -+ sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ -+ op = oLitEnd; -+ *litPtr = iLitEnd; /* update for next sequence */ -+ -+ /* copy Match */ -+ if (sequence.offset > (size_t)(oLitEnd - base)) { -+ /* offset beyond prefix */ -+ if (sequence.offset > (size_t)(oLitEnd - vBase)) -+ return ERROR(corruption_detected); -+ if (match + sequence.matchLength <= dictEnd) { -+ memmove(oLitEnd, match, sequence.matchLength); -+ return sequenceLength; -+ } -+ /* span extDict & currPrefixSegment */ -+ { -+ size_t const length1 = dictEnd - match; -+ memmove(oLitEnd, match, length1); -+ op = oLitEnd + length1; -+ sequence.matchLength -= length1; -+ match = base; -+ if (op > oend_w || sequence.matchLength < MINMATCH) { -+ U32 i; -+ for (i = 0; i < sequence.matchLength; ++i) -+ op[i] = match[i]; -+ return sequenceLength; -+ } -+ } -+ } -+ /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */ -+ -+ /* match within prefix */ -+ if (sequence.offset < 8) { -+ /* close range match, overlap */ -+ static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ -+ static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */ -+ int const sub2 = dec64table[sequence.offset]; -+ op[0] = match[0]; -+ op[1] = match[1]; -+ op[2] = match[2]; -+ 
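ZSTD_decodeSequenceLong_generic() above precomputes the match address so it can be prefetched long before execution: `pos` tracks the write position in a flattened address space, and gotoDict rebases any address that falls before the prefix start into the dictionary segment. A branch-for-clarity sketch (the real code folds the two segments into one expression to stay cheap):

static const unsigned char *precomputeMatch_sketch(const unsigned char *base,
						   ptrdiff_t gotoDict,
						   size_t pos, size_t offset)
{
	const unsigned char *match = base + pos - offset; /* single-segment guess */
	if (offset > pos)
		match += gotoDict; /* actually lives in the dictionary segment */
	return match;
}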
op[3] = match[3]; -+ match += dec32table[sequence.offset]; -+ ZSTD_copy4(op + 4, match); -+ match -= sub2; -+ } else { -+ ZSTD_copy8(op, match); -+ } -+ op += 8; -+ match += 8; -+ -+ if (oMatchEnd > oend - (16 - MINMATCH)) { -+ if (op < oend_w) { -+ ZSTD_wildcopy(op, match, oend_w - op); -+ match += oend_w - op; -+ op = oend_w; -+ } -+ while (op < oMatchEnd) -+ *op++ = *match++; -+ } else { -+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */ -+ } -+ return sequenceLength; -+} -+ -+static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize) -+{ -+ const BYTE *ip = (const BYTE *)seqStart; -+ const BYTE *const iend = ip + seqSize; -+ BYTE *const ostart = (BYTE * const)dst; -+ BYTE *const oend = ostart + maxDstSize; -+ BYTE *op = ostart; -+ const BYTE *litPtr = dctx->litPtr; -+ const BYTE *const litEnd = litPtr + dctx->litSize; -+ const BYTE *const base = (const BYTE *)(dctx->base); -+ const BYTE *const vBase = (const BYTE *)(dctx->vBase); -+ const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd); -+ unsigned const windowSize = dctx->fParams.windowSize; -+ int nbSeq; -+ -+ /* Build Decoding Tables */ -+ { -+ size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); -+ if (ZSTD_isError(seqHSize)) -+ return seqHSize; -+ ip += seqHSize; -+ } -+ -+ /* Regen sequences */ -+ if (nbSeq) { -+#define STORED_SEQS 4 -+#define STOSEQ_MASK (STORED_SEQS - 1) -+#define ADVANCED_SEQS 4 -+ seq_t *sequences = (seq_t *)dctx->entropy.workspace; -+ int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS); -+ seqState_t seqState; -+ int seqNb; -+ ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.workspace) >= sizeof(seq_t) * STORED_SEQS); -+ dctx->fseEntropy = 1; -+ { -+ U32 i; -+ for (i = 0; i < ZSTD_REP_NUM; i++) -+ seqState.prevOffset[i] = dctx->entropy.rep[i]; -+ } -+ seqState.base = base; -+ seqState.pos = (size_t)(op - base); -+ seqState.gotoDict = (uPtrDiff)dictEnd - (uPtrDiff)base; /* cast to avoid undefined behaviour */ -+ CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected); -+ FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); -+ FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); -+ FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); -+ -+ /* prepare in advance */ -+ for (seqNb = 0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNb < seqAdvance; seqNb++) { -+ sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, windowSize); -+ } -+ if (seqNb < seqAdvance) -+ return ERROR(corruption_detected); -+ -+ /* decode and decompress */ -+ for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb < nbSeq; seqNb++) { -+ seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, windowSize); -+ size_t const oneSeqSize = -+ ZSTD_execSequenceLong(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd); -+ if (ZSTD_isError(oneSeqSize)) -+ return oneSeqSize; -+ ZSTD_PREFETCH(sequence.match); -+ sequences[seqNb & STOSEQ_MASK] = sequence; -+ op += oneSeqSize; -+ } -+ if (seqNb < nbSeq) -+ return ERROR(corruption_detected); -+ -+ /* finish queue */ -+ seqNb -= seqAdvance; -+ for (; seqNb < nbSeq; seqNb++) { -+ size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd); -+ if (ZSTD_isError(oneSeqSize)) -+ return oneSeqSize; -+ op += oneSeqSize; -+ } -+ -+ /* save reps for 
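ZSTD_decompressSequencesLong() above is a 4-deep software pipeline: sequences are decoded ADVANCED_SEQS entries ahead, parked in a power-of-two ring (STORED_SEQS), and their match addresses prefetched, so the cache miss is in flight by the time each sequence is executed. A sketch of the schedule with decode/execute abstracted as callbacks:

typedef struct { const void *match; } seqSketch_t;

static void pipeline_sketch(seqSketch_t (*decodeNext)(void *),
			    void (*execute)(void *, seqSketch_t),
			    void *ctx, int nbSeq)
{
	enum { RING = 4 }; /* STORED_SEQS == ADVANCED_SEQS == 4 above */
	seqSketch_t ring[RING];
	int n;
	int const lead = (nbSeq < RING) ? nbSeq : RING;

	for (n = 0; n < lead; n++) /* prime the ring */
		ring[n & (RING - 1)] = decodeNext(ctx);
	for (; n < nbSeq; n++) { /* steady state : decode N, execute N-4 */
		seqSketch_t const next = decodeNext(ctx);
		/* the real code prefetches next.match here */
		execute(ctx, ring[(n - RING) & (RING - 1)]);
		ring[n & (RING - 1)] = next;
	}
	for (n = nbSeq - lead; n < nbSeq; n++) /* drain the ring */
		execute(ctx, ring[n & (RING - 1)]);
}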
next block */ -+ { -+ U32 i; -+ for (i = 0; i < ZSTD_REP_NUM; i++) -+ dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); -+ } -+ } -+ -+ /* last literal segment */ -+ { -+ size_t const lastLLSize = litEnd - litPtr; -+ if (lastLLSize > (size_t)(oend - op)) -+ return ERROR(dstSize_tooSmall); -+ memcpy(op, litPtr, lastLLSize); -+ op += lastLLSize; -+ } -+ -+ return op - ostart; -+} -+ -+static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -+{ /* blockType == blockCompressed */ -+ const BYTE *ip = (const BYTE *)src; -+ -+ if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX) -+ return ERROR(srcSize_wrong); -+ -+ /* Decode literals section */ -+ { -+ size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); -+ if (ZSTD_isError(litCSize)) -+ return litCSize; -+ ip += litCSize; -+ srcSize -= litCSize; -+ } -+ if (sizeof(size_t) > 4) /* do not enable prefetching on 32-bits x86, as it's performance detrimental */ -+ /* likely because of register pressure */ -+ /* if that's the correct cause, then 32-bits ARM should be affected differently */ -+ /* it would be good to test this on ARM real hardware, to see if prefetch version improves speed */ -+ if (dctx->fParams.windowSize > (1 << 23)) -+ return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize); -+ return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize); -+} -+ -+static void ZSTD_checkContinuity(ZSTD_DCtx *dctx, const void *dst) -+{ -+ if (dst != dctx->previousDstEnd) { /* not contiguous */ -+ dctx->dictEnd = dctx->previousDstEnd; -+ dctx->vBase = (const char *)dst - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base)); -+ dctx->base = dst; -+ dctx->previousDstEnd = dst; -+ } -+} -+ -+size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -+{ -+ size_t dSize; -+ ZSTD_checkContinuity(dctx, dst); -+ dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); -+ dctx->previousDstEnd = (char *)dst + dSize; -+ return dSize; -+} -+ -+/** ZSTD_insertBlock() : -+ insert `src` block into `dctx` history. Useful to track uncompressed blocks. 
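The dispatch above enables the prefetching decoder only when pointers are 64-bit and the frame's window exceeds 8 MB (1 << 23), i.e. when match addresses are likely to miss cache; the comment notes 32-bit x86 regresses, presumably from register pressure. The condition, isolated:

static int useLongDecoder_sketch(size_t ptrSize, unsigned windowSize)
{
	return (ptrSize > 4) && (windowSize > (1U << 23));
}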
*/ -+size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart, size_t blockSize) -+{ -+ ZSTD_checkContinuity(dctx, blockStart); -+ dctx->previousDstEnd = (const char *)blockStart + blockSize; -+ return blockSize; -+} -+ -+size_t ZSTD_generateNxBytes(void *dst, size_t dstCapacity, BYTE byte, size_t length) -+{ -+ if (length > dstCapacity) -+ return ERROR(dstSize_tooSmall); -+ memset(dst, byte, length); -+ return length; -+} -+ -+/** ZSTD_findFrameCompressedSize() : -+ * compatible with legacy mode -+ * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame -+ * `srcSize` must be at least as large as the frame contained -+ * @return : the compressed size of the frame starting at `src` */ -+size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize) -+{ -+ if (srcSize >= ZSTD_skippableHeaderSize && (ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { -+ return ZSTD_skippableHeaderSize + ZSTD_readLE32((const BYTE *)src + 4); -+ } else { -+ const BYTE *ip = (const BYTE *)src; -+ const BYTE *const ipstart = ip; -+ size_t remainingSize = srcSize; -+ ZSTD_frameParams fParams; -+ -+ size_t const headerSize = ZSTD_frameHeaderSize(ip, remainingSize); -+ if (ZSTD_isError(headerSize)) -+ return headerSize; -+ -+ /* Frame Header */ -+ { -+ size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize); -+ if (ZSTD_isError(ret)) -+ return ret; -+ if (ret > 0) -+ return ERROR(srcSize_wrong); -+ } -+ -+ ip += headerSize; -+ remainingSize -= headerSize; -+ -+ /* Loop on each block */ -+ while (1) { -+ blockProperties_t blockProperties; -+ size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); -+ if (ZSTD_isError(cBlockSize)) -+ return cBlockSize; -+ -+ if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) -+ return ERROR(srcSize_wrong); -+ -+ ip += ZSTD_blockHeaderSize + cBlockSize; -+ remainingSize -= ZSTD_blockHeaderSize + cBlockSize; -+ -+ if (blockProperties.lastBlock) -+ break; -+ } -+ -+ if (fParams.checksumFlag) { /* Frame content checksum */ -+ if (remainingSize < 4) -+ return ERROR(srcSize_wrong); -+ ip += 4; -+ remainingSize -= 4; -+ } -+ -+ return ip - ipstart; -+ } -+} -+ -+/*! 
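A sketch of using ZSTD_findFrameCompressedSize() above to walk a buffer of concatenated frames; skippable frames advance the cursor like any other. On failure the error code is returned as-is, so a caller would distinguish counts from errors with ZSTD_isError():

static size_t countFrames_sketch(const void *src, size_t srcSize)
{
	const unsigned char *ip = (const unsigned char *)src;
	size_t nbFrames = 0;
	while (srcSize > 0) {
		size_t const fSize = ZSTD_findFrameCompressedSize(ip, srcSize);
		if (ZSTD_isError(fSize))
			return fSize; /* propagate the error code */
		ip += fSize;
		srcSize -= fSize;
		nbFrames++;
	}
	return nbFrames;
}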
ZSTD_decompressFrame() : -+* @dctx must be properly initialized */ -+static size_t ZSTD_decompressFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void **srcPtr, size_t *srcSizePtr) -+{ -+ const BYTE *ip = (const BYTE *)(*srcPtr); -+ BYTE *const ostart = (BYTE * const)dst; -+ BYTE *const oend = ostart + dstCapacity; -+ BYTE *op = ostart; -+ size_t remainingSize = *srcSizePtr; -+ -+ /* check */ -+ if (remainingSize < ZSTD_frameHeaderSize_min + ZSTD_blockHeaderSize) -+ return ERROR(srcSize_wrong); -+ -+ /* Frame Header */ -+ { -+ size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix); -+ if (ZSTD_isError(frameHeaderSize)) -+ return frameHeaderSize; -+ if (remainingSize < frameHeaderSize + ZSTD_blockHeaderSize) -+ return ERROR(srcSize_wrong); -+ CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize)); -+ ip += frameHeaderSize; -+ remainingSize -= frameHeaderSize; -+ } -+ -+ /* Loop on each block */ -+ while (1) { -+ size_t decodedSize; -+ blockProperties_t blockProperties; -+ size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); -+ if (ZSTD_isError(cBlockSize)) -+ return cBlockSize; -+ -+ ip += ZSTD_blockHeaderSize; -+ remainingSize -= ZSTD_blockHeaderSize; -+ if (cBlockSize > remainingSize) -+ return ERROR(srcSize_wrong); -+ -+ switch (blockProperties.blockType) { -+ case bt_compressed: decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend - op, ip, cBlockSize); break; -+ case bt_raw: decodedSize = ZSTD_copyRawBlock(op, oend - op, ip, cBlockSize); break; -+ case bt_rle: decodedSize = ZSTD_generateNxBytes(op, oend - op, *ip, blockProperties.origSize); break; -+ case bt_reserved: -+ default: return ERROR(corruption_detected); -+ } -+ -+ if (ZSTD_isError(decodedSize)) -+ return decodedSize; -+ if (dctx->fParams.checksumFlag) -+ xxh64_update(&dctx->xxhState, op, decodedSize); -+ op += decodedSize; -+ ip += cBlockSize; -+ remainingSize -= cBlockSize; -+ if (blockProperties.lastBlock) -+ break; -+ } -+ -+ if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */ -+ U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState); -+ U32 checkRead; -+ if (remainingSize < 4) -+ return ERROR(checksum_wrong); -+ checkRead = ZSTD_readLE32(ip); -+ if (checkRead != checkCalc) -+ return ERROR(checksum_wrong); -+ ip += 4; -+ remainingSize -= 4; -+ } -+ -+ /* Allow caller to get size read */ -+ *srcPtr = ip; -+ *srcSizePtr = remainingSize; -+ return op - ostart; -+} -+ -+static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict); -+static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict); -+ -+static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, -+ const ZSTD_DDict *ddict) -+{ -+ void *const dststart = dst; -+ -+ if (ddict) { -+ if (dict) { -+ /* programmer error, these two cases should be mutually exclusive */ -+ return ERROR(GENERIC); -+ } -+ -+ dict = ZSTD_DDictDictContent(ddict); -+ dictSize = ZSTD_DDictDictSize(ddict); -+ } -+ -+ while (srcSize >= ZSTD_frameHeaderSize_prefix) { -+ U32 magicNumber; -+ -+ magicNumber = ZSTD_readLE32(src); -+ if (magicNumber != ZSTD_MAGICNUMBER) { -+ if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { -+ size_t skippableSize; -+ if (srcSize < ZSTD_skippableHeaderSize) -+ return ERROR(srcSize_wrong); -+ skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize; -+ if (srcSize < skippableSize) { -+ return ERROR(srcSize_wrong); -+ } -+ -+ src = 
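The skippable-frame path above relies on a fixed 8-byte header: a magic in the ZSTD_MAGIC_SKIPPABLE_START family (0x184D2A50 with a free low nibble), then a little-endian 32-bit payload size. A standalone size probe:

static size_t skippableFrameSize_sketch(const unsigned char *src, size_t srcSize)
{
	unsigned magic, content;
	if (srcSize < 8)
		return 0; /* too small to hold the header */
	magic = (unsigned)src[0] | ((unsigned)src[1] << 8) |
		((unsigned)src[2] << 16) | ((unsigned)src[3] << 24);
	if ((magic & 0xFFFFFFF0U) != 0x184D2A50U)
		return 0; /* not a skippable frame */
	content = (unsigned)src[4] | ((unsigned)src[5] << 8) |
		  ((unsigned)src[6] << 16) | ((unsigned)src[7] << 24);
	return 8 + (size_t)content; /* ZSTD_skippableHeaderSize + payload */
}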
(const BYTE *)src + skippableSize; -+ srcSize -= skippableSize; -+ continue; -+ } else { -+ return ERROR(prefix_unknown); -+ } -+ } -+ -+ if (ddict) { -+ /* we were called from ZSTD_decompress_usingDDict */ -+ ZSTD_refDDict(dctx, ddict); -+ } else { -+ /* this will initialize correctly with no dict if dict == NULL, so -+ * use this in all cases but ddict */ -+ CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize)); -+ } -+ ZSTD_checkContinuity(dctx, dst); -+ -+ { -+ const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize); -+ if (ZSTD_isError(res)) -+ return res; -+ /* don't need to bounds check this, ZSTD_decompressFrame will have -+ * already */ -+ dst = (BYTE *)dst + res; -+ dstCapacity -= res; -+ } -+ } -+ -+ if (srcSize) -+ return ERROR(srcSize_wrong); /* input not entirely consumed */ -+ -+ return (BYTE *)dst - (BYTE *)dststart; -+} -+ -+size_t ZSTD_decompress_usingDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize) -+{ -+ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL); -+} -+ -+size_t ZSTD_decompressDCtx(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -+{ -+ return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0); -+} -+ -+/*-************************************** -+* Advanced Streaming Decompression API -+* Bufferless and synchronous -+****************************************/ -+size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx) { return dctx->expected; } -+ -+ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx) -+{ -+ switch (dctx->stage) { -+ default: /* should not happen */ -+ case ZSTDds_getFrameHeaderSize: -+ case ZSTDds_decodeFrameHeader: return ZSTDnit_frameHeader; -+ case ZSTDds_decodeBlockHeader: return ZSTDnit_blockHeader; -+ case ZSTDds_decompressBlock: return ZSTDnit_block; -+ case ZSTDds_decompressLastBlock: return ZSTDnit_lastBlock; -+ case ZSTDds_checkChecksum: return ZSTDnit_checksum; -+ case ZSTDds_decodeSkippableHeader: -+ case ZSTDds_skipFrame: return ZSTDnit_skippableFrame; -+ } -+} -+ -+int ZSTD_isSkipFrame(ZSTD_DCtx *dctx) { return dctx->stage == ZSTDds_skipFrame; } /* for zbuff */ -+ -+/** ZSTD_decompressContinue() : -+* @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity) -+* or an error code, which can be tested using ZSTD_isError() */ -+size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -+{ -+ /* Sanity check */ -+ if (srcSize != dctx->expected) -+ return ERROR(srcSize_wrong); -+ if (dstCapacity) -+ ZSTD_checkContinuity(dctx, dst); -+ -+ switch (dctx->stage) { -+ case ZSTDds_getFrameHeaderSize: -+ if (srcSize != ZSTD_frameHeaderSize_prefix) -+ return ERROR(srcSize_wrong); /* impossible */ -+ if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ -+ memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix); -+ dctx->expected = ZSTD_skippableHeaderSize - ZSTD_frameHeaderSize_prefix; /* magic number + skippable frame length */ -+ dctx->stage = ZSTDds_decodeSkippableHeader; -+ return 0; -+ } -+ dctx->headerSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix); -+ if (ZSTD_isError(dctx->headerSize)) -+ return dctx->headerSize; -+ memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix); -+ if (dctx->headerSize > ZSTD_frameHeaderSize_prefix) { -+ dctx->expected = dctx->headerSize - ZSTD_frameHeaderSize_prefix; -+ dctx->stage 
= ZSTDds_decodeFrameHeader; -+ return 0; -+ } -+ dctx->expected = 0; /* not necessary to copy more */ -+ -+ case ZSTDds_decodeFrameHeader: -+ memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); -+ CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize)); -+ dctx->expected = ZSTD_blockHeaderSize; -+ dctx->stage = ZSTDds_decodeBlockHeader; -+ return 0; -+ -+ case ZSTDds_decodeBlockHeader: { -+ blockProperties_t bp; -+ size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); -+ if (ZSTD_isError(cBlockSize)) -+ return cBlockSize; -+ dctx->expected = cBlockSize; -+ dctx->bType = bp.blockType; -+ dctx->rleSize = bp.origSize; -+ if (cBlockSize) { -+ dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock; -+ return 0; -+ } -+ /* empty block */ -+ if (bp.lastBlock) { -+ if (dctx->fParams.checksumFlag) { -+ dctx->expected = 4; -+ dctx->stage = ZSTDds_checkChecksum; -+ } else { -+ dctx->expected = 0; /* end of frame */ -+ dctx->stage = ZSTDds_getFrameHeaderSize; -+ } -+ } else { -+ dctx->expected = 3; /* go directly to next header */ -+ dctx->stage = ZSTDds_decodeBlockHeader; -+ } -+ return 0; -+ } -+ case ZSTDds_decompressLastBlock: -+ case ZSTDds_decompressBlock: { -+ size_t rSize; -+ switch (dctx->bType) { -+ case bt_compressed: rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); break; -+ case bt_raw: rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); break; -+ case bt_rle: rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize); break; -+ case bt_reserved: /* should never happen */ -+ default: return ERROR(corruption_detected); -+ } -+ if (ZSTD_isError(rSize)) -+ return rSize; -+ if (dctx->fParams.checksumFlag) -+ xxh64_update(&dctx->xxhState, dst, rSize); -+ -+ if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */ -+ if (dctx->fParams.checksumFlag) { /* another round for frame checksum */ -+ dctx->expected = 4; -+ dctx->stage = ZSTDds_checkChecksum; -+ } else { -+ dctx->expected = 0; /* ends here */ -+ dctx->stage = ZSTDds_getFrameHeaderSize; -+ } -+ } else { -+ dctx->stage = ZSTDds_decodeBlockHeader; -+ dctx->expected = ZSTD_blockHeaderSize; -+ dctx->previousDstEnd = (char *)dst + rSize; -+ } -+ return rSize; -+ } -+ case ZSTDds_checkChecksum: { -+ U32 const h32 = (U32)xxh64_digest(&dctx->xxhState); -+ U32 const check32 = ZSTD_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */ -+ if (check32 != h32) -+ return ERROR(checksum_wrong); -+ dctx->expected = 0; -+ dctx->stage = ZSTDds_getFrameHeaderSize; -+ return 0; -+ } -+ case ZSTDds_decodeSkippableHeader: { -+ memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); -+ dctx->expected = ZSTD_readLE32(dctx->headerBuffer + 4); -+ dctx->stage = ZSTDds_skipFrame; -+ return 0; -+ } -+ case ZSTDds_skipFrame: { -+ dctx->expected = 0; -+ dctx->stage = ZSTDds_getFrameHeaderSize; -+ return 0; -+ } -+ default: -+ return ERROR(GENERIC); /* impossible */ -+ } -+} -+ -+static size_t ZSTD_refDictContent(ZSTD_DCtx *dctx, const void *dict, size_t dictSize) -+{ -+ dctx->dictEnd = dctx->previousDstEnd; -+ dctx->vBase = (const char *)dict - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base)); -+ dctx->base = dict; -+ dctx->previousDstEnd = (const char *)dict + dictSize; -+ return 0; -+} -+ -+/* ZSTD_loadEntropy() : -+ * dict : must point at beginning of a valid zstd dictionary -+ * @return : size of entropy tables read */ -+static size_t 
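A sketch of driving the bufferless API above: each ZSTD_decompressContinue() call must receive exactly the ZSTD_nextSrcSizeToDecompress() hint, until the hint drops to zero at end of frame. It assumes `dst` can hold the whole regenerated frame:

static size_t decompressBufferless_sketch(ZSTD_DCtx *dctx, void *dst,
					  size_t dstCapacity,
					  const unsigned char *src, size_t srcSize)
{
	unsigned char *op = (unsigned char *)dst;
	CHECK_F(ZSTD_decompressBegin(dctx));
	while (1) {
		size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
		size_t genSize;
		if (toRead == 0)
			break; /* frame fully decoded */
		if (toRead > srcSize)
			return ERROR(srcSize_wrong);
		genSize = ZSTD_decompressContinue(dctx, op, dstCapacity, src, toRead);
		if (ZSTD_isError(genSize))
			return genSize;
		src += toRead;
		srcSize -= toRead;
		op += genSize;
		dstCapacity -= genSize;
	}
	return (size_t)(op - (unsigned char *)dst);
}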
ZSTD_loadEntropy(ZSTD_entropyTables_t *entropy, const void *const dict, size_t const dictSize) -+{ -+ const BYTE *dictPtr = (const BYTE *)dict; -+ const BYTE *const dictEnd = dictPtr + dictSize; -+ -+ if (dictSize <= 8) -+ return ERROR(dictionary_corrupted); -+ dictPtr += 8; /* skip header = magic + dictID */ -+ -+ { -+ size_t const hSize = HUF_readDTableX4_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, entropy->workspace, sizeof(entropy->workspace)); -+ if (HUF_isError(hSize)) -+ return ERROR(dictionary_corrupted); -+ dictPtr += hSize; -+ } -+ -+ { -+ short offcodeNCount[MaxOff + 1]; -+ U32 offcodeMaxValue = MaxOff, offcodeLog; -+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr); -+ if (FSE_isError(offcodeHeaderSize)) -+ return ERROR(dictionary_corrupted); -+ if (offcodeLog > OffFSELog) -+ return ERROR(dictionary_corrupted); -+ CHECK_E(FSE_buildDTable_wksp(entropy->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted); -+ dictPtr += offcodeHeaderSize; -+ } -+ -+ { -+ short matchlengthNCount[MaxML + 1]; -+ unsigned matchlengthMaxValue = MaxML, matchlengthLog; -+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr); -+ if (FSE_isError(matchlengthHeaderSize)) -+ return ERROR(dictionary_corrupted); -+ if (matchlengthLog > MLFSELog) -+ return ERROR(dictionary_corrupted); -+ CHECK_E(FSE_buildDTable_wksp(entropy->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted); -+ dictPtr += matchlengthHeaderSize; -+ } -+ -+ { -+ short litlengthNCount[MaxLL + 1]; -+ unsigned litlengthMaxValue = MaxLL, litlengthLog; -+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr); -+ if (FSE_isError(litlengthHeaderSize)) -+ return ERROR(dictionary_corrupted); -+ if (litlengthLog > LLFSELog) -+ return ERROR(dictionary_corrupted); -+ CHECK_E(FSE_buildDTable_wksp(entropy->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted); -+ dictPtr += litlengthHeaderSize; -+ } -+ -+ if (dictPtr + 12 > dictEnd) -+ return ERROR(dictionary_corrupted); -+ { -+ int i; -+ size_t const dictContentSize = (size_t)(dictEnd - (dictPtr + 12)); -+ for (i = 0; i < 3; i++) { -+ U32 const rep = ZSTD_readLE32(dictPtr); -+ dictPtr += 4; -+ if (rep == 0 || rep >= dictContentSize) -+ return ERROR(dictionary_corrupted); -+ entropy->rep[i] = rep; -+ } -+ } -+ -+ return dictPtr - (const BYTE *)dict; -+} -+ -+static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx *dctx, const void *dict, size_t dictSize) -+{ -+ if (dictSize < 8) -+ return ZSTD_refDictContent(dctx, dict, dictSize); -+ { -+ U32 const magic = ZSTD_readLE32(dict); -+ if (magic != ZSTD_DICT_MAGIC) { -+ return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */ -+ } -+ } -+ dctx->dictID = ZSTD_readLE32((const char *)dict + 4); -+ -+ /* load entropy tables */ -+ { -+ size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize); -+ if (ZSTD_isError(eSize)) -+ return ERROR(dictionary_corrupted); -+ dict = (const char *)dict + eSize; -+ dictSize -= eSize; -+ } -+ dctx->litEntropy = dctx->fseEntropy = 1; -+ -+ /* reference dictionary content */ -+ return ZSTD_refDictContent(dctx, dict, dictSize); -+} -+ -+size_t 
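ZSTD_loadEntropy() above fixes the structured dictionary layout: 4-byte ZSTD_DICT_MAGIC (0xEC30A437), 4-byte dictID, a Huffman table, the offset / match-length / literal-length FSE tables, three 4-byte repeat offsets, then raw content. Anything failing the magic check is loaded as pure content, as ZSTD_decompress_insertDictionary() above shows. A sketch of just that triage:

static int isStructuredDict_sketch(const unsigned char *dict, size_t dictSize)
{
	unsigned magic;
	if (dictSize < 8)
		return 0; /* too small : pure content mode */
	magic = (unsigned)dict[0] | ((unsigned)dict[1] << 8) |
		((unsigned)dict[2] << 16) | ((unsigned)dict[3] << 24);
	return magic == 0xEC30A437U; /* ZSTD_DICT_MAGIC */
}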
ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict, size_t dictSize) -+{ -+ CHECK_F(ZSTD_decompressBegin(dctx)); -+ if (dict && dictSize) -+ CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted); -+ return 0; -+} -+ -+/* ====== ZSTD_DDict ====== */ -+ -+struct ZSTD_DDict_s { -+ void *dictBuffer; -+ const void *dictContent; -+ size_t dictSize; -+ ZSTD_entropyTables_t entropy; -+ U32 dictID; -+ U32 entropyPresent; -+ ZSTD_customMem cMem; -+}; /* typedef'd to ZSTD_DDict within "zstd.h" */ -+ -+size_t ZSTD_DDictWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DDict)); } -+ -+static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict) { return ddict->dictContent; } -+ -+static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict) { return ddict->dictSize; } -+ -+static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict) -+{ -+ ZSTD_decompressBegin(dstDCtx); /* init */ -+ if (ddict) { /* support refDDict on NULL */ -+ dstDCtx->dictID = ddict->dictID; -+ dstDCtx->base = ddict->dictContent; -+ dstDCtx->vBase = ddict->dictContent; -+ dstDCtx->dictEnd = (const BYTE *)ddict->dictContent + ddict->dictSize; -+ dstDCtx->previousDstEnd = dstDCtx->dictEnd; -+ if (ddict->entropyPresent) { -+ dstDCtx->litEntropy = 1; -+ dstDCtx->fseEntropy = 1; -+ dstDCtx->LLTptr = ddict->entropy.LLTable; -+ dstDCtx->MLTptr = ddict->entropy.MLTable; -+ dstDCtx->OFTptr = ddict->entropy.OFTable; -+ dstDCtx->HUFptr = ddict->entropy.hufTable; -+ dstDCtx->entropy.rep[0] = ddict->entropy.rep[0]; -+ dstDCtx->entropy.rep[1] = ddict->entropy.rep[1]; -+ dstDCtx->entropy.rep[2] = ddict->entropy.rep[2]; -+ } else { -+ dstDCtx->litEntropy = 0; -+ dstDCtx->fseEntropy = 0; -+ } -+ } -+} -+ -+static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict *ddict) -+{ -+ ddict->dictID = 0; -+ ddict->entropyPresent = 0; -+ if (ddict->dictSize < 8) -+ return 0; -+ { -+ U32 const magic = ZSTD_readLE32(ddict->dictContent); -+ if (magic != ZSTD_DICT_MAGIC) -+ return 0; /* pure content mode */ -+ } -+ ddict->dictID = ZSTD_readLE32((const char *)ddict->dictContent + 4); -+ -+ /* load entropy tables */ -+ CHECK_E(ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted); -+ ddict->entropyPresent = 1; -+ return 0; -+} -+ -+static ZSTD_DDict *ZSTD_createDDict_advanced(const void *dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem) -+{ -+ if (!customMem.customAlloc || !customMem.customFree) -+ return NULL; -+ -+ { -+ ZSTD_DDict *const ddict = (ZSTD_DDict *)ZSTD_malloc(sizeof(ZSTD_DDict), customMem); -+ if (!ddict) -+ return NULL; -+ ddict->cMem = customMem; -+ -+ if ((byReference) || (!dict) || (!dictSize)) { -+ ddict->dictBuffer = NULL; -+ ddict->dictContent = dict; -+ } else { -+ void *const internalBuffer = ZSTD_malloc(dictSize, customMem); -+ if (!internalBuffer) { -+ ZSTD_freeDDict(ddict); -+ return NULL; -+ } -+ memcpy(internalBuffer, dict, dictSize); -+ ddict->dictBuffer = internalBuffer; -+ ddict->dictContent = internalBuffer; -+ } -+ ddict->dictSize = dictSize; -+ ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ -+ /* parse dictionary content */ -+ { -+ size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict); -+ if (ZSTD_isError(errorCode)) { -+ ZSTD_freeDDict(ddict); -+ return NULL; -+ } -+ } -+ -+ return ddict; -+ } -+} -+ -+/*! ZSTD_initDDict() : -+* Create a digested dictionary, to start decompression without startup delay. 
-+* `dict` content is copied inside DDict. -+* Consequently, `dict` can be released after `ZSTD_DDict` creation */ -+ZSTD_DDict *ZSTD_initDDict(const void *dict, size_t dictSize, void *workspace, size_t workspaceSize) -+{ -+ ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); -+ return ZSTD_createDDict_advanced(dict, dictSize, 1, stackMem); -+} -+ -+size_t ZSTD_freeDDict(ZSTD_DDict *ddict) -+{ -+ if (ddict == NULL) -+ return 0; /* support free on NULL */ -+ { -+ ZSTD_customMem const cMem = ddict->cMem; -+ ZSTD_free(ddict->dictBuffer, cMem); -+ ZSTD_free(ddict, cMem); -+ return 0; -+ } -+} -+ -+/*! ZSTD_getDictID_fromDict() : -+ * Provides the dictID stored within dictionary. -+ * if @return == 0, the dictionary is not conformant with Zstandard specification. -+ * It can still be loaded, but as a content-only dictionary. */ -+unsigned ZSTD_getDictID_fromDict(const void *dict, size_t dictSize) -+{ -+ if (dictSize < 8) -+ return 0; -+ if (ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC) -+ return 0; -+ return ZSTD_readLE32((const char *)dict + 4); -+} -+ -+/*! ZSTD_getDictID_fromDDict() : -+ * Provides the dictID of the dictionary loaded into `ddict`. -+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. -+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ -+unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict) -+{ -+ if (ddict == NULL) -+ return 0; -+ return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize); -+} -+ -+/*! ZSTD_getDictID_fromFrame() : -+ * Provides the dictID required to decompressed the frame stored within `src`. -+ * If @return == 0, the dictID could not be decoded. -+ * This could for one of the following reasons : -+ * - The frame does not require a dictionary to be decoded (most common case). -+ * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information. -+ * Note : this use case also happens when using a non-conformant dictionary. -+ * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`). -+ * - This is not a Zstandard frame. -+ * When identifying the exact failure cause, it's possible to used ZSTD_getFrameParams(), which will provide a more precise error code. */ -+unsigned ZSTD_getDictID_fromFrame(const void *src, size_t srcSize) -+{ -+ ZSTD_frameParams zfp = {0, 0, 0, 0}; -+ size_t const hError = ZSTD_getFrameParams(&zfp, src, srcSize); -+ if (ZSTD_isError(hError)) -+ return 0; -+ return zfp.dictID; -+} -+ -+/*! ZSTD_decompress_usingDDict() : -+* Decompression using a pre-digested Dictionary -+* Use dictionary without significant overhead. 
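A sketch pairing ZSTD_getDictID_fromFrame() with ZSTD_getDictID_fromDDict() above: 0 means "unknown" on either side, so only a non-zero, equal pair confirms that this ddict can decode that frame.

static int dictMatchesFrame_sketch(const ZSTD_DDict *ddict,
				   const void *src, size_t srcSize)
{
	unsigned const frameID = ZSTD_getDictID_fromFrame(src, srcSize);
	unsigned const dictID = ZSTD_getDictID_fromDDict(ddict);
	return (frameID != 0) && (frameID == dictID);
}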
*/ -+size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_DDict *ddict) -+{ -+ /* pass content and size in case legacy frames are encountered */ -+ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, NULL, 0, ddict); -+} -+ -+/*===================================== -+* Streaming decompression -+*====================================*/ -+ -+typedef enum { zdss_init, zdss_loadHeader, zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage; -+ -+/* *** Resource management *** */ -+struct ZSTD_DStream_s { -+ ZSTD_DCtx *dctx; -+ ZSTD_DDict *ddictLocal; -+ const ZSTD_DDict *ddict; -+ ZSTD_frameParams fParams; -+ ZSTD_dStreamStage stage; -+ char *inBuff; -+ size_t inBuffSize; -+ size_t inPos; -+ size_t maxWindowSize; -+ char *outBuff; -+ size_t outBuffSize; -+ size_t outStart; -+ size_t outEnd; -+ size_t blockSize; -+ BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; /* tmp buffer to store frame header */ -+ size_t lhSize; -+ ZSTD_customMem customMem; -+ void *legacyContext; -+ U32 previousLegacyVersion; -+ U32 legacyVersion; -+ U32 hostageByte; -+}; /* typedef'd to ZSTD_DStream within "zstd.h" */ -+ -+size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize) -+{ -+ size_t const blockSize = MIN(maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); -+ size_t const inBuffSize = blockSize; -+ size_t const outBuffSize = maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2; -+ return ZSTD_DCtxWorkspaceBound() + ZSTD_ALIGN(sizeof(ZSTD_DStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize); -+} -+ -+static ZSTD_DStream *ZSTD_createDStream_advanced(ZSTD_customMem customMem) -+{ -+ ZSTD_DStream *zds; -+ -+ if (!customMem.customAlloc || !customMem.customFree) -+ return NULL; -+ -+ zds = (ZSTD_DStream *)ZSTD_malloc(sizeof(ZSTD_DStream), customMem); -+ if (zds == NULL) -+ return NULL; -+ memset(zds, 0, sizeof(ZSTD_DStream)); -+ memcpy(&zds->customMem, &customMem, sizeof(ZSTD_customMem)); -+ zds->dctx = ZSTD_createDCtx_advanced(customMem); -+ if (zds->dctx == NULL) { -+ ZSTD_freeDStream(zds); -+ return NULL; -+ } -+ zds->stage = zdss_init; -+ zds->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; -+ return zds; -+} -+ -+ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace, size_t workspaceSize) -+{ -+ ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); -+ ZSTD_DStream *zds = ZSTD_createDStream_advanced(stackMem); -+ if (!zds) { -+ return NULL; -+ } -+ -+ zds->maxWindowSize = maxWindowSize; -+ zds->stage = zdss_loadHeader; -+ zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; -+ ZSTD_freeDDict(zds->ddictLocal); -+ zds->ddictLocal = NULL; -+ zds->ddict = zds->ddictLocal; -+ zds->legacyVersion = 0; -+ zds->hostageByte = 0; -+ -+ { -+ size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); -+ size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2; -+ -+ zds->inBuff = (char *)ZSTD_malloc(blockSize, zds->customMem); -+ zds->inBuffSize = blockSize; -+ zds->outBuff = (char *)ZSTD_malloc(neededOutSize, zds->customMem); -+ zds->outBuffSize = neededOutSize; -+ if (zds->inBuff == NULL || zds->outBuff == NULL) { -+ ZSTD_freeDStream(zds); -+ return NULL; -+ } -+ } -+ return zds; -+} -+ -+ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize, const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize) -+{ -+ ZSTD_DStream *zds = ZSTD_initDStream(maxWindowSize, workspace, workspaceSize); -+ if (zds) { -+ zds->ddict = ddict; -+ } -+ return zds; 
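A sketch of a full streaming decode with ZSTD_initDStream() above and ZSTD_decompressStream() defined below; readMore/writeOut are assumed caller-supplied callbacks that refill the input buffer and drain the output buffer (resetting the respective pos), returning nonzero on I/O failure:

static int streamDecode_sketch(ZSTD_DStream *zds,
			       ZSTD_inBuffer *in, ZSTD_outBuffer *out,
			       int (*readMore)(ZSTD_inBuffer *),
			       int (*writeOut)(ZSTD_outBuffer *))
{
	while (1) {
		size_t const hint = ZSTD_decompressStream(zds, out, in);
		if (ZSTD_isError(hint))
			return -1;
		if (hint == 0)
			return writeOut(out); /* frame done : flush what remains */
		if (out->pos == out->size && writeOut(out))
			return -1; /* output buffer full : drain it */
		if (in->pos == in->size && readMore(in))
			return -1; /* input consumed : refill */
	}
}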
-+} -+ -+size_t ZSTD_freeDStream(ZSTD_DStream *zds) -+{ -+ if (zds == NULL) -+ return 0; /* support free on null */ -+ { -+ ZSTD_customMem const cMem = zds->customMem; -+ ZSTD_freeDCtx(zds->dctx); -+ zds->dctx = NULL; -+ ZSTD_freeDDict(zds->ddictLocal); -+ zds->ddictLocal = NULL; -+ ZSTD_free(zds->inBuff, cMem); -+ zds->inBuff = NULL; -+ ZSTD_free(zds->outBuff, cMem); -+ zds->outBuff = NULL; -+ ZSTD_free(zds, cMem); -+ return 0; -+ } -+} -+ -+/* *** Initialization *** */ -+ -+size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX + ZSTD_blockHeaderSize; } -+size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; } -+ -+size_t ZSTD_resetDStream(ZSTD_DStream *zds) -+{ -+ zds->stage = zdss_loadHeader; -+ zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; -+ zds->legacyVersion = 0; -+ zds->hostageByte = 0; -+ return ZSTD_frameHeaderSize_prefix; -+} -+ -+/* ***** Decompression ***** */ -+ -+ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize) -+{ -+ size_t const length = MIN(dstCapacity, srcSize); -+ memcpy(dst, src, length); -+ return length; -+} -+ -+size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inBuffer *input) -+{ -+ const char *const istart = (const char *)(input->src) + input->pos; -+ const char *const iend = (const char *)(input->src) + input->size; -+ const char *ip = istart; -+ char *const ostart = (char *)(output->dst) + output->pos; -+ char *const oend = (char *)(output->dst) + output->size; -+ char *op = ostart; -+ U32 someMoreWork = 1; -+ -+ while (someMoreWork) { -+ switch (zds->stage) { -+ case zdss_init: -+ ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */ -+ /* fall-through */ -+ -+ case zdss_loadHeader: { -+ size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize); -+ if (ZSTD_isError(hSize)) -+ return hSize; -+ if (hSize != 0) { /* need more input */ -+ size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */ -+ if (toLoad > (size_t)(iend - ip)) { /* not enough input to load full header */ -+ memcpy(zds->headerBuffer + zds->lhSize, ip, iend - ip); -+ zds->lhSize += iend - ip; -+ input->pos = input->size; -+ return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) + -+ ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ -+ } -+ memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); -+ zds->lhSize = hSize; -+ ip += toLoad; -+ break; -+ } -+ -+ /* check for single-pass mode opportunity */ -+ if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */ -+ && (U64)(size_t)(oend - op) >= zds->fParams.frameContentSize) { -+ size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend - istart); -+ if (cSize <= (size_t)(iend - istart)) { -+ size_t const decompressedSize = ZSTD_decompress_usingDDict(zds->dctx, op, oend - op, istart, cSize, zds->ddict); -+ if (ZSTD_isError(decompressedSize)) -+ return decompressedSize; -+ ip = istart + cSize; -+ op += decompressedSize; -+ zds->dctx->expected = 0; -+ zds->stage = zdss_init; -+ someMoreWork = 0; -+ break; -+ } -+ } -+ -+ /* Consume header */ -+ ZSTD_refDDict(zds->dctx, zds->ddict); -+ { -+ size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */ -+ CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size)); -+ { -+ size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); -+ CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 
0, zds->headerBuffer + h1Size, h2Size)); -+ } -+ } -+ -+ zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN); -+ if (zds->fParams.windowSize > zds->maxWindowSize) -+ return ERROR(frameParameter_windowTooLarge); -+ -+ /* Buffers are preallocated, but double check */ -+ { -+ size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); -+ size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2; -+ if (zds->inBuffSize < blockSize) { -+ return ERROR(GENERIC); -+ } -+ if (zds->outBuffSize < neededOutSize) { -+ return ERROR(GENERIC); -+ } -+ zds->blockSize = blockSize; -+ } -+ zds->stage = zdss_read; -+ } -+ /* pass-through */ -+ -+ case zdss_read: { -+ size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); -+ if (neededInSize == 0) { /* end of frame */ -+ zds->stage = zdss_init; -+ someMoreWork = 0; -+ break; -+ } -+ if ((size_t)(iend - ip) >= neededInSize) { /* decode directly from src */ -+ const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx); -+ size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart, -+ (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart), ip, neededInSize); -+ if (ZSTD_isError(decodedSize)) -+ return decodedSize; -+ ip += neededInSize; -+ if (!decodedSize && !isSkipFrame) -+ break; /* this was just a header */ -+ zds->outEnd = zds->outStart + decodedSize; -+ zds->stage = zdss_flush; -+ break; -+ } -+ if (ip == iend) { -+ someMoreWork = 0; -+ break; -+ } /* no more input */ -+ zds->stage = zdss_load; -+ /* pass-through */ -+ } -+ -+ case zdss_load: { -+ size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); -+ size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */ -+ size_t loadedSize; -+ if (toLoad > zds->inBuffSize - zds->inPos) -+ return ERROR(corruption_detected); /* should never happen */ -+ loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend - ip); -+ ip += loadedSize; -+ zds->inPos += loadedSize; -+ if (loadedSize < toLoad) { -+ someMoreWork = 0; -+ break; -+ } /* not enough input, wait for more */ -+ -+ /* decode loaded input */ -+ { -+ const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx); -+ size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart, -+ zds->inBuff, neededInSize); -+ if (ZSTD_isError(decodedSize)) -+ return decodedSize; -+ zds->inPos = 0; /* input is consumed */ -+ if (!decodedSize && !isSkipFrame) { -+ zds->stage = zdss_read; -+ break; -+ } /* this was just a header */ -+ zds->outEnd = zds->outStart + decodedSize; -+ zds->stage = zdss_flush; -+ /* pass-through */ -+ } -+ } -+ -+ case zdss_flush: { -+ size_t const toFlushSize = zds->outEnd - zds->outStart; -+ size_t const flushedSize = ZSTD_limitCopy(op, oend - op, zds->outBuff + zds->outStart, toFlushSize); -+ op += flushedSize; -+ zds->outStart += flushedSize; -+ if (flushedSize == toFlushSize) { /* flush completed */ -+ zds->stage = zdss_read; -+ if (zds->outStart + zds->blockSize > zds->outBuffSize) -+ zds->outStart = zds->outEnd = 0; -+ break; -+ } -+ /* cannot complete flush */ -+ someMoreWork = 0; -+ break; -+ } -+ default: -+ return ERROR(GENERIC); /* impossible */ -+ } -+ } -+ -+ /* result */ -+ input->pos += (size_t)(ip - istart); -+ output->pos += (size_t)(op - ostart); -+ { -+ size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx); -+ if (!nextSrcSizeHint) { /* frame fully decoded */ -+ if (zds->outEnd 
== zds->outStart) { /* output fully flushed */ -+ if (zds->hostageByte) { -+ if (input->pos >= input->size) { -+ zds->stage = zdss_read; -+ return 1; -+ } /* can't release hostage (not present) */ -+ input->pos++; /* release hostage */ -+ } -+ return 0; -+ } -+ if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */ -+ input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */ -+ zds->hostageByte = 1; -+ } -+ return 1; -+ } -+ nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds->dctx) == ZSTDnit_block); /* preload header of next block */ -+ if (zds->inPos > nextSrcSizeHint) -+ return ERROR(GENERIC); /* should never happen */ -+ nextSrcSizeHint -= zds->inPos; /* already loaded*/ -+ return nextSrcSizeHint; -+ } -+} -+ -+EXPORT_SYMBOL(ZSTD_DCtxWorkspaceBound); -+EXPORT_SYMBOL(ZSTD_initDCtx); -+EXPORT_SYMBOL(ZSTD_decompressDCtx); -+EXPORT_SYMBOL(ZSTD_decompress_usingDict); -+ -+EXPORT_SYMBOL(ZSTD_DDictWorkspaceBound); -+EXPORT_SYMBOL(ZSTD_initDDict); -+EXPORT_SYMBOL(ZSTD_decompress_usingDDict); -+ -+EXPORT_SYMBOL(ZSTD_DStreamWorkspaceBound); -+EXPORT_SYMBOL(ZSTD_initDStream); -+EXPORT_SYMBOL(ZSTD_initDStream_usingDDict); -+EXPORT_SYMBOL(ZSTD_resetDStream); -+EXPORT_SYMBOL(ZSTD_decompressStream); -+EXPORT_SYMBOL(ZSTD_DStreamInSize); -+EXPORT_SYMBOL(ZSTD_DStreamOutSize); -+ -+EXPORT_SYMBOL(ZSTD_findFrameCompressedSize); -+EXPORT_SYMBOL(ZSTD_getFrameContentSize); -+EXPORT_SYMBOL(ZSTD_findDecompressedSize); -+ -+EXPORT_SYMBOL(ZSTD_isFrame); -+EXPORT_SYMBOL(ZSTD_getDictID_fromDict); -+EXPORT_SYMBOL(ZSTD_getDictID_fromDDict); -+EXPORT_SYMBOL(ZSTD_getDictID_fromFrame); -+ -+EXPORT_SYMBOL(ZSTD_getFrameParams); -+EXPORT_SYMBOL(ZSTD_decompressBegin); -+EXPORT_SYMBOL(ZSTD_decompressBegin_usingDict); -+EXPORT_SYMBOL(ZSTD_copyDCtx); -+EXPORT_SYMBOL(ZSTD_nextSrcSizeToDecompress); -+EXPORT_SYMBOL(ZSTD_decompressContinue); -+EXPORT_SYMBOL(ZSTD_nextInputType); -+ -+EXPORT_SYMBOL(ZSTD_decompressBlock); -+EXPORT_SYMBOL(ZSTD_insertBlock); -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+MODULE_DESCRIPTION("Zstd Decompressor"); -diff --git a/lib/zstd/entropy_common.c b/lib/zstd/entropy_common.c -new file mode 100644 -index 0000000..2b0a643 ---- /dev/null -+++ b/lib/zstd/entropy_common.c -@@ -0,0 +1,243 @@ -+/* -+ * Common functions of New Generation Entropy library -+ * Copyright (C) 2016, Yann Collet. -+ * -+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are -+ * met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above -+ * copyright notice, this list of conditions and the following disclaimer -+ * in the documentation and/or other materials provided with the -+ * distribution. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
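The return-value contract implemented above is worth restating from the caller's side: ZSTD_decompressStream() returns 0 only once a frame is fully decoded *and* fully flushed, otherwise a size hint for the next input chunk, and it holds one input byte hostage while decoded data still sits in outBuff. A minimal caller-side sketch, assuming the linux/zstd.h prototypes that accompany this patchset (ZSTD_DStreamWorkspaceBound, ZSTD_initDStream, ZSTD_decompressStream); the 128 MB window cap, the vmalloc()-backed workspace, and the assumption that dst can hold the whole frame are illustrative choices, not part of this file:

	#include <linux/errno.h>
	#include <linux/vmalloc.h>
	#include <linux/zstd.h>

	/* Decompress one complete frame from src into dst (dst assumed large
	 * enough); frames declaring a window above maxWindowSize are rejected
	 * by ZSTD_decompressStream() with frameParameter_windowTooLarge. */
	static int zstd_decompress_example(void *dst, size_t dstCapacity,
					   const void *src, size_t srcSize)
	{
		size_t const maxWindowSize = (size_t)1 << 27; /* illustrative: 128 MB */
		size_t const wkspSize = ZSTD_DStreamWorkspaceBound(maxWindowSize);
		void *const wksp = vmalloc(wkspSize);
		ZSTD_inBuffer in = { src, srcSize, 0 };
		ZSTD_outBuffer out = { dst, dstCapacity, 0 };
		ZSTD_DStream *zds;
		int ret = -EINVAL;

		if (wksp == NULL)
			return -ENOMEM;
		zds = ZSTD_initDStream(maxWindowSize, wksp, wkspSize);
		if (zds == NULL)
			goto out;
		for (;;) {
			size_t const hint = ZSTD_decompressStream(zds, &out, &in);

			if (ZSTD_isError(hint))
				goto out; /* corrupt data, window too large, ... */
			if (hint == 0) { /* frame done and fully flushed */
				ret = 0;
				break;
			}
			if (in.pos == in.size)
				goto out; /* truncated: `hint` more bytes were expected */
		}
	out:
		vfree(wksp);
		return ret;
	}

Sizing the workspace once from maxWindowSize is what lets this port run without any allocation inside the decode loop.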
IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ * -+ * This program is free software; you can redistribute it and/or modify it under -+ * the terms of the GNU General Public License version 2 as published by the -+ * Free Software Foundation. This program is dual-licensed; you may select -+ * either version 2 of the GNU General Public License ("GPL") or BSD license -+ * ("BSD"). -+ * -+ * You can contact the author at : -+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy -+ */ -+ -+/* ************************************* -+* Dependencies -+***************************************/ -+#include "error_private.h" /* ERR_*, ERROR */ -+#include "fse.h" -+#include "huf.h" -+#include "mem.h" -+ -+/*=== Version ===*/ -+unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; } -+ -+/*=== Error Management ===*/ -+unsigned FSE_isError(size_t code) { return ERR_isError(code); } -+ -+unsigned HUF_isError(size_t code) { return ERR_isError(code); } -+ -+/*-************************************************************** -+* FSE NCount encoding-decoding -+****************************************************************/ -+size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSVPtr, unsigned *tableLogPtr, const void *headerBuffer, size_t hbSize) -+{ -+ const BYTE *const istart = (const BYTE *)headerBuffer; -+ const BYTE *const iend = istart + hbSize; -+ const BYTE *ip = istart; -+ int nbBits; -+ int remaining; -+ int threshold; -+ U32 bitStream; -+ int bitCount; -+ unsigned charnum = 0; -+ int previous0 = 0; -+ -+ if (hbSize < 4) -+ return ERROR(srcSize_wrong); -+ bitStream = ZSTD_readLE32(ip); -+ nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */ -+ if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) -+ return ERROR(tableLog_tooLarge); -+ bitStream >>= 4; -+ bitCount = 4; -+ *tableLogPtr = nbBits; -+ remaining = (1 << nbBits) + 1; -+ threshold = 1 << nbBits; -+ nbBits++; -+ -+ while ((remaining > 1) & (charnum <= *maxSVPtr)) { -+ if (previous0) { -+ unsigned n0 = charnum; -+ while ((bitStream & 0xFFFF) == 0xFFFF) { -+ n0 += 24; -+ if (ip < iend - 5) { -+ ip += 2; -+ bitStream = ZSTD_readLE32(ip) >> bitCount; -+ } else { -+ bitStream >>= 16; -+ bitCount += 16; -+ } -+ } -+ while ((bitStream & 3) == 3) { -+ n0 += 3; -+ bitStream >>= 2; -+ bitCount += 2; -+ } -+ n0 += bitStream & 3; -+ bitCount += 2; -+ if (n0 > *maxSVPtr) -+ return ERROR(maxSymbolValue_tooSmall); -+ while (charnum < n0) -+ normalizedCounter[charnum++] = 0; -+ if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) { -+ ip += bitCount >> 3; -+ bitCount &= 7; -+ bitStream = ZSTD_readLE32(ip) >> bitCount; -+ } else { -+ bitStream >>= 2; -+ } -+ } -+ { -+ int const max = (2 * threshold - 1) - remaining; -+ int count; -+ -+ if ((bitStream & (threshold - 1)) < (U32)max) { -+ count = bitStream & (threshold - 1); -+ bitCount += nbBits - 1; -+ } else { -+ count = bitStream & (2 * threshold - 1); -+ if (count >= threshold) -+ count -= max; -+ bitCount += nbBits; -+ } -+ -+ count--; /* extra accuracy 
*/ -+ remaining -= count < 0 ? -count : count; /* -1 means +1 */ -+ normalizedCounter[charnum++] = (short)count; -+ previous0 = !count; -+ while (remaining < threshold) { -+ nbBits--; -+ threshold >>= 1; -+ } -+ -+ if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) { -+ ip += bitCount >> 3; -+ bitCount &= 7; -+ } else { -+ bitCount -= (int)(8 * (iend - 4 - ip)); -+ ip = iend - 4; -+ } -+ bitStream = ZSTD_readLE32(ip) >> (bitCount & 31); -+ } -+ } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */ -+ if (remaining != 1) -+ return ERROR(corruption_detected); -+ if (bitCount > 32) -+ return ERROR(corruption_detected); -+ *maxSVPtr = charnum - 1; -+ -+ ip += (bitCount + 7) >> 3; -+ return ip - istart; -+} -+ -+/*! HUF_readStats() : -+ Read compact Huffman tree, saved by HUF_writeCTable(). -+ `huffWeight` is destination buffer. -+ `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32. -+ @return : size read from `src` , or an error Code . -+ Note : Needed by HUF_readCTable() and HUF_readDTableX?() . -+*/ -+size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, void *workspace, size_t workspaceSize) -+{ -+ U32 weightTotal; -+ const BYTE *ip = (const BYTE *)src; -+ size_t iSize; -+ size_t oSize; -+ -+ if (!srcSize) -+ return ERROR(srcSize_wrong); -+ iSize = ip[0]; -+ /* memset(huffWeight, 0, hwSize); */ /* is not necessary, even though some analyzer complain ... */ -+ -+ if (iSize >= 128) { /* special header */ -+ oSize = iSize - 127; -+ iSize = ((oSize + 1) / 2); -+ if (iSize + 1 > srcSize) -+ return ERROR(srcSize_wrong); -+ if (oSize >= hwSize) -+ return ERROR(corruption_detected); -+ ip += 1; -+ { -+ U32 n; -+ for (n = 0; n < oSize; n += 2) { -+ huffWeight[n] = ip[n / 2] >> 4; -+ huffWeight[n + 1] = ip[n / 2] & 15; -+ } -+ } -+ } else { /* header compressed with FSE (normal case) */ -+ if (iSize + 1 > srcSize) -+ return ERROR(srcSize_wrong); -+ oSize = FSE_decompress_wksp(huffWeight, hwSize - 1, ip + 1, iSize, 6, workspace, workspaceSize); /* max (hwSize-1) values decoded, as last one is implied */ -+ if (FSE_isError(oSize)) -+ return oSize; -+ } -+ -+ /* collect weight stats */ -+ memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32)); -+ weightTotal = 0; -+ { -+ U32 n; -+ for (n = 0; n < oSize; n++) { -+ if (huffWeight[n] >= HUF_TABLELOG_MAX) -+ return ERROR(corruption_detected); -+ rankStats[huffWeight[n]]++; -+ weightTotal += (1 << huffWeight[n]) >> 1; -+ } -+ } -+ if (weightTotal == 0) -+ return ERROR(corruption_detected); -+ -+ /* get last non-null symbol weight (implied, total must be 2^n) */ -+ { -+ U32 const tableLog = BIT_highbit32(weightTotal) + 1; -+ if (tableLog > HUF_TABLELOG_MAX) -+ return ERROR(corruption_detected); -+ *tableLogPtr = tableLog; -+ /* determine last weight */ -+ { -+ U32 const total = 1 << tableLog; -+ U32 const rest = total - weightTotal; -+ U32 const verif = 1 << BIT_highbit32(rest); -+ U32 const lastWeight = BIT_highbit32(rest) + 1; -+ if (verif != rest) -+ return ERROR(corruption_detected); /* last value must be a clean power of 2 */ -+ huffWeight[oSize] = (BYTE)lastWeight; -+ rankStats[lastWeight]++; -+ } -+ } -+ -+ /* check tree construction validity */ -+ if ((rankStats[1] < 2) || (rankStats[1] & 1)) -+ return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ -+ -+ /* results */ -+ *nbSymbolsPtr = (U32)(oSize + 1); -+ return iSize + 1; -+} -diff --git a/lib/zstd/error_private.h 
b/lib/zstd/error_private.h -new file mode 100644 -index 0000000..2062ff0 ---- /dev/null -+++ b/lib/zstd/error_private.h -@@ -0,0 +1,51 @@ -+/** -+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. -+ * All rights reserved. -+ * -+ * This source code is licensed under the BSD-style license found in the -+ * LICENSE file in the root directory of https://github.com/facebook/zstd. -+ * -+ * This program is free software; you can redistribute it and/or modify it under -+ * the terms of the GNU General Public License version 2 as published by the -+ * Free Software Foundation. This program is dual-licensed; you may select -+ * either version 2 of the GNU General Public License ("GPL") or BSD license -+ * ("BSD"). -+ */ -+ -+/* Note : this module is expected to remain private, do not expose it */ -+ -+#ifndef ERROR_H_MODULE -+#define ERROR_H_MODULE -+ -+/* **************************************** -+* Dependencies -+******************************************/ -+#include <linux/types.h> /* size_t */ -+#include <linux/zstd.h> /* enum list */ -+ -+/* **************************************** -+* Compiler-specific -+******************************************/ -+#define ERR_STATIC static __attribute__((unused)) -+ -+/*-**************************************** -+* Customization (error_public.h) -+******************************************/ -+typedef ZSTD_ErrorCode ERR_enum; -+#define PREFIX(name) ZSTD_error_##name -+ -+/*-**************************************** -+* Error codes handling -+******************************************/ -+#define ERROR(name) ((size_t)-PREFIX(name)) -+ -+ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } -+ -+ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) -+{ -+ if (!ERR_isError(code)) -+ return (ERR_enum)0; -+ return (ERR_enum)(0 - code); -+} -+ -+#endif /* ERROR_H_MODULE */ -diff --git a/lib/zstd/fse.h b/lib/zstd/fse.h -new file mode 100644 -index 0000000..7460ab0 ---- /dev/null -+++ b/lib/zstd/fse.h -@@ -0,0 +1,575 @@ -+/* -+ * FSE : Finite State Entropy codec -+ * Public Prototypes declaration -+ * Copyright (C) 2013-2016, Yann Collet. -+ * -+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are -+ * met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above -+ * copyright notice, this list of conditions and the following disclaimer -+ * in the documentation and/or other materials provided with the -+ * distribution. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
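A small illustration of the error convention defined in error_private.h above: ERROR(name) folds an error enum into the very top of the size_t range, so one return value carries either a byte count or an error, and ERR_isError() is a single compare. The specific error name below is just an example:

	/* Round-tripping an error through the size_t return channel. */
	size_t const ret = ERROR(corruption_detected); /* == (size_t)-ZSTD_error_corruption_detected */

	if (ERR_isError(ret)) { /* true : ret > ERROR(maxCode), larger than any valid size */
		ERR_enum const code = ERR_getErrorCode(ret);
		/* code == ZSTD_error_corruption_detected ; handle it */
	}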
-+ * -+ * This program is free software; you can redistribute it and/or modify it under -+ * the terms of the GNU General Public License version 2 as published by the -+ * Free Software Foundation. This program is dual-licensed; you may select -+ * either version 2 of the GNU General Public License ("GPL") or BSD license -+ * ("BSD"). -+ * -+ * You can contact the author at : -+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy -+ */ -+#ifndef FSE_H -+#define FSE_H -+ -+/*-***************************************** -+* Dependencies -+******************************************/ -+#include <linux/types.h> /* size_t, ptrdiff_t */ -+ -+/*-***************************************** -+* FSE_PUBLIC_API : control library symbols visibility -+******************************************/ -+#define FSE_PUBLIC_API -+ -+/*------ Version ------*/ -+#define FSE_VERSION_MAJOR 0 -+#define FSE_VERSION_MINOR 9 -+#define FSE_VERSION_RELEASE 0 -+ -+#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE -+#define FSE_QUOTE(str) #str -+#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str) -+#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION) -+ -+#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR * 100 * 100 + FSE_VERSION_MINOR * 100 + FSE_VERSION_RELEASE) -+FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ -+ -+/*-***************************************** -+* Tool functions -+******************************************/ -+FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */ -+ -+/* Error Management */ -+FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */ -+ -+/*-***************************************** -+* FSE detailed API -+******************************************/ -+/*! -+FSE_compress() does the following: -+1. count symbol occurrence from source[] into table count[] -+2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog) -+3. save normalized counters to memory buffer using writeNCount() -+4. build encoding table 'CTable' from normalized counters -+5. encode the data stream using encoding table 'CTable' -+ -+FSE_decompress() does the following: -+1. read normalized counters with readNCount() -+2. build decoding table 'DTable' from normalized counters -+3. decode the data stream using decoding table 'DTable' -+ -+The following API allows targeting specific sub-functions for advanced tasks. -+For example, it's possible to compress several blocks using the same 'CTable', -+or to save and provide normalized distribution using external method. -+*/ -+ -+/* *** COMPRESSION *** */ -+/*! FSE_optimalTableLog(): -+ dynamically downsize 'tableLog' when conditions are met. -+ It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. -+ @return : recommended tableLog (necessarily <= 'maxTableLog') */ -+FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); -+ -+/*! FSE_normalizeCount(): -+ normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) -+ 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). -+ @return : tableLog, -+ or an errorCode, which can be tested using FSE_isError() */ -+FSE_PUBLIC_API size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t srcSize, unsigned maxSymbolValue); -+ -+/*!
FSE_NCountWriteBound(): -+ Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'. -+ Typically useful for allocation purpose. */ -+FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog); -+ -+/*! FSE_writeNCount(): -+ Compactly save 'normalizedCounter' into 'buffer'. -+ @return : size of the compressed table, -+ or an errorCode, which can be tested using FSE_isError(). */ -+FSE_PUBLIC_API size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); -+ -+/*! Constructor and Destructor of FSE_CTable. -+ Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ -+typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */ -+ -+/*! FSE_compress_usingCTable(): -+ Compress `src` using `ct` into `dst` which must be already allocated. -+ @return : size of compressed data (<= `dstCapacity`), -+ or 0 if compressed data could not fit into `dst`, -+ or an errorCode, which can be tested using FSE_isError() */ -+FSE_PUBLIC_API size_t FSE_compress_usingCTable(void *dst, size_t dstCapacity, const void *src, size_t srcSize, const FSE_CTable *ct); -+ -+/*! -+Tutorial : -+---------- -+The first step is to count all symbols. FSE_count() does this job very fast. -+Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells. -+'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0] -+maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value) -+FSE_count() will return the number of occurrence of the most frequent symbol. -+This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility. -+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). -+ -+The next step is to normalize the frequencies. -+FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'. -+It also guarantees a minimum of 1 to any Symbol with frequency >= 1. -+You can use 'tableLog'==0 to mean "use default tableLog value". -+If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(), -+which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default"). -+ -+The result of FSE_normalizeCount() will be saved into a table, -+called 'normalizedCounter', which is a table of signed short. -+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells. -+The return value is tableLog if everything proceeded as expected. -+It is 0 if there is a single symbol within distribution. -+If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()). -+ -+'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount(). -+'buffer' must be already allocated. -+For guaranteed success, buffer size must be at least FSE_headerBound(). -+The result of the function is the number of bytes written into 'buffer'. -+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small). -+ -+'normalizedCounter' can then be used to create the compression table 'CTable'. 
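Chained together, steps 1-5 of this tutorial look roughly like the sketch below, using the _wksp variants declared later in this header. It is only a sketch: the stack tables, the tableLog <= 12 assumption behind the static FSE_CTABLE_SIZE_U32 bound, and the workspace size are illustrative, and the single-symbol (RLE) and not-compressible outcomes are left unhandled:

	static size_t fse_compress_example(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
	{
		unsigned count[256];
		short norm[256];
		unsigned maxSym = 255; /* byte symbols */
		unsigned tableLog;
		FSE_CTable ct[FSE_CTABLE_SIZE_U32(12, 255)];
		U32 wksp[2048]; /* scratch for FSE_buildCTable_wksp() */
		BYTE *const ostart = (BYTE *)dst;
		BYTE *op = ostart;
		size_t r;

		FSE_count_simple(count, &maxSym, src, srcSize);                   /* 1. count */
		tableLog = FSE_optimalTableLog(0 /* default */, srcSize, maxSym);
		r = FSE_normalizeCount(norm, tableLog, count, srcSize, maxSym);   /* 2. normalize */
		if (FSE_isError(r))
			return r;
		tableLog = (unsigned)r;
		r = FSE_writeNCount(op, dstCapacity, norm, maxSym, tableLog);     /* 3. save header */
		if (FSE_isError(r))
			return r;
		op += r;
		r = FSE_buildCTable_wksp(ct, norm, maxSym, tableLog, wksp, sizeof(wksp)); /* 4. build */
		if (FSE_isError(r))
			return r;
		r = FSE_compress_usingCTable(op, dstCapacity - (size_t)(op - ostart), src, srcSize, ct); /* 5. encode */
		if (FSE_isError(r) || r == 0)
			return r; /* 0 : did not fit into dst */
		return (size_t)(op - ostart) + r;
	}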
-+The space required by 'CTable' must be already allocated, using FSE_createCTable(). -+You can then use FSE_buildCTable() to fill 'CTable'. -+If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()). -+ -+'CTable' can then be used to compress 'src', with FSE_compress_usingCTable(). -+Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize' -+The function returns the size of compressed data (without header), necessarily <= `dstCapacity`. -+If it returns '0', compressed data could not fit into 'dst'. -+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). -+*/ -+ -+/* *** DECOMPRESSION *** */ -+ -+/*! FSE_readNCount(): -+ Read compactly saved 'normalizedCounter' from 'rBuffer'. -+ @return : size read from 'rBuffer', -+ or an errorCode, which can be tested using FSE_isError(). -+ maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ -+FSE_PUBLIC_API size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSymbolValuePtr, unsigned *tableLogPtr, const void *rBuffer, size_t rBuffSize); -+ -+/*! Constructor and Destructor of FSE_DTable. -+ Note that its size depends on 'tableLog' */ -+typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ -+ -+/*! FSE_buildDTable(): -+ Builds 'dt', which must be already allocated, using FSE_createDTable(). -+ return : 0, or an errorCode, which can be tested using FSE_isError() */ -+FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize); -+ -+/*! FSE_decompress_usingDTable(): -+ Decompress compressed source `cSrc` of size `cSrcSize` using `dt` -+ into `dst` which must be already allocated. -+ @return : size of regenerated data (necessarily <= `dstCapacity`), -+ or an errorCode, which can be tested using FSE_isError() */ -+FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt); -+ -+/*! -+Tutorial : -+---------- -+(Note : these functions only decompress FSE-compressed blocks. -+ If block is uncompressed, use memcpy() instead -+ If block is a single repeated byte, use memset() instead ) -+ -+The first step is to obtain the normalized frequencies of symbols. -+This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount(). -+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short. -+In practice, that means it's necessary to know 'maxSymbolValue' beforehand, -+or size the table to handle worst case situations (typically 256). -+FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'. -+The result of FSE_readNCount() is the number of bytes read from 'rBuffer'. -+Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that. -+If there is an error, the function will return an error code, which can be tested using FSE_isError(). -+ -+The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'. -+This is performed by the function FSE_buildDTable(). -+The space required by 'FSE_DTable' must be already allocated using FSE_createDTable(). -+If there is an error, the function will return an error code, which can be tested using FSE_isError(). 
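The decompression side chains the same way; a sketch under the same caveats (tableLog <= 12 assumed, stack tables and workspace size illustrative), using the declarations from this header:

	static size_t fse_decompress_example(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize)
	{
		short norm[256];
		unsigned maxSym = 255;
		unsigned tableLog;
		FSE_DTable dt[FSE_DTABLE_SIZE_U32(12)];
		U32 wksp[512]; /* scratch for FSE_buildDTable_wksp() */
		const BYTE *ip = (const BYTE *)cSrc;
		size_t const hSize = FSE_readNCount(norm, &maxSym, &tableLog, ip, cSrcSize); /* 1. header */

		if (FSE_isError(hSize))
			return hSize;
		{
			size_t const r = FSE_buildDTable_wksp(dt, norm, maxSym, tableLog, wksp, sizeof(wksp)); /* 2. build */
			if (FSE_isError(r))
				return r;
		}
		return FSE_decompress_usingDTable(dst, dstCapacity, ip + hSize, cSrcSize - hSize, dt); /* 3. decode */
	}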
-+ -+`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable(). -+`cSrcSize` must be strictly correct, otherwise decompression will fail. -+FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`). -+If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small) -+*/ -+ -+/* *** Dependency *** */ -+#include "bitstream.h" -+ -+/* ***************************************** -+* Static allocation -+*******************************************/ -+/* FSE buffer bounds */ -+#define FSE_NCOUNTBOUND 512 -+#define FSE_BLOCKBOUND(size) (size + (size >> 7)) -+#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ -+ -+/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */ -+#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1 << (maxTableLog - 1)) + ((maxSymbolValue + 1) * 2)) -+#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1 << maxTableLog)) -+ -+/* ***************************************** -+* FSE advanced API -+*******************************************/ -+/* FSE_count_wksp() : -+ * Same as FSE_count(), but using an externally provided scratch buffer. -+ * `workSpace` size must be table of >= `1024` unsigned -+ */ -+size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace); -+ -+/* FSE_countFast_wksp() : -+ * Same as FSE_countFast(), but using an externally provided scratch buffer. -+ * `workSpace` must be a table of minimum `1024` unsigned -+ */ -+size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize, unsigned *workSpace); -+ -+/*! FSE_count_simple -+ * Same as FSE_countFast(), but does not use any additional memory (not even on stack). -+ * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`). -+*/ -+size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize); -+ -+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus); -+/**< same as FSE_optimalTableLog(), which used `minus==2` */ -+ -+size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits); -+/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */ -+ -+size_t FSE_buildCTable_rle(FSE_CTable *ct, unsigned char symbolValue); -+/**< build a fake FSE_CTable, designed to compress always the same symbolValue */ -+ -+/* FSE_buildCTable_wksp() : -+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). -+ * `wkspSize` must be >= `(1<<tableLog)` */ -+size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize); -+ -+size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize); -+/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */ -+ -+/* ***************************************** -+* FSE symbol compression API -+*******************************************/ -+typedef struct { -+ ptrdiff_t value; -+ const void *stateTable; -+ const void *symbolTT; -+ unsigned stateLog; -+} FSE_CState_t; -+ -+static void FSE_initCState(FSE_CState_t *statePtr, const FSE_CTable *ct); -+static void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *statePtr, unsigned symbol); -+static void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *statePtr); -+ -+/* ***************************************** -+* FSE symbol decompression API -+*******************************************/ -+typedef struct { -+ size_t state; -+ const void *table; /* precise table may vary, depending on U16 */ -+} FSE_DState_t; -+ -+static void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt); -+static unsigned char FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD); -+static unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr); -+ -+/*! -+Tutorial : -+---------- -+Decoding works symbol by symbol, in reverse order, refilling the bitStream as long as : -+ BIT_reloadDStream(&DStream) >= BIT_DStream_completed -+ -+When it's done, verify decompression is fully completed, by checking both DStream and the relevant states. -+Checking if DStream has reached its end is performed by : -+ BIT_endOfDStream(&DStream); -+Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
-+ FSE_endOfDState(&DState); -+*/ -+ -+/* ***************************************** -+* FSE unsafe API -+*******************************************/ -+static unsigned char FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD); -+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */ -+ -+/* ***************************************** -+* Implementation of inlined functions -+*******************************************/ -+typedef struct { -+ int deltaFindState; -+ U32 deltaNbBits; -+} FSE_symbolCompressionTransform; /* total 8 bytes */ -+ -+ZSTD_STATIC void FSE_initCState(FSE_CState_t *statePtr, const FSE_CTable *ct) -+{ -+ const void *ptr = ct; -+ const U16 *u16ptr = (const U16 *)ptr; -+ const U32 tableLog = ZSTD_read16(ptr); -+ statePtr->value = (ptrdiff_t)1 << tableLog; -+ statePtr->stateTable = u16ptr + 2; -+ statePtr->symbolTT = ((const U32 *)ct + 1 + (tableLog ? (1 << (tableLog - 1)) : 1)); -+ statePtr->stateLog = tableLog; -+} -+ -+/*! FSE_initCState2() : -+* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read) -+* uses the smallest state value possible, saving the cost of this symbol */ -+ZSTD_STATIC void FSE_initCState2(FSE_CState_t *statePtr, const FSE_CTable *ct, U32 symbol) -+{ -+ FSE_initCState(statePtr, ct); -+ { -+ const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol]; -+ const U16 *stateTable = (const U16 *)(statePtr->stateTable); -+ U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1 << 15)) >> 16); -+ statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits; -+ statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; -+ } -+} -+ -+ZSTD_STATIC void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *statePtr, U32 symbol) -+{ -+ const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol]; -+ const U16 *const stateTable = (const U16 *)(statePtr->stateTable); -+ U32 nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16); -+ BIT_addBits(bitC, statePtr->value, nbBitsOut); -+ statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; -+} -+ -+ZSTD_STATIC void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *statePtr) -+{ -+ BIT_addBits(bitC, statePtr->value, statePtr->stateLog); -+ BIT_flushBits(bitC); -+} -+ -+/* ====== Decompression ====== */ -+ -+typedef struct { -+ U16 tableLog; -+ U16 fastMode; -+} FSE_DTableHeader; /* sizeof U32 */ -+ -+typedef struct { -+ unsigned short newState; -+ unsigned char symbol; -+ unsigned char nbBits; -+} FSE_decode_t; /* size == U32 */ -+ -+ZSTD_STATIC void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt) -+{ -+ const void *ptr = dt; -+ const FSE_DTableHeader *const DTableH = (const FSE_DTableHeader *)ptr; -+ DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); -+ BIT_reloadDStream(bitD); -+ DStatePtr->table = dt + 1; -+} -+ -+ZSTD_STATIC BYTE FSE_peekSymbol(const FSE_DState_t *DStatePtr) -+{ -+ FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state]; -+ return DInfo.symbol; -+} -+ -+ZSTD_STATIC void FSE_updateState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD) -+{ -+ FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state]; -+ U32 const nbBits = DInfo.nbBits; -+ size_t const lowBits = BIT_readBits(bitD, nbBits); -+ DStatePtr->state = DInfo.newState + 
lowBits; -+} -+ -+ZSTD_STATIC BYTE FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD) -+{ -+ FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state]; -+ U32 const nbBits = DInfo.nbBits; -+ BYTE const symbol = DInfo.symbol; -+ size_t const lowBits = BIT_readBits(bitD, nbBits); -+ -+ DStatePtr->state = DInfo.newState + lowBits; -+ return symbol; -+} -+ -+/*! FSE_decodeSymbolFast() : -+ unsafe, only works if no symbol has a probability > 50% */ -+ZSTD_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD) -+{ -+ FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state]; -+ U32 const nbBits = DInfo.nbBits; -+ BYTE const symbol = DInfo.symbol; -+ size_t const lowBits = BIT_readBitsFast(bitD, nbBits); -+ -+ DStatePtr->state = DInfo.newState + lowBits; -+ return symbol; -+} -+ -+ZSTD_STATIC unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr) { return DStatePtr->state == 0; } -+ -+/* ************************************************************** -+* Tuning parameters -+****************************************************************/ -+/*!MEMORY_USAGE : -+* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) -+* Increasing memory usage improves compression ratio -+* Reduced memory usage can improve speed, due to cache effect -+* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ -+#ifndef FSE_MAX_MEMORY_USAGE -+#define FSE_MAX_MEMORY_USAGE 14 -+#endif -+#ifndef FSE_DEFAULT_MEMORY_USAGE -+#define FSE_DEFAULT_MEMORY_USAGE 13 -+#endif -+ -+/*!FSE_MAX_SYMBOL_VALUE : -+* Maximum symbol value authorized. -+* Required for proper stack allocation */ -+#ifndef FSE_MAX_SYMBOL_VALUE -+#define FSE_MAX_SYMBOL_VALUE 255 -+#endif -+ -+/* ************************************************************** -+* template functions type & suffix -+****************************************************************/ -+#define FSE_FUNCTION_TYPE BYTE -+#define FSE_FUNCTION_EXTENSION -+#define FSE_DECODE_TYPE FSE_decode_t -+ -+/* *************************************************************** -+* Constants -+*****************************************************************/ -+#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE - 2) -+#define FSE_MAX_TABLESIZE (1U << FSE_MAX_TABLELOG) -+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE - 1) -+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE - 2) -+#define FSE_MIN_TABLELOG 5 -+ -+#define FSE_TABLELOG_ABSOLUTE_MAX 15 -+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX -+#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" -+#endif -+ -+#define FSE_TABLESTEP(tableSize) ((tableSize >> 1) + (tableSize >> 3) + 3) -+ -+#endif /* FSE_H */ -diff --git a/lib/zstd/fse_compress.c b/lib/zstd/fse_compress.c -new file mode 100644 -index 0000000..ef3d174 ---- /dev/null -+++ b/lib/zstd/fse_compress.c -@@ -0,0 +1,795 @@ -+/* -+ * FSE : Finite State Entropy encoder -+ * Copyright (C) 2013-2015, Yann Collet. -+ * -+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are -+ * met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above -+ * copyright notice, this list of conditions and the following disclaimer -+ * in the documentation and/or other materials provided with the -+ * distribution. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ * -+ * This program is free software; you can redistribute it and/or modify it under -+ * the terms of the GNU General Public License version 2 as published by the -+ * Free Software Foundation. This program is dual-licensed; you may select -+ * either version 2 of the GNU General Public License ("GPL") or BSD license -+ * ("BSD"). -+ * -+ * You can contact the author at : -+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy -+ */ -+ -+/* ************************************************************** -+* Compiler specifics -+****************************************************************/ -+#define FORCE_INLINE static __always_inline -+ -+/* ************************************************************** -+* Includes -+****************************************************************/ -+#include "bitstream.h" -+#include "fse.h" -+#include <linux/compiler.h> -+#include <linux/kernel.h> -+#include <linux/math64.h> -+#include <linux/string.h> /* memcpy, memset */ -+ -+/* ************************************************************** -+* Error Management -+****************************************************************/ -+#define FSE_STATIC_ASSERT(c) \ -+ { \ -+ enum { FSE_static_assert = 1 / (int)(!!(c)) }; \ -+ } /* use only *after* variable declarations */ -+ -+/* ************************************************************** -+* Templates -+****************************************************************/ -+/* -+ designed to be included -+ for type-specific functions (template emulation in C) -+ Objective is to write these functions only once, for improved maintenance -+*/ -+ -+/* safety checks */ -+#ifndef FSE_FUNCTION_EXTENSION -+#error "FSE_FUNCTION_EXTENSION must be defined" -+#endif -+#ifndef FSE_FUNCTION_TYPE -+#error "FSE_FUNCTION_TYPE must be defined" -+#endif -+ -+/* Function names */ -+#define FSE_CAT(X, Y) X##Y -+#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y) -+#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y) -+ -+/* Function templates */ -+ -+/* FSE_buildCTable_wksp() : -+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
-+ * wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)` -+ * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements -+ */ -+size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize) -+{ -+ U32 const tableSize = 1 << tableLog; -+ U32 const tableMask = tableSize - 1; -+ void *const ptr = ct; -+ U16 *const tableU16 = ((U16 *)ptr) + 2; -+ void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableLog ? tableSize >> 1 : 1); -+ FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT); -+ U32 const step = FSE_TABLESTEP(tableSize); -+ U32 highThreshold = tableSize - 1; -+ -+ U32 *cumul; -+ FSE_FUNCTION_TYPE *tableSymbol; -+ size_t spaceUsed32 = 0; -+ -+ cumul = (U32 *)workspace + spaceUsed32; -+ spaceUsed32 += FSE_MAX_SYMBOL_VALUE + 2; -+ tableSymbol = (FSE_FUNCTION_TYPE *)((U32 *)workspace + spaceUsed32); -+ spaceUsed32 += ALIGN(sizeof(FSE_FUNCTION_TYPE) * ((size_t)1 << tableLog), sizeof(U32)) >> 2; -+ -+ if ((spaceUsed32 << 2) > workspaceSize) -+ return ERROR(tableLog_tooLarge); -+ workspace = (U32 *)workspace + spaceUsed32; -+ workspaceSize -= (spaceUsed32 << 2); -+ -+ /* CTable header */ -+ tableU16[-2] = (U16)tableLog; -+ tableU16[-1] = (U16)maxSymbolValue; -+ -+ /* For explanations on how to distribute symbol values over the table : -+ * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ -+ -+ /* symbol start positions */ -+ { -+ U32 u; -+ cumul[0] = 0; -+ for (u = 1; u <= maxSymbolValue + 1; u++) { -+ if (normalizedCounter[u - 1] == -1) { /* Low proba symbol */ -+ cumul[u] = cumul[u - 1] + 1; -+ tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u - 1); -+ } else { -+ cumul[u] = cumul[u - 1] + normalizedCounter[u - 1]; -+ } -+ } -+ cumul[maxSymbolValue + 1] = tableSize + 1; -+ } -+ -+ /* Spread symbols */ -+ { -+ U32 position = 0; -+ U32 symbol; -+ for (symbol = 0; symbol <= maxSymbolValue; symbol++) { -+ int nbOccurrences; -+ for (nbOccurrences = 0; nbOccurrences < normalizedCounter[symbol]; nbOccurrences++) { -+ tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol; -+ position = (position + step) & tableMask; -+ while (position > highThreshold) -+ position = (position + step) & tableMask; /* Low proba area */ -+ } -+ } -+ -+ if (position != 0) -+ return ERROR(GENERIC); /* Must have gone through all positions */ -+ } -+ -+ /* Build table */ -+ { -+ U32 u; -+ for (u = 0; u < tableSize; u++) { -+ FSE_FUNCTION_TYPE s = tableSymbol[u]; /* note : static analyzer may not understand tableSymbol is properly initialized */ -+ tableU16[cumul[s]++] = (U16)(tableSize + u); /* TableU16 : sorted by symbol order; gives next state value */ -+ } -+ } -+ -+ /* Build Symbol Transformation Table */ -+ { -+ unsigned total = 0; -+ unsigned s; -+ for (s = 0; s <= maxSymbolValue; s++) { -+ switch (normalizedCounter[s]) { -+ case 0: break; -+ -+ case -1: -+ case 1: -+ symbolTT[s].deltaNbBits = (tableLog << 16) - (1 << tableLog); -+ symbolTT[s].deltaFindState = total - 1; -+ total++; -+ break; -+ default: { -+ U32 const maxBitsOut = tableLog - BIT_highbit32(normalizedCounter[s] - 1); -+ U32 const minStatePlus = normalizedCounter[s] << maxBitsOut; -+ symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus; -+ symbolTT[s].deltaFindState = total - normalizedCounter[s]; -+ total += normalizedCounter[s]; -+ } -+ } -+ } -+ } -+ -+ return 0; -+} -+ -+/*-************************************************************** -+* FSE NCount encoding-decoding -+****************************************************************/ -+size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog) -+{ -+ size_t const maxHeaderSize = (((maxSymbolValue + 1) * tableLog) >> 3) + 3; -+ return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ?
use default */ -+} -+ -+static size_t FSE_writeNCount_generic(void *header, size_t headerBufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, -+ unsigned writeIsSafe) -+{ -+ BYTE *const ostart = (BYTE *)header; -+ BYTE *out = ostart; -+ BYTE *const oend = ostart + headerBufferSize; -+ int nbBits; -+ const int tableSize = 1 << tableLog; -+ int remaining; -+ int threshold; -+ U32 bitStream; -+ int bitCount; -+ unsigned charnum = 0; -+ int previous0 = 0; -+ -+ bitStream = 0; -+ bitCount = 0; -+ /* Table Size */ -+ bitStream += (tableLog - FSE_MIN_TABLELOG) << bitCount; -+ bitCount += 4; -+ -+ /* Init */ -+ remaining = tableSize + 1; /* +1 for extra accuracy */ -+ threshold = tableSize; -+ nbBits = tableLog + 1; -+ -+ while (remaining > 1) { /* stops at 1 */ -+ if (previous0) { -+ unsigned start = charnum; -+ while (!normalizedCounter[charnum]) -+ charnum++; -+ while (charnum >= start + 24) { -+ start += 24; -+ bitStream += 0xFFFFU << bitCount; -+ if ((!writeIsSafe) && (out > oend - 2)) -+ return ERROR(dstSize_tooSmall); /* Buffer overflow */ -+ out[0] = (BYTE)bitStream; -+ out[1] = (BYTE)(bitStream >> 8); -+ out += 2; -+ bitStream >>= 16; -+ } -+ while (charnum >= start + 3) { -+ start += 3; -+ bitStream += 3 << bitCount; -+ bitCount += 2; -+ } -+ bitStream += (charnum - start) << bitCount; -+ bitCount += 2; -+ if (bitCount > 16) { -+ if ((!writeIsSafe) && (out > oend - 2)) -+ return ERROR(dstSize_tooSmall); /* Buffer overflow */ -+ out[0] = (BYTE)bitStream; -+ out[1] = (BYTE)(bitStream >> 8); -+ out += 2; -+ bitStream >>= 16; -+ bitCount -= 16; -+ } -+ } -+ { -+ int count = normalizedCounter[charnum++]; -+ int const max = (2 * threshold - 1) - remaining; -+ remaining -= count < 0 ? -count : count; -+ count++; /* +1 for extra accuracy */ -+ if (count >= threshold) -+ count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */ -+ bitStream += count << bitCount; -+ bitCount += nbBits; -+ bitCount -= (count < max); -+ previous0 = (count == 1); -+ if (remaining < 1) -+ return ERROR(GENERIC); -+ while (remaining < threshold) -+ nbBits--, threshold >>= 1; -+ } -+ if (bitCount > 16) { -+ if ((!writeIsSafe) && (out > oend - 2)) -+ return ERROR(dstSize_tooSmall); /* Buffer overflow */ -+ out[0] = (BYTE)bitStream; -+ out[1] = (BYTE)(bitStream >> 8); -+ out += 2; -+ bitStream >>= 16; -+ bitCount -= 16; -+ } -+ } -+ -+ /* flush remaining bitStream */ -+ if ((!writeIsSafe) && (out > oend - 2)) -+ return ERROR(dstSize_tooSmall); /* Buffer overflow */ -+ out[0] = (BYTE)bitStream; -+ out[1] = (BYTE)(bitStream >> 8); -+ out += (bitCount + 7) / 8; -+ -+ if (charnum > maxSymbolValue + 1) -+ return ERROR(GENERIC); -+ -+ return (out - ostart); -+} -+ -+size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) -+{ -+ if (tableLog > FSE_MAX_TABLELOG) -+ return ERROR(tableLog_tooLarge); /* Unsupported */ -+ if (tableLog < FSE_MIN_TABLELOG) -+ return ERROR(GENERIC); /* Unsupported */ -+ -+ if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog)) -+ return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0); -+ -+ return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1); -+} -+ -+/*-************************************************************** -+* Counting histogram -+****************************************************************/ -+/*! 
FSE_count_simple -+ This function counts byte values within `src`, and store the histogram into table `count`. -+ It doesn't use any additional memory. -+ But this function is unsafe : it doesn't check that all values within `src` can fit into `count`. -+ For this reason, prefer using a table `count` with 256 elements. -+ @return : count of most numerous element -+*/ -+size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize) -+{ -+ const BYTE *ip = (const BYTE *)src; -+ const BYTE *const end = ip + srcSize; -+ unsigned maxSymbolValue = *maxSymbolValuePtr; -+ unsigned max = 0; -+ -+ memset(count, 0, (maxSymbolValue + 1) * sizeof(*count)); -+ if (srcSize == 0) { -+ *maxSymbolValuePtr = 0; -+ return 0; -+ } -+ -+ while (ip < end) -+ count[*ip++]++; -+ -+ while (!count[maxSymbolValue]) -+ maxSymbolValue--; -+ *maxSymbolValuePtr = maxSymbolValue; -+ -+ { -+ U32 s; -+ for (s = 0; s <= maxSymbolValue; s++) -+ if (count[s] > max) -+ max = count[s]; -+ } -+ -+ return (size_t)max; -+} -+ -+/* FSE_count_parallel_wksp() : -+ * Same as FSE_count_parallel(), but using an externally provided scratch buffer. -+ * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`` */ -+static size_t FSE_count_parallel_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned checkMax, -+ unsigned *const workSpace) -+{ -+ const BYTE *ip = (const BYTE *)source; -+ const BYTE *const iend = ip + sourceSize; -+ unsigned maxSymbolValue = *maxSymbolValuePtr; -+ unsigned max = 0; -+ U32 *const Counting1 = workSpace; -+ U32 *const Counting2 = Counting1 + 256; -+ U32 *const Counting3 = Counting2 + 256; -+ U32 *const Counting4 = Counting3 + 256; -+ -+ memset(Counting1, 0, 4 * 256 * sizeof(unsigned)); -+ -+ /* safety checks */ -+ if (!sourceSize) { -+ memset(count, 0, maxSymbolValue + 1); -+ *maxSymbolValuePtr = 0; -+ return 0; -+ } -+ if (!maxSymbolValue) -+ maxSymbolValue = 255; /* 0 == default */ -+ -+ /* by stripes of 16 bytes */ -+ { -+ U32 cached = ZSTD_read32(ip); -+ ip += 4; -+ while (ip < iend - 15) { -+ U32 c = cached; -+ cached = ZSTD_read32(ip); -+ ip += 4; -+ Counting1[(BYTE)c]++; -+ Counting2[(BYTE)(c >> 8)]++; -+ Counting3[(BYTE)(c >> 16)]++; -+ Counting4[c >> 24]++; -+ c = cached; -+ cached = ZSTD_read32(ip); -+ ip += 4; -+ Counting1[(BYTE)c]++; -+ Counting2[(BYTE)(c >> 8)]++; -+ Counting3[(BYTE)(c >> 16)]++; -+ Counting4[c >> 24]++; -+ c = cached; -+ cached = ZSTD_read32(ip); -+ ip += 4; -+ Counting1[(BYTE)c]++; -+ Counting2[(BYTE)(c >> 8)]++; -+ Counting3[(BYTE)(c >> 16)]++; -+ Counting4[c >> 24]++; -+ c = cached; -+ cached = ZSTD_read32(ip); -+ ip += 4; -+ Counting1[(BYTE)c]++; -+ Counting2[(BYTE)(c >> 8)]++; -+ Counting3[(BYTE)(c >> 16)]++; -+ Counting4[c >> 24]++; -+ } -+ ip -= 4; -+ } -+ -+ /* finish last symbols */ -+ while (ip < iend) -+ Counting1[*ip++]++; -+ -+ if (checkMax) { /* verify stats will fit into destination table */ -+ U32 s; -+ for (s = 255; s > maxSymbolValue; s--) { -+ Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s]; -+ if (Counting1[s]) -+ return ERROR(maxSymbolValue_tooSmall); -+ } -+ } -+ -+ { -+ U32 s; -+ for (s = 0; s <= maxSymbolValue; s++) { -+ count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s]; -+ if (count[s] > max) -+ max = count[s]; -+ } -+ } -+ -+ while (!count[maxSymbolValue]) -+ maxSymbolValue--; -+ *maxSymbolValuePtr = maxSymbolValue; -+ return (size_t)max; -+} -+ -+/* FSE_countFast_wksp() : -+ * Same as FSE_countFast(), but using an externally provided 
scratch buffer. -+ * `workSpace` size must be table of >= `1024` unsigned */ -+size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace) -+{ -+ if (sourceSize < 1500) -+ return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize); -+ return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace); -+} -+ -+/* FSE_count_wksp() : -+ * Same as FSE_count(), but using an externally provided scratch buffer. -+ * `workSpace` size must be table of >= `1024` unsigned */ -+size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace) -+{ -+ if (*maxSymbolValuePtr < 255) -+ return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace); -+ *maxSymbolValuePtr = 255; -+ return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace); -+} -+ -+/*-************************************************************** -+* FSE Compression Code -+****************************************************************/ -+/*! FSE_sizeof_CTable() : -+ FSE_CTable is a variable size structure which contains : -+ `U16 tableLog;` -+ `U16 maxSymbolValue;` -+ `U16 nextStateNumber[1 << tableLog];` // This size is variable -+ `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];` // This size is variable -+Allocation is manual (C standard does not support variable-size structures). -+*/ -+size_t FSE_sizeof_CTable(unsigned maxSymbolValue, unsigned tableLog) -+{ -+ if (tableLog > FSE_MAX_TABLELOG) -+ return ERROR(tableLog_tooLarge); -+ return FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue) * sizeof(U32); -+} -+ -+/* provides the minimum logSize to safely represent a distribution */ -+static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) -+{ -+ U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1; -+ U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2; -+ U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols; -+ return minBits; -+} -+ -+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus) -+{ -+ U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus; -+ U32 tableLog = maxTableLog; -+ U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue); -+ if (tableLog == 0) -+ tableLog = FSE_DEFAULT_TABLELOG; -+ if (maxBitsSrc < tableLog) -+ tableLog = maxBitsSrc; /* Accuracy can be reduced */ -+ if (minBits > tableLog) -+ tableLog = minBits; /* Need a minimum to safely represent all symbol values */ -+ if (tableLog < FSE_MIN_TABLELOG) -+ tableLog = FSE_MIN_TABLELOG; -+ if (tableLog > FSE_MAX_TABLELOG) -+ tableLog = FSE_MAX_TABLELOG; -+ return tableLog; -+} -+ -+unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) -+{ -+ return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2); -+} -+ -+/* Secondary normalization method. -+ To be used when primary method fails. 
*/ -+ -+static size_t FSE_normalizeM2(short *norm, U32 tableLog, const unsigned *count, size_t total, U32 maxSymbolValue) -+{ -+ short const NOT_YET_ASSIGNED = -2; -+ U32 s; -+ U32 distributed = 0; -+ U32 ToDistribute; -+ -+ /* Init */ -+ U32 const lowThreshold = (U32)(total >> tableLog); -+ U32 lowOne = (U32)((total * 3) >> (tableLog + 1)); -+ -+ for (s = 0; s <= maxSymbolValue; s++) { -+ if (count[s] == 0) { -+ norm[s] = 0; -+ continue; -+ } -+ if (count[s] <= lowThreshold) { -+ norm[s] = -1; -+ distributed++; -+ total -= count[s]; -+ continue; -+ } -+ if (count[s] <= lowOne) { -+ norm[s] = 1; -+ distributed++; -+ total -= count[s]; -+ continue; -+ } -+ -+ norm[s] = NOT_YET_ASSIGNED; -+ } -+ ToDistribute = (1 << tableLog) - distributed; -+ -+ if ((total / ToDistribute) > lowOne) { -+ /* risk of rounding to zero */ -+ lowOne = (U32)((total * 3) / (ToDistribute * 2)); -+ for (s = 0; s <= maxSymbolValue; s++) { -+ if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) { -+ norm[s] = 1; -+ distributed++; -+ total -= count[s]; -+ continue; -+ } -+ } -+ ToDistribute = (1 << tableLog) - distributed; -+ } -+ -+ if (distributed == maxSymbolValue + 1) { -+ /* all values are pretty poor; -+ probably incompressible data (should have already been detected); -+ find max, then give all remaining points to max */ -+ U32 maxV = 0, maxC = 0; -+ for (s = 0; s <= maxSymbolValue; s++) -+ if (count[s] > maxC) -+ maxV = s, maxC = count[s]; -+ norm[maxV] += (short)ToDistribute; -+ return 0; -+ } -+ -+ if (total == 0) { -+ /* all of the symbols were low enough for the lowOne or lowThreshold */ -+ for (s = 0; ToDistribute > 0; s = (s + 1) % (maxSymbolValue + 1)) -+ if (norm[s] > 0) -+ ToDistribute--, norm[s]++; -+ return 0; -+ } -+ -+ { -+ U64 const vStepLog = 62 - tableLog; -+ U64 const mid = (1ULL << (vStepLog - 1)) - 1; -+ U64 const rStep = div_u64((((U64)1 << vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */ -+ U64 tmpTotal = mid; -+ for (s = 0; s <= maxSymbolValue; s++) { -+ if (norm[s] == NOT_YET_ASSIGNED) { -+ U64 const end = tmpTotal + (count[s] * rStep); -+ U32 const sStart = (U32)(tmpTotal >> vStepLog); -+ U32 const sEnd = (U32)(end >> vStepLog); -+ U32 const weight = sEnd - sStart; -+ if (weight < 1) -+ return ERROR(GENERIC); -+ norm[s] = (short)weight; -+ tmpTotal = end; -+ } -+ } -+ } -+ -+ return 0; -+} -+ -+size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t total, unsigned maxSymbolValue) -+{ -+ /* Sanity checks */ -+ if (tableLog == 0) -+ tableLog = FSE_DEFAULT_TABLELOG; -+ if (tableLog < FSE_MIN_TABLELOG) -+ return ERROR(GENERIC); /* Unsupported size */ -+ if (tableLog > FSE_MAX_TABLELOG) -+ return ERROR(tableLog_tooLarge); /* Unsupported size */ -+ if (tableLog < FSE_minTableLog(total, maxSymbolValue)) -+ return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */ -+ -+ { -+ U32 const rtbTable[] = {0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}; -+ U64 const scale = 62 - tableLog; -+ U64 const step = div_u64((U64)1 << 62, (U32)total); /* <== here, one division ! 
*/ -+ U64 const vStep = 1ULL << (scale - 20); -+ int stillToDistribute = 1 << tableLog; -+ unsigned s; -+ unsigned largest = 0; -+ short largestP = 0; -+ U32 lowThreshold = (U32)(total >> tableLog); -+ -+ for (s = 0; s <= maxSymbolValue; s++) { -+ if (count[s] == total) -+ return 0; /* rle special case */ -+ if (count[s] == 0) { -+ normalizedCounter[s] = 0; -+ continue; -+ } -+ if (count[s] <= lowThreshold) { -+ normalizedCounter[s] = -1; -+ stillToDistribute--; -+ } else { -+ short proba = (short)((count[s] * step) >> scale); -+ if (proba < 8) { -+ U64 restToBeat = vStep * rtbTable[proba]; -+ proba += (count[s] * step) - ((U64)proba << scale) > restToBeat; -+ } -+ if (proba > largestP) -+ largestP = proba, largest = s; -+ normalizedCounter[s] = proba; -+ stillToDistribute -= proba; -+ } -+ } -+ if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) { -+ /* corner case, need another normalization method */ -+ size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue); -+ if (FSE_isError(errorCode)) -+ return errorCode; -+ } else -+ normalizedCounter[largest] += (short)stillToDistribute; -+ } -+ -+ return tableLog; -+} -+ -+/* fake FSE_CTable, for raw (uncompressed) input */ -+size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits) -+{ -+ const unsigned tableSize = 1 << nbBits; -+ const unsigned tableMask = tableSize - 1; -+ const unsigned maxSymbolValue = tableMask; -+ void *const ptr = ct; -+ U16 *const tableU16 = ((U16 *)ptr) + 2; -+ void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableSize >> 1); /* assumption : tableLog >= 1 */ -+ FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT); -+ unsigned s; -+ -+ /* Sanity checks */ -+ if (nbBits < 1) -+ return ERROR(GENERIC); /* min size */ -+ -+ /* header */ -+ tableU16[-2] = (U16)nbBits; -+ tableU16[-1] = (U16)maxSymbolValue; -+ -+ /* Build table */ -+ for (s = 0; s < tableSize; s++) -+ tableU16[s] = (U16)(tableSize + s); -+ -+ /* Build Symbol Transformation Table */ -+ { -+ const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits); -+ for (s = 0; s <= maxSymbolValue; s++) { -+ symbolTT[s].deltaNbBits = deltaNbBits; -+ symbolTT[s].deltaFindState = s - 1; -+ } -+ } -+ -+ return 0; -+} -+ -+/* fake FSE_CTable, for rle input (always same symbol) */ -+size_t FSE_buildCTable_rle(FSE_CTable *ct, BYTE symbolValue) -+{ -+ void *ptr = ct; -+ U16 *tableU16 = ((U16 *)ptr) + 2; -+ void *FSCTptr = (U32 *)ptr + 2; -+ FSE_symbolCompressionTransform *symbolTT = (FSE_symbolCompressionTransform *)FSCTptr; -+ -+ /* header */ -+ tableU16[-2] = (U16)0; -+ tableU16[-1] = (U16)symbolValue; -+ -+ /* Build table */ -+ tableU16[0] = 0; -+ tableU16[1] = 0; /* just in case */ -+ -+ /* Build Symbol Transformation Table */ -+ symbolTT[symbolValue].deltaNbBits = 0; -+ symbolTT[symbolValue].deltaFindState = 0; -+ -+ return 0; -+} -+ -+static size_t FSE_compress_usingCTable_generic(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct, const unsigned fast) -+{ -+ const BYTE *const istart = (const BYTE *)src; -+ const BYTE *const iend = istart + srcSize; -+ const BYTE *ip = iend; -+ -+ BIT_CStream_t bitC; -+ FSE_CState_t CState1, CState2; -+ -+ /* init */ -+ if (srcSize <= 2) -+ return 0; -+ { -+ size_t const initError = BIT_initCStream(&bitC, dst, dstSize); -+ if (FSE_isError(initError)) -+ return 0; /* not enough space available to write a bitstream */ -+ } -+ -+#define FSE_FLUSHBITS(s) (fast ? 
BIT_flushBitsFast(s) : BIT_flushBits(s)) -+ -+ if (srcSize & 1) { -+ FSE_initCState2(&CState1, ct, *--ip); -+ FSE_initCState2(&CState2, ct, *--ip); -+ FSE_encodeSymbol(&bitC, &CState1, *--ip); -+ FSE_FLUSHBITS(&bitC); -+ } else { -+ FSE_initCState2(&CState2, ct, *--ip); -+ FSE_initCState2(&CState1, ct, *--ip); -+ } -+ -+ /* join to mod 4 */ -+ srcSize -= 2; -+ if ((sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) && (srcSize & 2)) { /* test bit 2 */ -+ FSE_encodeSymbol(&bitC, &CState2, *--ip); -+ FSE_encodeSymbol(&bitC, &CState1, *--ip); -+ FSE_FLUSHBITS(&bitC); -+ } -+ -+ /* 2 or 4 encoding per loop */ -+ while (ip > istart) { -+ -+ FSE_encodeSymbol(&bitC, &CState2, *--ip); -+ -+ if (sizeof(bitC.bitContainer) * 8 < FSE_MAX_TABLELOG * 2 + 7) /* this test must be static */ -+ FSE_FLUSHBITS(&bitC); -+ -+ FSE_encodeSymbol(&bitC, &CState1, *--ip); -+ -+ if (sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) { /* this test must be static */ -+ FSE_encodeSymbol(&bitC, &CState2, *--ip); -+ FSE_encodeSymbol(&bitC, &CState1, *--ip); -+ } -+ -+ FSE_FLUSHBITS(&bitC); -+ } -+ -+ FSE_flushCState(&bitC, &CState2); -+ FSE_flushCState(&bitC, &CState1); -+ return BIT_closeCStream(&bitC); -+} -+ -+size_t FSE_compress_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct) -+{ -+ unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize)); -+ -+ if (fast) -+ return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1); -+ else -+ return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0); -+} -+ -+size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); } -diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c -new file mode 100644 -index 0000000..a84300e ---- /dev/null -+++ b/lib/zstd/fse_decompress.c -@@ -0,0 +1,332 @@ -+/* -+ * FSE : Finite State Entropy decoder -+ * Copyright (C) 2013-2015, Yann Collet. -+ * -+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are -+ * met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above -+ * copyright notice, this list of conditions and the following disclaimer -+ * in the documentation and/or other materials provided with the -+ * distribution. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ *
-+ * This program is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU General Public License version 2 as published by the
-+ * Free Software Foundation. This program is dual-licensed; you may select
-+ * either version 2 of the GNU General Public License ("GPL") or BSD license
-+ * ("BSD").
-+ *
-+ * You can contact the author at :
-+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-+ */
-+
-+/* **************************************************************
-+* Compiler specifics
-+****************************************************************/
-+#define FORCE_INLINE static __always_inline
-+
-+/* **************************************************************
-+* Includes
-+****************************************************************/
-+#include "bitstream.h"
-+#include "fse.h"
-+#include <linux/compiler.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h> /* memcpy, memset */
-+
-+/* **************************************************************
-+* Error Management
-+****************************************************************/
-+#define FSE_isError ERR_isError
-+#define FSE_STATIC_ASSERT(c) \
-+	{ \
-+		enum { FSE_static_assert = 1 / (int)(!!(c)) }; \
-+	} /* use only *after* variable declarations */
-+
-+/* check and forward error code */
-+#define CHECK_F(f) \
-+	{ \
-+		size_t const e = f; \
-+		if (FSE_isError(e)) \
-+			return e; \
-+	}
-+
-+/* **************************************************************
-+* Templates
-+****************************************************************/
-+/*
-+  designed to be included
-+  for type-specific functions (template emulation in C)
-+  Objective is to write these functions only once, for improved maintenance
-+*/
-+
-+/* safety checks */
-+#ifndef FSE_FUNCTION_EXTENSION
-+#error "FSE_FUNCTION_EXTENSION must be defined"
-+#endif
-+#ifndef FSE_FUNCTION_TYPE
-+#error "FSE_FUNCTION_TYPE must be defined"
-+#endif
-+
-+/* Function names */
-+#define FSE_CAT(X, Y) X##Y
-+#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y)
-+#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y)
-+
-+/* Function templates */
-+
-+size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize)
-+{
-+	void *const tdPtr = dt + 1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
-+	FSE_DECODE_TYPE *const tableDecode = (FSE_DECODE_TYPE *)(tdPtr);
-+	U16 *symbolNext = (U16 *)workspace;
-+
-+	U32 const maxSV1 = maxSymbolValue + 1;
-+	U32 const tableSize = 1 << tableLog;
-+	U32 highThreshold = tableSize - 1;
-+
-+	/* Sanity Checks */
-+	if (workspaceSize < sizeof(U16) * (FSE_MAX_SYMBOL_VALUE + 1))
-+		return ERROR(tableLog_tooLarge);
-+	if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE)
-+		return ERROR(maxSymbolValue_tooLarge);
-+	if (tableLog > FSE_MAX_TABLELOG)
-+		return ERROR(tableLog_tooLarge);
-+
-+	/* Init, lay down lowprob symbols */
-+	{
-+		FSE_DTableHeader DTableH;
-+		DTableH.tableLog = (U16)tableLog;
-+		DTableH.fastMode = 1;
-+		{
-+			S16 const largeLimit = (S16)(1 << (tableLog - 1));
-+			U32 s;
-+			for (s = 0; s < maxSV1; s++) {
-+				if (normalizedCounter[s] == -1) {
-+					tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
-+					symbolNext[s] = 1;
-+				} else {
-+					if (normalizedCounter[s] >= largeLimit)
-+						DTableH.fastMode = 0;
-+					symbolNext[s] = normalizedCounter[s];
-+				}
-+			}
-+		}
-+		memcpy(dt, &DTableH, sizeof(DTableH));
-+	}
-+
-+	/* Spread symbols */
-+	{
-+		U32 const tableMask = tableSize - 1;
-+		U32 const step = FSE_TABLESTEP(tableSize);
-+		U32
s, position = 0; -+ for (s = 0; s < maxSV1; s++) { -+ int i; -+ for (i = 0; i < normalizedCounter[s]; i++) { -+ tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s; -+ position = (position + step) & tableMask; -+ while (position > highThreshold) -+ position = (position + step) & tableMask; /* lowprob area */ -+ } -+ } -+ if (position != 0) -+ return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ -+ } -+ -+ /* Build Decoding table */ -+ { -+ U32 u; -+ for (u = 0; u < tableSize; u++) { -+ FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol); -+ U16 nextState = symbolNext[symbol]++; -+ tableDecode[u].nbBits = (BYTE)(tableLog - BIT_highbit32((U32)nextState)); -+ tableDecode[u].newState = (U16)((nextState << tableDecode[u].nbBits) - tableSize); -+ } -+ } -+ -+ return 0; -+} -+ -+/*-******************************************************* -+* Decompression (Byte symbols) -+*********************************************************/ -+size_t FSE_buildDTable_rle(FSE_DTable *dt, BYTE symbolValue) -+{ -+ void *ptr = dt; -+ FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr; -+ void *dPtr = dt + 1; -+ FSE_decode_t *const cell = (FSE_decode_t *)dPtr; -+ -+ DTableH->tableLog = 0; -+ DTableH->fastMode = 0; -+ -+ cell->newState = 0; -+ cell->symbol = symbolValue; -+ cell->nbBits = 0; -+ -+ return 0; -+} -+ -+size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits) -+{ -+ void *ptr = dt; -+ FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr; -+ void *dPtr = dt + 1; -+ FSE_decode_t *const dinfo = (FSE_decode_t *)dPtr; -+ const unsigned tableSize = 1 << nbBits; -+ const unsigned tableMask = tableSize - 1; -+ const unsigned maxSV1 = tableMask + 1; -+ unsigned s; -+ -+ /* Sanity checks */ -+ if (nbBits < 1) -+ return ERROR(GENERIC); /* min size */ -+ -+ /* Build Decoding Table */ -+ DTableH->tableLog = (U16)nbBits; -+ DTableH->fastMode = 1; -+ for (s = 0; s < maxSV1; s++) { -+ dinfo[s].newState = 0; -+ dinfo[s].symbol = (BYTE)s; -+ dinfo[s].nbBits = (BYTE)nbBits; -+ } -+ -+ return 0; -+} -+ -+FORCE_INLINE size_t FSE_decompress_usingDTable_generic(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt, -+ const unsigned fast) -+{ -+ BYTE *const ostart = (BYTE *)dst; -+ BYTE *op = ostart; -+ BYTE *const omax = op + maxDstSize; -+ BYTE *const olimit = omax - 3; -+ -+ BIT_DStream_t bitD; -+ FSE_DState_t state1; -+ FSE_DState_t state2; -+ -+ /* Init */ -+ CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize)); -+ -+ FSE_initDState(&state1, &bitD, dt); -+ FSE_initDState(&state2, &bitD, dt); -+ -+#define FSE_GETSYMBOL(statePtr) fast ? 
FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) -+ -+ /* 4 symbols per loop */ -+ for (; (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished) & (op < olimit); op += 4) { -+ op[0] = FSE_GETSYMBOL(&state1); -+ -+ if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */ -+ BIT_reloadDStream(&bitD); -+ -+ op[1] = FSE_GETSYMBOL(&state2); -+ -+ if (FSE_MAX_TABLELOG * 4 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */ -+ { -+ if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { -+ op += 2; -+ break; -+ } -+ } -+ -+ op[2] = FSE_GETSYMBOL(&state1); -+ -+ if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */ -+ BIT_reloadDStream(&bitD); -+ -+ op[3] = FSE_GETSYMBOL(&state2); -+ } -+ -+ /* tail */ -+ /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ -+ while (1) { -+ if (op > (omax - 2)) -+ return ERROR(dstSize_tooSmall); -+ *op++ = FSE_GETSYMBOL(&state1); -+ if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) { -+ *op++ = FSE_GETSYMBOL(&state2); -+ break; -+ } -+ -+ if (op > (omax - 2)) -+ return ERROR(dstSize_tooSmall); -+ *op++ = FSE_GETSYMBOL(&state2); -+ if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) { -+ *op++ = FSE_GETSYMBOL(&state1); -+ break; -+ } -+ } -+ -+ return op - ostart; -+} -+ -+size_t FSE_decompress_usingDTable(void *dst, size_t originalSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt) -+{ -+ const void *ptr = dt; -+ const FSE_DTableHeader *DTableH = (const FSE_DTableHeader *)ptr; -+ const U32 fastMode = DTableH->fastMode; -+ -+ /* select fast mode (static) */ -+ if (fastMode) -+ return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); -+ return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); -+} -+ -+size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize) -+{ -+ const BYTE *const istart = (const BYTE *)cSrc; -+ const BYTE *ip = istart; -+ unsigned tableLog; -+ unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; -+ size_t NCountLength; -+ -+ FSE_DTable *dt; -+ short *counting; -+ size_t spaceUsed32 = 0; -+ -+ FSE_STATIC_ASSERT(sizeof(FSE_DTable) == sizeof(U32)); -+ -+ dt = (FSE_DTable *)((U32 *)workspace + spaceUsed32); -+ spaceUsed32 += FSE_DTABLE_SIZE_U32(maxLog); -+ counting = (short *)((U32 *)workspace + spaceUsed32); -+ spaceUsed32 += ALIGN(sizeof(short) * (FSE_MAX_SYMBOL_VALUE + 1), sizeof(U32)) >> 2; -+ -+ if ((spaceUsed32 << 2) > workspaceSize) -+ return ERROR(tableLog_tooLarge); -+ workspace = (U32 *)workspace + spaceUsed32; -+ workspaceSize -= (spaceUsed32 << 2); -+ -+ /* normal FSE decoding mode */ -+ NCountLength = FSE_readNCount(counting, &maxSymbolValue, &tableLog, istart, cSrcSize); -+ if (FSE_isError(NCountLength)) -+ return NCountLength; -+ // if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size; supposed to be already checked in NCountLength, only remaining -+ // case : NCountLength==cSrcSize */ -+ if (tableLog > maxLog) -+ return ERROR(tableLog_tooLarge); -+ ip += NCountLength; -+ cSrcSize -= NCountLength; -+ -+ CHECK_F(FSE_buildDTable_wksp(dt, counting, maxSymbolValue, tableLog, workspace, workspaceSize)); -+ -+ return FSE_decompress_usingDTable(dst, dstCapacity, ip, cSrcSize, dt); /* always return, even if it is an error code */ -+} -diff --git a/lib/zstd/huf.h b/lib/zstd/huf.h -new file 
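FSE_decompress_wksp() above carves both its DTable and the counting array out of one caller-provided buffer, measured in U32 units. A back-of-envelope budget for that buffer, assuming the usual fse.h definitions (FSE_MAX_SYMBOL_VALUE == 255 and FSE_DTABLE_SIZE_U32(lg) == 1 + (1 << lg)); an illustration, not the patch's own sizing macro:

#include <stdio.h>

int main(void)
{
	unsigned const maxLog = 12;
	unsigned const dtableU32 = 1 + (1u << maxLog);        /* FSE_DTABLE_SIZE_U32(maxLog) */
	unsigned const countingU32 = (2 * (255 + 1) + 3) / 4; /* short[256], rounded up to U32s */
	unsigned const buildU32 = (2 * (255 + 1) + 3) / 4;    /* U16[256] scratch for FSE_buildDTable_wksp() */

	printf("workspace >= %u bytes\n", (dtableU32 + countingU32 + buildU32) * 4); /* 17412 */
	return 0;
}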
mode 100644
-index 0000000..2143da2
---- /dev/null
-+++ b/lib/zstd/huf.h
-@@ -0,0 +1,212 @@
-+/*
-+ * Huffman coder, part of New Generation Entropy library
-+ * header file
-+ * Copyright (C) 2013-2016, Yann Collet.
-+ *
-+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are
-+ * met:
-+ *
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above
-+ * copyright notice, this list of conditions and the following disclaimer
-+ * in the documentation and/or other materials provided with the
-+ * distribution.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU General Public License version 2 as published by the
-+ * Free Software Foundation. This program is dual-licensed; you may select
-+ * either version 2 of the GNU General Public License ("GPL") or BSD license
-+ * ("BSD").
-+ *
-+ * You can contact the author at :
-+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-+ */
-+#ifndef HUF_H_298734234
-+#define HUF_H_298734234
-+
-+/* *** Dependencies *** */
-+#include <linux/types.h> /* size_t */
-+
-+/* *** Tool functions *** */
-+#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
-+size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
-+
-+/* Error Management */
-+unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
-+
-+/* *** Advanced function *** */
-+
-+/** HUF_compress4X_wksp() :
-+* Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */
-+size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
-+	size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
-+
-+/* *** Dependencies *** */
-+#include "mem.h" /* U32 */
-+
-+/* *** Constants *** */
-+#define HUF_TABLELOG_MAX 12     /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
-+#define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */
-+#define HUF_SYMBOLVALUE_MAX 255
-+
-+#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG.
Beyond that value, code does not work */ -+#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX) -+#error "HUF_TABLELOG_MAX is too large !" -+#endif -+ -+/* **************************************** -+* Static allocation -+******************************************/ -+/* HUF buffer bounds */ -+#define HUF_CTABLEBOUND 129 -+#define HUF_BLOCKBOUND(size) (size + (size >> 8) + 8) /* only true if incompressible pre-filtered with fast heuristic */ -+#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ -+ -+/* static allocation of HUF's Compression Table */ -+#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \ -+ U32 name##hb[maxSymbolValue + 1]; \ -+ void *name##hv = &(name##hb); \ -+ HUF_CElt *name = (HUF_CElt *)(name##hv) /* no final ; */ -+ -+/* static allocation of HUF's DTable */ -+typedef U32 HUF_DTable; -+#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1 << (maxTableLog))) -+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = {((U32)((maxTableLog)-1) * 0x01000001)} -+#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = {((U32)(maxTableLog)*0x01000001)} -+ -+/* The workspace must have alignment at least 4 and be at least this large */ -+#define HUF_COMPRESS_WORKSPACE_SIZE (6 << 10) -+#define HUF_COMPRESS_WORKSPACE_SIZE_U32 (HUF_COMPRESS_WORKSPACE_SIZE / sizeof(U32)) -+ -+/* The workspace must have alignment at least 4 and be at least this large */ -+#define HUF_DECOMPRESS_WORKSPACE_SIZE (3 << 10) -+#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) -+ -+/* **************************************** -+* Advanced decompression functions -+******************************************/ -+size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize); /**< decodes RLE and uncompressed */ -+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, -+ size_t workspaceSize); /**< considers RLE and uncompressed as errors */ -+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, -+ size_t workspaceSize); /**< single-symbol decoder */ -+size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, -+ size_t workspaceSize); /**< double-symbols decoder */ -+ -+/* **************************************** -+* HUF detailed API -+******************************************/ -+/*! -+HUF_compress() does the following: -+1. count symbol occurrence from source[] into table count[] using FSE_count() -+2. (optional) refine tableLog using HUF_optimalTableLog() -+3. build Huffman table from count using HUF_buildCTable() -+4. save Huffman table to memory buffer using HUF_writeCTable_wksp() -+5. encode the data stream using HUF_compress4X_usingCTable() -+ -+The following API allows targeting specific sub-functions for advanced tasks. -+For example, it's possible to compress several blocks using the same 'CTable', -+or to save and regenerate 'CTable' using external methods. 
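Taken together, the bounds and workspace constants above are enough for a self-contained round trip through the one-shot wksp entry points. A hedged sketch (userspace harness assumed, with this patch's huf.h on the include path; srcSize is assumed <= 1024 so the stack buffers fit; all names are illustrative):

#include <string.h>
#include "huf.h"

/* Round-trips src through the 4-stream coder; returns 0 on byte-exact match.
 * Assumes srcSize <= 1024 (stack buffers are sized for that). */
int huf_roundtrip(const void *src, size_t srcSize)
{
	BYTE cBuf[HUF_COMPRESSBOUND(1024)];
	BYTE rBuf[1024];
	U32 cwksp[HUF_COMPRESS_WORKSPACE_SIZE_U32];
	U32 dwksp[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
	HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX); /* large enough for either decoder */

	size_t const cSize = HUF_compress4X_wksp(cBuf, sizeof(cBuf), src, srcSize, HUF_SYMBOLVALUE_MAX,
						 HUF_TABLELOG_DEFAULT, cwksp, sizeof(cwksp));
	if (HUF_isError(cSize) || cSize <= 1)
		return -1; /* error, RLE (1), or not compressible (0) */
	{
		size_t const rSize = HUF_decompress4X_DCtx_wksp(DTable, rBuf, srcSize, cBuf, cSize, dwksp, sizeof(dwksp));
		if (HUF_isError(rSize) || rSize != srcSize)
			return -1;
	}
	return memcmp(src, rBuf, srcSize);
}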
-+*/ -+/* FSE_count() : find it within "fse.h" */ -+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); -+typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */ -+size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, unsigned maxSymbolValue, unsigned huffLog, void *workspace, size_t workspaceSize); -+size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable); -+ -+typedef enum { -+ HUF_repeat_none, /**< Cannot use the previous table */ -+ HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, -+ 4}X_repeat */ -+ HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */ -+} HUF_repeat; -+/** HUF_compress4X_repeat() : -+* Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. -+* If it uses hufTable it does not modify hufTable or repeat. -+* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. -+* If preferRepeat then the old table will always be used if valid. */ -+size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, -+ size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, -+ int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */ -+ -+/** HUF_buildCTable_wksp() : -+ * Same as HUF_buildCTable(), but using externally allocated scratch buffer. -+ * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned. -+ */ -+size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize); -+ -+/*! HUF_readStats() : -+ Read compact Huffman tree, saved by HUF_writeCTable(). -+ `huffWeight` is destination buffer. -+ @return : size read from `src` , or an error Code . -+ Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */ -+size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, -+ void *workspace, size_t workspaceSize); -+ -+/** HUF_readCTable() : -+* Loading a CTable saved with HUF_writeCTable() */ -+size_t HUF_readCTable_wksp(HUF_CElt *CTable, unsigned maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize); -+ -+/* -+HUF_decompress() does the following: -+1. select the decompression algorithm (X2, X4) based on pre-computed heuristics -+2. build Huffman table from save, using HUF_readDTableXn() -+3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable -+*/ -+ -+/** HUF_selectDecoder() : -+* Tells which decoder is likely to decode faster, -+* based on a set of pre-determined metrics. -+* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 . 
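The five steps just listed can also be driven by hand. A sketch of steps 2–5 under stated assumptions (count[] already filled per step 1, e.g. by FSE_count_wksp(); wksp holds at least HUF_COMPRESS_WORKSPACE_SIZE bytes; function and parameter names below are illustrative, the HUF_* calls are the ones declared above):

#include "huf.h"

size_t compress_with_explicit_ctable(void *dst, size_t dstCapacity, const void *src, size_t srcSize,
				     const U32 *count, unsigned maxSymbolValue, void *wksp, size_t wkspSize)
{
	HUF_CREATE_STATIC_CTABLE(hufTable, HUF_SYMBOLVALUE_MAX);
	BYTE *op = (BYTE *)dst;
	BYTE *const oend = op + dstCapacity;
	unsigned huffLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue);            /* step 2 */
	size_t const maxBits = HUF_buildCTable_wksp(hufTable, count, maxSymbolValue, huffLog, wksp, wkspSize); /* step 3 */

	if (HUF_isError(maxBits))
		return maxBits;
	huffLog = (unsigned)maxBits; /* the builder may lower the table depth */
	{
		size_t const hSize = HUF_writeCTable_wksp(op, (size_t)(oend - op), hufTable, maxSymbolValue,
							  huffLog, wksp, wkspSize); /* step 4 */
		if (HUF_isError(hSize))
			return hSize;
		op += hSize;
	}
	{
		size_t const cSize = HUF_compress4X_usingCTable(op, (size_t)(oend - op), src, srcSize, hufTable); /* step 5 */
		if (HUF_isError(cSize) || cSize == 0)
			return cSize; /* 0 : not compressible */
		op += cSize;
	}
	return (size_t)(op - (BYTE *)dst);
}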
-+* Assumption : 0 < cSrcSize < dstSize <= 128 KB */ -+U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize); -+ -+size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize); -+size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize); -+ -+size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable); -+size_t HUF_decompress4X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable); -+size_t HUF_decompress4X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable); -+ -+/* single stream variants */ -+ -+size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, -+ size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */ -+size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable); -+/** HUF_compress1X_repeat() : -+* Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. -+* If it uses hufTable it does not modify hufTable or repeat. -+* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. -+* If preferRepeat then the old table will always be used if valid. */ -+size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, -+ size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, -+ int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */ -+ -+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize); -+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, -+ size_t workspaceSize); /**< single-symbol decoder */ -+size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, -+ size_t workspaceSize); /**< double-symbols decoder */ -+ -+size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, -+ const HUF_DTable *DTable); /**< automatic selection of sing or double symbol decoder, based on DTable */ -+size_t HUF_decompress1X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable); -+size_t HUF_decompress1X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable); -+ -+#endif /* HUF_H_298734234 */ -diff --git a/lib/zstd/huf_compress.c b/lib/zstd/huf_compress.c -new file mode 100644 -index 0000000..40055a7 ---- /dev/null -+++ b/lib/zstd/huf_compress.c -@@ -0,0 +1,770 @@ -+/* -+ * Huffman encoder, part of New Generation Entropy library -+ * Copyright (C) 2013-2016, Yann Collet. 
-+ *
-+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are
-+ * met:
-+ *
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above
-+ * copyright notice, this list of conditions and the following disclaimer
-+ * in the documentation and/or other materials provided with the
-+ * distribution.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU General Public License version 2 as published by the
-+ * Free Software Foundation. This program is dual-licensed; you may select
-+ * either version 2 of the GNU General Public License ("GPL") or BSD license
-+ * ("BSD").
-+ *
-+ * You can contact the author at :
-+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-+ */
-+
-+/* **************************************************************
-+* Includes
-+****************************************************************/
-+#include "bitstream.h"
-+#include "fse.h" /* header compression */
-+#include "huf.h"
-+#include <linux/kernel.h>
-+#include <linux/string.h> /* memcpy, memset */
-+
-+/* **************************************************************
-+* Error Management
-+****************************************************************/
-+#define HUF_STATIC_ASSERT(c) \
-+	{ \
-+		enum { HUF_static_assert = 1 / (int)(!!(c)) }; \
-+	} /* use only *after* variable declarations */
-+#define CHECK_V_F(e, f) \
-+	size_t const e = f; \
-+	if (ERR_isError(e)) \
-+	return f
-+#define CHECK_F(f) \
-+	{ \
-+		CHECK_V_F(_var_err__, f); \
-+	}
-+
-+/* **************************************************************
-+* Utils
-+****************************************************************/
-+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
-+{
-+	return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
-+}
-+
-+/* *******************************************************
-+* HUF : Huffman block compression
-+*********************************************************/
-+/* HUF_compressWeights() :
-+ * Same as FSE_compress(), but dedicated to huff0's weights compression.
-+ * The use case needs much less stack memory.
-+ * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
-+ */ -+#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 -+size_t HUF_compressWeights_wksp(void *dst, size_t dstSize, const void *weightTable, size_t wtSize, void *workspace, size_t workspaceSize) -+{ -+ BYTE *const ostart = (BYTE *)dst; -+ BYTE *op = ostart; -+ BYTE *const oend = ostart + dstSize; -+ -+ U32 maxSymbolValue = HUF_TABLELOG_MAX; -+ U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; -+ -+ FSE_CTable *CTable; -+ U32 *count; -+ S16 *norm; -+ size_t spaceUsed32 = 0; -+ -+ HUF_STATIC_ASSERT(sizeof(FSE_CTable) == sizeof(U32)); -+ -+ CTable = (FSE_CTable *)((U32 *)workspace + spaceUsed32); -+ spaceUsed32 += FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX); -+ count = (U32 *)workspace + spaceUsed32; -+ spaceUsed32 += HUF_TABLELOG_MAX + 1; -+ norm = (S16 *)((U32 *)workspace + spaceUsed32); -+ spaceUsed32 += ALIGN(sizeof(S16) * (HUF_TABLELOG_MAX + 1), sizeof(U32)) >> 2; -+ -+ if ((spaceUsed32 << 2) > workspaceSize) -+ return ERROR(tableLog_tooLarge); -+ workspace = (U32 *)workspace + spaceUsed32; -+ workspaceSize -= (spaceUsed32 << 2); -+ -+ /* init conditions */ -+ if (wtSize <= 1) -+ return 0; /* Not compressible */ -+ -+ /* Scan input and build symbol stats */ -+ { -+ CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize)); -+ if (maxCount == wtSize) -+ return 1; /* only a single symbol in src : rle */ -+ if (maxCount == 1) -+ return 0; /* each symbol present maximum once => not compressible */ -+ } -+ -+ tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); -+ CHECK_F(FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue)); -+ -+ /* Write table description header */ -+ { -+ CHECK_V_F(hSize, FSE_writeNCount(op, oend - op, norm, maxSymbolValue, tableLog)); -+ op += hSize; -+ } -+ -+ /* Compress */ -+ CHECK_F(FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, workspace, workspaceSize)); -+ { -+ CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable)); -+ if (cSize == 0) -+ return 0; /* not enough space for compressed data */ -+ op += cSize; -+ } -+ -+ return op - ostart; -+} -+ -+struct HUF_CElt_s { -+ U16 val; -+ BYTE nbBits; -+}; /* typedef'd to HUF_CElt within "huf.h" */ -+ -+/*! HUF_writeCTable_wksp() : -+ `CTable` : Huffman tree to save, using huf representation. 
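A serialized CTable is one header byte followed either by FSE-compressed weights or, as a fallback, by raw 4-bit weights; both paths appear in HUF_writeCTable_wksp() below. A quick size check of the raw path, as a standalone sketch (not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned const maxSymbolValue = 127; /* raw path requires maxSymbolValue <= 128 */
	unsigned const headerByte = 128 + (maxSymbolValue - 1); /* >= 128 flags "raw weights" */
	unsigned const rawSize = ((maxSymbolValue + 1) / 2) + 1; /* two 4-bit weights per byte, + header */

	printf("header 0x%02X, total %u bytes\n", headerByte, rawSize); /* 0xFE, 65 bytes */
	return 0;
}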
-+ @return : size of saved CTable */ -+size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, U32 maxSymbolValue, U32 huffLog, void *workspace, size_t workspaceSize) -+{ -+ BYTE *op = (BYTE *)dst; -+ U32 n; -+ -+ BYTE *bitsToWeight; -+ BYTE *huffWeight; -+ size_t spaceUsed32 = 0; -+ -+ bitsToWeight = (BYTE *)((U32 *)workspace + spaceUsed32); -+ spaceUsed32 += ALIGN(HUF_TABLELOG_MAX + 1, sizeof(U32)) >> 2; -+ huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32); -+ spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX, sizeof(U32)) >> 2; -+ -+ if ((spaceUsed32 << 2) > workspaceSize) -+ return ERROR(tableLog_tooLarge); -+ workspace = (U32 *)workspace + spaceUsed32; -+ workspaceSize -= (spaceUsed32 << 2); -+ -+ /* check conditions */ -+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) -+ return ERROR(maxSymbolValue_tooLarge); -+ -+ /* convert to weight */ -+ bitsToWeight[0] = 0; -+ for (n = 1; n < huffLog + 1; n++) -+ bitsToWeight[n] = (BYTE)(huffLog + 1 - n); -+ for (n = 0; n < maxSymbolValue; n++) -+ huffWeight[n] = bitsToWeight[CTable[n].nbBits]; -+ -+ /* attempt weights compression by FSE */ -+ { -+ CHECK_V_F(hSize, HUF_compressWeights_wksp(op + 1, maxDstSize - 1, huffWeight, maxSymbolValue, workspace, workspaceSize)); -+ if ((hSize > 1) & (hSize < maxSymbolValue / 2)) { /* FSE compressed */ -+ op[0] = (BYTE)hSize; -+ return hSize + 1; -+ } -+ } -+ -+ /* write raw values as 4-bits (max : 15) */ -+ if (maxSymbolValue > (256 - 128)) -+ return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */ -+ if (((maxSymbolValue + 1) / 2) + 1 > maxDstSize) -+ return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */ -+ op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue - 1)); -+ huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ -+ for (n = 0; n < maxSymbolValue; n += 2) -+ op[(n / 2) + 1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n + 1]); -+ return ((maxSymbolValue + 1) / 2) + 1; -+} -+ -+size_t HUF_readCTable_wksp(HUF_CElt *CTable, U32 maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize) -+{ -+ U32 *rankVal; -+ BYTE *huffWeight; -+ U32 tableLog = 0; -+ U32 nbSymbols = 0; -+ size_t readSize; -+ size_t spaceUsed32 = 0; -+ -+ rankVal = (U32 *)workspace + spaceUsed32; -+ spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1; -+ huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32); -+ spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; -+ -+ if ((spaceUsed32 << 2) > workspaceSize) -+ return ERROR(tableLog_tooLarge); -+ workspace = (U32 *)workspace + spaceUsed32; -+ workspaceSize -= (spaceUsed32 << 2); -+ -+ /* get symbol weights */ -+ readSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize); -+ if (ERR_isError(readSize)) -+ return readSize; -+ -+ /* check result */ -+ if (tableLog > HUF_TABLELOG_MAX) -+ return ERROR(tableLog_tooLarge); -+ if (nbSymbols > maxSymbolValue + 1) -+ return ERROR(maxSymbolValue_tooSmall); -+ -+ /* Prepare base value per rank */ -+ { -+ U32 n, nextRankStart = 0; -+ for (n = 1; n <= tableLog; n++) { -+ U32 curr = nextRankStart; -+ nextRankStart += (rankVal[n] << (n - 1)); -+ rankVal[n] = curr; -+ } -+ } -+ -+ /* fill nbBits */ -+ { -+ U32 n; -+ for (n = 0; n < nbSymbols; n++) { -+ const U32 w = huffWeight[n]; -+ CTable[n].nbBits = (BYTE)(tableLog + 1 - w); -+ } -+ } -+ -+ /* fill val */ -+ { -+ U16 nbPerRank[HUF_TABLELOG_MAX + 2] = {0}; /* support 
w=0=>n=tableLog+1 */ -+ U16 valPerRank[HUF_TABLELOG_MAX + 2] = {0}; -+ { -+ U32 n; -+ for (n = 0; n < nbSymbols; n++) -+ nbPerRank[CTable[n].nbBits]++; -+ } -+ /* determine stating value per rank */ -+ valPerRank[tableLog + 1] = 0; /* for w==0 */ -+ { -+ U16 min = 0; -+ U32 n; -+ for (n = tableLog; n > 0; n--) { /* start at n=tablelog <-> w=1 */ -+ valPerRank[n] = min; /* get starting value within each rank */ -+ min += nbPerRank[n]; -+ min >>= 1; -+ } -+ } -+ /* assign value within rank, symbol order */ -+ { -+ U32 n; -+ for (n = 0; n <= maxSymbolValue; n++) -+ CTable[n].val = valPerRank[CTable[n].nbBits]++; -+ } -+ } -+ -+ return readSize; -+} -+ -+typedef struct nodeElt_s { -+ U32 count; -+ U16 parent; -+ BYTE byte; -+ BYTE nbBits; -+} nodeElt; -+ -+static U32 HUF_setMaxHeight(nodeElt *huffNode, U32 lastNonNull, U32 maxNbBits) -+{ -+ const U32 largestBits = huffNode[lastNonNull].nbBits; -+ if (largestBits <= maxNbBits) -+ return largestBits; /* early exit : no elt > maxNbBits */ -+ -+ /* there are several too large elements (at least >= 2) */ -+ { -+ int totalCost = 0; -+ const U32 baseCost = 1 << (largestBits - maxNbBits); -+ U32 n = lastNonNull; -+ -+ while (huffNode[n].nbBits > maxNbBits) { -+ totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); -+ huffNode[n].nbBits = (BYTE)maxNbBits; -+ n--; -+ } /* n stops at huffNode[n].nbBits <= maxNbBits */ -+ while (huffNode[n].nbBits == maxNbBits) -+ n--; /* n end at index of smallest symbol using < maxNbBits */ -+ -+ /* renorm totalCost */ -+ totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */ -+ -+ /* repay normalized cost */ -+ { -+ U32 const noSymbol = 0xF0F0F0F0; -+ U32 rankLast[HUF_TABLELOG_MAX + 2]; -+ int pos; -+ -+ /* Get pos of last (smallest) symbol per rank */ -+ memset(rankLast, 0xF0, sizeof(rankLast)); -+ { -+ U32 currNbBits = maxNbBits; -+ for (pos = n; pos >= 0; pos--) { -+ if (huffNode[pos].nbBits >= currNbBits) -+ continue; -+ currNbBits = huffNode[pos].nbBits; /* < maxNbBits */ -+ rankLast[maxNbBits - currNbBits] = pos; -+ } -+ } -+ -+ while (totalCost > 0) { -+ U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1; -+ for (; nBitsToDecrease > 1; nBitsToDecrease--) { -+ U32 highPos = rankLast[nBitsToDecrease]; -+ U32 lowPos = rankLast[nBitsToDecrease - 1]; -+ if (highPos == noSymbol) -+ continue; -+ if (lowPos == noSymbol) -+ break; -+ { -+ U32 const highTotal = huffNode[highPos].count; -+ U32 const lowTotal = 2 * huffNode[lowPos].count; -+ if (highTotal <= lowTotal) -+ break; -+ } -+ } -+ /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
*/ -+ /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */ -+ while ((nBitsToDecrease <= HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) -+ nBitsToDecrease++; -+ totalCost -= 1 << (nBitsToDecrease - 1); -+ if (rankLast[nBitsToDecrease - 1] == noSymbol) -+ rankLast[nBitsToDecrease - 1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */ -+ huffNode[rankLast[nBitsToDecrease]].nbBits++; -+ if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */ -+ rankLast[nBitsToDecrease] = noSymbol; -+ else { -+ rankLast[nBitsToDecrease]--; -+ if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits - nBitsToDecrease) -+ rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ -+ } -+ } /* while (totalCost > 0) */ -+ -+ while (totalCost < 0) { /* Sometimes, cost correction overshoot */ -+ if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 -+ (using maxNbBits) */ -+ while (huffNode[n].nbBits == maxNbBits) -+ n--; -+ huffNode[n + 1].nbBits--; -+ rankLast[1] = n + 1; -+ totalCost++; -+ continue; -+ } -+ huffNode[rankLast[1] + 1].nbBits--; -+ rankLast[1]++; -+ totalCost++; -+ } -+ } -+ } /* there are several too large elements (at least >= 2) */ -+ -+ return maxNbBits; -+} -+ -+typedef struct { -+ U32 base; -+ U32 curr; -+} rankPos; -+ -+static void HUF_sort(nodeElt *huffNode, const U32 *count, U32 maxSymbolValue) -+{ -+ rankPos rank[32]; -+ U32 n; -+ -+ memset(rank, 0, sizeof(rank)); -+ for (n = 0; n <= maxSymbolValue; n++) { -+ U32 r = BIT_highbit32(count[n] + 1); -+ rank[r].base++; -+ } -+ for (n = 30; n > 0; n--) -+ rank[n - 1].base += rank[n].base; -+ for (n = 0; n < 32; n++) -+ rank[n].curr = rank[n].base; -+ for (n = 0; n <= maxSymbolValue; n++) { -+ U32 const c = count[n]; -+ U32 const r = BIT_highbit32(c + 1) + 1; -+ U32 pos = rank[r].curr++; -+ while ((pos > rank[r].base) && (c > huffNode[pos - 1].count)) -+ huffNode[pos] = huffNode[pos - 1], pos--; -+ huffNode[pos].count = c; -+ huffNode[pos].byte = (BYTE)n; -+ } -+} -+ -+/** HUF_buildCTable_wksp() : -+ * Same as HUF_buildCTable(), but using externally allocated scratch buffer. -+ * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned. 
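The "table of 1024 unsigned" requirement is exactly one huffNodeTable (see the typedef just below): with an 8-byte nodeElt as defined above, the arithmetic works out as follows in this small verification sketch (not part of the patch):

#include <stdio.h>

struct nodeElt { unsigned count; unsigned short parent; unsigned char byte, nbBits; }; /* 8 bytes */

int main(void)
{
	unsigned const entries = 2 * 255 + 1 + 1; /* huffNodeTable, with HUF_SYMBOLVALUE_MAX == 255 */

	printf("%zu bytes = %zu unsigned\n",
	       entries * sizeof(struct nodeElt),
	       entries * sizeof(struct nodeElt) / sizeof(unsigned)); /* 4096 bytes = 1024 unsigned */
	return 0;
}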
-+ */ -+#define STARTNODE (HUF_SYMBOLVALUE_MAX + 1) -+typedef nodeElt huffNodeTable[2 * HUF_SYMBOLVALUE_MAX + 1 + 1]; -+size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize) -+{ -+ nodeElt *const huffNode0 = (nodeElt *)workSpace; -+ nodeElt *const huffNode = huffNode0 + 1; -+ U32 n, nonNullRank; -+ int lowS, lowN; -+ U16 nodeNb = STARTNODE; -+ U32 nodeRoot; -+ -+ /* safety checks */ -+ if (wkspSize < sizeof(huffNodeTable)) -+ return ERROR(GENERIC); /* workSpace is not large enough */ -+ if (maxNbBits == 0) -+ maxNbBits = HUF_TABLELOG_DEFAULT; -+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) -+ return ERROR(GENERIC); -+ memset(huffNode0, 0, sizeof(huffNodeTable)); -+ -+ /* sort, decreasing order */ -+ HUF_sort(huffNode, count, maxSymbolValue); -+ -+ /* init for parents */ -+ nonNullRank = maxSymbolValue; -+ while (huffNode[nonNullRank].count == 0) -+ nonNullRank--; -+ lowS = nonNullRank; -+ nodeRoot = nodeNb + lowS - 1; -+ lowN = nodeNb; -+ huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS - 1].count; -+ huffNode[lowS].parent = huffNode[lowS - 1].parent = nodeNb; -+ nodeNb++; -+ lowS -= 2; -+ for (n = nodeNb; n <= nodeRoot; n++) -+ huffNode[n].count = (U32)(1U << 30); -+ huffNode0[0].count = (U32)(1U << 31); /* fake entry, strong barrier */ -+ -+ /* create parents */ -+ while (nodeNb <= nodeRoot) { -+ U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; -+ U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; -+ huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; -+ huffNode[n1].parent = huffNode[n2].parent = nodeNb; -+ nodeNb++; -+ } -+ -+ /* distribute weights (unlimited tree height) */ -+ huffNode[nodeRoot].nbBits = 0; -+ for (n = nodeRoot - 1; n >= STARTNODE; n--) -+ huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1; -+ for (n = 0; n <= nonNullRank; n++) -+ huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1; -+ -+ /* enforce maxTableLog */ -+ maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits); -+ -+ /* fill result into tree (val, nbBits) */ -+ { -+ U16 nbPerRank[HUF_TABLELOG_MAX + 1] = {0}; -+ U16 valPerRank[HUF_TABLELOG_MAX + 1] = {0}; -+ if (maxNbBits > HUF_TABLELOG_MAX) -+ return ERROR(GENERIC); /* check fit into table */ -+ for (n = 0; n <= nonNullRank; n++) -+ nbPerRank[huffNode[n].nbBits]++; -+ /* determine stating value per rank */ -+ { -+ U16 min = 0; -+ for (n = maxNbBits; n > 0; n--) { -+ valPerRank[n] = min; /* get starting value within each rank */ -+ min += nbPerRank[n]; -+ min >>= 1; -+ } -+ } -+ for (n = 0; n <= maxSymbolValue; n++) -+ tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */ -+ for (n = 0; n <= maxSymbolValue; n++) -+ tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */ -+ } -+ -+ return maxNbBits; -+} -+ -+static size_t HUF_estimateCompressedSize(HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue) -+{ -+ size_t nbBits = 0; -+ int s; -+ for (s = 0; s <= (int)maxSymbolValue; ++s) { -+ nbBits += CTable[s].nbBits * count[s]; -+ } -+ return nbBits >> 3; -+} -+ -+static int HUF_validateCTable(const HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue) -+{ -+ int bad = 0; -+ int s; -+ for (s = 0; s <= (int)maxSymbolValue; ++s) { -+ bad |= (count[s] != 0) & (CTable[s].nbBits == 0); -+ } -+ return !bad; -+} -+ -+static void HUF_encodeSymbol(BIT_CStream_t *bitCPtr, U32 symbol, const HUF_CElt 
*CTable) -+{ -+ BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits); -+} -+ -+size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } -+ -+#define HUF_FLUSHBITS(s) BIT_flushBits(s) -+ -+#define HUF_FLUSHBITS_1(stream) \ -+ if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 2 + 7) \ -+ HUF_FLUSHBITS(stream) -+ -+#define HUF_FLUSHBITS_2(stream) \ -+ if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 4 + 7) \ -+ HUF_FLUSHBITS(stream) -+ -+size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable) -+{ -+ const BYTE *ip = (const BYTE *)src; -+ BYTE *const ostart = (BYTE *)dst; -+ BYTE *const oend = ostart + dstSize; -+ BYTE *op = ostart; -+ size_t n; -+ BIT_CStream_t bitC; -+ -+ /* init */ -+ if (dstSize < 8) -+ return 0; /* not enough space to compress */ -+ { -+ size_t const initErr = BIT_initCStream(&bitC, op, oend - op); -+ if (HUF_isError(initErr)) -+ return 0; -+ } -+ -+ n = srcSize & ~3; /* join to mod 4 */ -+ switch (srcSize & 3) { -+ case 3: HUF_encodeSymbol(&bitC, ip[n + 2], CTable); HUF_FLUSHBITS_2(&bitC); -+ case 2: HUF_encodeSymbol(&bitC, ip[n + 1], CTable); HUF_FLUSHBITS_1(&bitC); -+ case 1: HUF_encodeSymbol(&bitC, ip[n + 0], CTable); HUF_FLUSHBITS(&bitC); -+ case 0: -+ default:; -+ } -+ -+ for (; n > 0; n -= 4) { /* note : n&3==0 at this stage */ -+ HUF_encodeSymbol(&bitC, ip[n - 1], CTable); -+ HUF_FLUSHBITS_1(&bitC); -+ HUF_encodeSymbol(&bitC, ip[n - 2], CTable); -+ HUF_FLUSHBITS_2(&bitC); -+ HUF_encodeSymbol(&bitC, ip[n - 3], CTable); -+ HUF_FLUSHBITS_1(&bitC); -+ HUF_encodeSymbol(&bitC, ip[n - 4], CTable); -+ HUF_FLUSHBITS(&bitC); -+ } -+ -+ return BIT_closeCStream(&bitC); -+} -+ -+size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable) -+{ -+ size_t const segmentSize = (srcSize + 3) / 4; /* first 3 segments */ -+ const BYTE *ip = (const BYTE *)src; -+ const BYTE *const iend = ip + srcSize; -+ BYTE *const ostart = (BYTE *)dst; -+ BYTE *const oend = ostart + dstSize; -+ BYTE *op = ostart; -+ -+ if (dstSize < 6 + 1 + 1 + 1 + 8) -+ return 0; /* minimum space to compress successfully */ -+ if (srcSize < 12) -+ return 0; /* no saving possible : too small input */ -+ op += 6; /* jumpTable */ -+ -+ { -+ CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable)); -+ if (cSize == 0) -+ return 0; -+ ZSTD_writeLE16(ostart, (U16)cSize); -+ op += cSize; -+ } -+ -+ ip += segmentSize; -+ { -+ CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable)); -+ if (cSize == 0) -+ return 0; -+ ZSTD_writeLE16(ostart + 2, (U16)cSize); -+ op += cSize; -+ } -+ -+ ip += segmentSize; -+ { -+ CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable)); -+ if (cSize == 0) -+ return 0; -+ ZSTD_writeLE16(ostart + 4, (U16)cSize); -+ op += cSize; -+ } -+ -+ ip += segmentSize; -+ { -+ CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, iend - ip, CTable)); -+ if (cSize == 0) -+ return 0; -+ op += cSize; -+ } -+ -+ return op - ostart; -+} -+ -+static size_t HUF_compressCTable_internal(BYTE *const ostart, BYTE *op, BYTE *const oend, const void *src, size_t srcSize, unsigned singleStream, -+ const HUF_CElt *CTable) -+{ -+ size_t const cSize = -+ singleStream ? 
HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) : HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable); -+ if (HUF_isError(cSize)) { -+ return cSize; -+ } -+ if (cSize == 0) { -+ return 0; -+ } /* uncompressible */ -+ op += cSize; -+ /* check compressibility */ -+ if ((size_t)(op - ostart) >= srcSize - 1) { -+ return 0; -+ } -+ return op - ostart; -+} -+ -+/* `workSpace` must a table of at least 1024 unsigned */ -+static size_t HUF_compress_internal(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, -+ unsigned singleStream, void *workSpace, size_t wkspSize, HUF_CElt *oldHufTable, HUF_repeat *repeat, int preferRepeat) -+{ -+ BYTE *const ostart = (BYTE *)dst; -+ BYTE *const oend = ostart + dstSize; -+ BYTE *op = ostart; -+ -+ U32 *count; -+ size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1); -+ HUF_CElt *CTable; -+ size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1); -+ -+ /* checks & inits */ -+ if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize) -+ return ERROR(GENERIC); -+ if (!srcSize) -+ return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */ -+ if (!dstSize) -+ return 0; /* cannot fit within dst budget */ -+ if (srcSize > HUF_BLOCKSIZE_MAX) -+ return ERROR(srcSize_wrong); /* curr block size limit */ -+ if (huffLog > HUF_TABLELOG_MAX) -+ return ERROR(tableLog_tooLarge); -+ if (!maxSymbolValue) -+ maxSymbolValue = HUF_SYMBOLVALUE_MAX; -+ if (!huffLog) -+ huffLog = HUF_TABLELOG_DEFAULT; -+ -+ count = (U32 *)workSpace; -+ workSpace = (BYTE *)workSpace + countSize; -+ wkspSize -= countSize; -+ CTable = (HUF_CElt *)workSpace; -+ workSpace = (BYTE *)workSpace + CTableSize; -+ wkspSize -= CTableSize; -+ -+ /* Heuristic : If we don't need to check the validity of the old table use the old table for small inputs */ -+ if (preferRepeat && repeat && *repeat == HUF_repeat_valid) { -+ return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); -+ } -+ -+ /* Scan input and build symbol stats */ -+ { -+ CHECK_V_F(largest, FSE_count_wksp(count, &maxSymbolValue, (const BYTE *)src, srcSize, (U32 *)workSpace)); -+ if (largest == srcSize) { -+ *ostart = ((const BYTE *)src)[0]; -+ return 1; -+ } /* single symbol, rle */ -+ if (largest <= (srcSize >> 7) + 1) -+ return 0; /* Fast heuristic : not compressible enough */ -+ } -+ -+ /* Check validity of previous table */ -+ if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) { -+ *repeat = HUF_repeat_none; -+ } -+ /* Heuristic : use existing table for small inputs */ -+ if (preferRepeat && repeat && *repeat != HUF_repeat_none) { -+ return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); -+ } -+ -+ /* Build Huffman Tree */ -+ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); -+ { -+ CHECK_V_F(maxBits, HUF_buildCTable_wksp(CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize)); -+ huffLog = (U32)maxBits; -+ /* Zero the unused symbols so we can check it for validity */ -+ memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt)); -+ } -+ -+ /* Write table description header */ -+ { -+ CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, CTable, maxSymbolValue, huffLog, workSpace, wkspSize)); -+ /* Check if using the previous table will be beneficial */ -+ if (repeat && *repeat != HUF_repeat_none) { -+ size_t const oldSize = 
HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue); -+ size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue); -+ if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { -+ return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); -+ } -+ } -+ /* Use the new table */ -+ if (hSize + 12ul >= srcSize) { -+ return 0; -+ } -+ op += hSize; -+ if (repeat) { -+ *repeat = HUF_repeat_none; -+ } -+ if (oldHufTable) { -+ memcpy(oldHufTable, CTable, CTableSize); -+ } /* Save the new table */ -+ } -+ return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable); -+} -+ -+size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace, -+ size_t wkspSize) -+{ -+ return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0); -+} -+ -+size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace, -+ size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat) -+{ -+ return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat, -+ preferRepeat); -+} -+ -+size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace, -+ size_t wkspSize) -+{ -+ return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0); -+} -+ -+size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace, -+ size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat) -+{ -+ return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat, -+ preferRepeat); -+} -diff --git a/lib/zstd/huf_decompress.c b/lib/zstd/huf_decompress.c -new file mode 100644 -index 0000000..6526482 ---- /dev/null -+++ b/lib/zstd/huf_decompress.c -@@ -0,0 +1,960 @@ -+/* -+ * Huffman decoder, part of New Generation Entropy library -+ * Copyright (C) 2013-2016, Yann Collet. -+ * -+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are -+ * met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above -+ * copyright notice, this list of conditions and the following disclaimer -+ * in the documentation and/or other materials provided with the -+ * distribution. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
-diff --git a/lib/zstd/huf_decompress.c b/lib/zstd/huf_decompress.c
-new file mode 100644
-index 0000000..6526482
---- /dev/null
-+++ b/lib/zstd/huf_decompress.c
-@@ -0,0 +1,960 @@
-+/*
-+ * Huffman decoder, part of New Generation Entropy library
-+ * Copyright (C) 2013-2016, Yann Collet.
-+ *
-+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are
-+ * met:
-+ *
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above
-+ * copyright notice, this list of conditions and the following disclaimer
-+ * in the documentation and/or other materials provided with the
-+ * distribution.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU General Public License version 2 as published by the
-+ * Free Software Foundation. This program is dual-licensed; you may select
-+ * either version 2 of the GNU General Public License ("GPL") or BSD license
-+ * ("BSD").
-+ *
-+ * You can contact the author at :
-+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-+ */
-+
-+/* **************************************************************
-+* Compiler specifics
-+****************************************************************/
-+#define FORCE_INLINE static __always_inline
-+
-+/* **************************************************************
-+* Dependencies
-+****************************************************************/
-+#include "bitstream.h" /* BIT_* */
-+#include "fse.h" /* header compression */
-+#include "huf.h"
-+#include <linux/compiler.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h> /* memcpy, memset */
-+
-+/* **************************************************************
-+* Error Management
-+****************************************************************/
-+#define HUF_STATIC_ASSERT(c) \
-+    { \
-+        enum { HUF_static_assert = 1 / (int)(!!(c)) }; \
-+    } /* use only *after* variable declarations */
-+
-+/*-***************************/
-+/* generic DTableDesc */
-+/*-***************************/
-+
-+typedef struct {
-+    BYTE maxTableLog;
-+    BYTE tableType;
-+    BYTE tableLog;
-+    BYTE reserved;
-+} DTableDesc;
-+
-+static DTableDesc HUF_getDTableDesc(const HUF_DTable *table)
-+{
-+    DTableDesc dtd;
-+    memcpy(&dtd, table, sizeof(dtd));
-+    return dtd;
-+}
-+
-+/*-***************************/
-+/* single-symbol decoding */
-+/*-***************************/
-+
-+typedef struct {
-+    BYTE byte;
-+    BYTE nbBits;
-+} HUF_DEltX2; /* single-symbol decoding */
-+
-+size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
-+{
-+    U32 tableLog = 0;
-+    U32 nbSymbols = 0;
-+    size_t iSize;
-+    void *const dtPtr = DTable + 1;
-+    HUF_DEltX2 *const dt = (HUF_DEltX2 *)dtPtr;
-+
-+    U32 *rankVal;
-+    BYTE *huffWeight;
-+    size_t spaceUsed32 = 0;
-+
-+    rankVal = (U32 *)workspace + spaceUsed32;
-+    spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
-+    huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
-+    spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
-+
-+    if ((spaceUsed32 << 2) > workspaceSize)
-+        return ERROR(tableLog_tooLarge);
-+    workspace = (U32 *)workspace + spaceUsed32;
-+    workspaceSize -= (spaceUsed32 << 2);
-+
-+    HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
-+    /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzers complain ...
*/ -+ -+ iSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize); -+ if (HUF_isError(iSize)) -+ return iSize; -+ -+ /* Table header */ -+ { -+ DTableDesc dtd = HUF_getDTableDesc(DTable); -+ if (tableLog > (U32)(dtd.maxTableLog + 1)) -+ return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */ -+ dtd.tableType = 0; -+ dtd.tableLog = (BYTE)tableLog; -+ memcpy(DTable, &dtd, sizeof(dtd)); -+ } -+ -+ /* Calculate starting value for each rank */ -+ { -+ U32 n, nextRankStart = 0; -+ for (n = 1; n < tableLog + 1; n++) { -+ U32 const curr = nextRankStart; -+ nextRankStart += (rankVal[n] << (n - 1)); -+ rankVal[n] = curr; -+ } -+ } -+ -+ /* fill DTable */ -+ { -+ U32 n; -+ for (n = 0; n < nbSymbols; n++) { -+ U32 const w = huffWeight[n]; -+ U32 const length = (1 << w) >> 1; -+ U32 u; -+ HUF_DEltX2 D; -+ D.byte = (BYTE)n; -+ D.nbBits = (BYTE)(tableLog + 1 - w); -+ for (u = rankVal[w]; u < rankVal[w] + length; u++) -+ dt[u] = D; -+ rankVal[w] += length; -+ } -+ } -+ -+ return iSize; -+} -+ -+static BYTE HUF_decodeSymbolX2(BIT_DStream_t *Dstream, const HUF_DEltX2 *dt, const U32 dtLog) -+{ -+ size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ -+ BYTE const c = dt[val].byte; -+ BIT_skipBits(Dstream, dt[val].nbBits); -+ return c; -+} -+ -+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog) -+ -+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ -+ if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \ -+ HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) -+ -+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ -+ if (ZSTD_64bits()) \ -+ HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) -+ -+FORCE_INLINE size_t HUF_decodeStreamX2(BYTE *p, BIT_DStream_t *const bitDPtr, BYTE *const pEnd, const HUF_DEltX2 *const dt, const U32 dtLog) -+{ -+ BYTE *const pStart = p; -+ -+ /* up to 4 symbols at a time */ -+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd - 4)) { -+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr); -+ HUF_DECODE_SYMBOLX2_1(p, bitDPtr); -+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr); -+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr); -+ } -+ -+ /* closer to the end */ -+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd)) -+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr); -+ -+ /* no more data to retrieve from bitstream, hence no need to reload */ -+ while (p < pEnd) -+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr); -+ -+ return pEnd - pStart; -+} -+ -+static size_t HUF_decompress1X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -+{ -+ BYTE *op = (BYTE *)dst; -+ BYTE *const oend = op + dstSize; -+ const void *dtPtr = DTable + 1; -+ const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr; -+ BIT_DStream_t bitD; -+ DTableDesc const dtd = HUF_getDTableDesc(DTable); -+ U32 const dtLog = dtd.tableLog; -+ -+ { -+ size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); -+ if (HUF_isError(errorCode)) -+ return errorCode; -+ } -+ -+ HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog); -+ -+ /* check */ -+ if (!BIT_endOfDStream(&bitD)) -+ return ERROR(corruption_detected); -+ -+ return dstSize; -+} -+ -+size_t HUF_decompress1X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -+{ -+ DTableDesc dtd = HUF_getDTableDesc(DTable); -+ if (dtd.tableType != 0) -+ return ERROR(GENERIC); -+ return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, 
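
/*
 * Worked example (illustrative, not from the patch) for the single-symbol
 * table filled above: with tableLog = 3 the table has 2^3 = 8 entries.  A
 * symbol of weight w decodes in nbBits = tableLog + 1 - w bits and owns
 * (1 << w) >> 1 = 2^(w-1) consecutive entries, so every 3-bit lookahead
 * starting with its code resolves to it.  E.g. weights {3, 2, 1, 1} give
 * 4 + 2 + 1 + 1 = 8 entries (nbBits 1, 2, 3, 3), exactly filling the
 * table -- the Kraft equality implied by the weight encoding.
 */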
DTable); -+} -+ -+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) -+{ -+ const BYTE *ip = (const BYTE *)cSrc; -+ -+ size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize); -+ if (HUF_isError(hSize)) -+ return hSize; -+ if (hSize >= cSrcSize) -+ return ERROR(srcSize_wrong); -+ ip += hSize; -+ cSrcSize -= hSize; -+ -+ return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx); -+} -+ -+static size_t HUF_decompress4X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -+{ -+ /* Check */ -+ if (cSrcSize < 10) -+ return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ -+ -+ { -+ const BYTE *const istart = (const BYTE *)cSrc; -+ BYTE *const ostart = (BYTE *)dst; -+ BYTE *const oend = ostart + dstSize; -+ const void *const dtPtr = DTable + 1; -+ const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr; -+ -+ /* Init */ -+ BIT_DStream_t bitD1; -+ BIT_DStream_t bitD2; -+ BIT_DStream_t bitD3; -+ BIT_DStream_t bitD4; -+ size_t const length1 = ZSTD_readLE16(istart); -+ size_t const length2 = ZSTD_readLE16(istart + 2); -+ size_t const length3 = ZSTD_readLE16(istart + 4); -+ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); -+ const BYTE *const istart1 = istart + 6; /* jumpTable */ -+ const BYTE *const istart2 = istart1 + length1; -+ const BYTE *const istart3 = istart2 + length2; -+ const BYTE *const istart4 = istart3 + length3; -+ const size_t segmentSize = (dstSize + 3) / 4; -+ BYTE *const opStart2 = ostart + segmentSize; -+ BYTE *const opStart3 = opStart2 + segmentSize; -+ BYTE *const opStart4 = opStart3 + segmentSize; -+ BYTE *op1 = ostart; -+ BYTE *op2 = opStart2; -+ BYTE *op3 = opStart3; -+ BYTE *op4 = opStart4; -+ U32 endSignal; -+ DTableDesc const dtd = HUF_getDTableDesc(DTable); -+ U32 const dtLog = dtd.tableLog; -+ -+ if (length4 > cSrcSize) -+ return ERROR(corruption_detected); /* overflow */ -+ { -+ size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1); -+ if (HUF_isError(errorCode)) -+ return errorCode; -+ } -+ { -+ size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2); -+ if (HUF_isError(errorCode)) -+ return errorCode; -+ } -+ { -+ size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3); -+ if (HUF_isError(errorCode)) -+ return errorCode; -+ } -+ { -+ size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4); -+ if (HUF_isError(errorCode)) -+ return errorCode; -+ } -+ -+ /* 16-32 symbols per loop (4-8 symbols per stream) */ -+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); -+ for (; (endSignal == BIT_DStream_unfinished) && (op4 < (oend - 7));) { -+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1); -+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2); -+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3); -+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4); -+ HUF_DECODE_SYMBOLX2_1(op1, &bitD1); -+ HUF_DECODE_SYMBOLX2_1(op2, &bitD2); -+ HUF_DECODE_SYMBOLX2_1(op3, &bitD3); -+ HUF_DECODE_SYMBOLX2_1(op4, &bitD4); -+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1); -+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2); -+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3); -+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4); -+ HUF_DECODE_SYMBOLX2_0(op1, &bitD1); -+ HUF_DECODE_SYMBOLX2_0(op2, &bitD2); -+ HUF_DECODE_SYMBOLX2_0(op3, &bitD3); -+ HUF_DECODE_SYMBOLX2_0(op4, &bitD4); -+ endSignal = BIT_reloadDStream(&bitD1) | 
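
/*
 * Illustrative sketch (helper name hypothetical) of the framing consumed
 * by the 4-stream loop above: a 6-byte jump table of three little-endian
 * U16 stream sizes, then the four bitstreams back to back; the fourth
 * size is implied by the block size.
 */
static size_t huf_stream4_size(const BYTE *istart, size_t cSrcSize)
{
    size_t const length1 = ZSTD_readLE16(istart);
    size_t const length2 = ZSTD_readLE16(istart + 2);
    size_t const length3 = ZSTD_readLE16(istart + 4);
    /* same arithmetic as the decoder above; the caller must still check
     * the result against cSrcSize, since corrupt sizes can overflow */
    return cSrcSize - (length1 + length2 + length3 + 6);
}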
BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); -+ } -+ -+ /* check corruption */ -+ if (op1 > opStart2) -+ return ERROR(corruption_detected); -+ if (op2 > opStart3) -+ return ERROR(corruption_detected); -+ if (op3 > opStart4) -+ return ERROR(corruption_detected); -+ /* note : op4 supposed already verified within main loop */ -+ -+ /* finish bitStreams one by one */ -+ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); -+ HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); -+ HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); -+ HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); -+ -+ /* check */ -+ endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); -+ if (!endSignal) -+ return ERROR(corruption_detected); -+ -+ /* decoded size */ -+ return dstSize; -+ } -+} -+ -+size_t HUF_decompress4X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -+{ -+ DTableDesc dtd = HUF_getDTableDesc(DTable); -+ if (dtd.tableType != 0) -+ return ERROR(GENERIC); -+ return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); -+} -+ -+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) -+{ -+ const BYTE *ip = (const BYTE *)cSrc; -+ -+ size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize); -+ if (HUF_isError(hSize)) -+ return hSize; -+ if (hSize >= cSrcSize) -+ return ERROR(srcSize_wrong); -+ ip += hSize; -+ cSrcSize -= hSize; -+ -+ return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx); -+} -+ -+/* *************************/ -+/* double-symbols decoding */ -+/* *************************/ -+typedef struct { -+ U16 sequence; -+ BYTE nbBits; -+ BYTE length; -+} HUF_DEltX4; /* double-symbols decoding */ -+ -+typedef struct { -+ BYTE symbol; -+ BYTE weight; -+} sortedSymbol_t; -+ -+/* HUF_fillDTableX4Level2() : -+ * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ -+static void HUF_fillDTableX4Level2(HUF_DEltX4 *DTable, U32 sizeLog, const U32 consumed, const U32 *rankValOrigin, const int minWeight, -+ const sortedSymbol_t *sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq) -+{ -+ HUF_DEltX4 DElt; -+ U32 rankVal[HUF_TABLELOG_MAX + 1]; -+ -+ /* get pre-calculated rankVal */ -+ memcpy(rankVal, rankValOrigin, sizeof(rankVal)); -+ -+ /* fill skipped values */ -+ if (minWeight > 1) { -+ U32 i, skipSize = rankVal[minWeight]; -+ ZSTD_writeLE16(&(DElt.sequence), baseSeq); -+ DElt.nbBits = (BYTE)(consumed); -+ DElt.length = 1; -+ for (i = 0; i < skipSize; i++) -+ DTable[i] = DElt; -+ } -+ -+ /* fill DTable */ -+ { -+ U32 s; -+ for (s = 0; s < sortedListSize; s++) { /* note : sortedSymbols already skipped */ -+ const U32 symbol = sortedSymbols[s].symbol; -+ const U32 weight = sortedSymbols[s].weight; -+ const U32 nbBits = nbBitsBaseline - weight; -+ const U32 length = 1 << (sizeLog - nbBits); -+ const U32 start = rankVal[weight]; -+ U32 i = start; -+ const U32 end = start + length; -+ -+ ZSTD_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8))); -+ DElt.nbBits = (BYTE)(nbBits + consumed); -+ DElt.length = 2; -+ do { -+ DTable[i++] = DElt; -+ } while (i < end); /* since length >= 1 */ -+ -+ rankVal[weight] += length; -+ } -+ } -+} -+ -+typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1]; -+typedef U32 
rankValCol_t[HUF_TABLELOG_MAX + 1]; -+ -+static void HUF_fillDTableX4(HUF_DEltX4 *DTable, const U32 targetLog, const sortedSymbol_t *sortedList, const U32 sortedListSize, const U32 *rankStart, -+ rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) -+{ -+ U32 rankVal[HUF_TABLELOG_MAX + 1]; -+ const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ -+ const U32 minBits = nbBitsBaseline - maxWeight; -+ U32 s; -+ -+ memcpy(rankVal, rankValOrigin, sizeof(rankVal)); -+ -+ /* fill DTable */ -+ for (s = 0; s < sortedListSize; s++) { -+ const U16 symbol = sortedList[s].symbol; -+ const U32 weight = sortedList[s].weight; -+ const U32 nbBits = nbBitsBaseline - weight; -+ const U32 start = rankVal[weight]; -+ const U32 length = 1 << (targetLog - nbBits); -+ -+ if (targetLog - nbBits >= minBits) { /* enough room for a second symbol */ -+ U32 sortedRank; -+ int minWeight = nbBits + scaleLog; -+ if (minWeight < 1) -+ minWeight = 1; -+ sortedRank = rankStart[minWeight]; -+ HUF_fillDTableX4Level2(DTable + start, targetLog - nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList + sortedRank, -+ sortedListSize - sortedRank, nbBitsBaseline, symbol); -+ } else { -+ HUF_DEltX4 DElt; -+ ZSTD_writeLE16(&(DElt.sequence), symbol); -+ DElt.nbBits = (BYTE)(nbBits); -+ DElt.length = 1; -+ { -+ U32 const end = start + length; -+ U32 u; -+ for (u = start; u < end; u++) -+ DTable[u] = DElt; -+ } -+ } -+ rankVal[weight] += length; -+ } -+} -+ -+size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize) -+{ -+ U32 tableLog, maxW, sizeOfSort, nbSymbols; -+ DTableDesc dtd = HUF_getDTableDesc(DTable); -+ U32 const maxTableLog = dtd.maxTableLog; -+ size_t iSize; -+ void *dtPtr = DTable + 1; /* force compiler to avoid strict-aliasing */ -+ HUF_DEltX4 *const dt = (HUF_DEltX4 *)dtPtr; -+ U32 *rankStart; -+ -+ rankValCol_t *rankVal; -+ U32 *rankStats; -+ U32 *rankStart0; -+ sortedSymbol_t *sortedSymbol; -+ BYTE *weightList; -+ size_t spaceUsed32 = 0; -+ -+ HUF_STATIC_ASSERT((sizeof(rankValCol_t) & 3) == 0); -+ -+ rankVal = (rankValCol_t *)((U32 *)workspace + spaceUsed32); -+ spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2; -+ rankStats = (U32 *)workspace + spaceUsed32; -+ spaceUsed32 += HUF_TABLELOG_MAX + 1; -+ rankStart0 = (U32 *)workspace + spaceUsed32; -+ spaceUsed32 += HUF_TABLELOG_MAX + 2; -+ sortedSymbol = (sortedSymbol_t *)((U32 *)workspace + spaceUsed32); -+ spaceUsed32 += ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2; -+ weightList = (BYTE *)((U32 *)workspace + spaceUsed32); -+ spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; -+ -+ if ((spaceUsed32 << 2) > workspaceSize) -+ return ERROR(tableLog_tooLarge); -+ workspace = (U32 *)workspace + spaceUsed32; -+ workspaceSize -= (spaceUsed32 << 2); -+ -+ rankStart = rankStart0 + 1; -+ memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1)); -+ -+ HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */ -+ if (maxTableLog > HUF_TABLELOG_MAX) -+ return ERROR(tableLog_tooLarge); -+ /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... 
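
/*
 * Illustrative sketch (names hypothetical) of the workspace carving used
 * by HUF_readDTableX4_wksp above, reduced to its core: keep a running
 * count of U32 slots, round byte-sized tables up to U32 granularity, and
 * fail before touching memory if the caller's buffer is too small.
 */
static size_t carve_example(void *workspace, size_t workspaceSize)
{
    size_t spaceUsed32 = 0;
    U32 *const stats = (U32 *)workspace + spaceUsed32;
    BYTE *weights;

    spaceUsed32 += HUF_TABLELOG_MAX + 1;                             /* U32-sized table */
    weights = (BYTE *)((U32 *)workspace + spaceUsed32);
    spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; /* byte table, rounded up */

    if ((spaceUsed32 << 2) > workspaceSize) /* << 2 converts U32 slots to bytes */
        return ERROR(tableLog_tooLarge);
    (void)stats;
    (void)weights;
    return spaceUsed32 << 2; /* bytes consumed */
}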
*/ -+ -+ iSize = HUF_readStats_wksp(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize); -+ if (HUF_isError(iSize)) -+ return iSize; -+ -+ /* check result */ -+ if (tableLog > maxTableLog) -+ return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ -+ -+ /* find maxWeight */ -+ for (maxW = tableLog; rankStats[maxW] == 0; maxW--) { -+ } /* necessarily finds a solution before 0 */ -+ -+ /* Get start index of each weight */ -+ { -+ U32 w, nextRankStart = 0; -+ for (w = 1; w < maxW + 1; w++) { -+ U32 curr = nextRankStart; -+ nextRankStart += rankStats[w]; -+ rankStart[w] = curr; -+ } -+ rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ -+ sizeOfSort = nextRankStart; -+ } -+ -+ /* sort symbols by weight */ -+ { -+ U32 s; -+ for (s = 0; s < nbSymbols; s++) { -+ U32 const w = weightList[s]; -+ U32 const r = rankStart[w]++; -+ sortedSymbol[r].symbol = (BYTE)s; -+ sortedSymbol[r].weight = (BYTE)w; -+ } -+ rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */ -+ } -+ -+ /* Build rankVal */ -+ { -+ U32 *const rankVal0 = rankVal[0]; -+ { -+ int const rescale = (maxTableLog - tableLog) - 1; /* tableLog <= maxTableLog */ -+ U32 nextRankVal = 0; -+ U32 w; -+ for (w = 1; w < maxW + 1; w++) { -+ U32 curr = nextRankVal; -+ nextRankVal += rankStats[w] << (w + rescale); -+ rankVal0[w] = curr; -+ } -+ } -+ { -+ U32 const minBits = tableLog + 1 - maxW; -+ U32 consumed; -+ for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) { -+ U32 *const rankValPtr = rankVal[consumed]; -+ U32 w; -+ for (w = 1; w < maxW + 1; w++) { -+ rankValPtr[w] = rankVal0[w] >> consumed; -+ } -+ } -+ } -+ } -+ -+ HUF_fillDTableX4(dt, maxTableLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog + 1); -+ -+ dtd.tableLog = (BYTE)maxTableLog; -+ dtd.tableType = 1; -+ memcpy(DTable, &dtd, sizeof(dtd)); -+ return iSize; -+} -+ -+static U32 HUF_decodeSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog) -+{ -+ size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ -+ memcpy(op, dt + val, 2); -+ BIT_skipBits(DStream, dt[val].nbBits); -+ return dt[val].length; -+} -+ -+static U32 HUF_decodeLastSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog) -+{ -+ size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ -+ memcpy(op, dt + val, 1); -+ if (dt[val].length == 1) -+ BIT_skipBits(DStream, dt[val].nbBits); -+ else { -+ if (DStream->bitsConsumed < (sizeof(DStream->bitContainer) * 8)) { -+ BIT_skipBits(DStream, dt[val].nbBits); -+ if (DStream->bitsConsumed > (sizeof(DStream->bitContainer) * 8)) -+ /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */ -+ DStream->bitsConsumed = (sizeof(DStream->bitContainer) * 8); -+ } -+ } -+ return 1; -+} -+ -+#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) -+ -+#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ -+ if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \ -+ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) -+ -+#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ -+ if (ZSTD_64bits()) \ -+ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) -+ -+FORCE_INLINE size_t HUF_decodeStreamX4(BYTE *p, BIT_DStream_t *bitDPtr, BYTE *const pEnd, const HUF_DEltX4 *const dt, const U32 dtLog) -+{ -+ BYTE *const pStart = p; -+ -+ /* up to 8 symbols at a time */ -+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd - (sizeof(bitDPtr->bitContainer) - 1))) { -+ HUF_DECODE_SYMBOLX4_2(p, bitDPtr); -+ HUF_DECODE_SYMBOLX4_1(p, bitDPtr); -+ HUF_DECODE_SYMBOLX4_2(p, bitDPtr); -+ HUF_DECODE_SYMBOLX4_0(p, bitDPtr); -+ } -+ -+ /* closer to end : up to 2 symbols at a time */ -+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd - 2)) -+ HUF_DECODE_SYMBOLX4_0(p, bitDPtr); -+ -+ while (p <= pEnd - 2) -+ HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ -+ -+ if (p < pEnd) -+ p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); -+ -+ return p - pStart; -+} -+ -+static size_t HUF_decompress1X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -+{ -+ BIT_DStream_t bitD; -+ -+ /* Init */ -+ { -+ size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); -+ if (HUF_isError(errorCode)) -+ return errorCode; -+ } -+ -+ /* decode */ -+ { -+ BYTE *const ostart = (BYTE *)dst; -+ BYTE *const oend = ostart + dstSize; -+ const void *const dtPtr = DTable + 1; /* force compiler to not use strict-aliasing */ -+ const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr; -+ DTableDesc const dtd = HUF_getDTableDesc(DTable); -+ HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog); -+ } -+ -+ /* check */ -+ if (!BIT_endOfDStream(&bitD)) -+ return ERROR(corruption_detected); -+ -+ /* decoded size */ -+ return dstSize; -+} -+ -+size_t HUF_decompress1X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -+{ -+ DTableDesc dtd = HUF_getDTableDesc(DTable); -+ if (dtd.tableType != 1) -+ return ERROR(GENERIC); -+ return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); -+} -+ -+size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) -+{ -+ const BYTE *ip = (const BYTE *)cSrc; -+ -+ size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize); -+ if (HUF_isError(hSize)) -+ return hSize; -+ if (hSize >= cSrcSize) -+ return ERROR(srcSize_wrong); -+ ip += hSize; -+ cSrcSize -= hSize; -+ -+ return HUF_decompress1X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx); -+} -+ -+static size_t HUF_decompress4X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -+{ -+ if (cSrcSize < 10) -+ return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ -+ -+ { -+ const BYTE *const istart = (const BYTE *)cSrc; -+ BYTE *const ostart = (BYTE *)dst; -+ BYTE *const oend = ostart + dstSize; -+ const void *const dtPtr = 
DTable + 1; -+ const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr; -+ -+ /* Init */ -+ BIT_DStream_t bitD1; -+ BIT_DStream_t bitD2; -+ BIT_DStream_t bitD3; -+ BIT_DStream_t bitD4; -+ size_t const length1 = ZSTD_readLE16(istart); -+ size_t const length2 = ZSTD_readLE16(istart + 2); -+ size_t const length3 = ZSTD_readLE16(istart + 4); -+ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); -+ const BYTE *const istart1 = istart + 6; /* jumpTable */ -+ const BYTE *const istart2 = istart1 + length1; -+ const BYTE *const istart3 = istart2 + length2; -+ const BYTE *const istart4 = istart3 + length3; -+ size_t const segmentSize = (dstSize + 3) / 4; -+ BYTE *const opStart2 = ostart + segmentSize; -+ BYTE *const opStart3 = opStart2 + segmentSize; -+ BYTE *const opStart4 = opStart3 + segmentSize; -+ BYTE *op1 = ostart; -+ BYTE *op2 = opStart2; -+ BYTE *op3 = opStart3; -+ BYTE *op4 = opStart4; -+ U32 endSignal; -+ DTableDesc const dtd = HUF_getDTableDesc(DTable); -+ U32 const dtLog = dtd.tableLog; -+ -+ if (length4 > cSrcSize) -+ return ERROR(corruption_detected); /* overflow */ -+ { -+ size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1); -+ if (HUF_isError(errorCode)) -+ return errorCode; -+ } -+ { -+ size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2); -+ if (HUF_isError(errorCode)) -+ return errorCode; -+ } -+ { -+ size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3); -+ if (HUF_isError(errorCode)) -+ return errorCode; -+ } -+ { -+ size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4); -+ if (HUF_isError(errorCode)) -+ return errorCode; -+ } -+ -+ /* 16-32 symbols per loop (4-8 symbols per stream) */ -+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); -+ for (; (endSignal == BIT_DStream_unfinished) & (op4 < (oend - (sizeof(bitD4.bitContainer) - 1)));) { -+ HUF_DECODE_SYMBOLX4_2(op1, &bitD1); -+ HUF_DECODE_SYMBOLX4_2(op2, &bitD2); -+ HUF_DECODE_SYMBOLX4_2(op3, &bitD3); -+ HUF_DECODE_SYMBOLX4_2(op4, &bitD4); -+ HUF_DECODE_SYMBOLX4_1(op1, &bitD1); -+ HUF_DECODE_SYMBOLX4_1(op2, &bitD2); -+ HUF_DECODE_SYMBOLX4_1(op3, &bitD3); -+ HUF_DECODE_SYMBOLX4_1(op4, &bitD4); -+ HUF_DECODE_SYMBOLX4_2(op1, &bitD1); -+ HUF_DECODE_SYMBOLX4_2(op2, &bitD2); -+ HUF_DECODE_SYMBOLX4_2(op3, &bitD3); -+ HUF_DECODE_SYMBOLX4_2(op4, &bitD4); -+ HUF_DECODE_SYMBOLX4_0(op1, &bitD1); -+ HUF_DECODE_SYMBOLX4_0(op2, &bitD2); -+ HUF_DECODE_SYMBOLX4_0(op3, &bitD3); -+ HUF_DECODE_SYMBOLX4_0(op4, &bitD4); -+ -+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); -+ } -+ -+ /* check corruption */ -+ if (op1 > opStart2) -+ return ERROR(corruption_detected); -+ if (op2 > opStart3) -+ return ERROR(corruption_detected); -+ if (op3 > opStart4) -+ return ERROR(corruption_detected); -+ /* note : op4 already verified within main loop */ -+ -+ /* finish bitStreams one by one */ -+ HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); -+ HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); -+ HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); -+ HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); -+ -+ /* check */ -+ { -+ U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); -+ if (!endCheck) -+ return ERROR(corruption_detected); -+ } -+ -+ /* decoded size */ -+ return dstSize; -+ } -+} -+ -+size_t HUF_decompress4X4_usingDTable(void *dst, size_t dstSize, 
const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -+{ -+ DTableDesc dtd = HUF_getDTableDesc(DTable); -+ if (dtd.tableType != 1) -+ return ERROR(GENERIC); -+ return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); -+} -+ -+size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) -+{ -+ const BYTE *ip = (const BYTE *)cSrc; -+ -+ size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize); -+ if (HUF_isError(hSize)) -+ return hSize; -+ if (hSize >= cSrcSize) -+ return ERROR(srcSize_wrong); -+ ip += hSize; -+ cSrcSize -= hSize; -+ -+ return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx); -+} -+ -+/* ********************************/ -+/* Generic decompression selector */ -+/* ********************************/ -+ -+size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -+{ -+ DTableDesc const dtd = HUF_getDTableDesc(DTable); -+ return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) -+ : HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable); -+} -+ -+size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -+{ -+ DTableDesc const dtd = HUF_getDTableDesc(DTable); -+ return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) -+ : HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable); -+} -+ -+typedef struct { -+ U32 tableTime; -+ U32 decode256Time; -+} algo_time_t; -+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = { -+ /* single, double, quad */ -+ {{0, 0}, {1, 1}, {2, 2}}, /* Q==0 : impossible */ -+ {{0, 0}, {1, 1}, {2, 2}}, /* Q==1 : impossible */ -+ {{38, 130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ -+ {{448, 128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ -+ {{556, 128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ -+ {{714, 128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ -+ {{883, 128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ -+ {{897, 128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ -+ {{926, 128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ -+ {{947, 128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ -+ {{1107, 128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ -+ {{1177, 128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ -+ {{1242, 128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ -+ {{1349, 128}, {2644, 106}, {5260, 106}}, /* Q ==13 : 81-87% */ -+ {{1455, 128}, {2422, 124}, {4174, 124}}, /* Q ==14 : 87-93% */ -+ {{722, 128}, {1891, 145}, {1936, 146}}, /* Q ==15 : 93-99% */ -+}; -+ -+/** HUF_selectDecoder() : -+* Tells which decoder is likely to decode faster, -+* based on a set of pre-determined metrics. -+* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 . 
-+* Assumption : 0 < cSrcSize < dstSize <= 128 KB */ -+U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize) -+{ -+ /* decoder timing evaluation */ -+ U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */ -+ U32 const D256 = (U32)(dstSize >> 8); -+ U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256); -+ U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256); -+ DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */ -+ -+ return DTime1 < DTime0; -+} -+ -+typedef size_t (*decompressionAlgo)(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize); -+ -+size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) -+{ -+ /* validation checks */ -+ if (dstSize == 0) -+ return ERROR(dstSize_tooSmall); -+ if (cSrcSize > dstSize) -+ return ERROR(corruption_detected); /* invalid */ -+ if (cSrcSize == dstSize) { -+ memcpy(dst, cSrc, dstSize); -+ return dstSize; -+ } /* not compressed */ -+ if (cSrcSize == 1) { -+ memset(dst, *(const BYTE *)cSrc, dstSize); -+ return dstSize; -+ } /* RLE */ -+ -+ { -+ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -+ return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize) -+ : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize); -+ } -+} -+ -+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) -+{ -+ /* validation checks */ -+ if (dstSize == 0) -+ return ERROR(dstSize_tooSmall); -+ if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) -+ return ERROR(corruption_detected); /* invalid */ -+ -+ { -+ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -+ return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize) -+ : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize); -+ } -+} -+ -+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) -+{ -+ /* validation checks */ -+ if (dstSize == 0) -+ return ERROR(dstSize_tooSmall); -+ if (cSrcSize > dstSize) -+ return ERROR(corruption_detected); /* invalid */ -+ if (cSrcSize == dstSize) { -+ memcpy(dst, cSrc, dstSize); -+ return dstSize; -+ } /* not compressed */ -+ if (cSrcSize == 1) { -+ memset(dst, *(const BYTE *)cSrc, dstSize); -+ return dstSize; -+ } /* RLE */ -+ -+ { -+ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -+ return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize) -+ : HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize); -+ } -+} -diff --git a/lib/zstd/mem.h b/lib/zstd/mem.h -new file mode 100644 -index 0000000..42a697b ---- /dev/null -+++ b/lib/zstd/mem.h -@@ -0,0 +1,149 @@ -+/** -+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. -+ * All rights reserved. -+ * -+ * This source code is licensed under the BSD-style license found in the -+ * LICENSE file in the root directory of https://github.com/facebook/zstd. -+ * -+ * This program is free software; you can redistribute it and/or modify it under -+ * the terms of the GNU General Public License version 2 as published by the -+ * Free Software Foundation. 
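
/*
 * Worked example (illustrative, not from the patch) for HUF_selectDecoder
 * above, taking a 64 KB block that compressed to 32 KB:
 * Q = 32768 * 16 / 65536 = 8 and D256 = 65536 >> 8 = 256, so
 * DTime0 = 926 + 128 * 256 = 33694 and DTime1 = 1613 + 75 * 256 = 20813,
 * plus the 1/8 memory penalty (2601) giving 23414.  Since 23414 < 33694,
 * the double-symbol (X4) decoder is selected.
 */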
This program is dual-licensed; you may select
-+ * either version 2 of the GNU General Public License ("GPL") or BSD license
-+ * ("BSD").
-+ */
-+
-+#ifndef MEM_H_MODULE
-+#define MEM_H_MODULE
-+
-+/*-****************************************
-+* Dependencies
-+******************************************/
-+#include <asm/unaligned.h>
-+#include <linux/string.h> /* memcpy */
-+#include <linux/types.h> /* size_t, ptrdiff_t */
-+
-+/*-****************************************
-+* Compiler specifics
-+******************************************/
-+#define ZSTD_STATIC static __inline __attribute__((unused))
-+
-+/*-**************************************************************
-+* Basic Types
-+*****************************************************************/
-+typedef uint8_t BYTE;
-+typedef uint16_t U16;
-+typedef int16_t S16;
-+typedef uint32_t U32;
-+typedef int32_t S32;
-+typedef uint64_t U64;
-+typedef int64_t S64;
-+typedef ptrdiff_t iPtrDiff;
-+typedef uintptr_t uPtrDiff;
-+
-+/*-**************************************************************
-+* Memory I/O
-+*****************************************************************/
-+ZSTD_STATIC unsigned ZSTD_32bits(void) { return sizeof(size_t) == 4; }
-+ZSTD_STATIC unsigned ZSTD_64bits(void) { return sizeof(size_t) == 8; }
-+
-+#if defined(__LITTLE_ENDIAN)
-+#define ZSTD_LITTLE_ENDIAN 1
-+#else
-+#define ZSTD_LITTLE_ENDIAN 0
-+#endif
-+
-+ZSTD_STATIC unsigned ZSTD_isLittleEndian(void) { return ZSTD_LITTLE_ENDIAN; }
-+
-+ZSTD_STATIC U16 ZSTD_read16(const void *memPtr) { return get_unaligned((const U16 *)memPtr); }
-+
-+ZSTD_STATIC U32 ZSTD_read32(const void *memPtr) { return get_unaligned((const U32 *)memPtr); }
-+
-+ZSTD_STATIC U64 ZSTD_read64(const void *memPtr) { return get_unaligned((const U64 *)memPtr); }
-+
-+ZSTD_STATIC size_t ZSTD_readST(const void *memPtr) { return get_unaligned((const size_t *)memPtr); }
-+
-+ZSTD_STATIC void ZSTD_write16(void *memPtr, U16 value) { put_unaligned(value, (U16 *)memPtr); }
-+
-+ZSTD_STATIC void ZSTD_write32(void *memPtr, U32 value) { put_unaligned(value, (U32 *)memPtr); }
-+
-+ZSTD_STATIC void ZSTD_write64(void *memPtr, U64 value) { put_unaligned(value, (U64 *)memPtr); }
-+
-+/*=== Little endian r/w ===*/
-+
-+ZSTD_STATIC U16 ZSTD_readLE16(const void *memPtr) { return get_unaligned_le16(memPtr); }
-+
-+ZSTD_STATIC void ZSTD_writeLE16(void *memPtr, U16 val) { put_unaligned_le16(val, memPtr); }
-+
-+ZSTD_STATIC U32 ZSTD_readLE24(const void *memPtr) { return ZSTD_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16); }
-+
-+ZSTD_STATIC void ZSTD_writeLE24(void *memPtr, U32 val)
-+{
-+    ZSTD_writeLE16(memPtr, (U16)val);
-+    ((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
-+}
-+
-+ZSTD_STATIC U32 ZSTD_readLE32(const void *memPtr) { return get_unaligned_le32(memPtr); }
-+
-+ZSTD_STATIC void ZSTD_writeLE32(void *memPtr, U32 val32) { put_unaligned_le32(val32, memPtr); }
-+
-+ZSTD_STATIC U64 ZSTD_readLE64(const void *memPtr) { return get_unaligned_le64(memPtr); }
-+
-+ZSTD_STATIC void ZSTD_writeLE64(void *memPtr, U64 val64) { put_unaligned_le64(val64, memPtr); }
-+
-+ZSTD_STATIC size_t ZSTD_readLEST(const void *memPtr)
-+{
-+    if (ZSTD_32bits())
-+        return (size_t)ZSTD_readLE32(memPtr);
-+    else
-+        return (size_t)ZSTD_readLE64(memPtr);
-+}
-+
-+ZSTD_STATIC void ZSTD_writeLEST(void *memPtr, size_t val)
-+{
-+    if (ZSTD_32bits())
-+        ZSTD_writeLE32(memPtr, (U32)val);
-+    else
-+        ZSTD_writeLE64(memPtr, (U64)val);
-+}
-+
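
/*
 * Illustrative round-trip of the 3-byte little-endian helpers above
 * (function name hypothetical): ZSTD_writeLE24 stores the low 16 bits via
 * ZSTD_writeLE16 and the top byte separately, so for 0x00ABCDEF the bytes
 * land as { 0xEF, 0xCD, 0xAB } and read back intact.
 */
static int zstd_le24_roundtrip_ok(void)
{
    BYTE buf[3];
    U32 const v = 0x00ABCDEFu; /* any 24-bit value */

    ZSTD_writeLE24(buf, v);
    return ZSTD_readLE24(buf) == v;
}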
-+/*=== Big endian r/w ===*/
-+
-+ZSTD_STATIC U32 ZSTD_readBE32(const void *memPtr) { return get_unaligned_be32(memPtr); }
-+
-+ZSTD_STATIC void ZSTD_writeBE32(void *memPtr, U32 val32) { put_unaligned_be32(val32, memPtr); }
-+
-+ZSTD_STATIC U64 ZSTD_readBE64(const void *memPtr) { return get_unaligned_be64(memPtr); }
-+
-+ZSTD_STATIC void ZSTD_writeBE64(void *memPtr, U64 val64) { put_unaligned_be64(val64, memPtr); }
-+
-+ZSTD_STATIC size_t ZSTD_readBEST(const void *memPtr)
-+{
-+    if (ZSTD_32bits())
-+        return (size_t)ZSTD_readBE32(memPtr);
-+    else
-+        return (size_t)ZSTD_readBE64(memPtr);
-+}
-+
-+ZSTD_STATIC void ZSTD_writeBEST(void *memPtr, size_t val)
-+{
-+    if (ZSTD_32bits())
-+        ZSTD_writeBE32(memPtr, (U32)val);
-+    else
-+        ZSTD_writeBE64(memPtr, (U64)val);
-+}
-+
-+/* function safe only for comparisons */
-+ZSTD_STATIC U32 ZSTD_readMINMATCH(const void *memPtr, U32 length)
-+{
-+    switch (length) {
-+    default:
-+    case 4: return ZSTD_read32(memPtr);
-+    case 3:
-+        if (ZSTD_isLittleEndian())
-+            return ZSTD_read32(memPtr) << 8;
-+        else
-+            return ZSTD_read32(memPtr) >> 8;
-+    }
-+}
-+
-+#endif /* MEM_H_MODULE */
-diff --git a/lib/zstd/zstd_common.c b/lib/zstd/zstd_common.c
-new file mode 100644
-index 0000000..e5f06d7
---- /dev/null
-+++ b/lib/zstd/zstd_common.c
-@@ -0,0 +1,73 @@
-+/**
-+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
-+ * All rights reserved.
-+ *
-+ * This source code is licensed under the BSD-style license found in the
-+ * LICENSE file in the root directory of https://github.com/facebook/zstd.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU General Public License version 2 as published by the
-+ * Free Software Foundation. This program is dual-licensed; you may select
-+ * either version 2 of the GNU General Public License ("GPL") or BSD license
-+ * ("BSD").
-+ */
-+
-+/*-*************************************
-+* Dependencies
-+***************************************/
-+#include "error_private.h"
-+#include "zstd_internal.h" /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */
-+#include <linux/kernel.h>
-+
-+/*=**************************************************************
-+* Custom allocator
-+****************************************************************/
-+
-+#define stack_push(stack, size)                                 \
-+    ({                                                      \
-+        void *const ptr = ZSTD_PTR_ALIGN((stack)->ptr); \
-+        (stack)->ptr = (char *)ptr + (size);            \
-+        (stack)->ptr <= (stack)->end ? ptr : NULL;      \
-+    })
-+
-+ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize)
-+{
-+    ZSTD_customMem stackMem = {ZSTD_stackAlloc, ZSTD_stackFree, workspace};
-+    ZSTD_stack *stack = (ZSTD_stack *)workspace;
-+    /* Verify preconditions */
-+    if (!workspace || workspaceSize < sizeof(ZSTD_stack) || workspace != ZSTD_PTR_ALIGN(workspace)) {
-+        ZSTD_customMem error = {NULL, NULL, NULL};
-+        return error;
-+    }
-+    /* Initialize the stack */
-+    stack->ptr = workspace;
-+    stack->end = (char *)workspace + workspaceSize;
-+    stack_push(stack, sizeof(ZSTD_stack));
-+    return stackMem;
-+}
-+
-+void *ZSTD_stackAllocAll(void *opaque, size_t *size)
-+{
-+    ZSTD_stack *stack = (ZSTD_stack *)opaque;
-+    *size = (BYTE const *)stack->end - (BYTE *)ZSTD_PTR_ALIGN(stack->ptr);
-+    return stack_push(stack, *size);
-+}
-+
-+void *ZSTD_stackAlloc(void *opaque, size_t size)
-+{
-+    ZSTD_stack *stack = (ZSTD_stack *)opaque;
-+    return stack_push(stack, size);
-+}
-+void ZSTD_stackFree(void *opaque, void *address)
-+{
-+    (void)opaque;
-+    (void)address;
-+}
-+
-+void *ZSTD_malloc(size_t size, ZSTD_customMem customMem) { return customMem.customAlloc(customMem.opaque, size); }
-+
-+void ZSTD_free(void *ptr, ZSTD_customMem customMem)
-+{
-+    if (ptr != NULL)
-+        customMem.customFree(customMem.opaque, ptr);
-+}
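
/*
 * Illustrative sketch (local names hypothetical) of driving the stack
 * "allocator" above: every allocation bumps an aligned pointer inside one
 * caller-supplied buffer, and ZSTD_stackFree is a no-op, so teardown is
 * just discarding the buffer.
 */
static void stack_alloc_demo(void)
{
    static char workspace[4096] __aligned(sizeof(size_t));
    ZSTD_customMem const mem = ZSTD_initStack(workspace, sizeof(workspace));

    if (mem.customAlloc) {                   /* NULL customAlloc signals a bad workspace */
        void *a = ZSTD_malloc(128, mem);     /* carved from workspace */
        void *b = ZSTD_malloc(256, mem);     /* placed right after a, aligned */
        ZSTD_free(a, mem);                   /* no-op by design: the stack never shrinks */
        (void)b;
    }
}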
-diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h
-new file mode 100644
-index 0000000..a0fb83e
---- /dev/null
-+++ b/lib/zstd/zstd_internal.h
-@@ -0,0 +1,261 @@
-+/**
-+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
-+ * All rights reserved.
-+ *
-+ * This source code is licensed under the BSD-style license found in the
-+ * LICENSE file in the root directory of https://github.com/facebook/zstd.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU General Public License version 2 as published by the
-+ * Free Software Foundation. This program is dual-licensed; you may select
-+ * either version 2 of the GNU General Public License ("GPL") or BSD license
-+ * ("BSD").
-+ */
-+
-+#ifndef ZSTD_CCOMMON_H_MODULE
-+#define ZSTD_CCOMMON_H_MODULE
-+
-+/*-*******************************************************
-+* Compiler specifics
-+*********************************************************/
-+#define FORCE_INLINE static __always_inline
-+#define FORCE_NOINLINE static noinline
-+
-+/*-*************************************
-+* Dependencies
-+***************************************/
-+#include "error_private.h"
-+#include "mem.h"
-+#include <linux/compiler.h>
-+#include <linux/kernel.h>
-+#include <linux/xxhash.h>
-+#include <linux/zstd.h>
-+
-+/*-*************************************
-+* shared macros
-+***************************************/
-+#define MIN(a, b) ((a) < (b) ? (a) : (b))
-+#define MAX(a, b) ((a) > (b) ?
(a) : (b)) -+#define CHECK_F(f) \ -+ { \ -+ size_t const errcod = f; \ -+ if (ERR_isError(errcod)) \ -+ return errcod; \ -+ } /* check and Forward error code */ -+#define CHECK_E(f, e) \ -+ { \ -+ size_t const errcod = f; \ -+ if (ERR_isError(errcod)) \ -+ return ERROR(e); \ -+ } /* check and send Error code */ -+#define ZSTD_STATIC_ASSERT(c) \ -+ { \ -+ enum { ZSTD_static_assert = 1 / (int)(!!(c)) }; \ -+ } -+ -+/*-************************************* -+* Common constants -+***************************************/ -+#define ZSTD_OPT_NUM (1 << 12) -+#define ZSTD_DICT_MAGIC 0xEC30A437 /* v0.7+ */ -+ -+#define ZSTD_REP_NUM 3 /* number of repcodes */ -+#define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */ -+#define ZSTD_REP_MOVE (ZSTD_REP_NUM - 1) -+#define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM) -+static const U32 repStartValue[ZSTD_REP_NUM] = {1, 4, 8}; -+ -+#define KB *(1 << 10) -+#define MB *(1 << 20) -+#define GB *(1U << 30) -+ -+#define BIT7 128 -+#define BIT6 64 -+#define BIT5 32 -+#define BIT4 16 -+#define BIT1 2 -+#define BIT0 1 -+ -+#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10 -+static const size_t ZSTD_fcs_fieldSize[4] = {0, 2, 4, 8}; -+static const size_t ZSTD_did_fieldSize[4] = {0, 1, 2, 4}; -+ -+#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */ -+static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE; -+typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; -+ -+#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ -+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ -+ -+#define HufLog 12 -+typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; -+ -+#define LONGNBSEQ 0x7F00 -+ -+#define MINMATCH 3 -+#define EQUAL_READ32 4 -+ -+#define Litbits 8 -+#define MaxLit ((1 << Litbits) - 1) -+#define MaxML 52 -+#define MaxLL 35 -+#define MaxOff 28 -+#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */ -+#define MLFSELog 9 -+#define LLFSELog 9 -+#define OffFSELog 8 -+ -+static const U32 LL_bits[MaxLL + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; -+static const S16 LL_defaultNorm[MaxLL + 1] = {4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, -1, -1, -1, -1}; -+#define LL_DEFAULTNORMLOG 6 /* for static allocation */ -+static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG; -+ -+static const U32 ML_bits[MaxML + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; -+static const S16 ML_defaultNorm[MaxML + 1] = {1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1}; -+#define ML_DEFAULTNORMLOG 6 /* for static allocation */ -+static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG; -+ -+static const S16 OF_defaultNorm[MaxOff + 1] = {1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}; -+#define OF_DEFAULTNORMLOG 5 /* for static allocation */ -+static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG; -+ -+/*-******************************************* -+* Shared functions to include for inlining -+*********************************************/ 
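
/*
 * Illustrative sketch (callee and caller are hypothetical) of the
 * CHECK_F / CHECK_E macros defined earlier in this header, which carry
 * zstd's size_t-as-error-code convention through call chains.
 */
static size_t bounded_copy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
    if (srcSize > dstCapacity)
        return ERROR(dstSize_tooSmall);
    memcpy(dst, src, srcSize);
    return srcSize;
}

static size_t copy_twice(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
    CHECK_F(bounded_copy(dst, dstCapacity, src, srcSize));          /* forward any error as-is */
    CHECK_E(bounded_copy(dst, dstCapacity, src, srcSize), GENERIC); /* or remap it */
    return srcSize;
}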
-+ZSTD_STATIC void ZSTD_copy8(void *dst, const void *src) { -+ memcpy(dst, src, 8); -+} -+/*! ZSTD_wildcopy() : -+* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */ -+#define WILDCOPY_OVERLENGTH 8 -+ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length) -+{ -+ const BYTE* ip = (const BYTE*)src; -+ BYTE* op = (BYTE*)dst; -+ BYTE* const oend = op + length; -+ /* Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388. -+ * Avoid the bad case where the loop only runs once by handling the -+ * special case separately. This doesn't trigger the bug because it -+ * doesn't involve pointer/integer overflow. -+ */ -+ if (length <= 8) -+ return ZSTD_copy8(dst, src); -+ do { -+ ZSTD_copy8(op, ip); -+ op += 8; -+ ip += 8; -+ } while (op < oend); -+} -+ -+/*-******************************************* -+* Private interfaces -+*********************************************/ -+typedef struct ZSTD_stats_s ZSTD_stats_t; -+ -+typedef struct { -+ U32 off; -+ U32 len; -+} ZSTD_match_t; -+ -+typedef struct { -+ U32 price; -+ U32 off; -+ U32 mlen; -+ U32 litlen; -+ U32 rep[ZSTD_REP_NUM]; -+} ZSTD_optimal_t; -+ -+typedef struct seqDef_s { -+ U32 offset; -+ U16 litLength; -+ U16 matchLength; -+} seqDef; -+ -+typedef struct { -+ seqDef *sequencesStart; -+ seqDef *sequences; -+ BYTE *litStart; -+ BYTE *lit; -+ BYTE *llCode; -+ BYTE *mlCode; -+ BYTE *ofCode; -+ U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */ -+ U32 longLengthPos; -+ /* opt */ -+ ZSTD_optimal_t *priceTable; -+ ZSTD_match_t *matchTable; -+ U32 *matchLengthFreq; -+ U32 *litLengthFreq; -+ U32 *litFreq; -+ U32 *offCodeFreq; -+ U32 matchLengthSum; -+ U32 matchSum; -+ U32 litLengthSum; -+ U32 litSum; -+ U32 offCodeSum; -+ U32 log2matchLengthSum; -+ U32 log2matchSum; -+ U32 log2litLengthSum; -+ U32 log2litSum; -+ U32 log2offCodeSum; -+ U32 factor; -+ U32 staticPrices; -+ U32 cachedPrice; -+ U32 cachedLitLength; -+ const BYTE *cachedLiterals; -+} seqStore_t; -+ -+const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx); -+void ZSTD_seqToCodes(const seqStore_t *seqStorePtr); -+int ZSTD_isSkipFrame(ZSTD_DCtx *dctx); -+ -+/*= Custom memory allocation functions */ -+typedef void *(*ZSTD_allocFunction)(void *opaque, size_t size); -+typedef void (*ZSTD_freeFunction)(void *opaque, void *address); -+typedef struct { -+ ZSTD_allocFunction customAlloc; -+ ZSTD_freeFunction customFree; -+ void *opaque; -+} ZSTD_customMem; -+ -+void *ZSTD_malloc(size_t size, ZSTD_customMem customMem); -+void ZSTD_free(void *ptr, ZSTD_customMem customMem); -+ -+/*====== stack allocation ======*/ -+ -+typedef struct { -+ void *ptr; -+ const void *end; -+} ZSTD_stack; -+ -+#define ZSTD_ALIGN(x) ALIGN(x, sizeof(size_t)) -+#define ZSTD_PTR_ALIGN(p) PTR_ALIGN(p, sizeof(size_t)) -+ -+ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize); -+ -+void *ZSTD_stackAllocAll(void *opaque, size_t *size); -+void *ZSTD_stackAlloc(void *opaque, size_t size); -+void ZSTD_stackFree(void *opaque, void *address); -+ -+/*====== common function ======*/ -+ -+ZSTD_STATIC U32 ZSTD_highbit32(U32 val) { return 31 - __builtin_clz(val); } -+ -+/* hidden functions */ -+ -+/* ZSTD_invalidateRepCodes() : -+ * ensures next compression will not use repcodes from previous block. -+ * Note : only works with regular variant; -+ * do not use with extDict variant ! 
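
/*
 * Illustrative note on ZSTD_wildcopy above: it may write up to
 * WILDCOPY_OVERLENGTH (8) bytes past dst + length, so destinations must
 * reserve that slack.  A hypothetical guard at a buffer tail:
 */
static void copy_with_slack(BYTE *op, BYTE *const oend, const BYTE *src, size_t length)
{
    if (op + length + WILDCOPY_OVERLENGTH <= oend)
        ZSTD_wildcopy(op, src, (ptrdiff_t)length); /* fast path; may scribble into the slack */
    else
        memcpy(op, src, length);                   /* exact copy near the end of the buffer */
}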
*/ -+void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx); -+ -+size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx); -+size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx); -+size_t ZSTD_freeCDict(ZSTD_CDict *cdict); -+size_t ZSTD_freeDDict(ZSTD_DDict *cdict); -+size_t ZSTD_freeCStream(ZSTD_CStream *zcs); -+size_t ZSTD_freeDStream(ZSTD_DStream *zds); -+ -+#endif /* ZSTD_CCOMMON_H_MODULE */ -diff --git a/lib/zstd/zstd_opt.h b/lib/zstd/zstd_opt.h -new file mode 100644 -index 0000000..ecdd725 ---- /dev/null -+++ b/lib/zstd/zstd_opt.h -@@ -0,0 +1,1012 @@ -+/** -+ * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. -+ * All rights reserved. -+ * -+ * This source code is licensed under the BSD-style license found in the -+ * LICENSE file in the root directory of https://github.com/facebook/zstd. -+ * -+ * This program is free software; you can redistribute it and/or modify it under -+ * the terms of the GNU General Public License version 2 as published by the -+ * Free Software Foundation. This program is dual-licensed; you may select -+ * either version 2 of the GNU General Public License ("GPL") or BSD license -+ * ("BSD"). -+ */ -+ -+/* Note : this file is intended to be included within zstd_compress.c */ -+ -+#ifndef ZSTD_OPT_H_91842398743 -+#define ZSTD_OPT_H_91842398743 -+ -+#define ZSTD_LITFREQ_ADD 2 -+#define ZSTD_FREQ_DIV 4 -+#define ZSTD_MAX_PRICE (1 << 30) -+ -+/*-************************************* -+* Price functions for optimal parser -+***************************************/ -+FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t *ssPtr) -+{ -+ ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum + 1); -+ ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum + 1); -+ ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum + 1); -+ ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum + 1); -+ ssPtr->factor = 1 + ((ssPtr->litSum >> 5) / ssPtr->litLengthSum) + ((ssPtr->litSum << 1) / (ssPtr->litSum + ssPtr->matchSum)); -+} -+ -+ZSTD_STATIC void ZSTD_rescaleFreqs(seqStore_t *ssPtr, const BYTE *src, size_t srcSize) -+{ -+ unsigned u; -+ -+ ssPtr->cachedLiterals = NULL; -+ ssPtr->cachedPrice = ssPtr->cachedLitLength = 0; -+ ssPtr->staticPrices = 0; -+ -+ if (ssPtr->litLengthSum == 0) { -+ if (srcSize <= 1024) -+ ssPtr->staticPrices = 1; -+ -+ for (u = 0; u <= MaxLit; u++) -+ ssPtr->litFreq[u] = 0; -+ for (u = 0; u < srcSize; u++) -+ ssPtr->litFreq[src[u]]++; -+ -+ ssPtr->litSum = 0; -+ ssPtr->litLengthSum = MaxLL + 1; -+ ssPtr->matchLengthSum = MaxML + 1; -+ ssPtr->offCodeSum = (MaxOff + 1); -+ ssPtr->matchSum = (ZSTD_LITFREQ_ADD << Litbits); -+ -+ for (u = 0; u <= MaxLit; u++) { -+ ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> ZSTD_FREQ_DIV); -+ ssPtr->litSum += ssPtr->litFreq[u]; -+ } -+ for (u = 0; u <= MaxLL; u++) -+ ssPtr->litLengthFreq[u] = 1; -+ for (u = 0; u <= MaxML; u++) -+ ssPtr->matchLengthFreq[u] = 1; -+ for (u = 0; u <= MaxOff; u++) -+ ssPtr->offCodeFreq[u] = 1; -+ } else { -+ ssPtr->matchLengthSum = 0; -+ ssPtr->litLengthSum = 0; -+ ssPtr->offCodeSum = 0; -+ ssPtr->matchSum = 0; -+ ssPtr->litSum = 0; -+ -+ for (u = 0; u <= MaxLit; u++) { -+ ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> (ZSTD_FREQ_DIV + 1)); -+ ssPtr->litSum += ssPtr->litFreq[u]; -+ } -+ for (u = 0; u <= MaxLL; u++) { -+ ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u] >> (ZSTD_FREQ_DIV + 1)); -+ ssPtr->litLengthSum += ssPtr->litLengthFreq[u]; -+ } -+ for (u = 0; u <= MaxML; u++) { -+ ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u] >> ZSTD_FREQ_DIV); -+ 
ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u]; -+ ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3); -+ } -+ ssPtr->matchSum *= ZSTD_LITFREQ_ADD; -+ for (u = 0; u <= MaxOff; u++) { -+ ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u] >> ZSTD_FREQ_DIV); -+ ssPtr->offCodeSum += ssPtr->offCodeFreq[u]; -+ } -+ } -+ -+ ZSTD_setLog2Prices(ssPtr); -+} -+ -+FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t *ssPtr, U32 litLength, const BYTE *literals) -+{ -+ U32 price, u; -+ -+ if (ssPtr->staticPrices) -+ return ZSTD_highbit32((U32)litLength + 1) + (litLength * 6); -+ -+ if (litLength == 0) -+ return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0] + 1); -+ -+ /* literals */ -+ if (ssPtr->cachedLiterals == literals) { -+ U32 const additional = litLength - ssPtr->cachedLitLength; -+ const BYTE *literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength; -+ price = ssPtr->cachedPrice + additional * ssPtr->log2litSum; -+ for (u = 0; u < additional; u++) -+ price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]] + 1); -+ ssPtr->cachedPrice = price; -+ ssPtr->cachedLitLength = litLength; -+ } else { -+ price = litLength * ssPtr->log2litSum; -+ for (u = 0; u < litLength; u++) -+ price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]] + 1); -+ -+ if (litLength >= 12) { -+ ssPtr->cachedLiterals = literals; -+ ssPtr->cachedPrice = price; -+ ssPtr->cachedLitLength = litLength; -+ } -+ } -+ -+ /* literal Length */ -+ { -+ const BYTE LL_deltaCode = 19; -+ const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; -+ price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode] + 1); -+ } -+ -+ return price; -+} -+ -+FORCE_INLINE U32 ZSTD_getPrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength, const int ultra) -+{ -+ /* offset */ -+ U32 price; -+ BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1); -+ -+ if (seqStorePtr->staticPrices) -+ return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength + 1) + 16 + offCode; -+ -+ price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode] + 1); -+ if (!ultra && offCode >= 20) -+ price += (offCode - 19) * 2; -+ -+ /* match Length */ -+ { -+ const BYTE ML_deltaCode = 36; -+ const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; -+ price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode] + 1); -+ } -+ -+ return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor; -+} -+ -+ZSTD_STATIC void ZSTD_updatePrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength) -+{ -+ U32 u; -+ -+ /* literals */ -+ seqStorePtr->litSum += litLength * ZSTD_LITFREQ_ADD; -+ for (u = 0; u < litLength; u++) -+ seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD; -+ -+ /* literal Length */ -+ { -+ const BYTE LL_deltaCode = 19; -+ const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; -+ seqStorePtr->litLengthFreq[llCode]++; -+ seqStorePtr->litLengthSum++; -+ } -+ -+ /* match offset */ -+ { -+ BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1); -+ seqStorePtr->offCodeSum++; -+ seqStorePtr->offCodeFreq[offCode]++; -+ } -+ -+ /* match Length */ -+ { -+ const BYTE ML_deltaCode = 36; -+ const BYTE mlCode = (matchLength > 127) ? 
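
/*
 * Worked example (illustrative, not from the patch) for the price model
 * above: a symbol's cost is approximated in whole bits as
 * log2(totalFreq) - log2(symbolFreq), with ZSTD_highbit32 as the integer
 * log2.  With litSum = 4096 and litFreq[c] = 15, literal c costs
 * highbit32(4096) - highbit32(15 + 1) = 12 - 4 = 8 bits, i.e. about
 * -log2(16/4096), a Shannon-style estimate of its entropy.
 */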
(BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; -+ seqStorePtr->matchLengthFreq[mlCode]++; -+ seqStorePtr->matchLengthSum++; -+ } -+ -+ ZSTD_setLog2Prices(seqStorePtr); -+} -+ -+#define SET_PRICE(pos, mlen_, offset_, litlen_, price_) \ -+ { \ -+ while (last_pos < pos) { \ -+ opt[last_pos + 1].price = ZSTD_MAX_PRICE; \ -+ last_pos++; \ -+ } \ -+ opt[pos].mlen = mlen_; \ -+ opt[pos].off = offset_; \ -+ opt[pos].litlen = litlen_; \ -+ opt[pos].price = price_; \ -+ } -+ -+/* Update hashTable3 up to ip (excluded) -+ Assumption : always within prefix (i.e. not within extDict) */ -+FORCE_INLINE -+U32 ZSTD_insertAndFindFirstIndexHash3(ZSTD_CCtx *zc, const BYTE *ip) -+{ -+ U32 *const hashTable3 = zc->hashTable3; -+ U32 const hashLog3 = zc->hashLog3; -+ const BYTE *const base = zc->base; -+ U32 idx = zc->nextToUpdate3; -+ const U32 target = zc->nextToUpdate3 = (U32)(ip - base); -+ const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3); -+ -+ while (idx < target) { -+ hashTable3[ZSTD_hash3Ptr(base + idx, hashLog3)] = idx; -+ idx++; -+ } -+ -+ return hashTable3[hash3]; -+} -+ -+/*-************************************* -+* Binary Tree search -+***************************************/ -+static U32 ZSTD_insertBtAndGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, U32 nbCompares, const U32 mls, U32 extDict, -+ ZSTD_match_t *matches, const U32 minMatchLen) -+{ -+ const BYTE *const base = zc->base; -+ const U32 curr = (U32)(ip - base); -+ const U32 hashLog = zc->params.cParams.hashLog; -+ const size_t h = ZSTD_hashPtr(ip, hashLog, mls); -+ U32 *const hashTable = zc->hashTable; -+ U32 matchIndex = hashTable[h]; -+ U32 *const bt = zc->chainTable; -+ const U32 btLog = zc->params.cParams.chainLog - 1; -+ const U32 btMask = (1U << btLog) - 1; -+ size_t commonLengthSmaller = 0, commonLengthLarger = 0; -+ const BYTE *const dictBase = zc->dictBase; -+ const U32 dictLimit = zc->dictLimit; -+ const BYTE *const dictEnd = dictBase + dictLimit; -+ const BYTE *const prefixStart = base + dictLimit; -+ const U32 btLow = btMask >= curr ? 0 : curr - btMask; -+ const U32 windowLow = zc->lowLimit; -+ U32 *smallerPtr = bt + 2 * (curr & btMask); -+ U32 *largerPtr = bt + 2 * (curr & btMask) + 1; -+ U32 matchEndIdx = curr + 8; -+ U32 dummy32; /* to be nullified at the end */ -+ U32 mnum = 0; -+ -+ const U32 minMatch = (mls == 3) ? 
3 : 4; -+ size_t bestLength = minMatchLen - 1; -+ -+ if (minMatch == 3) { /* HC3 match finder */ -+ U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(zc, ip); -+ if (matchIndex3 > windowLow && (curr - matchIndex3 < (1 << 18))) { -+ const BYTE *match; -+ size_t currMl = 0; -+ if ((!extDict) || matchIndex3 >= dictLimit) { -+ match = base + matchIndex3; -+ if (match[bestLength] == ip[bestLength]) -+ currMl = ZSTD_count(ip, match, iLimit); -+ } else { -+ match = dictBase + matchIndex3; -+ if (ZSTD_readMINMATCH(match, MINMATCH) == -+ ZSTD_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */ -+ currMl = ZSTD_count_2segments(ip + MINMATCH, match + MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH; -+ } -+ -+ /* save best solution */ -+ if (currMl > bestLength) { -+ bestLength = currMl; -+ matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex3; -+ matches[mnum].len = (U32)currMl; -+ mnum++; -+ if (currMl > ZSTD_OPT_NUM) -+ goto update; -+ if (ip + currMl == iLimit) -+ goto update; /* best possible, and avoid read overflow*/ -+ } -+ } -+ } -+ -+ hashTable[h] = curr; /* Update Hash Table */ -+ -+ while (nbCompares-- && (matchIndex > windowLow)) { -+ U32 *nextPtr = bt + 2 * (matchIndex & btMask); -+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ -+ const BYTE *match; -+ -+ if ((!extDict) || (matchIndex + matchLength >= dictLimit)) { -+ match = base + matchIndex; -+ if (match[matchLength] == ip[matchLength]) { -+ matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iLimit) + 1; -+ } -+ } else { -+ match = dictBase + matchIndex; -+ matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iLimit, dictEnd, prefixStart); -+ if (matchIndex + matchLength >= dictLimit) -+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ -+ } -+ -+ if (matchLength > bestLength) { -+ if (matchLength > matchEndIdx - matchIndex) -+ matchEndIdx = matchIndex + (U32)matchLength; -+ bestLength = matchLength; -+ matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex; -+ matches[mnum].len = (U32)matchLength; -+ mnum++; -+ if (matchLength > ZSTD_OPT_NUM) -+ break; -+ if (ip + matchLength == iLimit) /* equal : no way to know if inf or sup */ -+ break; /* drop, to guarantee consistency (miss a little bit of compression) */ -+ } -+ -+ if (match[matchLength] < ip[matchLength]) { -+ /* match is smaller than curr */ -+ *smallerPtr = matchIndex; /* update smaller idx */ -+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ -+ if (matchIndex <= btLow) { -+ smallerPtr = &dummy32; -+ break; -+ } /* beyond tree size, stop the search */ -+ smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */ -+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */ -+ } else { -+ /* match is larger than curr */ -+ *largerPtr = matchIndex; -+ commonLengthLarger = matchLength; -+ if (matchIndex <= btLow) { -+ largerPtr = &dummy32; -+ break; -+ } /* beyond tree size, stop the search */ -+ largerPtr = nextPtr; -+ matchIndex = nextPtr[0]; -+ } -+ } -+ -+ *smallerPtr = *largerPtr = 0; -+ -+update: -+ zc->nextToUpdate = (matchEndIdx > curr + 8) ? 
matchEndIdx - 8 : curr + 1; -+ return mnum; -+} -+ -+/** Tree updater, providing best match */ -+static U32 ZSTD_BtGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls, ZSTD_match_t *matches, -+ const U32 minMatchLen) -+{ -+ if (ip < zc->base + zc->nextToUpdate) -+ return 0; /* skipped area */ -+ ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls); -+ return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen); -+} -+ -+static U32 ZSTD_BtGetAllMatches_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */ -+ const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch, -+ ZSTD_match_t *matches, const U32 minMatchLen) -+{ -+ switch (matchLengthSearch) { -+ case 3: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen); -+ default: -+ case 4: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen); -+ case 5: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen); -+ case 7: -+ case 6: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen); -+ } -+} -+ -+/** Tree updater, providing best match */ -+static U32 ZSTD_BtGetAllMatches_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls, -+ ZSTD_match_t *matches, const U32 minMatchLen) -+{ -+ if (ip < zc->base + zc->nextToUpdate) -+ return 0; /* skipped area */ -+ ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls); -+ return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen); -+} -+ -+static U32 ZSTD_BtGetAllMatches_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */ -+ const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch, -+ ZSTD_match_t *matches, const U32 minMatchLen) -+{ -+ switch (matchLengthSearch) { -+ case 3: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen); -+ default: -+ case 4: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen); -+ case 5: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen); -+ case 7: -+ case 6: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen); -+ } -+} -+ -+/*-******************************* -+* Optimal parser -+*********************************/ -+FORCE_INLINE -+void ZSTD_compressBlock_opt_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra) -+{ -+ seqStore_t *seqStorePtr = &(ctx->seqStore); -+ const BYTE *const istart = (const BYTE *)src; -+ const BYTE *ip = istart; -+ const BYTE *anchor = istart; -+ const BYTE *const iend = istart + srcSize; -+ const BYTE *const ilimit = iend - 8; -+ const BYTE *const base = ctx->base; -+ const BYTE *const prefixStart = base + ctx->dictLimit; -+ -+ const U32 maxSearches = 1U << ctx->params.cParams.searchLog; -+ const U32 sufficient_len = ctx->params.cParams.targetLength; -+ const U32 mls = ctx->params.cParams.searchLength; -+ const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 
3 : 4; -+ -+ ZSTD_optimal_t *opt = seqStorePtr->priceTable; -+ ZSTD_match_t *matches = seqStorePtr->matchTable; -+ const BYTE *inr; -+ U32 offset, rep[ZSTD_REP_NUM]; -+ -+ /* init */ -+ ctx->nextToUpdate3 = ctx->nextToUpdate; -+ ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize); -+ ip += (ip == prefixStart); -+ { -+ U32 i; -+ for (i = 0; i < ZSTD_REP_NUM; i++) -+ rep[i] = ctx->rep[i]; -+ } -+ -+ /* Match Loop */ -+ while (ip < ilimit) { -+ U32 cur, match_num, last_pos, litlen, price; -+ U32 u, mlen, best_mlen, best_off, litLength; -+ memset(opt, 0, sizeof(ZSTD_optimal_t)); -+ last_pos = 0; -+ litlen = (U32)(ip - anchor); -+ -+ /* check repCode */ -+ { -+ U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor); -+ for (i = (ip == anchor); i < last_i; i++) { -+ const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i]; -+ if ((repCur > 0) && (repCur < (S32)(ip - prefixStart)) && -+ (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repCur, minMatch))) { -+ mlen = (U32)ZSTD_count(ip + minMatch, ip + minMatch - repCur, iend) + minMatch; -+ if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) { -+ best_mlen = mlen; -+ best_off = i; -+ cur = 0; -+ last_pos = 1; -+ goto _storeSequence; -+ } -+ best_off = i - (ip == anchor); -+ do { -+ price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); -+ if (mlen > last_pos || price < opt[mlen].price) -+ SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */ -+ mlen--; -+ } while (mlen >= minMatch); -+ } -+ } -+ } -+ -+ match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch); -+ -+ if (!last_pos && !match_num) { -+ ip++; -+ continue; -+ } -+ -+ if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) { -+ best_mlen = matches[match_num - 1].len; -+ best_off = matches[match_num - 1].off; -+ cur = 0; -+ last_pos = 1; -+ goto _storeSequence; -+ } -+ -+ /* set prices using matches at position = 0 */ -+ best_mlen = (last_pos) ? last_pos : minMatch; -+ for (u = 0; u < match_num; u++) { -+ mlen = (u > 0) ? 
matches[u - 1].len + 1 : best_mlen; -+ best_mlen = matches[u].len; -+ while (mlen <= best_mlen) { -+ price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra); -+ if (mlen > last_pos || price < opt[mlen].price) -+ SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */ -+ mlen++; -+ } -+ } -+ -+ if (last_pos < minMatch) { -+ ip++; -+ continue; -+ } -+ -+ /* initialize opt[0] */ -+ { -+ U32 i; -+ for (i = 0; i < ZSTD_REP_NUM; i++) -+ opt[0].rep[i] = rep[i]; -+ } -+ opt[0].mlen = 1; -+ opt[0].litlen = litlen; -+ -+ /* check further positions */ -+ for (cur = 1; cur <= last_pos; cur++) { -+ inr = ip + cur; -+ -+ if (opt[cur - 1].mlen == 1) { -+ litlen = opt[cur - 1].litlen + 1; -+ if (cur > litlen) { -+ price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen); -+ } else -+ price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor); -+ } else { -+ litlen = 1; -+ price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1); -+ } -+ -+ if (cur > last_pos || price <= opt[cur].price) -+ SET_PRICE(cur, 1, 0, litlen, price); -+ -+ if (cur == last_pos) -+ break; -+ -+ if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */ -+ continue; -+ -+ mlen = opt[cur].mlen; -+ if (opt[cur].off > ZSTD_REP_MOVE_OPT) { -+ opt[cur].rep[2] = opt[cur - mlen].rep[1]; -+ opt[cur].rep[1] = opt[cur - mlen].rep[0]; -+ opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT; -+ } else { -+ opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2]; -+ opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1]; -+ opt[cur].rep[0] = -+ ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]); -+ } -+ -+ best_mlen = minMatch; -+ { -+ U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); -+ for (i = (opt[cur].mlen != 1); i < last_i; i++) { /* check rep */ -+ const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? 
(opt[cur].rep[0] - 1) : opt[cur].rep[i]; -+ if ((repCur > 0) && (repCur < (S32)(inr - prefixStart)) && -+ (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(inr - repCur, minMatch))) { -+ mlen = (U32)ZSTD_count(inr + minMatch, inr + minMatch - repCur, iend) + minMatch; -+ -+ if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) { -+ best_mlen = mlen; -+ best_off = i; -+ last_pos = cur + 1; -+ goto _storeSequence; -+ } -+ -+ best_off = i - (opt[cur].mlen != 1); -+ if (mlen > best_mlen) -+ best_mlen = mlen; -+ -+ do { -+ if (opt[cur].mlen == 1) { -+ litlen = opt[cur].litlen; -+ if (cur > litlen) { -+ price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen, -+ best_off, mlen - MINMATCH, ultra); -+ } else -+ price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); -+ } else { -+ litlen = 0; -+ price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra); -+ } -+ -+ if (cur + mlen > last_pos || price <= opt[cur + mlen].price) -+ SET_PRICE(cur + mlen, mlen, i, litlen, price); -+ mlen--; -+ } while (mlen >= minMatch); -+ } -+ } -+ } -+ -+ match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen); -+ -+ if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) { -+ best_mlen = matches[match_num - 1].len; -+ best_off = matches[match_num - 1].off; -+ last_pos = cur + 1; -+ goto _storeSequence; -+ } -+ -+ /* set prices using matches at position = cur */ -+ for (u = 0; u < match_num; u++) { -+ mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen; -+ best_mlen = matches[u].len; -+ -+ while (mlen <= best_mlen) { -+ if (opt[cur].mlen == 1) { -+ litlen = opt[cur].litlen; -+ if (cur > litlen) -+ price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen, -+ matches[u].off - 1, mlen - MINMATCH, ultra); -+ else -+ price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra); -+ } else { -+ litlen = 0; -+ price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra); -+ } -+ -+ if (cur + mlen > last_pos || (price < opt[cur + mlen].price)) -+ SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price); -+ -+ mlen++; -+ } -+ } -+ } -+ -+ best_mlen = opt[last_pos].mlen; -+ best_off = opt[last_pos].off; -+ cur = last_pos - best_mlen; -+ -+ /* store sequence */ -+_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ -+ opt[0].mlen = 1; -+ -+ while (1) { -+ mlen = opt[cur].mlen; -+ offset = opt[cur].off; -+ opt[cur].mlen = best_mlen; -+ opt[cur].off = best_off; -+ best_mlen = mlen; -+ best_off = offset; -+ if (mlen > cur) -+ break; -+ cur -= mlen; -+ } -+ -+ for (u = 0; u <= last_pos;) { -+ u += opt[u].mlen; -+ } -+ -+ for (cur = 0; cur < last_pos;) { -+ mlen = opt[cur].mlen; -+ if (mlen == 1) { -+ ip++; -+ cur++; -+ continue; -+ } -+ offset = opt[cur].off; -+ cur += mlen; -+ litLength = (U32)(ip - anchor); -+ -+ if (offset > ZSTD_REP_MOVE_OPT) { -+ rep[2] = rep[1]; -+ rep[1] = rep[0]; -+ rep[0] = offset - ZSTD_REP_MOVE_OPT; -+ offset--; -+ } else { -+ if (offset != 0) { -+ best_off = (offset == ZSTD_REP_MOVE_OPT) ? 
(rep[0] - 1) : (rep[offset]); -+ if (offset != 1) -+ rep[2] = rep[1]; -+ rep[1] = rep[0]; -+ rep[0] = best_off; -+ } -+ if (litLength == 0) -+ offset--; -+ } -+ -+ ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH); -+ ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH); -+ anchor = ip = ip + mlen; -+ } -+ } /* for (cur=0; cur < last_pos; ) */ -+ -+ /* Save reps for next block */ -+ { -+ int i; -+ for (i = 0; i < ZSTD_REP_NUM; i++) -+ ctx->repToConfirm[i] = rep[i]; -+ } -+ -+ /* Last Literals */ -+ { -+ size_t const lastLLSize = iend - anchor; -+ memcpy(seqStorePtr->lit, anchor, lastLLSize); -+ seqStorePtr->lit += lastLLSize; -+ } -+} -+ -+FORCE_INLINE -+void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra) -+{ -+ seqStore_t *seqStorePtr = &(ctx->seqStore); -+ const BYTE *const istart = (const BYTE *)src; -+ const BYTE *ip = istart; -+ const BYTE *anchor = istart; -+ const BYTE *const iend = istart + srcSize; -+ const BYTE *const ilimit = iend - 8; -+ const BYTE *const base = ctx->base; -+ const U32 lowestIndex = ctx->lowLimit; -+ const U32 dictLimit = ctx->dictLimit; -+ const BYTE *const prefixStart = base + dictLimit; -+ const BYTE *const dictBase = ctx->dictBase; -+ const BYTE *const dictEnd = dictBase + dictLimit; -+ -+ const U32 maxSearches = 1U << ctx->params.cParams.searchLog; -+ const U32 sufficient_len = ctx->params.cParams.targetLength; -+ const U32 mls = ctx->params.cParams.searchLength; -+ const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4; -+ -+ ZSTD_optimal_t *opt = seqStorePtr->priceTable; -+ ZSTD_match_t *matches = seqStorePtr->matchTable; -+ const BYTE *inr; -+ -+ /* init */ -+ U32 offset, rep[ZSTD_REP_NUM]; -+ { -+ U32 i; -+ for (i = 0; i < ZSTD_REP_NUM; i++) -+ rep[i] = ctx->rep[i]; -+ } -+ -+ ctx->nextToUpdate3 = ctx->nextToUpdate; -+ ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize); -+ ip += (ip == prefixStart); -+ -+ /* Match Loop */ -+ while (ip < ilimit) { -+ U32 cur, match_num, last_pos, litlen, price; -+ U32 u, mlen, best_mlen, best_off, litLength; -+ U32 curr = (U32)(ip - base); -+ memset(opt, 0, sizeof(ZSTD_optimal_t)); -+ last_pos = 0; -+ opt[0].litlen = (U32)(ip - anchor); -+ -+ /* check repCode */ -+ { -+ U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor); -+ for (i = (ip == anchor); i < last_i; i++) { -+ const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i]; -+ const U32 repIndex = (U32)(curr - repCur); -+ const BYTE *const repBase = repIndex < dictLimit ? dictBase : base; -+ const BYTE *const repMatch = repBase + repIndex; -+ if ((repCur > 0 && repCur <= (S32)curr) && -+ (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ -+ && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) { -+ /* repcode detected we should take it */ -+ const BYTE *const repEnd = repIndex < dictLimit ? 
dictEnd : iend; -+ mlen = (U32)ZSTD_count_2segments(ip + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch; -+ -+ if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) { -+ best_mlen = mlen; -+ best_off = i; -+ cur = 0; -+ last_pos = 1; -+ goto _storeSequence; -+ } -+ -+ best_off = i - (ip == anchor); -+ litlen = opt[0].litlen; -+ do { -+ price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); -+ if (mlen > last_pos || price < opt[mlen].price) -+ SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */ -+ mlen--; -+ } while (mlen >= minMatch); -+ } -+ } -+ } -+ -+ match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */ -+ -+ if (!last_pos && !match_num) { -+ ip++; -+ continue; -+ } -+ -+ { -+ U32 i; -+ for (i = 0; i < ZSTD_REP_NUM; i++) -+ opt[0].rep[i] = rep[i]; -+ } -+ opt[0].mlen = 1; -+ -+ if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) { -+ best_mlen = matches[match_num - 1].len; -+ best_off = matches[match_num - 1].off; -+ cur = 0; -+ last_pos = 1; -+ goto _storeSequence; -+ } -+ -+ best_mlen = (last_pos) ? last_pos : minMatch; -+ -+ /* set prices using matches at position = 0 */ -+ for (u = 0; u < match_num; u++) { -+ mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen; -+ best_mlen = matches[u].len; -+ litlen = opt[0].litlen; -+ while (mlen <= best_mlen) { -+ price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra); -+ if (mlen > last_pos || price < opt[mlen].price) -+ SET_PRICE(mlen, mlen, matches[u].off, litlen, price); -+ mlen++; -+ } -+ } -+ -+ if (last_pos < minMatch) { -+ ip++; -+ continue; -+ } -+ -+ /* check further positions */ -+ for (cur = 1; cur <= last_pos; cur++) { -+ inr = ip + cur; -+ -+ if (opt[cur - 1].mlen == 1) { -+ litlen = opt[cur - 1].litlen + 1; -+ if (cur > litlen) { -+ price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen); -+ } else -+ price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor); -+ } else { -+ litlen = 1; -+ price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1); -+ } -+ -+ if (cur > last_pos || price <= opt[cur].price) -+ SET_PRICE(cur, 1, 0, litlen, price); -+ -+ if (cur == last_pos) -+ break; -+ -+ if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */ -+ continue; -+ -+ mlen = opt[cur].mlen; -+ if (opt[cur].off > ZSTD_REP_MOVE_OPT) { -+ opt[cur].rep[2] = opt[cur - mlen].rep[1]; -+ opt[cur].rep[1] = opt[cur - mlen].rep[0]; -+ opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT; -+ } else { -+ opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2]; -+ opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1]; -+ opt[cur].rep[0] = -+ ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]); -+ } -+ -+ best_mlen = minMatch; -+ { -+ U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); -+ for (i = (mlen != 1); i < last_i; i++) { -+ const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i]; -+ const U32 repIndex = (U32)(curr + cur - repCur); -+ const BYTE *const repBase = repIndex < dictLimit ? 
dictBase : base; -+ const BYTE *const repMatch = repBase + repIndex; -+ if ((repCur > 0 && repCur <= (S32)(curr + cur)) && -+ (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ -+ && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) { -+ /* repcode detected */ -+ const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend; -+ mlen = (U32)ZSTD_count_2segments(inr + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch; -+ -+ if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) { -+ best_mlen = mlen; -+ best_off = i; -+ last_pos = cur + 1; -+ goto _storeSequence; -+ } -+ -+ best_off = i - (opt[cur].mlen != 1); -+ if (mlen > best_mlen) -+ best_mlen = mlen; -+ -+ do { -+ if (opt[cur].mlen == 1) { -+ litlen = opt[cur].litlen; -+ if (cur > litlen) { -+ price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen, -+ best_off, mlen - MINMATCH, ultra); -+ } else -+ price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); -+ } else { -+ litlen = 0; -+ price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra); -+ } -+ -+ if (cur + mlen > last_pos || price <= opt[cur + mlen].price) -+ SET_PRICE(cur + mlen, mlen, i, litlen, price); -+ mlen--; -+ } while (mlen >= minMatch); -+ } -+ } -+ } -+ -+ match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch); -+ -+ if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) { -+ best_mlen = matches[match_num - 1].len; -+ best_off = matches[match_num - 1].off; -+ last_pos = cur + 1; -+ goto _storeSequence; -+ } -+ -+ /* set prices using matches at position = cur */ -+ for (u = 0; u < match_num; u++) { -+ mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen; -+ best_mlen = matches[u].len; -+ -+ while (mlen <= best_mlen) { -+ if (opt[cur].mlen == 1) { -+ litlen = opt[cur].litlen; -+ if (cur > litlen) -+ price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen, -+ matches[u].off - 1, mlen - MINMATCH, ultra); -+ else -+ price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra); -+ } else { -+ litlen = 0; -+ price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra); -+ } -+ -+ if (cur + mlen > last_pos || (price < opt[cur + mlen].price)) -+ SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price); -+ -+ mlen++; -+ } -+ } -+ } /* for (cur = 1; cur <= last_pos; cur++) */ -+ -+ best_mlen = opt[last_pos].mlen; -+ best_off = opt[last_pos].off; -+ cur = last_pos - best_mlen; -+ -+ /* store sequence */ -+_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ -+ opt[0].mlen = 1; -+ -+ while (1) { -+ mlen = opt[cur].mlen; -+ offset = opt[cur].off; -+ opt[cur].mlen = best_mlen; -+ opt[cur].off = best_off; -+ best_mlen = mlen; -+ best_off = offset; -+ if (mlen > cur) -+ break; -+ cur -= mlen; -+ } -+ -+ for (u = 0; u <= last_pos;) { -+ u += opt[u].mlen; -+ } -+ -+ for (cur = 0; cur < last_pos;) { -+ mlen = opt[cur].mlen; -+ if (mlen == 1) { -+ ip++; -+ cur++; -+ continue; -+ } -+ offset = opt[cur].off; -+ cur += mlen; -+ litLength = (U32)(ip - anchor); -+ -+ if (offset > ZSTD_REP_MOVE_OPT) { -+ rep[2] = rep[1]; -+ rep[1] = rep[0]; -+ rep[0] = offset - ZSTD_REP_MOVE_OPT; -+ offset--; -+ } else { -+ if (offset != 0) { -+ best_off = (offset == ZSTD_REP_MOVE_OPT) ? 
(rep[0] - 1) : (rep[offset]); -+ if (offset != 1) -+ rep[2] = rep[1]; -+ rep[1] = rep[0]; -+ rep[0] = best_off; -+ } -+ -+ if (litLength == 0) -+ offset--; -+ } -+ -+ ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH); -+ ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH); -+ anchor = ip = ip + mlen; -+ } -+ } /* for (cur=0; cur < last_pos; ) */ -+ -+ /* Save reps for next block */ -+ { -+ int i; -+ for (i = 0; i < ZSTD_REP_NUM; i++) -+ ctx->repToConfirm[i] = rep[i]; -+ } -+ -+ /* Last Literals */ -+ { -+ size_t lastLLSize = iend - anchor; -+ memcpy(seqStorePtr->lit, anchor, lastLLSize); -+ seqStorePtr->lit += lastLLSize; -+ } -+} -+ -+#endif /* ZSTD_OPT_H_91842398743 */ --- -2.9.5 diff --git a/contrib/linux-kernel/0003-btrfs-Add-zstd-support.patch b/contrib/linux-kernel/0003-btrfs-Add-zstd-support.patch deleted file mode 100644 index edc7839a061..00000000000 --- a/contrib/linux-kernel/0003-btrfs-Add-zstd-support.patch +++ /dev/null @@ -1,740 +0,0 @@ -From 8a9dddfbf6551afea73911e367dd4be64d62b9fd Mon Sep 17 00:00:00 2001 -From: Nick Terrell -Date: Mon, 17 Jul 2017 17:08:39 -0700 -Subject: [PATCH v5 3/5] btrfs: Add zstd support - -Add zstd compression and decompression support to BtrFS. zstd at its -fastest level compresses almost as well as zlib, while offering much -faster compression and decompression, approaching lzo speeds. - -I benchmarked btrfs with zstd compression against no compression, lzo -compression, and zlib compression. I benchmarked two scenarios: copying -a set of files to btrfs and then reading the files back, and copying a tarball -to btrfs, extracting it to btrfs, and then reading the extracted files. -After every operation, I call `sync` and include the sync time. -Between every pair of operations I unmount and remount the filesystem -to avoid caching. The benchmark files can be found in the upstream -zstd source repository under -`contrib/linux-kernel/{btrfs-benchmark.sh,btrfs-extract-benchmark.sh}` -[1] [2]. - -I ran the benchmarks on an Ubuntu 14.04 VM with 2 cores and 4 GiB of RAM. -The VM is running on a MacBook Pro with a 3.1 GHz Intel Core i7 processor, -16 GB of RAM, and an SSD. - -The first compression benchmark is copying 10 copies of the unzipped -Silesia corpus [3] into a BtrFS filesystem mounted with -`-o compress-force=Method`. The decompression benchmark times how long -it takes to `tar` all 10 copies into `/dev/null`. The compression ratio is -measured by comparing the output of `df` and `du`. See the benchmark file -[1] for details. I benchmarked multiple zstd compression levels, although -the patch uses zstd level 1. - -| Method | Ratio | Compression MB/s | Decompression MB/s | -|---------|-------|------------------|--------------------| -| None | 0.99 | 504 | 686 | -| lzo | 1.66 | 398 | 442 | -| zlib | 2.58 | 65 | 241 | -| zstd 1 | 2.57 | 260 | 383 | -| zstd 3 | 2.71 | 174 | 408 | -| zstd 6 | 2.87 | 70 | 398 | -| zstd 9 | 2.92 | 43 | 406 | -| zstd 12 | 2.93 | 21 | 408 | -| zstd 15 | 3.01 | 11 | 354 | - -The next benchmark first copies `linux-4.11.6.tar` [4] to btrfs. Then it -measures the compression ratio, extracts the tar, and deletes the tar. -Then it measures the compression ratio again, and `tar`s the extracted -files into `/dev/null`. See the benchmark file -[2] for details.
- -| Method | Tar Ratio | Extract Ratio | Copy (s) | Extract (s)| Read (s) | -|--------|-----------|---------------|----------|------------|----------| -| None | 0.97 | 0.78 | 0.981 | 5.501 | 8.807 | -| lzo | 2.06 | 1.38 | 1.631 | 8.458 | 8.585 | -| zlib | 3.40 | 1.86 | 7.750 | 21.544 | 11.744 | -| zstd 1 | 3.57 | 1.85 | 2.579 | 11.479 | 9.389 | - -[1] https://github.com/facebook/zstd/blob/dev/contrib/linux-kernel/btrfs-benchmark.sh -[2] https://github.com/facebook/zstd/blob/dev/contrib/linux-kernel/btrfs-extract-benchmark.sh -[3] http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia -[4] https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-4.11.6.tar.xz - -zstd source repository: https://github.com/facebook/zstd - -Signed-off-by: Nick Terrell ---- -v2 -> v3: -- Port upstream BtrFS commits e1ddce71d6, 389a6cfc2a, and 6acafd1eff -- Change default compression level for BtrFS to 3 - -v3 -> v4: -- Add missing includes, which fixes the aarch64 build -- Fix minor linter warnings - - fs/btrfs/Kconfig | 2 + - fs/btrfs/Makefile | 2 +- - fs/btrfs/compression.c | 1 + - fs/btrfs/compression.h | 6 +- - fs/btrfs/ctree.h | 1 + - fs/btrfs/disk-io.c | 2 + - fs/btrfs/ioctl.c | 6 +- - fs/btrfs/props.c | 6 + - fs/btrfs/super.c | 12 +- - fs/btrfs/sysfs.c | 2 + - fs/btrfs/zstd.c | 432 +++++++++++++++++++++++++++++++++++++++++++++ - include/uapi/linux/btrfs.h | 8 +- - 12 files changed, 468 insertions(+), 12 deletions(-) - create mode 100644 fs/btrfs/zstd.c - -diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig -index 80e9c18..a26c63b 100644 ---- a/fs/btrfs/Kconfig -+++ b/fs/btrfs/Kconfig -@@ -6,6 +6,8 @@ config BTRFS_FS - select ZLIB_DEFLATE - select LZO_COMPRESS - select LZO_DECOMPRESS -+ select ZSTD_COMPRESS -+ select ZSTD_DECOMPRESS - select RAID6_PQ - select XOR_BLOCKS - select SRCU -diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile -index 128ce17..962a95a 100644 ---- a/fs/btrfs/Makefile -+++ b/fs/btrfs/Makefile -@@ -6,7 +6,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ - transaction.o inode.o file.o tree-defrag.o \ - extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ - extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ -- export.o tree-log.o free-space-cache.o zlib.o lzo.o \ -+ export.o tree-log.o free-space-cache.o zlib.o lzo.o zstd.o \ - compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \ - reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \ - uuid-tree.o props.o hash.o free-space-tree.o -diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c -index d2ef9ac..4ff42d1 100644 ---- a/fs/btrfs/compression.c -+++ b/fs/btrfs/compression.c -@@ -704,6 +704,7 @@ static struct { - static const struct btrfs_compress_op * const btrfs_compress_op[] = { - &btrfs_zlib_compress, - &btrfs_lzo_compress, -+ &btrfs_zstd_compress, - }; - - void __init btrfs_init_compress(void) -diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h -index 87f6d33..2269e00 100644 ---- a/fs/btrfs/compression.h -+++ b/fs/btrfs/compression.h -@@ -99,8 +99,9 @@ enum btrfs_compression_type { - BTRFS_COMPRESS_NONE = 0, - BTRFS_COMPRESS_ZLIB = 1, - BTRFS_COMPRESS_LZO = 2, -- BTRFS_COMPRESS_TYPES = 2, -- BTRFS_COMPRESS_LAST = 3, -+ BTRFS_COMPRESS_ZSTD = 3, -+ BTRFS_COMPRESS_TYPES = 3, -+ BTRFS_COMPRESS_LAST = 4, - }; - - struct btrfs_compress_op { -@@ -128,5 +129,6 @@ struct btrfs_compress_op { - - extern const struct btrfs_compress_op btrfs_zlib_compress; - extern const struct btrfs_compress_op 
btrfs_lzo_compress; -+extern const struct btrfs_compress_op btrfs_zstd_compress; - - #endif -diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h -index 3f3eb7b..845d77c 100644 ---- a/fs/btrfs/ctree.h -+++ b/fs/btrfs/ctree.h -@@ -270,6 +270,7 @@ struct btrfs_super_block { - BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \ - BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \ - BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \ -+ BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD | \ - BTRFS_FEATURE_INCOMPAT_RAID56 | \ - BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \ - BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \ -diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c -index 080e2eb..04632f4 100644 ---- a/fs/btrfs/disk-io.c -+++ b/fs/btrfs/disk-io.c -@@ -2828,6 +2828,8 @@ int open_ctree(struct super_block *sb, - features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; - if (fs_info->compress_type == BTRFS_COMPRESS_LZO) - features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; -+ else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD) -+ features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD; - - if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) - btrfs_info(fs_info, "has skinny extents"); -diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c -index fa1b78c..b9963d9 100644 ---- a/fs/btrfs/ioctl.c -+++ b/fs/btrfs/ioctl.c -@@ -327,8 +327,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) - - if (fs_info->compress_type == BTRFS_COMPRESS_LZO) - comp = "lzo"; -- else -+ else if (fs_info->compress_type == BTRFS_COMPRESS_ZLIB) - comp = "zlib"; -+ else -+ comp = "zstd"; - ret = btrfs_set_prop(inode, "btrfs.compression", - comp, strlen(comp), 0); - if (ret) -@@ -1466,6 +1468,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, - - if (range->compress_type == BTRFS_COMPRESS_LZO) { - btrfs_set_fs_incompat(fs_info, COMPRESS_LZO); -+ } else if (range->compress_type == BTRFS_COMPRESS_ZSTD) { -+ btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD); - } - - ret = defrag_count; -diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c -index 4b23ae5..20631e9 100644 ---- a/fs/btrfs/props.c -+++ b/fs/btrfs/props.c -@@ -390,6 +390,8 @@ static int prop_compression_validate(const char *value, size_t len) - return 0; - else if (!strncmp("zlib", value, len)) - return 0; -+ else if (!strncmp("zstd", value, len)) -+ return 0; - - return -EINVAL; - } -@@ -412,6 +414,8 @@ static int prop_compression_apply(struct inode *inode, - type = BTRFS_COMPRESS_LZO; - else if (!strncmp("zlib", value, len)) - type = BTRFS_COMPRESS_ZLIB; -+ else if (!strncmp("zstd", value, len)) -+ type = BTRFS_COMPRESS_ZSTD; - else - return -EINVAL; - -@@ -429,6 +433,8 @@ static const char *prop_compression_extract(struct inode *inode) - return "zlib"; - case BTRFS_COMPRESS_LZO: - return "lzo"; -+ case BTRFS_COMPRESS_ZSTD: -+ return "zstd"; - } - - return NULL; -diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c -index 12540b6..c370dea 100644 ---- a/fs/btrfs/super.c -+++ b/fs/btrfs/super.c -@@ -513,6 +513,14 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, - btrfs_clear_opt(info->mount_opt, NODATASUM); - btrfs_set_fs_incompat(info, COMPRESS_LZO); - no_compress = 0; -+ } else if (strcmp(args[0].from, "zstd") == 0) { -+ compress_type = "zstd"; -+ info->compress_type = BTRFS_COMPRESS_ZSTD; -+ btrfs_set_opt(info->mount_opt, COMPRESS); -+ btrfs_clear_opt(info->mount_opt, NODATACOW); -+ btrfs_clear_opt(info->mount_opt, NODATASUM); -+ btrfs_set_fs_incompat(info, COMPRESS_ZSTD); -+ no_compress = 0; - } else if (strncmp(args[0].from, "no", 2) == 0) { - compress_type = "no"; - 
btrfs_clear_opt(info->mount_opt, COMPRESS); -@@ -1227,8 +1235,10 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) - if (btrfs_test_opt(info, COMPRESS)) { - if (info->compress_type == BTRFS_COMPRESS_ZLIB) - compress_type = "zlib"; -- else -+ else if (info->compress_type == BTRFS_COMPRESS_LZO) - compress_type = "lzo"; -+ else -+ compress_type = "zstd"; - if (btrfs_test_opt(info, FORCE_COMPRESS)) - seq_printf(seq, ",compress-force=%s", compress_type); - else -diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c -index c2d5f35..2b6d37c 100644 ---- a/fs/btrfs/sysfs.c -+++ b/fs/btrfs/sysfs.c -@@ -200,6 +200,7 @@ BTRFS_FEAT_ATTR_INCOMPAT(mixed_backref, MIXED_BACKREF); - BTRFS_FEAT_ATTR_INCOMPAT(default_subvol, DEFAULT_SUBVOL); - BTRFS_FEAT_ATTR_INCOMPAT(mixed_groups, MIXED_GROUPS); - BTRFS_FEAT_ATTR_INCOMPAT(compress_lzo, COMPRESS_LZO); -+BTRFS_FEAT_ATTR_INCOMPAT(compress_zstd, COMPRESS_ZSTD); - BTRFS_FEAT_ATTR_INCOMPAT(big_metadata, BIG_METADATA); - BTRFS_FEAT_ATTR_INCOMPAT(extended_iref, EXTENDED_IREF); - BTRFS_FEAT_ATTR_INCOMPAT(raid56, RAID56); -@@ -212,6 +213,7 @@ static struct attribute *btrfs_supported_feature_attrs[] = { - BTRFS_FEAT_ATTR_PTR(default_subvol), - BTRFS_FEAT_ATTR_PTR(mixed_groups), - BTRFS_FEAT_ATTR_PTR(compress_lzo), -+ BTRFS_FEAT_ATTR_PTR(compress_zstd), - BTRFS_FEAT_ATTR_PTR(big_metadata), - BTRFS_FEAT_ATTR_PTR(extended_iref), - BTRFS_FEAT_ATTR_PTR(raid56), -diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c -new file mode 100644 -index 0000000..607ce47 ---- /dev/null -+++ b/fs/btrfs/zstd.c -@@ -0,0 +1,432 @@ -+/* -+ * Copyright (c) 2016-present, Facebook, Inc. -+ * All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public -+ * License v2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ */ -+#include <linux/bio.h> -+#include <linux/err.h> -+#include <linux/init.h> -+#include <linux/kernel.h> -+#include <linux/mm.h> -+#include <linux/pagemap.h> -+#include <linux/refcount.h> -+#include <linux/sched.h> -+#include <linux/slab.h> -+#include <linux/zstd.h> -+#include "compression.h" -+ -+#define ZSTD_BTRFS_MAX_WINDOWLOG 17 -+#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG) -+#define ZSTD_BTRFS_DEFAULT_LEVEL 3 -+ -+static ZSTD_parameters zstd_get_btrfs_parameters(size_t src_len) -+{ -+ ZSTD_parameters params = ZSTD_getParams(ZSTD_BTRFS_DEFAULT_LEVEL, -+ src_len, 0); -+ -+ if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG) -+ params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG; -+ WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT); -+ return params; -+} -+ -+struct workspace { -+ void *mem; -+ size_t size; -+ char *buf; -+ struct list_head list; -+}; -+ -+static void zstd_free_workspace(struct list_head *ws) -+{ -+ struct workspace *workspace = list_entry(ws, struct workspace, list); -+ -+ kvfree(workspace->mem); -+ kfree(workspace->buf); -+ kfree(workspace); -+} -+ -+static struct list_head *zstd_alloc_workspace(void) -+{ -+ ZSTD_parameters params = -+ zstd_get_btrfs_parameters(ZSTD_BTRFS_MAX_INPUT); -+ struct workspace *workspace; -+ -+ workspace = kzalloc(sizeof(*workspace), GFP_KERNEL); -+ if (!workspace) -+ return ERR_PTR(-ENOMEM); -+ -+ workspace->size = max_t(size_t, -+ ZSTD_CStreamWorkspaceBound(params.cParams), -+ ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT)); -+ workspace->mem = kvmalloc(workspace->size, GFP_KERNEL); -+ workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL); -+ if (!workspace->mem || !workspace->buf) -+ goto fail; -+ -+ INIT_LIST_HEAD(&workspace->list); -+ -+ return &workspace->list; -+fail: -+ zstd_free_workspace(&workspace->list); -+ return ERR_PTR(-ENOMEM); -+} -+ -+static int zstd_compress_pages(struct list_head *ws, -+ struct address_space *mapping, -+ u64 start, -+ struct page **pages, -+ unsigned long *out_pages, -+ unsigned long *total_in, -+ unsigned long *total_out) -+{ -+ struct workspace *workspace = list_entry(ws, struct workspace, list); -+ ZSTD_CStream *stream; -+ int ret = 0; -+ int nr_pages = 0; -+ struct page *in_page = NULL; /* The current page to read */ -+ struct page *out_page = NULL; /* The current page to write to */ -+ ZSTD_inBuffer in_buf = { NULL, 0, 0 }; -+ ZSTD_outBuffer out_buf = { NULL, 0, 0 }; -+ unsigned long tot_in = 0; -+ unsigned long tot_out = 0; -+ unsigned long len = *total_out; -+ const unsigned long nr_dest_pages = *out_pages; -+ unsigned long max_out = nr_dest_pages * PAGE_SIZE; -+ ZSTD_parameters params = zstd_get_btrfs_parameters(len); -+ -+ *out_pages = 0; -+ *total_out = 0; -+ *total_in = 0; -+ -+ /* Initialize the stream */ -+ stream = ZSTD_initCStream(params, len, workspace->mem, -+ workspace->size); -+ if (!stream) { -+ pr_warn("BTRFS: ZSTD_initCStream failed\n"); -+ ret = -EIO; -+ goto out; -+ } -+ -+ /* map in the first page of input data */ -+ in_page = find_get_page(mapping, start >> PAGE_SHIFT); -+ in_buf.src = kmap(in_page); -+ in_buf.pos = 0; -+ in_buf.size = min_t(size_t, len, PAGE_SIZE); -+ -+ -+ /* Allocate and map in the output buffer */ -+ out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); -+ if (out_page == NULL) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ pages[nr_pages++] = out_page; -+ out_buf.dst = kmap(out_page); -+ out_buf.pos = 0; -+ out_buf.size = min_t(size_t, max_out, PAGE_SIZE); -+ -+ while (1) { -+ size_t ret2; -+ -+ ret2 = ZSTD_compressStream(stream, &out_buf, &in_buf); -+ if (ZSTD_isError(ret2)) { -+ pr_debug("BTRFS: ZSTD_compressStream returned %d\n", -+ ZSTD_getErrorCode(ret2)); -+ ret = -EIO; -+ goto out; -+ } -+ -+ /* Check 
to see if we are making it bigger */ -+ if (tot_in + in_buf.pos > 8192 && -+ tot_in + in_buf.pos < -+ tot_out + out_buf.pos) { -+ ret = -E2BIG; -+ goto out; -+ } -+ -+ /* We've reached the end of our output range */ -+ if (out_buf.pos >= max_out) { -+ tot_out += out_buf.pos; -+ ret = -E2BIG; -+ goto out; -+ } -+ -+ /* Check if we need more output space */ -+ if (out_buf.pos == out_buf.size) { -+ tot_out += PAGE_SIZE; -+ max_out -= PAGE_SIZE; -+ kunmap(out_page); -+ if (nr_pages == nr_dest_pages) { -+ out_page = NULL; -+ ret = -E2BIG; -+ goto out; -+ } -+ out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); -+ if (out_page == NULL) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ pages[nr_pages++] = out_page; -+ out_buf.dst = kmap(out_page); -+ out_buf.pos = 0; -+ out_buf.size = min_t(size_t, max_out, PAGE_SIZE); -+ } -+ -+ /* We've reached the end of the input */ -+ if (in_buf.pos >= len) { -+ tot_in += in_buf.pos; -+ break; -+ } -+ -+ /* Check if we need more input */ -+ if (in_buf.pos == in_buf.size) { -+ tot_in += PAGE_SIZE; -+ kunmap(in_page); -+ put_page(in_page); -+ -+ start += PAGE_SIZE; -+ len -= PAGE_SIZE; -+ in_page = find_get_page(mapping, start >> PAGE_SHIFT); -+ in_buf.src = kmap(in_page); -+ in_buf.pos = 0; -+ in_buf.size = min_t(size_t, len, PAGE_SIZE); -+ } -+ } -+ while (1) { -+ size_t ret2; -+ -+ ret2 = ZSTD_endStream(stream, &out_buf); -+ if (ZSTD_isError(ret2)) { -+ pr_debug("BTRFS: ZSTD_endStream returned %d\n", -+ ZSTD_getErrorCode(ret2)); -+ ret = -EIO; -+ goto out; -+ } -+ if (ret2 == 0) { -+ tot_out += out_buf.pos; -+ break; -+ } -+ if (out_buf.pos >= max_out) { -+ tot_out += out_buf.pos; -+ ret = -E2BIG; -+ goto out; -+ } -+ -+ tot_out += PAGE_SIZE; -+ max_out -= PAGE_SIZE; -+ kunmap(out_page); -+ if (nr_pages == nr_dest_pages) { -+ out_page = NULL; -+ ret = -E2BIG; -+ goto out; -+ } -+ out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); -+ if (out_page == NULL) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ pages[nr_pages++] = out_page; -+ out_buf.dst = kmap(out_page); -+ out_buf.pos = 0; -+ out_buf.size = min_t(size_t, max_out, PAGE_SIZE); -+ } -+ -+ if (tot_out >= tot_in) { -+ ret = -E2BIG; -+ goto out; -+ } -+ -+ ret = 0; -+ *total_in = tot_in; -+ *total_out = tot_out; -+out: -+ *out_pages = nr_pages; -+ /* Cleanup */ -+ if (in_page) { -+ kunmap(in_page); -+ put_page(in_page); -+ } -+ if (out_page) -+ kunmap(out_page); -+ return ret; -+} -+ -+static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb) -+{ -+ struct workspace *workspace = list_entry(ws, struct workspace, list); -+ struct page **pages_in = cb->compressed_pages; -+ u64 disk_start = cb->start; -+ struct bio *orig_bio = cb->orig_bio; -+ size_t srclen = cb->compressed_len; -+ ZSTD_DStream *stream; -+ int ret = 0; -+ unsigned long page_in_index = 0; -+ unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE); -+ unsigned long buf_start; -+ unsigned long total_out = 0; -+ ZSTD_inBuffer in_buf = { NULL, 0, 0 }; -+ ZSTD_outBuffer out_buf = { NULL, 0, 0 }; -+ -+ stream = ZSTD_initDStream( -+ ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size); -+ if (!stream) { -+ pr_debug("BTRFS: ZSTD_initDStream failed\n"); -+ ret = -EIO; -+ goto done; -+ } -+ -+ in_buf.src = kmap(pages_in[page_in_index]); -+ in_buf.pos = 0; -+ in_buf.size = min_t(size_t, srclen, PAGE_SIZE); -+ -+ out_buf.dst = workspace->buf; -+ out_buf.pos = 0; -+ out_buf.size = PAGE_SIZE; -+ -+ while (1) { -+ size_t ret2; -+ -+ ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf); -+ if (ZSTD_isError(ret2)) { -+ pr_debug("BTRFS: 
ZSTD_decompressStream returned %d\n", -+ ZSTD_getErrorCode(ret2)); -+ ret = -EIO; -+ goto done; -+ } -+ buf_start = total_out; -+ total_out += out_buf.pos; -+ out_buf.pos = 0; -+ -+ ret = btrfs_decompress_buf2page(out_buf.dst, buf_start, -+ total_out, disk_start, orig_bio); -+ if (ret == 0) -+ break; -+ -+ if (in_buf.pos >= srclen) -+ break; -+ -+ /* Check if we've hit the end of a frame */ -+ if (ret2 == 0) -+ break; -+ -+ if (in_buf.pos == in_buf.size) { -+ kunmap(pages_in[page_in_index++]); -+ if (page_in_index >= total_pages_in) { -+ in_buf.src = NULL; -+ ret = -EIO; -+ goto done; -+ } -+ srclen -= PAGE_SIZE; -+ in_buf.src = kmap(pages_in[page_in_index]); -+ in_buf.pos = 0; -+ in_buf.size = min_t(size_t, srclen, PAGE_SIZE); -+ } -+ } -+ ret = 0; -+ zero_fill_bio(orig_bio); -+done: -+ if (in_buf.src) -+ kunmap(pages_in[page_in_index]); -+ return ret; -+} -+ -+static int zstd_decompress(struct list_head *ws, unsigned char *data_in, -+ struct page *dest_page, -+ unsigned long start_byte, -+ size_t srclen, size_t destlen) -+{ -+ struct workspace *workspace = list_entry(ws, struct workspace, list); -+ ZSTD_DStream *stream; -+ int ret = 0; -+ size_t ret2; -+ ZSTD_inBuffer in_buf = { NULL, 0, 0 }; -+ ZSTD_outBuffer out_buf = { NULL, 0, 0 }; -+ unsigned long total_out = 0; -+ unsigned long pg_offset = 0; -+ char *kaddr; -+ -+ stream = ZSTD_initDStream( -+ ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size); -+ if (!stream) { -+ pr_warn("BTRFS: ZSTD_initDStream failed\n"); -+ ret = -EIO; -+ goto finish; -+ } -+ -+ destlen = min_t(size_t, destlen, PAGE_SIZE); -+ -+ in_buf.src = data_in; -+ in_buf.pos = 0; -+ in_buf.size = srclen; -+ -+ out_buf.dst = workspace->buf; -+ out_buf.pos = 0; -+ out_buf.size = PAGE_SIZE; -+ -+ ret2 = 1; -+ while (pg_offset < destlen && in_buf.pos < in_buf.size) { -+ unsigned long buf_start; -+ unsigned long buf_offset; -+ unsigned long bytes; -+ -+ /* Check if the frame is over and we still need more input */ -+ if (ret2 == 0) { -+ pr_debug("BTRFS: ZSTD_decompressStream ended early\n"); -+ ret = -EIO; -+ goto finish; -+ } -+ ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf); -+ if (ZSTD_isError(ret2)) { -+ pr_debug("BTRFS: ZSTD_decompressStream returned %d\n", -+ ZSTD_getErrorCode(ret2)); -+ ret = -EIO; -+ goto finish; -+ } -+ -+ buf_start = total_out; -+ total_out += out_buf.pos; -+ out_buf.pos = 0; -+ -+ if (total_out <= start_byte) -+ continue; -+ -+ if (total_out > start_byte && buf_start < start_byte) -+ buf_offset = start_byte - buf_start; -+ else -+ buf_offset = 0; -+ -+ bytes = min_t(unsigned long, destlen - pg_offset, -+ out_buf.size - buf_offset); -+ -+ kaddr = kmap_atomic(dest_page); -+ memcpy(kaddr + pg_offset, out_buf.dst + buf_offset, bytes); -+ kunmap_atomic(kaddr); -+ -+ pg_offset += bytes; -+ } -+ ret = 0; -+finish: -+ if (pg_offset < destlen) { -+ kaddr = kmap_atomic(dest_page); -+ memset(kaddr + pg_offset, 0, destlen - pg_offset); -+ kunmap_atomic(kaddr); -+ } -+ return ret; -+} -+ -+const struct btrfs_compress_op btrfs_zstd_compress = { -+ .alloc_workspace = zstd_alloc_workspace, -+ .free_workspace = zstd_free_workspace, -+ .compress_pages = zstd_compress_pages, -+ .decompress_bio = zstd_decompress_bio, -+ .decompress = zstd_decompress, -+}; -diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h -index 9aa74f3..378230c 100644 ---- a/include/uapi/linux/btrfs.h -+++ b/include/uapi/linux/btrfs.h -@@ -255,13 +255,7 @@ struct btrfs_ioctl_fs_info_args { - #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1) - #define 
BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2) - #define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO (1ULL << 3) --/* -- * some patches floated around with a second compression method -- * lets save that incompat here for when they do get in -- * Note we don't actually support it, we're just reserving the -- * number -- */ --#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZOv2 (1ULL << 4) -+#define BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD (1ULL << 4) - - /* - * older kernels tried to do bigger metadata blocks, but the --- -2.9.3 diff --git a/contrib/linux-kernel/0004-squashfs-Add-zstd-support.patch b/contrib/linux-kernel/0004-squashfs-Add-zstd-support.patch deleted file mode 100644 index 36cdf71df1f..00000000000 --- a/contrib/linux-kernel/0004-squashfs-Add-zstd-support.patch +++ /dev/null @@ -1,306 +0,0 @@ -From 46bf8f6d30d6ddf2446c110f122482b5e5e16933 Mon Sep 17 00:00:00 2001 -From: Sean Purcell -Date: Mon, 17 Jul 2017 17:08:59 -0700 -Subject: [PATCH v5 4/5] squashfs: Add zstd support - -Add zstd compression and decompression support to SquashFS. zstd is a -great fit for SquashFS because it can compress at ratios approaching xz, -while decompressing twice as fast as zlib. For SquashFS in particular, -it can decompress as fast as lzo and lz4. It also has the flexibility -to turn down the compression ratio for faster compression times. - -The compression benchmark is run on the file tree from the SquashFS archive -found in ubuntu-16.10-desktop-amd64.iso [1]. It uses `mksquashfs` with the -default block size (128 KB) and various compression algorithms/levels. -xz and zstd are also benchmarked with 256 KB blocks. The decompression -benchmark times how long it takes to `tar` the file tree into `/dev/null`. -See the benchmark file in the upstream zstd source repository located under -`contrib/linux-kernel/squashfs-benchmark.sh` [2] for details. - -I ran the benchmarks on an Ubuntu 14.04 VM with 2 cores and 4 GiB of RAM. -The VM is running on a MacBook Pro with a 3.1 GHz Intel Core i7 processor, -16 GB of RAM, and an SSD. - -| Method | Ratio | Compression MB/s | Decompression MB/s | -|----------------|-------|------------------|--------------------| -| gzip | 2.92 | 15 | 128 | -| lzo | 2.64 | 9.5 | 217 | -| lz4 | 2.12 | 94 | 218 | -| xz | 3.43 | 5.5 | 35 | -| xz 256 KB | 3.53 | 5.4 | 40 | -| zstd 1 | 2.71 | 96 | 210 | -| zstd 5 | 2.93 | 69 | 198 | -| zstd 10 | 3.01 | 41 | 225 | -| zstd 15 | 3.13 | 11.4 | 224 | -| zstd 16 256 KB | 3.24 | 8.1 | 210 | - -This patch was written by Sean Purcell, but I will be -taking over the submission process.
- -[1] http://releases.ubuntu.com/16.10/ -[2] https://github.com/facebook/zstd/blob/dev/contrib/linux-kernel/squashfs-benchmark.sh - -zstd source repository: https://github.com/facebook/zstd - -Signed-off-by: Sean Purcell -Signed-off-by: Nick Terrell ---- -v3 -> v4: -- Fix minor linter warnings - -v4 -> v5: -- Fix ZSTD_DStream initialization code in squashfs -- Fix patch documentation to reflect that Sean Purcell is the author - - fs/squashfs/Kconfig | 14 +++++ - fs/squashfs/Makefile | 1 + - fs/squashfs/decompressor.c | 7 +++ - fs/squashfs/decompressor.h | 4 ++ - fs/squashfs/squashfs_fs.h | 1 + - fs/squashfs/zstd_wrapper.c | 151 +++++++++++++++++++++++++++++++++++++++++++++ - 6 files changed, 178 insertions(+) - create mode 100644 fs/squashfs/zstd_wrapper.c - -diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig -index ffb093e..1adb334 100644 ---- a/fs/squashfs/Kconfig -+++ b/fs/squashfs/Kconfig -@@ -165,6 +165,20 @@ config SQUASHFS_XZ - - If unsure, say N. - -+config SQUASHFS_ZSTD -+ bool "Include support for ZSTD compressed file systems" -+ depends on SQUASHFS -+ select ZSTD_DECOMPRESS -+ help -+ Saying Y here includes support for reading Squashfs file systems -+ compressed with ZSTD compression. ZSTD gives better compression than -+ the default ZLIB compression, while using less CPU. -+ -+ ZSTD is not the standard compression used in Squashfs and so most -+ file systems will be readable without selecting this option. -+ -+ If unsure, say N. -+ - config SQUASHFS_4K_DEVBLK_SIZE - bool "Use 4K device block size?" - depends on SQUASHFS -diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile -index 246a6f3..6655631 100644 ---- a/fs/squashfs/Makefile -+++ b/fs/squashfs/Makefile -@@ -15,3 +15,4 @@ squashfs-$(CONFIG_SQUASHFS_LZ4) += lz4_wrapper.o - squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o - squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o - squashfs-$(CONFIG_SQUASHFS_ZLIB) += zlib_wrapper.o -+squashfs-$(CONFIG_SQUASHFS_ZSTD) += zstd_wrapper.o -diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c -index d2bc136..8366398 100644 ---- a/fs/squashfs/decompressor.c -+++ b/fs/squashfs/decompressor.c -@@ -65,6 +65,12 @@ static const struct squashfs_decompressor squashfs_zlib_comp_ops = { - }; - #endif - -+#ifndef CONFIG_SQUASHFS_ZSTD -+static const struct squashfs_decompressor squashfs_zstd_comp_ops = { -+ NULL, NULL, NULL, NULL, ZSTD_COMPRESSION, "zstd", 0 -+}; -+#endif -+ - static const struct squashfs_decompressor squashfs_unknown_comp_ops = { - NULL, NULL, NULL, NULL, 0, "unknown", 0 - }; -@@ -75,6 +81,7 @@ static const struct squashfs_decompressor *decompressor[] = { - &squashfs_lzo_comp_ops, - &squashfs_xz_comp_ops, - &squashfs_lzma_unsupported_comp_ops, -+ &squashfs_zstd_comp_ops, - &squashfs_unknown_comp_ops - }; - -diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h -index a25713c..0f5a8e4 100644 ---- a/fs/squashfs/decompressor.h -+++ b/fs/squashfs/decompressor.h -@@ -58,4 +58,8 @@ extern const struct squashfs_decompressor squashfs_lzo_comp_ops; - extern const struct squashfs_decompressor squashfs_zlib_comp_ops; - #endif - -+#ifdef CONFIG_SQUASHFS_ZSTD -+extern const struct squashfs_decompressor squashfs_zstd_comp_ops; -+#endif -+ - #endif -diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h -index 506f4ba..24d12fd 100644 ---- a/fs/squashfs/squashfs_fs.h -+++ b/fs/squashfs/squashfs_fs.h -@@ -241,6 +241,7 @@ struct meta_index { - #define LZO_COMPRESSION 3 - #define XZ_COMPRESSION 4 - #define LZ4_COMPRESSION 5 
-+#define ZSTD_COMPRESSION 6 - - struct squashfs_super_block { - __le32 s_magic; -diff --git a/fs/squashfs/zstd_wrapper.c b/fs/squashfs/zstd_wrapper.c -new file mode 100644 -index 0000000..eeaabf8 ---- /dev/null -+++ b/fs/squashfs/zstd_wrapper.c -@@ -0,0 +1,151 @@ -+/* -+ * Squashfs - a compressed read only filesystem for Linux -+ * -+ * Copyright (c) 2016-present, Facebook, Inc. -+ * All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2, -+ * or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * zstd_wrapper.c -+ */ -+ -+#include <linux/mutex.h> -+#include <linux/slab.h> -+#include <linux/buffer_head.h> -+#include <linux/zstd.h> -+#include <linux/vmalloc.h> -+ -+#include "squashfs_fs.h" -+#include "squashfs_fs_sb.h" -+#include "squashfs.h" -+#include "decompressor.h" -+#include "page_actor.h" -+ -+struct workspace { -+ void *mem; -+ size_t mem_size; -+ size_t window_size; -+}; -+ -+static void *zstd_init(struct squashfs_sb_info *msblk, void *buff) -+{ -+ struct workspace *wksp = kmalloc(sizeof(*wksp), GFP_KERNEL); -+ -+ if (wksp == NULL) -+ goto failed; -+ wksp->window_size = max_t(size_t, -+ msblk->block_size, SQUASHFS_METADATA_SIZE); -+ wksp->mem_size = ZSTD_DStreamWorkspaceBound(wksp->window_size); -+ wksp->mem = vmalloc(wksp->mem_size); -+ if (wksp->mem == NULL) -+ goto failed; -+ -+ return wksp; -+ -+failed: -+ ERROR("Failed to allocate zstd workspace\n"); -+ kfree(wksp); -+ return ERR_PTR(-ENOMEM); -+} -+ -+ -+static void zstd_free(void *strm) -+{ -+ struct workspace *wksp = strm; -+ -+ if (wksp) -+ vfree(wksp->mem); -+ kfree(wksp); -+} -+ -+ -+static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm, -+ struct buffer_head **bh, int b, int offset, int length, -+ struct squashfs_page_actor *output) -+{ -+ struct workspace *wksp = strm; -+ ZSTD_DStream *stream; -+ size_t total_out = 0; -+ size_t zstd_err; -+ int k = 0; -+ ZSTD_inBuffer in_buf = { NULL, 0, 0 }; -+ ZSTD_outBuffer out_buf = { NULL, 0, 0 }; -+ -+ stream = ZSTD_initDStream(wksp->window_size, wksp->mem, wksp->mem_size); -+ -+ if (!stream) { -+ ERROR("Failed to initialize zstd decompressor\n"); -+ goto out; -+ } -+ -+ out_buf.size = PAGE_SIZE; -+ out_buf.dst = squashfs_first_page(output); -+ -+ do { -+ if (in_buf.pos == in_buf.size && k < b) { -+ int avail = min(length, msblk->devblksize - offset); -+ -+ length -= avail; -+ in_buf.src = bh[k]->b_data + offset; -+ in_buf.size = avail; -+ in_buf.pos = 0; -+ offset = 0; -+ } -+ -+ if (out_buf.pos == out_buf.size) { -+ out_buf.dst = squashfs_next_page(output); -+ if (out_buf.dst == NULL) { -+ /* Shouldn't run out of pages -+ * before stream is done. 
-+ */ -+ squashfs_finish_page(output); -+ goto out; -+ } -+ out_buf.pos = 0; -+ out_buf.size = PAGE_SIZE; -+ } -+ -+ total_out -= out_buf.pos; -+ zstd_err = ZSTD_decompressStream(stream, &out_buf, &in_buf); -+ total_out += out_buf.pos; /* add the additional data produced */ -+ -+ if (in_buf.pos == in_buf.size && k < b) -+ put_bh(bh[k++]); -+ } while (zstd_err != 0 && !ZSTD_isError(zstd_err)); -+ -+ squashfs_finish_page(output); -+ -+ if (ZSTD_isError(zstd_err)) { -+ ERROR("zstd decompression error: %d\n", -+ (int)ZSTD_getErrorCode(zstd_err)); -+ goto out; -+ } -+ -+ if (k < b) -+ goto out; -+ -+ return (int)total_out; -+ -+out: -+ for (; k < b; k++) -+ put_bh(bh[k]); -+ -+ return -EIO; -+} -+ -+const struct squashfs_decompressor squashfs_zstd_comp_ops = { -+ .init = zstd_init, -+ .free = zstd_free, -+ .decompress = zstd_uncompress, -+ .id = ZSTD_COMPRESSION, -+ .name = "zstd", -+ .supported = 1 -+}; --- -2.9.3 diff --git a/contrib/linux-kernel/0005-crypto-Add-zstd-support.patch b/contrib/linux-kernel/0005-crypto-Add-zstd-support.patch deleted file mode 100644 index 971b0634584..00000000000 --- a/contrib/linux-kernel/0005-crypto-Add-zstd-support.patch +++ /dev/null @@ -1,424 +0,0 @@ -From 308795a7713ca6fcd468b60fba9a2fca99cee6a0 Mon Sep 17 00:00:00 2001 -From: Nick Terrell -Date: Wed, 2 Aug 2017 18:02:13 -0700 -Subject: [PATCH v5 5/5] crypto: Add zstd support - -Adds zstd support to crypto and scompress. Only supports the default -level. - -Signed-off-by: Nick Terrell ---- - crypto/Kconfig | 9 ++ - crypto/Makefile | 1 + - crypto/testmgr.c | 10 +++ - crypto/testmgr.h | 71 +++++++++++++++ - crypto/zstd.c | 265 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ - 5 files changed, 356 insertions(+) - create mode 100644 crypto/zstd.c - -diff --git a/crypto/Kconfig b/crypto/Kconfig -index caa770e..4fc3936 100644 ---- a/crypto/Kconfig -+++ b/crypto/Kconfig -@@ -1662,6 +1662,15 @@ config CRYPTO_LZ4HC - help - This is the LZ4 high compression mode algorithm. - -+config CRYPTO_ZSTD -+ tristate "Zstd compression algorithm" -+ select CRYPTO_ALGAPI -+ select CRYPTO_ACOMP2 -+ select ZSTD_COMPRESS -+ select ZSTD_DECOMPRESS -+ help -+ This is the zstd algorithm. 
-+ - comment "Random Number Generation" - - config CRYPTO_ANSI_CPRNG -diff --git a/crypto/Makefile b/crypto/Makefile -index d41f033..b22e1e8 100644 ---- a/crypto/Makefile -+++ b/crypto/Makefile -@@ -133,6 +133,7 @@ obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o - obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o - obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o - obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o -+obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o - - ecdh_generic-y := ecc.o - ecdh_generic-y += ecdh.o -diff --git a/crypto/testmgr.c b/crypto/testmgr.c -index 7125ba3..8a124d3 100644 ---- a/crypto/testmgr.c -+++ b/crypto/testmgr.c -@@ -3603,6 +3603,16 @@ static const struct alg_test_desc alg_test_descs[] = { - .decomp = __VECS(zlib_deflate_decomp_tv_template) - } - } -+ }, { -+ .alg = "zstd", -+ .test = alg_test_comp, -+ .fips_allowed = 1, -+ .suite = { -+ .comp = { -+ .comp = __VECS(zstd_comp_tv_template), -+ .decomp = __VECS(zstd_decomp_tv_template) -+ } -+ } - } - }; - -diff --git a/crypto/testmgr.h b/crypto/testmgr.h -index 6ceb0e2..e6b5920 100644 ---- a/crypto/testmgr.h -+++ b/crypto/testmgr.h -@@ -34631,4 +34631,75 @@ static const struct comp_testvec lz4hc_decomp_tv_template[] = { - }, - }; - -+static const struct comp_testvec zstd_comp_tv_template[] = { -+ { -+ .inlen = 68, -+ .outlen = 39, -+ .input = "The algorithm is zstd. " -+ "The algorithm is zstd. " -+ "The algorithm is zstd.", -+ .output = "\x28\xb5\x2f\xfd\x00\x50\xf5\x00\x00\xb8\x54\x68\x65" -+ "\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d\x20\x69\x73" -+ "\x20\x7a\x73\x74\x64\x2e\x20\x01\x00\x55\x73\x36\x01" -+ , -+ }, -+ { -+ .inlen = 244, -+ .outlen = 151, -+ .input = "zstd, short for Zstandard, is a fast lossless " -+ "compression algorithm, targeting real-time " -+ "compression scenarios at zlib-level and better " -+ "compression ratios. The zstd compression library " -+ "provides in-memory compression and decompression " -+ "functions.", -+ .output = "\x28\xb5\x2f\xfd\x00\x50\x75\x04\x00\x42\x4b\x1e\x17" -+ "\x90\x81\x31\x00\xf2\x2f\xe4\x36\xc9\xef\x92\x88\x32" -+ "\xc9\xf2\x24\x94\xd8\x68\x9a\x0f\x00\x0c\xc4\x31\x6f" -+ "\x0d\x0c\x38\xac\x5c\x48\x03\xcd\x63\x67\xc0\xf3\xad" -+ "\x4e\x90\xaa\x78\xa0\xa4\xc5\x99\xda\x2f\xb6\x24\x60" -+ "\xe2\x79\x4b\xaa\xb6\x6b\x85\x0b\xc9\xc6\x04\x66\x86" -+ "\xe2\xcc\xe2\x25\x3f\x4f\x09\xcd\xb8\x9d\xdb\xc1\x90" -+ "\xa9\x11\xbc\x35\x44\x69\x2d\x9c\x64\x4f\x13\x31\x64" -+ "\xcc\xfb\x4d\x95\x93\x86\x7f\x33\x7f\x1a\xef\xe9\x30" -+ "\xf9\x67\xa1\x94\x0a\x69\x0f\x60\xcd\xc3\xab\x99\xdc" -+ "\x42\xed\x97\x05\x00\x33\xc3\x15\x95\x3a\x06\xa0\x0e" -+ "\x20\xa9\x0e\x82\xb9\x43\x45\x01", -+ }, -+}; -+ -+static const struct comp_testvec zstd_decomp_tv_template[] = { -+ { -+ .inlen = 43, -+ .outlen = 68, -+ .input = "\x28\xb5\x2f\xfd\x04\x50\xf5\x00\x00\xb8\x54\x68\x65" -+ "\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d\x20\x69\x73" -+ "\x20\x7a\x73\x74\x64\x2e\x20\x01\x00\x55\x73\x36\x01" -+ "\x6b\xf4\x13\x35", -+ .output = "The algorithm is zstd. " -+ "The algorithm is zstd. 
" -+ "The algorithm is zstd.", -+ }, -+ { -+ .inlen = 155, -+ .outlen = 244, -+ .input = "\x28\xb5\x2f\xfd\x04\x50\x75\x04\x00\x42\x4b\x1e\x17" -+ "\x90\x81\x31\x00\xf2\x2f\xe4\x36\xc9\xef\x92\x88\x32" -+ "\xc9\xf2\x24\x94\xd8\x68\x9a\x0f\x00\x0c\xc4\x31\x6f" -+ "\x0d\x0c\x38\xac\x5c\x48\x03\xcd\x63\x67\xc0\xf3\xad" -+ "\x4e\x90\xaa\x78\xa0\xa4\xc5\x99\xda\x2f\xb6\x24\x60" -+ "\xe2\x79\x4b\xaa\xb6\x6b\x85\x0b\xc9\xc6\x04\x66\x86" -+ "\xe2\xcc\xe2\x25\x3f\x4f\x09\xcd\xb8\x9d\xdb\xc1\x90" -+ "\xa9\x11\xbc\x35\x44\x69\x2d\x9c\x64\x4f\x13\x31\x64" -+ "\xcc\xfb\x4d\x95\x93\x86\x7f\x33\x7f\x1a\xef\xe9\x30" -+ "\xf9\x67\xa1\x94\x0a\x69\x0f\x60\xcd\xc3\xab\x99\xdc" -+ "\x42\xed\x97\x05\x00\x33\xc3\x15\x95\x3a\x06\xa0\x0e" -+ "\x20\xa9\x0e\x82\xb9\x43\x45\x01\xaa\x6d\xda\x0d", -+ .output = "zstd, short for Zstandard, is a fast lossless " -+ "compression algorithm, targeting real-time " -+ "compression scenarios at zlib-level and better " -+ "compression ratios. The zstd compression library " -+ "provides in-memory compression and decompression " -+ "functions.", -+ }, -+}; - #endif /* _CRYPTO_TESTMGR_H */ -diff --git a/crypto/zstd.c b/crypto/zstd.c -new file mode 100644 -index 0000000..9a76b3e ---- /dev/null -+++ b/crypto/zstd.c -@@ -0,0 +1,265 @@ -+/* -+ * Cryptographic API. -+ * -+ * Copyright (c) 2017-present, Facebook, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. 
-+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+ -+#define ZSTD_DEF_LEVEL 3 -+ -+struct zstd_ctx { -+ ZSTD_CCtx *cctx; -+ ZSTD_DCtx *dctx; -+ void *cwksp; -+ void *dwksp; -+}; -+ -+static ZSTD_parameters zstd_params(void) -+{ -+ return ZSTD_getParams(ZSTD_DEF_LEVEL, 0, 0); -+} -+ -+static int zstd_comp_init(struct zstd_ctx *ctx) -+{ -+ int ret = 0; -+ const ZSTD_parameters params = zstd_params(); -+ const size_t wksp_size = ZSTD_CCtxWorkspaceBound(params.cParams); -+ -+ ctx->cwksp = vzalloc(wksp_size); -+ if (!ctx->cwksp) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ ctx->cctx = ZSTD_initCCtx(ctx->cwksp, wksp_size); -+ if (!ctx->cctx) { -+ ret = -EINVAL; -+ goto out_free; -+ } -+out: -+ return ret; -+out_free: -+ vfree(ctx->cwksp); -+ goto out; -+} -+ -+static int zstd_decomp_init(struct zstd_ctx *ctx) -+{ -+ int ret = 0; -+ const size_t wksp_size = ZSTD_DCtxWorkspaceBound(); -+ -+ ctx->dwksp = vzalloc(wksp_size); -+ if (!ctx->dwksp) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ ctx->dctx = ZSTD_initDCtx(ctx->dwksp, wksp_size); -+ if (!ctx->dctx) { -+ ret = -EINVAL; -+ goto out_free; -+ } -+out: -+ return ret; -+out_free: -+ vfree(ctx->dwksp); -+ goto out; -+} -+ -+static void zstd_comp_exit(struct zstd_ctx *ctx) -+{ -+ vfree(ctx->cwksp); -+ ctx->cwksp = NULL; -+ ctx->cctx = NULL; -+} -+ -+static void zstd_decomp_exit(struct zstd_ctx *ctx) -+{ -+ vfree(ctx->dwksp); -+ ctx->dwksp = NULL; -+ ctx->dctx = NULL; -+} -+ -+static int __zstd_init(void *ctx) -+{ -+ int ret; -+ -+ ret = zstd_comp_init(ctx); -+ if (ret) -+ return ret; -+ ret = zstd_decomp_init(ctx); -+ if (ret) -+ zstd_comp_exit(ctx); -+ return ret; -+} -+ -+static void *zstd_alloc_ctx(struct crypto_scomp *tfm) -+{ -+ int ret; -+ struct zstd_ctx *ctx; -+ -+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); -+ if (!ctx) -+ return ERR_PTR(-ENOMEM); -+ -+ ret = __zstd_init(ctx); -+ if (ret) { -+ kfree(ctx); -+ return ERR_PTR(ret); -+ } -+ -+ return ctx; -+} -+ -+static int zstd_init(struct crypto_tfm *tfm) -+{ -+ struct zstd_ctx *ctx = crypto_tfm_ctx(tfm); -+ -+ return __zstd_init(ctx); -+} -+ -+static void __zstd_exit(void *ctx) -+{ -+ zstd_comp_exit(ctx); -+ zstd_decomp_exit(ctx); -+} -+ -+static void zstd_free_ctx(struct crypto_scomp *tfm, void *ctx) -+{ -+ __zstd_exit(ctx); -+ kzfree(ctx); -+} -+ -+static void zstd_exit(struct crypto_tfm *tfm) -+{ -+ struct zstd_ctx *ctx = crypto_tfm_ctx(tfm); -+ -+ __zstd_exit(ctx); -+} -+ -+static int __zstd_compress(const u8 *src, unsigned int slen, -+ u8 *dst, unsigned int *dlen, void *ctx) -+{ -+ size_t out_len; -+ struct zstd_ctx *zctx = ctx; -+ const ZSTD_parameters params = zstd_params(); -+ -+ out_len = ZSTD_compressCCtx(zctx->cctx, dst, *dlen, src, slen, params); -+ if (ZSTD_isError(out_len)) -+ return -EINVAL; -+ *dlen = out_len; -+ return 0; -+} -+ -+static int zstd_compress(struct crypto_tfm *tfm, const u8 *src, -+ unsigned int slen, u8 *dst, unsigned int *dlen) -+{ -+ struct zstd_ctx *ctx = crypto_tfm_ctx(tfm); -+ -+ return __zstd_compress(src, slen, dst, dlen, ctx); -+} -+ -+static int zstd_scompress(struct crypto_scomp *tfm, const u8 *src, -+ unsigned int slen, u8 *dst, unsigned int *dlen, -+ void *ctx) -+{ -+ return __zstd_compress(src, slen, dst, dlen, ctx); -+} -+ -+static int __zstd_decompress(const u8 *src, unsigned int slen, -+ u8 *dst, unsigned int *dlen, void *ctx) -+{ -+ size_t out_len; -+ struct zstd_ctx *zctx = ctx; -+ -+ out_len = ZSTD_decompressDCtx(zctx->dctx, dst, *dlen, src, slen); -+ if (ZSTD_isError(out_len)) -+ 
return -EINVAL; -+ *dlen = out_len; -+ return 0; -+} -+ -+static int zstd_decompress(struct crypto_tfm *tfm, const u8 *src, -+ unsigned int slen, u8 *dst, unsigned int *dlen) -+{ -+ struct zstd_ctx *ctx = crypto_tfm_ctx(tfm); -+ -+ return __zstd_decompress(src, slen, dst, dlen, ctx); -+} -+ -+static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src, -+ unsigned int slen, u8 *dst, unsigned int *dlen, -+ void *ctx) -+{ -+ return __zstd_decompress(src, slen, dst, dlen, ctx); -+} -+ -+static struct crypto_alg alg = { -+ .cra_name = "zstd", -+ .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, -+ .cra_ctxsize = sizeof(struct zstd_ctx), -+ .cra_module = THIS_MODULE, -+ .cra_init = zstd_init, -+ .cra_exit = zstd_exit, -+ .cra_u = { .compress = { -+ .coa_compress = zstd_compress, -+ .coa_decompress = zstd_decompress } } -+}; -+ -+static struct scomp_alg scomp = { -+ .alloc_ctx = zstd_alloc_ctx, -+ .free_ctx = zstd_free_ctx, -+ .compress = zstd_scompress, -+ .decompress = zstd_sdecompress, -+ .base = { -+ .cra_name = "zstd", -+ .cra_driver_name = "zstd-scomp", -+ .cra_module = THIS_MODULE, -+ } -+}; -+ -+static int __init zstd_mod_init(void) -+{ -+ int ret; -+ -+ ret = crypto_register_alg(&alg); -+ if (ret) -+ return ret; -+ -+ ret = crypto_register_scomp(&scomp); -+ if (ret) -+ crypto_unregister_alg(&alg); -+ -+ return ret; -+} -+ -+static void __exit zstd_mod_fini(void) -+{ -+ crypto_unregister_alg(&alg); -+ crypto_unregister_scomp(&scomp); -+} -+ -+module_init(zstd_mod_init); -+module_exit(zstd_mod_fini); -+ -+MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("Zstd Compression Algorithm"); -+MODULE_ALIAS_CRYPTO("zstd"); --- -2.9.3 diff --git a/contrib/linux-kernel/0006-squashfs-tools-Add-zstd-support.patch b/contrib/linux-kernel/0006-squashfs-tools-Add-zstd-support.patch deleted file mode 100644 index 00d24e2b5a8..00000000000 --- a/contrib/linux-kernel/0006-squashfs-tools-Add-zstd-support.patch +++ /dev/null @@ -1,420 +0,0 @@ -From 57a3cf95b276946559f9e044c7352c11303bb9c1 Mon Sep 17 00:00:00 2001 -From: Sean Purcell -Date: Thu, 3 Aug 2017 17:47:03 -0700 -Subject: [PATCH v6] squashfs-tools: Add zstd support - -This patch adds zstd support to squashfs-tools. It works with zstd -versions >= 1.0.0. It was originally written by Sean Purcell. - -Signed-off-by: Sean Purcell -Signed-off-by: Nick Terrell ---- -v4 -> v5: -- Fix patch documentation to reflect that Sean Purcell is the author -- Don't strip trailing whitespace of unrelated code -- Make zstd_display_options() static - -v5 -> v6: -- Fix build instructions in Makefile - - squashfs-tools/Makefile | 20 ++++ - squashfs-tools/compressor.c | 8 ++ - squashfs-tools/squashfs_fs.h | 1 + - squashfs-tools/zstd_wrapper.c | 254 ++++++++++++++++++++++++++++++++++++++++++ - squashfs-tools/zstd_wrapper.h | 48 ++++++++ - 5 files changed, 331 insertions(+) - create mode 100644 squashfs-tools/zstd_wrapper.c - create mode 100644 squashfs-tools/zstd_wrapper.h - -diff --git a/squashfs-tools/Makefile b/squashfs-tools/Makefile -index 52d2582..22fc559 100644 ---- a/squashfs-tools/Makefile -+++ b/squashfs-tools/Makefile -@@ -75,6 +75,18 @@ GZIP_SUPPORT = 1 - #LZMA_SUPPORT = 1 - #LZMA_DIR = ../../../../LZMA/lzma465 - -+ -+########### Building ZSTD support ############ -+# -+# The ZSTD library is supported -+# ZSTD homepage: http://zstd.net -+# ZSTD source repository: https://github.com/facebook/zstd -+# -+# To build using the ZSTD library - install the library and uncomment the -+# ZSTD_SUPPORT line below. 
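Note that the testmgr vectors a few hunks above hard-code the exact frame bytes the kernel compressor emits at `ZSTD_DEF_LEVEL`; those bytes can change between zstd releases, so a portable out-of-tree check is a round-trip rather than a byte-for-byte comparison. A sketch against stock libzstd's one-shot API, reusing the first vector's input at level 3:

```
/* Round-trip the first zstd_comp_tv_template input. Compile with -lzstd. */
#include <stdio.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
	static const char input[] =
		"The algorithm is zstd. "
		"The algorithm is zstd. "
		"The algorithm is zstd.";
	char comp[256], decomp[256];
	size_t clen, dlen;

	clen = ZSTD_compress(comp, sizeof(comp), input, sizeof(input) - 1, 3);
	if (ZSTD_isError(clen))
		return 1;
	dlen = ZSTD_decompress(decomp, sizeof(decomp), comp, clen);
	if (ZSTD_isError(dlen) || dlen != sizeof(input) - 1 ||
	    memcmp(decomp, input, dlen) != 0)
		return 1;
	printf("ok: %zu -> %zu -> %zu bytes\n", sizeof(input) - 1, clen, dlen);
	return 0;
}
```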
-+# -+#ZSTD_SUPPORT = 1 -+ - ######## Specifying default compression ######## - # - # The next line specifies which compression algorithm is used by default -@@ -177,6 +189,14 @@ LIBS += -llz4 - COMPRESSORS += lz4 - endif - -+ifeq ($(ZSTD_SUPPORT),1) -+CFLAGS += -DZSTD_SUPPORT -+MKSQUASHFS_OBJS += zstd_wrapper.o -+UNSQUASHFS_OBJS += zstd_wrapper.o -+LIBS += -lzstd -+COMPRESSORS += zstd -+endif -+ - ifeq ($(XATTR_SUPPORT),1) - ifeq ($(XATTR_DEFAULT),1) - CFLAGS += -DXATTR_SUPPORT -DXATTR_DEFAULT -diff --git a/squashfs-tools/compressor.c b/squashfs-tools/compressor.c -index 525e316..02b5e90 100644 ---- a/squashfs-tools/compressor.c -+++ b/squashfs-tools/compressor.c -@@ -65,6 +65,13 @@ static struct compressor xz_comp_ops = { - extern struct compressor xz_comp_ops; - #endif - -+#ifndef ZSTD_SUPPORT -+static struct compressor zstd_comp_ops = { -+ ZSTD_COMPRESSION, "zstd" -+}; -+#else -+extern struct compressor zstd_comp_ops; -+#endif - - static struct compressor unknown_comp_ops = { - 0, "unknown" -@@ -77,6 +84,7 @@ struct compressor *compressor[] = { - &lzo_comp_ops, - &lz4_comp_ops, - &xz_comp_ops, -+ &zstd_comp_ops, - &unknown_comp_ops - }; - -diff --git a/squashfs-tools/squashfs_fs.h b/squashfs-tools/squashfs_fs.h -index 791fe12..afca918 100644 ---- a/squashfs-tools/squashfs_fs.h -+++ b/squashfs-tools/squashfs_fs.h -@@ -277,6 +277,7 @@ typedef long long squashfs_inode; - #define LZO_COMPRESSION 3 - #define XZ_COMPRESSION 4 - #define LZ4_COMPRESSION 5 -+#define ZSTD_COMPRESSION 6 - - struct squashfs_super_block { - unsigned int s_magic; -diff --git a/squashfs-tools/zstd_wrapper.c b/squashfs-tools/zstd_wrapper.c -new file mode 100644 -index 0000000..dcab75a ---- /dev/null -+++ b/squashfs-tools/zstd_wrapper.c -@@ -0,0 +1,254 @@ -+/* -+ * Copyright (c) 2017 -+ * Phillip Lougher -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2, -+ * or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * zstd_wrapper.c -+ * -+ * Support for ZSTD compression http://zstd.net -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "squashfs_fs.h" -+#include "zstd_wrapper.h" -+#include "compressor.h" -+ -+static int compression_level = ZSTD_DEFAULT_COMPRESSION_LEVEL; -+ -+/* -+ * This function is called by the options parsing code in mksquashfs.c -+ * to parse any -X compressor option. -+ * -+ * This function returns: -+ * >=0 (number of additional args parsed) on success -+ * -1 if the option was unrecognised, or -+ * -2 if the option was recognised, but otherwise bad in -+ * some way (e.g. invalid parameter) -+ * -+ * Note: this function sets internal compressor state, but does not -+ * pass back the results of the parsing other than success/failure. -+ * The zstd_dump_options() function is called later to get the options in -+ * a format suitable for writing to the filesystem. 
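The return-code contract documented in the comment above (a non-negative count of extra arguments consumed, -1 for "not my option", -2 for "my option, but invalid") is easiest to see in isolation before reading the real `zstd_options()` below. Here is a toy parser for a hypothetical `-Xexample-level` option (the option name and `example_level` variable are invented for illustration), following the same convention:

```
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int example_level = 3;

static int example_options(char *argv[], int argc)
{
	if (strcmp(argv[0], "-Xexample-level") != 0)
		return -1;              /* not ours: unrecognised */
	if (argc < 2) {
		fprintf(stderr, "example: -Xexample-level missing value\n");
		return -2;              /* ours, but malformed */
	}
	example_level = atoi(argv[1]);
	if (example_level < 1) {
		fprintf(stderr, "example: bad level %s\n", argv[1]);
		return -2;              /* ours, but out of range */
	}
	return 1;                       /* consumed one extra argument */
}

int main(int argc, char *argv[])
{
	int used = argc > 1 ? example_options(argv + 1, argc - 1) : -1;

	printf("parser returned %d, level is now %d\n", used, example_level);
	return 0;
}
```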
-+ */ -+static int zstd_options(char *argv[], int argc) -+{ -+ if (strcmp(argv[0], "-Xcompression-level") == 0) { -+ if (argc < 2) { -+ fprintf(stderr, "zstd: -Xcompression-level missing " -+ "compression level\n"); -+ fprintf(stderr, "zstd: -Xcompression-level it should " -+ "be 1 <= n <= %d\n", ZSTD_maxCLevel()); -+ goto failed; -+ } -+ -+ compression_level = atoi(argv[1]); -+ if (compression_level < 1 || -+ compression_level > ZSTD_maxCLevel()) { -+ fprintf(stderr, "zstd: -Xcompression-level invalid, it " -+ "should be 1 <= n <= %d\n", ZSTD_maxCLevel()); -+ goto failed; -+ } -+ -+ return 1; -+ } -+ -+ return -1; -+failed: -+ return -2; -+} -+ -+/* -+ * This function is called by mksquashfs to dump the parsed -+ * compressor options in a format suitable for writing to the -+ * compressor options field in the filesystem (stored immediately -+ * after the superblock). -+ * -+ * This function returns a pointer to the compression options structure -+ * to be stored (and the size), or NULL if there are no compression -+ * options. -+ */ -+static void *zstd_dump_options(int block_size, int *size) -+{ -+ static struct zstd_comp_opts comp_opts; -+ -+ /* don't return anything if the options are all default */ -+ if (compression_level == ZSTD_DEFAULT_COMPRESSION_LEVEL) -+ return NULL; -+ -+ comp_opts.compression_level = compression_level; -+ -+ SQUASHFS_INSWAP_COMP_OPTS(&comp_opts); -+ -+ *size = sizeof(comp_opts); -+ return &comp_opts; -+} -+ -+/* -+ * This function is a helper specifically for the append mode of -+ * mksquashfs. Its purpose is to set the internal compressor state -+ * to the stored compressor options in the passed compressor options -+ * structure. -+ * -+ * In effect this function sets up the compressor options -+ * to the same state they were when the filesystem was originally -+ * generated, this is to ensure on appending, the compressor uses -+ * the same compression options that were used to generate the -+ * original filesystem. -+ * -+ * Note, even if there are no compressor options, this function is still -+ * called with an empty compressor structure (size == 0), to explicitly -+ * set the default options, this is to ensure any user supplied -+ * -X options on the appending mksquashfs command line are over-ridden. -+ * -+ * This function returns 0 on successful extraction of options, and -1 on error. 
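One subtlety behind `zstd_dump_options()` and `zstd_extract_options()` below: squashfs stores the compressor options structure little-endian on disk, so big-endian hosts must byte-swap on both the dump and extract paths. That is what the `SQUASHFS_INSWAP_COMP_OPTS()` macro, defined in `zstd_wrapper.h` later in this patch, does. A host-independent sketch of the same round-trip, using explicit byte access in place of the macro:

```
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* dump path: encode a host value as little-endian bytes */
static uint32_t to_le32(uint32_t v)
{
	uint8_t b[4] = { (uint8_t)v, (uint8_t)(v >> 8),
			 (uint8_t)(v >> 16), (uint8_t)(v >> 24) };
	uint32_t le;

	memcpy(&le, b, sizeof(le));
	return le;
}

int main(void)
{
	uint32_t on_disk = to_le32(15);  /* as written after the superblock */
	const uint8_t *p = (const uint8_t *)&on_disk;
	/* extract path: decode little-endian bytes back to a host value */
	uint32_t level = p[0] | (p[1] << 8) | (p[2] << 16) |
			 ((uint32_t)p[3] << 24);

	printf("compression-level %u\n", level);  /* 15 on any endianness */
	return level != 15;
}
```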
-+ */ -+static int zstd_extract_options(int block_size, void *buffer, int size) -+{ -+ struct zstd_comp_opts *comp_opts = buffer; -+ -+ if (size == 0) { -+ /* Set default values */ -+ compression_level = ZSTD_DEFAULT_COMPRESSION_LEVEL; -+ return 0; -+ } -+ -+ /* we expect a comp_opts structure of sufficient size to be present */ -+ if (size < sizeof(*comp_opts)) -+ goto failed; -+ -+ SQUASHFS_INSWAP_COMP_OPTS(comp_opts); -+ -+ if (comp_opts->compression_level < 1 || -+ comp_opts->compression_level > ZSTD_maxCLevel()) { -+ fprintf(stderr, "zstd: bad compression level in compression " -+ "options structure\n"); -+ goto failed; -+ } -+ -+ compression_level = comp_opts->compression_level; -+ -+ return 0; -+ -+failed: -+ fprintf(stderr, "zstd: error reading stored compressor options from " -+ "filesystem!\n"); -+ -+ return -1; -+} -+ -+static void zstd_display_options(void *buffer, int size) -+{ -+ struct zstd_comp_opts *comp_opts = buffer; -+ -+ /* we expect a comp_opts structure of sufficient size to be present */ -+ if (size < sizeof(*comp_opts)) -+ goto failed; -+ -+ SQUASHFS_INSWAP_COMP_OPTS(comp_opts); -+ -+ if (comp_opts->compression_level < 1 || -+ comp_opts->compression_level > ZSTD_maxCLevel()) { -+ fprintf(stderr, "zstd: bad compression level in compression " -+ "options structure\n"); -+ goto failed; -+ } -+ -+ printf("\tcompression-level %d\n", comp_opts->compression_level); -+ -+ return; -+ -+failed: -+ fprintf(stderr, "zstd: error reading stored compressor options from " -+ "filesystem!\n"); -+} -+ -+/* -+ * This function is called by mksquashfs to initialise the -+ * compressor, before compress() is called. -+ * -+ * This function returns 0 on success, and -1 on error. -+ */ -+static int zstd_init(void **strm, int block_size, int datablock) -+{ -+ ZSTD_CCtx *cctx = ZSTD_createCCtx(); -+ -+ if (!cctx) { -+ fprintf(stderr, "zstd: failed to allocate compression " -+ "context!\n"); -+ return -1; -+ } -+ -+ *strm = cctx; -+ return 0; -+} -+ -+static int zstd_compress(void *strm, void *dest, void *src, int size, -+ int block_size, int *error) -+{ -+ const size_t res = ZSTD_compressCCtx((ZSTD_CCtx*)strm, dest, block_size, -+ src, size, compression_level); -+ -+ if (ZSTD_isError(res)) { -+ /* FIXME: -+ * zstd does not expose stable error codes. The error enum may -+ * change between versions. Until upstream zstd stablizes the -+ * error codes, we have no way of knowing why the error occurs. -+ * zstd shouldn't fail to compress any input unless there isn't -+ * enough output space. We assume that is the cause and return -+ * the special error code for not enough output space. -+ */ -+ return 0; -+ } -+ -+ return (int)res; -+} -+ -+static int zstd_uncompress(void *dest, void *src, int size, int outsize, -+ int *error) -+{ -+ const size_t res = ZSTD_decompress(dest, outsize, src, size); -+ -+ if (ZSTD_isError(res)) { -+ fprintf(stderr, "\t%d %d\n", outsize, size); -+ -+ *error = (int)ZSTD_getErrorCode(res); -+ return -1; -+ } -+ -+ return (int)res; -+} -+ -+static void zstd_usage(void) -+{ -+ fprintf(stderr, "\t -Xcompression-level \n"); -+ fprintf(stderr, "\t\t should be 1 .. 
%d (default " -+ "%d)\n", ZSTD_maxCLevel(), ZSTD_DEFAULT_COMPRESSION_LEVEL); -+} -+ -+struct compressor zstd_comp_ops = { -+ .init = zstd_init, -+ .compress = zstd_compress, -+ .uncompress = zstd_uncompress, -+ .options = zstd_options, -+ .dump_options = zstd_dump_options, -+ .extract_options = zstd_extract_options, -+ .display_options = zstd_display_options, -+ .usage = zstd_usage, -+ .id = ZSTD_COMPRESSION, -+ .name = "zstd", -+ .supported = 1 -+}; -diff --git a/squashfs-tools/zstd_wrapper.h b/squashfs-tools/zstd_wrapper.h -new file mode 100644 -index 0000000..4fbef0a ---- /dev/null -+++ b/squashfs-tools/zstd_wrapper.h -@@ -0,0 +1,48 @@ -+#ifndef ZSTD_WRAPPER_H -+#define ZSTD_WRAPPER_H -+/* -+ * Squashfs -+ * -+ * Copyright (c) 2017 -+ * Phillip Lougher -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2, -+ * or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * zstd_wrapper.h -+ * -+ */ -+ -+#ifndef linux -+#define __BYTE_ORDER BYTE_ORDER -+#define __BIG_ENDIAN BIG_ENDIAN -+#define __LITTLE_ENDIAN LITTLE_ENDIAN -+#else -+#include -+#endif -+ -+#if __BYTE_ORDER == __BIG_ENDIAN -+extern unsigned int inswap_le16(unsigned short); -+extern unsigned int inswap_le32(unsigned int); -+ -+#define SQUASHFS_INSWAP_COMP_OPTS(s) { \ -+ (s)->compression_level = inswap_le32((s)->compression_level); \ -+} -+#else -+#define SQUASHFS_INSWAP_COMP_OPTS(s) -+#endif -+ -+/* Default compression */ -+#define ZSTD_DEFAULT_COMPRESSION_LEVEL 15 -+ -+struct zstd_comp_opts { -+ int compression_level; -+}; -+#endif --- -2.9.5 diff --git a/contrib/linux-kernel/COPYING b/contrib/linux-kernel/COPYING deleted file mode 100644 index ecbc0593737..00000000000 --- a/contrib/linux-kernel/COPYING +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. 
- - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. 
You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) 
- -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. 
- -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. 
\ No newline at end of file
diff --git a/contrib/linux-kernel/Makefile b/contrib/linux-kernel/Makefile
new file mode 100644
index 00000000000..63dd15d958f
--- /dev/null
+++ b/contrib/linux-kernel/Makefile
@@ -0,0 +1,108 @@
+# ################################################################
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
+# You may select, at your option, one of the above-listed licenses.
+# ################################################################
+
+.PHONY: libzstd
+libzstd:
+	rm -rf linux
+	mkdir -p linux
+	mkdir -p linux/include/linux
+	mkdir -p linux/lib/zstd
+	../freestanding_lib/freestanding.py \
+		--source-lib ../../lib \
+		--output-lib linux/lib/zstd \
+		--xxhash '<linux/xxhash.h>' \
+		--xxh64-state 'struct xxh64_state' \
+		--xxh64-prefix 'xxh64' \
+		--rewrite-include '<limits\.h>=<linux/limits.h>' \
+		--rewrite-include '<stddef\.h>=<linux/types.h>' \
+		--rewrite-include '"\.\./zstd.h"=<linux/zstd.h>' \
+		--rewrite-include '"(\.\./)?zstd_errors.h"=<linux/zstd_errors.h>' \
+		--sed 's,/\*\*\*,/* *,g' \
+		--sed 's,/\*\*,/*,g' \
+		--spdx \
+		-DZSTD_NO_INTRINSICS \
+		-DZSTD_NO_UNUSED_FUNCTIONS \
+		-DZSTD_LEGACY_SUPPORT=0 \
+		-DZSTD_STATIC_LINKING_ONLY \
+		-DFSE_STATIC_LINKING_ONLY \
+		-DXXH_STATIC_LINKING_ONLY \
+		-D__GNUC__ \
+		-D__linux__=1 \
+		-DSTATIC_BMI2=0 \
+		-DZSTD_ADDRESS_SANITIZER=0 \
+		-DZSTD_MEMORY_SANITIZER=0 \
+		-DZSTD_DATAFLOW_SANITIZER=0 \
+		-DZSTD_COMPRESS_HEAPMODE=1 \
+		-UNO_PREFETCH \
+		-U__cplusplus \
+		-UZSTD_DLL_EXPORT \
+		-UZSTD_DLL_IMPORT \
+		-U__ICCARM__ \
+		-UZSTD_MULTITHREAD \
+		-U_MSC_VER \
+		-U_WIN32 \
+		-RZSTDLIB_VISIBLE= \
+		-RZSTDERRORLIB_VISIBLE= \
+		-RZSTD_FALLTHROUGH=fallthrough \
+		-DZSTD_HAVE_WEAK_SYMBOLS=0 \
+		-DZSTD_TRACE=0 \
+		-DZSTD_NO_TRACE \
+		-DZSTD_DISABLE_ASM \
+		-DZSTD_LINUX_KERNEL
+	rm linux/lib/zstd/decompress/huf_decompress_amd64.S
+	mv linux/lib/zstd/zstd.h linux/include/linux/zstd_lib.h
+	mv linux/lib/zstd/zstd_errors.h linux/include/linux/
+	cp linux_zstd.h linux/include/linux/zstd.h
+	cp zstd_common_module.c linux/lib/zstd
+	cp zstd_compress_module.c linux/lib/zstd
+	cp zstd_decompress_module.c linux/lib/zstd
+	cp decompress_sources.h linux/lib/zstd
+	cp linux.mk linux/lib/zstd/Makefile
+
+LINUX ?= $(HOME)/repos/linux
+
+.PHONY: import
+import: libzstd
+	rm -f $(LINUX)/include/linux/zstd.h
+	rm -f $(LINUX)/include/linux/zstd_errors.h
+	rm -rf $(LINUX)/lib/zstd
+	cp linux/include/linux/zstd.h $(LINUX)/include/linux
+	cp linux/include/linux/zstd_lib.h $(LINUX)/include/linux
+	cp linux/include/linux/zstd_errors.h $(LINUX)/include/linux
+	cp -r linux/lib/zstd $(LINUX)/lib
+
+import-upstream:
+	rm -rf $(LINUX)/lib/zstd
+	mkdir $(LINUX)/lib/zstd
+	cp ../../lib/zstd.h $(LINUX)/include/linux/zstd_lib.h
+	cp -r ../../lib/common $(LINUX)/lib/zstd
+	cp -r ../../lib/compress $(LINUX)/lib/zstd
+	cp -r ../../lib/decompress $(LINUX)/lib/zstd
+	mv $(LINUX)/lib/zstd/zstd_errors.h $(LINUX)/include/linux
+	rm $(LINUX)/lib/zstd/common/threading.*
+	rm $(LINUX)/lib/zstd/common/pool.*
+	rm $(LINUX)/lib/zstd/common/xxhash.*
+	rm $(LINUX)/lib/zstd/compress/zstdmt_*
+
+DEBUGFLAGS= -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
+	-Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
+	-Wstrict-prototypes -Wundef -Wpointer-arith \
+	-Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
+	-Wredundant-decls -Wmissing-prototypes -Wc++-compat \
-Wimplicit-fallthrough + +.PHONY: test +test: libzstd + $(MAKE) -C test run-test CFLAGS="-O3 $(CFLAGS) $(DEBUGFLAGS) -Werror" -j + +.PHONY: clean +clean: + $(RM) -rf linux + $(MAKE) -C test clean diff --git a/contrib/linux-kernel/README.md b/contrib/linux-kernel/README.md index 86552b8bd7c..bfa070d1794 100644 --- a/contrib/linux-kernel/README.md +++ b/contrib/linux-kernel/README.md @@ -1,101 +1,14 @@ -# Linux Kernel Patch +# Zstd in the Linux Kernel -There are four pieces, the `xxhash` kernel module, the `zstd_compress` and `zstd_decompress` kernel modules, the BtrFS patch, and the SquashFS patch. -The patches are based off of the linux kernel master branch. +This directory contains the scripts needed to transform upstream zstd into the version imported into the kernel. All the transforms are automated and tested by our continuous integration. -## xxHash kernel module +## Upgrading Zstd in the Linux Kernel -* The patch is located in `xxhash.diff`. -* The header is in `include/linux/xxhash.h`. -* The source is in `lib/xxhash.c`. -* `test/XXHashUserLandTest.cpp` contains tests for the patch in userland by mocking the kernel headers. - I tested the tests by commenting a line of of each branch in `xxhash.c` one line at a time, and made sure the tests failed. - It can be run with the following commands: - ``` - cd test && make googletest && make XXHashUserLandTest && ./XXHashUserLandTest - ``` -* I also benchmarked the `xxhash` module against upstream xxHash, and made sure that they ran at the same speed. - -## Zstd Kernel modules - -* The (large) patch is located in `zstd.diff`, which depends on `xxhash.diff`. -* The header is in `include/linux/zstd.h`. -* It is split up into `zstd_compress` and `zstd_decompress`, which can be loaded independently. -* Source files are in `lib/zstd/`. -* `lib/Kconfig` and `lib/Makefile` need to be modified by applying `lib/Kconfig.diff` and `lib/Makefile.diff` respectively. - These changes are also included in the `zstd.diff`. -* `test/UserlandTest.cpp` contains tests for the patch in userland by mocking the kernel headers. - It can be run with the following commands: - ``` - cd test && make googletest && make UserlandTest && ./UserlandTest - ``` - -## BtrFS - -* The patch is located in `btrfs.diff`. -* Additionally `fs/btrfs/zstd.c` is provided as a source for convenience. -* The patch seems to be working, it doesn't crash the kernel, and compresses at speeds and ratios that are expected. - It could still use some more testing for fringe features, like printing options. - -### Benchmarks - -Benchmarks run on a Ubuntu 14.04 with 2 cores and 4 GiB of RAM. -The VM is running on a Macbook Pro with a 3.1 GHz Intel Core i7 processor, -16 GB of ram, and a SSD. -The kernel running was built from the master branch with the patch. - -The compression benchmark is copying 10 copies of the -unzipped [silesia corpus](http://mattmahoney.net/dc/silesia.html) into a BtrFS -filesystem mounted with `-o compress-force={none, lzo, zlib, zstd}`. -The decompression benchmark is timing how long it takes to `tar` all 10 copies -into `/dev/null`. -The compression ratio is measured by comparing the output of `df` and `du`. -See `btrfs-benchmark.sh` for details. 
- -| Algorithm | Compression ratio | Compression speed | Decompression speed | -|-----------|-------------------|-------------------|---------------------| -| None | 0.99 | 504 MB/s | 686 MB/s | -| lzo | 1.66 | 398 MB/s | 442 MB/s | -| zlib | 2.58 | 65 MB/s | 241 MB/s | -| zstd 1 | 2.57 | 260 MB/s | 383 MB/s | -| zstd 3 | 2.71 | 174 MB/s | 408 MB/s | -| zstd 6 | 2.87 | 70 MB/s | 398 MB/s | -| zstd 9 | 2.92 | 43 MB/s | 406 MB/s | -| zstd 12 | 2.93 | 21 MB/s | 408 MB/s | -| zstd 15 | 3.01 | 11 MB/s | 354 MB/s | - - -## SquashFS - -* The patch is located in `squashfs.diff` -* Additionally `fs/squashfs/zstd_wrapper.c` is provided as a source for convenience. -* The patch has been tested on the master branch of the kernel. - -### Benchmarks - -Benchmarks run on a Ubuntu 14.04 with 2 cores and 4 GiB of RAM. -The VM is running on a Macbook Pro with a 3.1 GHz Intel Core i7 processor, -16 GB of ram, and a SSD. -The kernel running was built from the master branch with the patch. - -The compression benchmark is the file tree from the SquashFS archive found in the -Ubuntu 16.10 desktop image (ubuntu-16.10-desktop-amd64.iso). -The compression benchmark uses mksquashfs with the default block size (128 KB) -and various compression algorithms/compression levels. -`xz` and `zstd` are also benchmarked with 256 KB blocks. -The decompression benchmark is timing how long it takes to `tar` the file tree -into `/dev/null`. -See `squashfs-benchmark.sh` for details. - -| Algorithm | Compression ratio | Compression speed | Decompression speed | -|----------------|-------------------|-------------------|---------------------| -| gzip | 2.92 | 15 MB/s | 128 MB/s | -| lzo | 2.64 | 9.5 MB/s | 217 MB/s | -| lz4 | 2.12 | 94 MB/s | 218 MB/s | -| xz | 3.43 | 5.5 MB/s | 35 MB/s | -| xz 256 KB | 3.53 | 5.4 MB/s | 40 MB/s | -| zstd 1 | 2.71 | 96 MB/s | 210 MB/s | -| zstd 5 | 2.93 | 69 MB/s | 198 MB/s | -| zstd 10 | 3.01 | 41 MB/s | 225 MB/s | -| zstd 15 | 3.13 | 11.4 MB/s | 224 MB/s | -| zstd 16 256 KB | 3.24 | 8.1 MB/s | 210 MB/s | +1. `cd` into this directory. +2. Run `make libzstd` and read the output. Make sure that all the diffs printed and changes made by the script are correct. +3. Run `make test` and ensure that it passes. +4. Import zstd into the Linux Kernel `make import LINUX=/path/to/linux/repo` +5. Inspect the diff for sanity. +6. Check the Linux Kernel history for zstd. If any patches were made to the kernel version of zstd, but not to upstream zstd, then port them upstream if necessary. +7. Test the diff. Benchmark if necessary. Make sure to test multiple architectures: At least x86, i386, and arm. +8. Submit the patch to the LKML. diff --git a/contrib/linux-kernel/decompress_sources.h b/contrib/linux-kernel/decompress_sources.h new file mode 100644 index 00000000000..8a47eb2a451 --- /dev/null +++ b/contrib/linux-kernel/decompress_sources.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/* + * This file includes every .c file needed for decompression. 
+ * It is used by lib/decompress_unzstd.c to include the decompression + * source into the translation-unit, so it can be used for kernel + * decompression. + */ + +/* + * Disable the ASM Huffman implementation because we need to + * include all the sources. + */ +#define ZSTD_DISABLE_ASM 1 + +#include "common/debug.c" +#include "common/entropy_common.c" +#include "common/error_private.c" +#include "common/fse_decompress.c" +#include "common/zstd_common.c" +#include "decompress/huf_decompress.c" +#include "decompress/zstd_ddict.c" +#include "decompress/zstd_decompress.c" +#include "decompress/zstd_decompress_block.c" +#include "zstd_decompress_module.c" diff --git a/contrib/linux-kernel/fs/btrfs/zstd.c b/contrib/linux-kernel/fs/btrfs/zstd.c deleted file mode 100644 index 607ce47b483..00000000000 --- a/contrib/linux-kernel/fs/btrfs/zstd.c +++ /dev/null @@ -1,432 +0,0 @@ -/* - * Copyright (c) 2016-present, Facebook, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public - * License v2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "compression.h" - -#define ZSTD_BTRFS_MAX_WINDOWLOG 17 -#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG) -#define ZSTD_BTRFS_DEFAULT_LEVEL 3 - -static ZSTD_parameters zstd_get_btrfs_parameters(size_t src_len) -{ - ZSTD_parameters params = ZSTD_getParams(ZSTD_BTRFS_DEFAULT_LEVEL, - src_len, 0); - - if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG) - params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG; - WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT); - return params; -} - -struct workspace { - void *mem; - size_t size; - char *buf; - struct list_head list; -}; - -static void zstd_free_workspace(struct list_head *ws) -{ - struct workspace *workspace = list_entry(ws, struct workspace, list); - - kvfree(workspace->mem); - kfree(workspace->buf); - kfree(workspace); -} - -static struct list_head *zstd_alloc_workspace(void) -{ - ZSTD_parameters params = - zstd_get_btrfs_parameters(ZSTD_BTRFS_MAX_INPUT); - struct workspace *workspace; - - workspace = kzalloc(sizeof(*workspace), GFP_KERNEL); - if (!workspace) - return ERR_PTR(-ENOMEM); - - workspace->size = max_t(size_t, - ZSTD_CStreamWorkspaceBound(params.cParams), - ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT)); - workspace->mem = kvmalloc(workspace->size, GFP_KERNEL); - workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!workspace->mem || !workspace->buf) - goto fail; - - INIT_LIST_HEAD(&workspace->list); - - return &workspace->list; -fail: - zstd_free_workspace(&workspace->list); - return ERR_PTR(-ENOMEM); -} - -static int zstd_compress_pages(struct list_head *ws, - struct address_space *mapping, - u64 start, - struct page **pages, - unsigned long *out_pages, - unsigned long *total_in, - unsigned long *total_out) -{ - struct workspace *workspace = list_entry(ws, struct workspace, list); - ZSTD_CStream *stream; - int ret = 0; - int nr_pages = 0; - struct page *in_page = NULL; /* The current page to read */ - struct page *out_page = NULL; /* The current page to write to */ - ZSTD_inBuffer in_buf = { NULL, 0, 0 }; - ZSTD_outBuffer out_buf = { 
NULL, 0, 0 }; - unsigned long tot_in = 0; - unsigned long tot_out = 0; - unsigned long len = *total_out; - const unsigned long nr_dest_pages = *out_pages; - unsigned long max_out = nr_dest_pages * PAGE_SIZE; - ZSTD_parameters params = zstd_get_btrfs_parameters(len); - - *out_pages = 0; - *total_out = 0; - *total_in = 0; - - /* Initialize the stream */ - stream = ZSTD_initCStream(params, len, workspace->mem, - workspace->size); - if (!stream) { - pr_warn("BTRFS: ZSTD_initCStream failed\n"); - ret = -EIO; - goto out; - } - - /* map in the first page of input data */ - in_page = find_get_page(mapping, start >> PAGE_SHIFT); - in_buf.src = kmap(in_page); - in_buf.pos = 0; - in_buf.size = min_t(size_t, len, PAGE_SIZE); - - - /* Allocate and map in the output buffer */ - out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); - if (out_page == NULL) { - ret = -ENOMEM; - goto out; - } - pages[nr_pages++] = out_page; - out_buf.dst = kmap(out_page); - out_buf.pos = 0; - out_buf.size = min_t(size_t, max_out, PAGE_SIZE); - - while (1) { - size_t ret2; - - ret2 = ZSTD_compressStream(stream, &out_buf, &in_buf); - if (ZSTD_isError(ret2)) { - pr_debug("BTRFS: ZSTD_compressStream returned %d\n", - ZSTD_getErrorCode(ret2)); - ret = -EIO; - goto out; - } - - /* Check to see if we are making it bigger */ - if (tot_in + in_buf.pos > 8192 && - tot_in + in_buf.pos < - tot_out + out_buf.pos) { - ret = -E2BIG; - goto out; - } - - /* We've reached the end of our output range */ - if (out_buf.pos >= max_out) { - tot_out += out_buf.pos; - ret = -E2BIG; - goto out; - } - - /* Check if we need more output space */ - if (out_buf.pos == out_buf.size) { - tot_out += PAGE_SIZE; - max_out -= PAGE_SIZE; - kunmap(out_page); - if (nr_pages == nr_dest_pages) { - out_page = NULL; - ret = -E2BIG; - goto out; - } - out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); - if (out_page == NULL) { - ret = -ENOMEM; - goto out; - } - pages[nr_pages++] = out_page; - out_buf.dst = kmap(out_page); - out_buf.pos = 0; - out_buf.size = min_t(size_t, max_out, PAGE_SIZE); - } - - /* We've reached the end of the input */ - if (in_buf.pos >= len) { - tot_in += in_buf.pos; - break; - } - - /* Check if we need more input */ - if (in_buf.pos == in_buf.size) { - tot_in += PAGE_SIZE; - kunmap(in_page); - put_page(in_page); - - start += PAGE_SIZE; - len -= PAGE_SIZE; - in_page = find_get_page(mapping, start >> PAGE_SHIFT); - in_buf.src = kmap(in_page); - in_buf.pos = 0; - in_buf.size = min_t(size_t, len, PAGE_SIZE); - } - } - while (1) { - size_t ret2; - - ret2 = ZSTD_endStream(stream, &out_buf); - if (ZSTD_isError(ret2)) { - pr_debug("BTRFS: ZSTD_endStream returned %d\n", - ZSTD_getErrorCode(ret2)); - ret = -EIO; - goto out; - } - if (ret2 == 0) { - tot_out += out_buf.pos; - break; - } - if (out_buf.pos >= max_out) { - tot_out += out_buf.pos; - ret = -E2BIG; - goto out; - } - - tot_out += PAGE_SIZE; - max_out -= PAGE_SIZE; - kunmap(out_page); - if (nr_pages == nr_dest_pages) { - out_page = NULL; - ret = -E2BIG; - goto out; - } - out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); - if (out_page == NULL) { - ret = -ENOMEM; - goto out; - } - pages[nr_pages++] = out_page; - out_buf.dst = kmap(out_page); - out_buf.pos = 0; - out_buf.size = min_t(size_t, max_out, PAGE_SIZE); - } - - if (tot_out >= tot_in) { - ret = -E2BIG; - goto out; - } - - ret = 0; - *total_in = tot_in; - *total_out = tot_out; -out: - *out_pages = nr_pages; - /* Cleanup */ - if (in_page) { - kunmap(in_page); - put_page(in_page); - } - if (out_page) - kunmap(out_page); - return ret; -} - -static int 
zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb) -{ - struct workspace *workspace = list_entry(ws, struct workspace, list); - struct page **pages_in = cb->compressed_pages; - u64 disk_start = cb->start; - struct bio *orig_bio = cb->orig_bio; - size_t srclen = cb->compressed_len; - ZSTD_DStream *stream; - int ret = 0; - unsigned long page_in_index = 0; - unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE); - unsigned long buf_start; - unsigned long total_out = 0; - ZSTD_inBuffer in_buf = { NULL, 0, 0 }; - ZSTD_outBuffer out_buf = { NULL, 0, 0 }; - - stream = ZSTD_initDStream( - ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size); - if (!stream) { - pr_debug("BTRFS: ZSTD_initDStream failed\n"); - ret = -EIO; - goto done; - } - - in_buf.src = kmap(pages_in[page_in_index]); - in_buf.pos = 0; - in_buf.size = min_t(size_t, srclen, PAGE_SIZE); - - out_buf.dst = workspace->buf; - out_buf.pos = 0; - out_buf.size = PAGE_SIZE; - - while (1) { - size_t ret2; - - ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf); - if (ZSTD_isError(ret2)) { - pr_debug("BTRFS: ZSTD_decompressStream returned %d\n", - ZSTD_getErrorCode(ret2)); - ret = -EIO; - goto done; - } - buf_start = total_out; - total_out += out_buf.pos; - out_buf.pos = 0; - - ret = btrfs_decompress_buf2page(out_buf.dst, buf_start, - total_out, disk_start, orig_bio); - if (ret == 0) - break; - - if (in_buf.pos >= srclen) - break; - - /* Check if we've hit the end of a frame */ - if (ret2 == 0) - break; - - if (in_buf.pos == in_buf.size) { - kunmap(pages_in[page_in_index++]); - if (page_in_index >= total_pages_in) { - in_buf.src = NULL; - ret = -EIO; - goto done; - } - srclen -= PAGE_SIZE; - in_buf.src = kmap(pages_in[page_in_index]); - in_buf.pos = 0; - in_buf.size = min_t(size_t, srclen, PAGE_SIZE); - } - } - ret = 0; - zero_fill_bio(orig_bio); -done: - if (in_buf.src) - kunmap(pages_in[page_in_index]); - return ret; -} - -static int zstd_decompress(struct list_head *ws, unsigned char *data_in, - struct page *dest_page, - unsigned long start_byte, - size_t srclen, size_t destlen) -{ - struct workspace *workspace = list_entry(ws, struct workspace, list); - ZSTD_DStream *stream; - int ret = 0; - size_t ret2; - ZSTD_inBuffer in_buf = { NULL, 0, 0 }; - ZSTD_outBuffer out_buf = { NULL, 0, 0 }; - unsigned long total_out = 0; - unsigned long pg_offset = 0; - char *kaddr; - - stream = ZSTD_initDStream( - ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size); - if (!stream) { - pr_warn("BTRFS: ZSTD_initDStream failed\n"); - ret = -EIO; - goto finish; - } - - destlen = min_t(size_t, destlen, PAGE_SIZE); - - in_buf.src = data_in; - in_buf.pos = 0; - in_buf.size = srclen; - - out_buf.dst = workspace->buf; - out_buf.pos = 0; - out_buf.size = PAGE_SIZE; - - ret2 = 1; - while (pg_offset < destlen && in_buf.pos < in_buf.size) { - unsigned long buf_start; - unsigned long buf_offset; - unsigned long bytes; - - /* Check if the frame is over and we still need more input */ - if (ret2 == 0) { - pr_debug("BTRFS: ZSTD_decompressStream ended early\n"); - ret = -EIO; - goto finish; - } - ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf); - if (ZSTD_isError(ret2)) { - pr_debug("BTRFS: ZSTD_decompressStream returned %d\n", - ZSTD_getErrorCode(ret2)); - ret = -EIO; - goto finish; - } - - buf_start = total_out; - total_out += out_buf.pos; - out_buf.pos = 0; - - if (total_out <= start_byte) - continue; - - if (total_out > start_byte && buf_start < start_byte) - buf_offset = start_byte - buf_start; - else - buf_offset = 0; - - bytes 
= min_t(unsigned long, destlen - pg_offset, - out_buf.size - buf_offset); - - kaddr = kmap_atomic(dest_page); - memcpy(kaddr + pg_offset, out_buf.dst + buf_offset, bytes); - kunmap_atomic(kaddr); - - pg_offset += bytes; - } - ret = 0; -finish: - if (pg_offset < destlen) { - kaddr = kmap_atomic(dest_page); - memset(kaddr + pg_offset, 0, destlen - pg_offset); - kunmap_atomic(kaddr); - } - return ret; -} - -const struct btrfs_compress_op btrfs_zstd_compress = { - .alloc_workspace = zstd_alloc_workspace, - .free_workspace = zstd_free_workspace, - .compress_pages = zstd_compress_pages, - .decompress_bio = zstd_decompress_bio, - .decompress = zstd_decompress, -}; diff --git a/contrib/linux-kernel/fs/squashfs/zstd_wrapper.c b/contrib/linux-kernel/fs/squashfs/zstd_wrapper.c deleted file mode 100644 index eeaabf88115..00000000000 --- a/contrib/linux-kernel/fs/squashfs/zstd_wrapper.c +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Squashfs - a compressed read only filesystem for Linux - * - * Copyright (c) 2016-present, Facebook, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2, - * or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * zstd_wrapper.c - */ - -#include -#include -#include -#include -#include - -#include "squashfs_fs.h" -#include "squashfs_fs_sb.h" -#include "squashfs.h" -#include "decompressor.h" -#include "page_actor.h" - -struct workspace { - void *mem; - size_t mem_size; - size_t window_size; -}; - -static void *zstd_init(struct squashfs_sb_info *msblk, void *buff) -{ - struct workspace *wksp = kmalloc(sizeof(*wksp), GFP_KERNEL); - - if (wksp == NULL) - goto failed; - wksp->window_size = max_t(size_t, - msblk->block_size, SQUASHFS_METADATA_SIZE); - wksp->mem_size = ZSTD_DStreamWorkspaceBound(wksp->window_size); - wksp->mem = vmalloc(wksp->mem_size); - if (wksp->mem == NULL) - goto failed; - - return wksp; - -failed: - ERROR("Failed to allocate zstd workspace\n"); - kfree(wksp); - return ERR_PTR(-ENOMEM); -} - - -static void zstd_free(void *strm) -{ - struct workspace *wksp = strm; - - if (wksp) - vfree(wksp->mem); - kfree(wksp); -} - - -static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm, - struct buffer_head **bh, int b, int offset, int length, - struct squashfs_page_actor *output) -{ - struct workspace *wksp = strm; - ZSTD_DStream *stream; - size_t total_out = 0; - size_t zstd_err; - int k = 0; - ZSTD_inBuffer in_buf = { NULL, 0, 0 }; - ZSTD_outBuffer out_buf = { NULL, 0, 0 }; - - stream = ZSTD_initDStream(wksp->window_size, wksp->mem, wksp->mem_size); - - if (!stream) { - ERROR("Failed to initialize zstd decompressor\n"); - goto out; - } - - out_buf.size = PAGE_SIZE; - out_buf.dst = squashfs_first_page(output); - - do { - if (in_buf.pos == in_buf.size && k < b) { - int avail = min(length, msblk->devblksize - offset); - - length -= avail; - in_buf.src = bh[k]->b_data + offset; - in_buf.size = avail; - in_buf.pos = 0; - offset = 0; - } - - if (out_buf.pos == out_buf.size) { - out_buf.dst = squashfs_next_page(output); - if (out_buf.dst == NULL) { - /* Shouldn't run out of pages - * before stream is done. 
- */ - squashfs_finish_page(output); - goto out; - } - out_buf.pos = 0; - out_buf.size = PAGE_SIZE; - } - - total_out -= out_buf.pos; - zstd_err = ZSTD_decompressStream(stream, &out_buf, &in_buf); - total_out += out_buf.pos; /* add the additional data produced */ - - if (in_buf.pos == in_buf.size && k < b) - put_bh(bh[k++]); - } while (zstd_err != 0 && !ZSTD_isError(zstd_err)); - - squashfs_finish_page(output); - - if (ZSTD_isError(zstd_err)) { - ERROR("zstd decompression error: %d\n", - (int)ZSTD_getErrorCode(zstd_err)); - goto out; - } - - if (k < b) - goto out; - - return (int)total_out; - -out: - for (; k < b; k++) - put_bh(bh[k]); - - return -EIO; -} - -const struct squashfs_decompressor squashfs_zstd_comp_ops = { - .init = zstd_init, - .free = zstd_free, - .decompress = zstd_uncompress, - .id = ZSTD_COMPRESSION, - .name = "zstd", - .supported = 1 -}; diff --git a/contrib/linux-kernel/include/linux/xxhash.h b/contrib/linux-kernel/include/linux/xxhash.h deleted file mode 100644 index 9e1f42cb57e..00000000000 --- a/contrib/linux-kernel/include/linux/xxhash.h +++ /dev/null @@ -1,236 +0,0 @@ -/* - * xxHash - Extremely Fast Hash algorithm - * Copyright (C) 2012-2016, Yann Collet. - * - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License version 2 as published by the - * Free Software Foundation. This program is dual-licensed; you may select - * either version 2 of the GNU General Public License ("GPL") or BSD license - * ("BSD"). - * - * You can contact the author at: - * - xxHash homepage: http://cyan4973.github.io/xxHash/ - * - xxHash source repository: https://github.com/Cyan4973/xxHash - */ - -/* - * Notice extracted from xxHash homepage: - * - * xxHash is an extremely fast Hash algorithm, running at RAM speed limits. - * It also successfully passes all tests from the SMHasher suite. 
- * - * Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 - * Duo @3GHz) - * - * Name Speed Q.Score Author - * xxHash 5.4 GB/s 10 - * CrapWow 3.2 GB/s 2 Andrew - * MurmurHash 3a 2.7 GB/s 10 Austin Appleby - * SpookyHash 2.0 GB/s 10 Bob Jenkins - * SBox 1.4 GB/s 9 Bret Mulvey - * Lookup3 1.2 GB/s 9 Bob Jenkins - * SuperFastHash 1.2 GB/s 1 Paul Hsieh - * CityHash64 1.05 GB/s 10 Pike & Alakuijala - * FNV 0.55 GB/s 5 Fowler, Noll, Vo - * CRC32 0.43 GB/s 9 - * MD5-32 0.33 GB/s 10 Ronald L. Rivest - * SHA1-32 0.28 GB/s 10 - * - * Q.Score is a measure of quality of the hash function. - * It depends on successfully passing SMHasher test set. - * 10 is a perfect score. - * - * A 64-bit version, named xxh64, offers much better speed, - * but for 64-bit applications only. - * Name Speed on 64 bits Speed on 32 bits - * xxh64 13.8 GB/s 1.9 GB/s - * xxh32 6.8 GB/s 6.0 GB/s - */ - -#ifndef XXHASH_H -#define XXHASH_H - -#include - -/*-**************************** - * Simple Hash Functions - *****************************/ - -/** - * xxh32() - calculate the 32-bit hash of the input with a given seed. - * - * @input: The data to hash. - * @length: The length of the data to hash. - * @seed: The seed can be used to alter the result predictably. - * - * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s - * - * Return: The 32-bit hash of the data. - */ -uint32_t xxh32(const void *input, size_t length, uint32_t seed); - -/** - * xxh64() - calculate the 64-bit hash of the input with a given seed. - * - * @input: The data to hash. - * @length: The length of the data to hash. - * @seed: The seed can be used to alter the result predictably. - * - * This function runs 2x faster on 64-bit systems, but slower on 32-bit systems. - * - * Return: The 64-bit hash of the data. - */ -uint64_t xxh64(const void *input, size_t length, uint64_t seed); - -/*-**************************** - * Streaming Hash Functions - *****************************/ - -/* - * These definitions are only meant to allow allocation of XXH state - * statically, on stack, or in a struct for example. - * Do not use members directly. - */ - -/** - * struct xxh32_state - private xxh32 state, do not use members directly - */ -struct xxh32_state { - uint32_t total_len_32; - uint32_t large_len; - uint32_t v1; - uint32_t v2; - uint32_t v3; - uint32_t v4; - uint32_t mem32[4]; - uint32_t memsize; -}; - -/** - * struct xxh64_state - private xxh64 state, do not use members directly - */ -struct xxh64_state { - uint64_t total_len; - uint64_t v1; - uint64_t v2; - uint64_t v3; - uint64_t v4; - uint64_t mem64[4]; - uint32_t memsize; -}; - -/** - * xxh32_reset() - reset the xxh32 state to start a new hashing operation - * - * @state: The xxh32 state to reset. - * @seed: Initialize the hash state with this seed. - * - * Call this function on any xxh32_state to prepare for a new hashing operation. - */ -void xxh32_reset(struct xxh32_state *state, uint32_t seed); - -/** - * xxh32_update() - hash the data given and update the xxh32 state - * - * @state: The xxh32 state to update. - * @input: The data to hash. - * @length: The length of the data to hash. - * - * After calling xxh32_reset() call xxh32_update() as many times as necessary. - * - * Return: Zero on success, otherwise an error code. - */ -int xxh32_update(struct xxh32_state *state, const void *input, size_t length); - -/** - * xxh32_digest() - produce the current xxh32 hash - * - * @state: Produce the current xxh32 hash of this state.
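For illustration, the streaming calls documented above compose as follows. This is a minimal sketch: the helper name and the two-chunk split are invented here, and xxh32_update()'s return code is ignored for brevity.

#include <linux/xxhash.h>

/* Hash a buffer in two pieces; the result matches a one-shot xxh32() call. */
static uint32_t xxh32_two_chunks(const void *buf, size_t len, uint32_t seed)
{
        struct xxh32_state state;
        size_t half = len / 2;

        xxh32_reset(&state, seed);
        xxh32_update(&state, buf, half);
        xxh32_update(&state, (const char *)buf + half, len - half);
        return xxh32_digest(&state);
}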
- * - * A hash value can be produced at any time. It is still possible to continue - * inserting input into the hash state after a call to xxh32_digest(), and - * generate new hashes later on, by calling xxh32_digest() again. - * - * Return: The xxh32 hash stored in the state. - */ -uint32_t xxh32_digest(const struct xxh32_state *state); - -/** - * xxh64_reset() - reset the xxh64 state to start a new hashing operation - * - * @state: The xxh64 state to reset. - * @seed: Initialize the hash state with this seed. - */ -void xxh64_reset(struct xxh64_state *state, uint64_t seed); - -/** - * xxh64_update() - hash the data given and update the xxh64 state - * @state: The xxh64 state to update. - * @input: The data to hash. - * @length: The length of the data to hash. - * - * After calling xxh64_reset() call xxh64_update() as many times as necessary. - * - * Return: Zero on success, otherwise an error code. - */ -int xxh64_update(struct xxh64_state *state, const void *input, size_t length); - -/** - * xxh64_digest() - produce the current xxh64 hash - * - * @state: Produce the current xxh64 hash of this state. - * - * A hash value can be produced at any time. It is still possible to continue - * inserting input into the hash state after a call to xxh64_digest(), and - * generate new hashes later on, by calling xxh64_digest() again. - * - * Return: The xxh64 hash stored in the state. - */ -uint64_t xxh64_digest(const struct xxh64_state *state); - -/*-************************** - * Utils - ***************************/ - -/** - * xxh32_copy_state() - copy the source state into the destination state - * - * @src: The source xxh32 state. - * @dst: The destination xxh32 state. - */ -void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src); - -/** - * xxh64_copy_state() - copy the source state into the destination state - * - * @src: The source xxh64 state. - * @dst: The destination xxh64 state. - */ -void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src); - -#endif /* XXHASH_H */ diff --git a/contrib/linux-kernel/include/linux/zstd.h b/contrib/linux-kernel/include/linux/zstd.h deleted file mode 100644 index 305efd0934e..00000000000 --- a/contrib/linux-kernel/include/linux/zstd.h +++ /dev/null @@ -1,1155 +0,0 @@ -/* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of https://github.com/facebook/zstd. - * - * This program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License version 2 as published by the - * Free Software Foundation. This program is dual-licensed; you may select - * either version 2 of the GNU General Public License ("GPL") or BSD license - * ("BSD"). - */ - -#ifndef ZSTD_H -#define ZSTD_H - -/* ====== Dependency ======*/ -#include /* size_t */ - - -/*-***************************************************************************** - * Introduction - * - * zstd, short for Zstandard, is a fast lossless compression algorithm, - * targeting real-time compression scenarios at zlib-level and better - * compression ratios. The zstd compression library provides in-memory - * compression and decompression functions. The library supports compression - * levels from 1 up to ZSTD_maxCLevel() which is 22. Levels >= 20, labeled - * ultra, should be used with caution, as they require more memory. 
- * Compression can be done in: - * - a single step, reusing a context (described as Explicit memory management) - * - unbounded multiple steps (described as Streaming compression) - * The compression ratio achievable on small data can be highly improved using - * compression with a dictionary in: - * - a single step (described as Simple dictionary API) - * - a single step, reusing a dictionary (described as Fast dictionary API) - ******************************************************************************/ - -/*====== Helper functions ======*/ - -/** - * enum ZSTD_ErrorCode - zstd error codes - * - * Functions that return size_t can be checked for errors using ZSTD_isError() - * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode(). - */ -typedef enum { - ZSTD_error_no_error, - ZSTD_error_GENERIC, - ZSTD_error_prefix_unknown, - ZSTD_error_version_unsupported, - ZSTD_error_parameter_unknown, - ZSTD_error_frameParameter_unsupported, - ZSTD_error_frameParameter_unsupportedBy32bits, - ZSTD_error_frameParameter_windowTooLarge, - ZSTD_error_compressionParameter_unsupported, - ZSTD_error_init_missing, - ZSTD_error_memory_allocation, - ZSTD_error_stage_wrong, - ZSTD_error_dstSize_tooSmall, - ZSTD_error_srcSize_wrong, - ZSTD_error_corruption_detected, - ZSTD_error_checksum_wrong, - ZSTD_error_tableLog_tooLarge, - ZSTD_error_maxSymbolValue_tooLarge, - ZSTD_error_maxSymbolValue_tooSmall, - ZSTD_error_dictionary_corrupted, - ZSTD_error_dictionary_wrong, - ZSTD_error_dictionaryCreation_failed, - ZSTD_error_maxCode -} ZSTD_ErrorCode; - -/** - * ZSTD_maxCLevel() - maximum compression level available - * - * Return: Maximum compression level available. - */ -int ZSTD_maxCLevel(void); -/** - * ZSTD_compressBound() - maximum compressed size in worst case scenario - * @srcSize: The size of the data to compress. - * - * Return: The maximum compressed size in the worst case scenario. - */ -size_t ZSTD_compressBound(size_t srcSize); -/** - * ZSTD_isError() - tells if a size_t function result is an error code - * @code: The function result to check for error. - * - * Return: Non-zero iff the code is an error. - */ -static __attribute__((unused)) unsigned int ZSTD_isError(size_t code) -{ - return code > (size_t)-ZSTD_error_maxCode; -} -/** - * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode - * @functionResult: The result of a function for which ZSTD_isError() is true. - * - * Return: The ZSTD_ErrorCode corresponding to the functionResult or 0 - * if the functionResult isn't an error. - */ -static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode( - size_t functionResult) -{ - if (!ZSTD_isError(functionResult)) - return (ZSTD_ErrorCode)0; - return (ZSTD_ErrorCode)(0 - functionResult); -} - -/** - * enum ZSTD_strategy - zstd compression search strategy - * - * From faster to stronger. - */ -typedef enum { - ZSTD_fast, - ZSTD_dfast, - ZSTD_greedy, - ZSTD_lazy, - ZSTD_lazy2, - ZSTD_btlazy2, - ZSTD_btopt, - ZSTD_btopt2 -} ZSTD_strategy; - -/** - * struct ZSTD_compressionParameters - zstd compression parameters - * @windowLog: Log of the largest match distance. Larger means more - * compression, and more memory needed during decompression. - * @chainLog: Fully searched segment. Larger means more compression, slower, - * and more memory (useless for fast). - * @hashLog: Dispatch table. Larger means more compression, - * slower, and more memory. - * @searchLog: Number of searches. Larger means more compression and slower. - * @searchLength: Match length searched. 
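As a sketch of the error-handling convention described above: any size_t-returning zstd function can be filtered through a helper like the one below. The errno mapping is purely illustrative, not part of the API.

/* Map a zstd size_t result to 0 or a negative errno (illustrative mapping). */
static int zstd_check(size_t ret)
{
        if (!ZSTD_isError(ret))
                return 0;
        switch (ZSTD_getErrorCode(ret)) {
        case ZSTD_error_memory_allocation:
                return -ENOMEM;
        case ZSTD_error_dstSize_tooSmall:
                return -ENOSPC;
        default:
                return -EINVAL;
        }
}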
Larger means faster decompression, - * sometimes less compression. - * @targetLength: Acceptable match size for optimal parser (only). Larger means - * more compression, and slower. - * @strategy: The zstd compression strategy. - */ -typedef struct { - unsigned int windowLog; - unsigned int chainLog; - unsigned int hashLog; - unsigned int searchLog; - unsigned int searchLength; - unsigned int targetLength; - ZSTD_strategy strategy; -} ZSTD_compressionParameters; - -/** - * struct ZSTD_frameParameters - zstd frame parameters - * @contentSizeFlag: Controls whether content size will be present in the frame - * header (when known). - * @checksumFlag: Controls whether a 32-bit checksum is generated at the end - * of the frame for error detection. - * @noDictIDFlag: Controls whether dictID will be saved into the frame header - * when using dictionary compression. - * - * The default value is all fields set to 0. - */ -typedef struct { - unsigned int contentSizeFlag; - unsigned int checksumFlag; - unsigned int noDictIDFlag; -} ZSTD_frameParameters; - -/** - * struct ZSTD_parameters - zstd parameters - * @cParams: The compression parameters. - * @fParams: The frame parameters. - */ -typedef struct { - ZSTD_compressionParameters cParams; - ZSTD_frameParameters fParams; -} ZSTD_parameters; - -/** - * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level - * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel(). - * @estimatedSrcSize: The estimated source size to compress or 0 if unknown. - * @dictSize: The dictionary size or 0 if a dictionary isn't being used. - * - * Return: The selected ZSTD_compressionParameters. - */ -ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, - unsigned long long estimatedSrcSize, size_t dictSize); - -/** - * ZSTD_getParams() - returns ZSTD_parameters for selected level - * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel(). - * @estimatedSrcSize: The estimated source size to compress or 0 if unknown. - * @dictSize: The dictionary size or 0 if a dictionary isn't being used. - * - * The same as ZSTD_getCParams() except also selects the default frame - * parameters (all zero). - * - * Return: The selected ZSTD_parameters. - */ -ZSTD_parameters ZSTD_getParams(int compressionLevel, - unsigned long long estimatedSrcSize, size_t dictSize); - -/*-************************************* - * Explicit memory management - **************************************/ - -/** - * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx - * @cParams: The compression parameters to be used for compression. - * - * If multiple compression parameters might be used, the caller must call - * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum - * size. - * - * Return: A lower bound on the size of the workspace that is passed to - * ZSTD_initCCtx(). - */ -size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams); - -/** - * struct ZSTD_CCtx - the zstd compression context - * - * When compressing many times it is recommended to allocate a context just once - * and reuse it for each successive compression operation. - */ -typedef struct ZSTD_CCtx_s ZSTD_CCtx; -/** - * ZSTD_initCCtx() - initialize a zstd compression context - * @workspace: The workspace to emplace the context into. It must outlive - * the returned context. - * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to - * determine how large the workspace must be. 
- * - * Return: A compression context emplaced into workspace. - */ -ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize); - -/** - * ZSTD_compressCCtx() - compress src into dst - * @ctx: The context. Must have been initialized with a workspace at - * least as large as ZSTD_CCtxWorkspaceBound(params.cParams). - * @dst: The buffer to compress src into. - * @dstCapacity: The size of the destination buffer. May be any size, but - * ZSTD_compressBound(srcSize) is guaranteed to be large enough. - * @src: The data to compress. - * @srcSize: The size of the data to compress. - * @params: The parameters to use for compression. See ZSTD_getParams(). - * - * Return: The compressed size or an error, which can be checked using - * ZSTD_isError(). - */ -size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, - const void *src, size_t srcSize, ZSTD_parameters params); - -/** - * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx - * - * Return: A lower bound on the size of the workspace that is passed to - * ZSTD_initDCtx(). - */ -size_t ZSTD_DCtxWorkspaceBound(void); - -/** - * struct ZSTD_DCtx - the zstd decompression context - * - * When decompressing many times it is recommended to allocate a context just - * once and reuse it for each successive decompression operation. - */ -typedef struct ZSTD_DCtx_s ZSTD_DCtx; -/** - * ZSTD_initDCtx() - initialize a zstd decompression context - * @workspace: The workspace to emplace the context into. It must outlive - * the returned context. - * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to - * determine how large the workspace must be. - * - * Return: A decompression context emplaced into workspace. - */ -ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize); - -/** - * ZSTD_decompressDCtx() - decompress zstd compressed src into dst - * @ctx: The decompression context. - * @dst: The buffer to decompress src into. - * @dstCapacity: The size of the destination buffer. Must be at least as large - * as the decompressed size. If the caller cannot upper bound the - * decompressed size, then it's better to use the streaming API. - * @src: The zstd compressed data to decompress. Multiple concatenated - * frames and skippable frames are allowed. - * @srcSize: The exact size of the data to decompress. - * - * Return: The decompressed size or an error, which can be checked using - * ZSTD_isError(). - */ -size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity, - const void *src, size_t srcSize); - -/*-************************ - * Simple dictionary API - **************************/ - -/** - * ZSTD_compress_usingDict() - compress src into dst using a dictionary - * @ctx: The context. Must have been initialized with a workspace at - * least as large as ZSTD_CCtxWorkspaceBound(params.cParams). - * @dst: The buffer to compress src into. - * @dstCapacity: The size of the destination buffer. May be any size, but - * ZSTD_compressBound(srcSize) is guaranteed to be large enough. - * @src: The data to compress. - * @srcSize: The size of the data to compress. - * @dict: The dictionary to use for compression. - * @dictSize: The size of the dictionary. - * @params: The parameters to use for compression. See ZSTD_getParams(). - * - * Compression using a predefined dictionary. The same dictionary must be used - * during decompression. - * - * Return: The compressed size or an error, which can be checked using - * ZSTD_isError(). 
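Putting the pieces above together, here is a minimal sketch of one-shot compression with a caller-managed workspace. Level 3 and the kvmalloc()/kvfree() pairing (as in the btrfs code earlier in this patch) are assumptions, not requirements.

static ssize_t compress_once(void *dst, size_t dstCapacity,
                             const void *src, size_t srcSize)
{
        ZSTD_parameters const params = ZSTD_getParams(3, srcSize, 0);
        size_t const wkspSize = ZSTD_CCtxWorkspaceBound(params.cParams);
        void *wksp = kvmalloc(wkspSize, GFP_KERNEL);
        ZSTD_CCtx *cctx;
        size_t ret;

        if (!wksp)
                return -ENOMEM;
        cctx = ZSTD_initCCtx(wksp, wkspSize);
        if (!cctx) {
                kvfree(wksp);
                return -EINVAL;
        }
        ret = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, params);
        kvfree(wksp);
        return ZSTD_isError(ret) ? -EIO : (ssize_t)ret;
}

A long-lived user would allocate the workspace once and reuse the context across calls, as the struct ZSTD_CCtx comment above recommends.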
- */ -size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, - const void *src, size_t srcSize, const void *dict, size_t dictSize, - ZSTD_parameters params); - -/** - * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary - * @ctx: The decompression context. - * @dst: The buffer to decompress src into. - * @dstCapacity: The size of the destination buffer. Must be at least as large - * as the decompressed size. If the caller cannot upper bound the - * decompressed size, then it's better to use the streaming API. - * @src: The zstd compressed data to decompress. Multiple concatenated - * frames and skippable frames are allowed. - * @srcSize: The exact size of the data to decompress. - * @dict: The dictionary to use for decompression. The same dictionary - * must've been used to compress the data. - * @dictSize: The size of the dictionary. - * - * Return: The decompressed size or an error, which can be checked using - * ZSTD_isError(). - */ -size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity, - const void *src, size_t srcSize, const void *dict, size_t dictSize); - -/*-************************** - * Fast dictionary API - ***************************/ - -/** - * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict - * @cParams: The compression parameters to be used for compression. - * - * Return: A lower bound on the size of the workspace that is passed to - * ZSTD_initCDict(). - */ -size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams); - -/** - * struct ZSTD_CDict - a digested dictionary to be used for compression - */ -typedef struct ZSTD_CDict_s ZSTD_CDict; - -/** - * ZSTD_initCDict() - initialize a digested dictionary for compression - * @dictBuffer: The dictionary to digest. The buffer is referenced by the - * ZSTD_CDict so it must outlive the returned ZSTD_CDict. - * @dictSize: The size of the dictionary. - * @params: The parameters to use for compression. See ZSTD_getParams(). - * @workspace: The workspace. It must outlive the returned ZSTD_CDict. - * @workspaceSize: The workspace size. Must be at least - * ZSTD_CDictWorkspaceBound(params.cParams). - * - * When compressing multiple messages / blocks with the same dictionary it is - * recommended to load it just once. The ZSTD_CDict merely references the - * dictBuffer, so it must outlive the returned ZSTD_CDict. - * - * Return: The digested dictionary emplaced into workspace. - */ -ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize, - ZSTD_parameters params, void *workspace, size_t workspaceSize); - -/** - * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict - * @ctx: The context. Must have been initialized with a workspace at - * least as large as ZSTD_CCtxWorkspaceBound(cParams) where - * cParams are the compression parameters used to initialize the - * cdict. - * @dst: The buffer to compress src into. - * @dstCapacity: The size of the destination buffer. May be any size, but - * ZSTD_compressBound(srcSize) is guaranteed to be large enough. - * @src: The data to compress. - * @srcSize: The size of the data to compress. - * @cdict: The digested dictionary to use for compression. - * @params: The parameters to use for compression. See ZSTD_getParams(). - * - * Compression using a digested dictionary. The same dictionary must be used - * during decompression. - * - * Return: The compressed size or an error, which can be checked using - * ZSTD_isError(). 
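The fast-dictionary API that follows is built for a "digest once, compress many" pattern. A sketch with illustrative names, assuming cdictWksp was sized with ZSTD_CDictWorkspaceBound(params.cParams) and the cctx workspace with the same params.cParams:

/* Once per dictionary: dict and cdictWksp must outlive the returned CDict. */
static const ZSTD_CDict *make_cdict(const void *dict, size_t dictSize,
                                    ZSTD_parameters params,
                                    void *cdictWksp, size_t cdictWkspSize)
{
        return ZSTD_initCDict(dict, dictSize, params, cdictWksp, cdictWkspSize);
}

/* Once per message: decompression must later use the same dictionary. */
static size_t compress_msg(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict,
                           void *dst, size_t dstCapacity,
                           const void *src, size_t srcSize)
{
        return ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
                                        src, srcSize, cdict);
}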
- */ -size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, - const void *src, size_t srcSize, const ZSTD_CDict *cdict); - - -/** - * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict - * - * Return: A lower bound on the size of the workspace that is passed to - * ZSTD_initDDict(). - */ -size_t ZSTD_DDictWorkspaceBound(void); - -/** - * struct ZSTD_DDict - a digested dictionary to be used for decompression - */ -typedef struct ZSTD_DDict_s ZSTD_DDict; - -/** - * ZSTD_initDDict() - initialize a digested dictionary for decompression - * @dictBuffer: The dictionary to digest. The buffer is referenced by the - * ZSTD_DDict so it must outlive the returned ZSTD_DDict. - * @dictSize: The size of the dictionary. - * @workspace: The workspace. It must outlive the returned ZSTD_DDict. - * @workspaceSize: The workspace size. Must be at least - * ZSTD_DDictWorkspaceBound(). - * - * When decompressing multiple messages / blocks with the same dictionary it is - * recommended to load it just once. The ZSTD_DDict merely references the - * dictBuffer, so it must outlive the returned ZSTD_DDict. - * - * Return: The digested dictionary emplaced into workspace. - */ -ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize, - void *workspace, size_t workspaceSize); - -/** - * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict - * @ctx: The decompression context. - * @dst: The buffer to decompress src into. - * @dstCapacity: The size of the destination buffer. Must be at least as large - * as the decompressed size. If the caller cannot upper bound the - * decompressed size, then it's better to use the streaming API. - * @src: The zstd compressed data to decompress. Multiple concatenated - * frames and skippable frames are allowed. - * @srcSize: The exact size of the data to decompress. - * @ddict: The digested dictionary to use for decompression. The same - * dictionary must've been used to compress the data. - * - * Return: The decompressed size or an error, which can be checked using - * ZSTD_isError(). - */ -size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst, - size_t dstCapacity, const void *src, size_t srcSize, - const ZSTD_DDict *ddict); - - -/*-************************** - * Streaming - ***************************/ - -/** - * struct ZSTD_inBuffer - input buffer for streaming - * @src: Start of the input buffer. - * @size: Size of the input buffer. - * @pos: Position where reading stopped. Will be updated. - * Necessarily 0 <= pos <= size. - */ -typedef struct ZSTD_inBuffer_s { - const void *src; - size_t size; - size_t pos; -} ZSTD_inBuffer; - -/** - * struct ZSTD_outBuffer - output buffer for streaming - * @dst: Start of the output buffer. - * @size: Size of the output buffer. - * @pos: Position where writing stopped. Will be updated. - * Necessarily 0 <= pos <= size. - */ -typedef struct ZSTD_outBuffer_s { - void *dst; - size_t size; - size_t pos; -} ZSTD_outBuffer; - - - -/*-***************************************************************************** - * Streaming compression - HowTo - * - * A ZSTD_CStream object is required to track streaming operation. - * Use ZSTD_initCStream() to initialize a ZSTD_CStream object. - * ZSTD_CStream objects can be reused multiple times on consecutive compression - * operations. It is recommended to re-use ZSTD_CStream in situations where many - * streaming operations will be achieved consecutively. Use one separate - * ZSTD_CStream per thread for parallel execution. 
- * - * Use ZSTD_compressStream() repetitively to consume input stream. - * The function will automatically update both `pos` fields. - * Note that it may not consume the entire input, in which case `pos < size`, - * and it's up to the caller to present again remaining data. - * It returns a hint for the preferred number of bytes to use as an input for - * the next function call. - * - * At any moment, it's possible to flush whatever data remains within internal - * buffer, using ZSTD_flushStream(). `output->pos` will be updated. There might - * still be some content left within the internal buffer if `output->size` is - * too small. It returns the number of bytes left in the internal buffer and - * must be called until it returns 0. - * - * ZSTD_endStream() instructs to finish a frame. It will perform a flush and - * write frame epilogue. The epilogue is required for decoders to consider a - * frame completed. Similar to ZSTD_flushStream(), it may not be able to flush - * the full content if `output->size` is too small. In which case, call again - * ZSTD_endStream() to complete the flush. It returns the number of bytes left - * in the internal buffer and must be called until it returns 0. - ******************************************************************************/ - -/** - * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream - * @cParams: The compression parameters to be used for compression. - * - * Return: A lower bound on the size of the workspace that is passed to - * ZSTD_initCStream() and ZSTD_initCStream_usingCDict(). - */ -size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams); - -/** - * struct ZSTD_CStream - the zstd streaming compression context - */ -typedef struct ZSTD_CStream_s ZSTD_CStream; - -/*===== ZSTD_CStream management functions =====*/ -/** - * ZSTD_initCStream() - initialize a zstd streaming compression context - * @params: The zstd compression parameters. - * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must - * pass the source size (zero means empty source). Otherwise, - * the caller may optionally pass the source size, or zero if - * unknown. - * @workspace: The workspace to emplace the context into. It must outlive - * the returned context. - * @workspaceSize: The size of workspace. - * Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine - * how large the workspace must be. - * - * Return: The zstd streaming compression context. - */ -ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, - unsigned long long pledgedSrcSize, void *workspace, - size_t workspaceSize); - -/** - * ZSTD_initCStream_usingCDict() - initialize a streaming compression context - * @cdict: The digested dictionary to use for compression. - * @pledgedSrcSize: Optionally the source size, or zero if unknown. - * @workspace: The workspace to emplace the context into. It must outlive - * the returned context. - * @workspaceSize: The size of workspace. Call ZSTD_CStreamWorkspaceBound() - * with the cParams used to initialize the cdict to determine - * how large the workspace must be. - * - * Return: The zstd streaming compression context. - */ -ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, - unsigned long long pledgedSrcSize, void *workspace, - size_t workspaceSize); - -/*===== Streaming compression functions =====*/ -/** - * ZSTD_resetCStream() - reset the context using parameters from creation - * @zcs: The zstd streaming compression context to reset. 
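A sketch of that HowTo for the simple case of one whole buffer per frame. It assumes dst can hold the entire frame (e.g. ZSTD_compressBound(srcSize) bytes) and that wksp was sized with ZSTD_CStreamWorkspaceBound(params.cParams); the function name is illustrative.

static ssize_t stream_compress(void *dst, size_t dstCapacity,
                               const void *src, size_t srcSize,
                               ZSTD_parameters params,
                               void *wksp, size_t wkspSize)
{
        ZSTD_CStream *zcs = ZSTD_initCStream(params, srcSize, wksp, wkspSize);
        ZSTD_inBuffer in = { src, srcSize, 0 };
        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
        size_t ret;

        if (!zcs)
                return -EIO;
        while (in.pos < in.size) {
                ret = ZSTD_compressStream(zcs, &out, &in);
                if (ZSTD_isError(ret))
                        return -EIO;
                if (out.pos == out.size)
                        return -ENOSPC; /* dst too small to make progress */
        }
        do { /* flush remaining data and write the frame epilogue */
                ret = ZSTD_endStream(zcs, &out);
                if (ZSTD_isError(ret))
                        return -EIO;
                if (ret != 0 && out.pos == out.size)
                        return -ENOSPC;
        } while (ret != 0); /* non-zero: bytes still buffered internally */
        return out.pos;
}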
- * @pledgedSrcSize: Optionally the source size, or zero if unknown. - * - * Resets the context using the parameters from creation. Skips dictionary - * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame - * content size is always written into the frame header. - * - * Return: Zero or an error, which can be checked using ZSTD_isError(). - */ -size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize); -/** - * ZSTD_compressStream() - streaming compress some of input into output - * @zcs: The zstd streaming compression context. - * @output: Destination buffer. `output->pos` is updated to indicate how much - * compressed data was written. - * @input: Source buffer. `input->pos` is updated to indicate how much data was - * read. Note that it may not consume the entire input, in which case - * `input->pos < input->size`, and it's up to the caller to present - * remaining data again. - * - * The `input` and `output` buffers may be any size. Guaranteed to make some - * forward progress if `input` and `output` are not empty. - * - * Return: A hint for the number of bytes to use as the input for the next - * function call or an error, which can be checked using - * ZSTD_isError(). - */ -size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, - ZSTD_inBuffer *input); -/** - * ZSTD_flushStream() - flush internal buffers into output - * @zcs: The zstd streaming compression context. - * @output: Destination buffer. `output->pos` is updated to indicate how much - * compressed data was written. - * - * ZSTD_flushStream() must be called until it returns 0, meaning all the data - * has been flushed. Since ZSTD_flushStream() causes a block to be ended, - * calling it too often will degrade the compression ratio. - * - * Return: The number of bytes still present within internal buffers or an - * error, which can be checked using ZSTD_isError(). - */ -size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output); -/** - * ZSTD_endStream() - flush internal buffers into output and end the frame - * @zcs: The zstd streaming compression context. - * @output: Destination buffer. `output->pos` is updated to indicate how much - * compressed data was written. - * - * ZSTD_endStream() must be called until it returns 0, meaning all the data has - * been flushed and the frame epilogue has been written. - * - * Return: The number of bytes still present within internal buffers or an - * error, which can be checked using ZSTD_isError(). - */ -size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output); - -/** - * ZSTD_CStreamInSize() - recommended size for the input buffer - * - * Return: The recommended size for the input buffer. - */ -size_t ZSTD_CStreamInSize(void); -/** - * ZSTD_CStreamOutSize() - recommended size for the output buffer - * - * When the output buffer is at least this large, it is guaranteed to be large - * enough to flush at least one complete compressed block. - * - * Return: The recommended size for the output buffer. - */ -size_t ZSTD_CStreamOutSize(void); - - - -/*-***************************************************************************** - * Streaming decompression - HowTo - * - * A ZSTD_DStream object is required to track streaming operations. - * Use ZSTD_initDStream() to initialize a ZSTD_DStream object. - * ZSTD_DStream objects can be re-used multiple times. - * - * Use ZSTD_decompressStream() repetitively to consume your input. - * The function will update both `pos` fields. 
- * If `input->pos < input->size`, some input has not been consumed. - * It's up to the caller to present again remaining data. - * If `output->pos < output->size`, decoder has flushed everything it could. - * Returns 0 iff a frame is completely decoded and fully flushed. - * Otherwise it returns a suggested next input size that will never load more - * than the current frame. - ******************************************************************************/ - -/** - * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream - * @maxWindowSize: The maximum window size allowed for compressed frames. - * - * Return: A lower bound on the size of the workspace that is passed to - * ZSTD_initDStream() and ZSTD_initDStream_usingDDict(). - */ -size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize); - -/** - * struct ZSTD_DStream - the zstd streaming decompression context - */ -typedef struct ZSTD_DStream_s ZSTD_DStream; -/*===== ZSTD_DStream management functions =====*/ -/** - * ZSTD_initDStream() - initialize a zstd streaming decompression context - * @maxWindowSize: The maximum window size allowed for compressed frames. - * @workspace: The workspace to emplace the context into. It must outlive - * the returned context. - * @workspaceSize: The size of workspace. - * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine - * how large the workspace must be. - * - * Return: The zstd streaming decompression context. - */ -ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace, - size_t workspaceSize); -/** - * ZSTD_initDStream_usingDDict() - initialize streaming decompression context - * @maxWindowSize: The maximum window size allowed for compressed frames. - * @ddict: The digested dictionary to use for decompression. - * @workspace: The workspace to emplace the context into. It must outlive - * the returned context. - * @workspaceSize: The size of workspace. - * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine - * how large the workspace must be. - * - * Return: The zstd streaming decompression context. - */ -ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize, - const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize); - -/*===== Streaming decompression functions =====*/ -/** - * ZSTD_resetDStream() - reset the context using parameters from creation - * @zds: The zstd streaming decompression context to reset. - * - * Resets the context using the parameters from creation. Skips dictionary - * loading, since it can be reused. - * - * Return: Zero or an error, which can be checked using ZSTD_isError(). - */ -size_t ZSTD_resetDStream(ZSTD_DStream *zds); -/** - * ZSTD_decompressStream() - streaming decompress some of input into output - * @zds: The zstd streaming decompression context. - * @output: Destination buffer. `output.pos` is updated to indicate how much - * decompressed data was written. - * @input: Source buffer. `input.pos` is updated to indicate how much data was - * read. Note that it may not consume the entire input, in which case - * `input.pos < input.size`, and it's up to the caller to present - * remaining data again. - * - * The `input` and `output` buffers may be any size. Guaranteed to make some - * forward progress if `input` and `output` are not empty. - * ZSTD_decompressStream() will not consume the last byte of the frame until - * the entire frame is flushed. - * - * Return: Returns 0 iff a frame is completely decoded and fully flushed. 
- * Otherwise returns a hint for the number of bytes to use as the input - * for the next function call or an error, which can be checked using - * ZSTD_isError(). The size hint will never load more than the frame. - */ -size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, - ZSTD_inBuffer *input); - -/** - * ZSTD_DStreamInSize() - recommended size for the input buffer - * - * Return: The recommended size for the input buffer. - */ -size_t ZSTD_DStreamInSize(void); -/** - * ZSTD_DStreamOutSize() - recommended size for the output buffer - * - * When the output buffer is at least this large, it is guaranteed to be large - * enough to flush at least one complete decompressed block. - * - * Return: The recommended size for the output buffer. - */ -size_t ZSTD_DStreamOutSize(void); - - -/* --- Constants ---*/ -#define ZSTD_MAGICNUMBER 0xFD2FB528 /* >= v0.8.0 */ -#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U - -#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1) -#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2) - -#define ZSTD_WINDOWLOG_MAX_32 27 -#define ZSTD_WINDOWLOG_MAX_64 27 -#define ZSTD_WINDOWLOG_MAX \ - ((unsigned int)(sizeof(size_t) == 4 \ - ? ZSTD_WINDOWLOG_MAX_32 \ - : ZSTD_WINDOWLOG_MAX_64)) -#define ZSTD_WINDOWLOG_MIN 10 -#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX -#define ZSTD_HASHLOG_MIN 6 -#define ZSTD_CHAINLOG_MAX (ZSTD_WINDOWLOG_MAX+1) -#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN -#define ZSTD_HASHLOG3_MAX 17 -#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1) -#define ZSTD_SEARCHLOG_MIN 1 -/* only for ZSTD_fast, other strategies are limited to 6 */ -#define ZSTD_SEARCHLENGTH_MAX 7 -/* only for ZSTD_btopt, other strategies are limited to 4 */ -#define ZSTD_SEARCHLENGTH_MIN 3 -#define ZSTD_TARGETLENGTH_MIN 4 -#define ZSTD_TARGETLENGTH_MAX 999 - -/* for static allocation */ -#define ZSTD_FRAMEHEADERSIZE_MAX 18 -#define ZSTD_FRAMEHEADERSIZE_MIN 6 -static const size_t ZSTD_frameHeaderSize_prefix = 5; -static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN; -static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX; -/* magic number + skippable frame length */ -static const size_t ZSTD_skippableHeaderSize = 8; - - -/*-************************************* - * Compressed size functions - **************************************/ - -/** - * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame - * @src: Source buffer. It should point to the start of a zstd encoded frame - * or a skippable frame. - * @srcSize: The size of the source buffer. It must be at least as large as the - * size of the frame. - * - * Return: The compressed size of the frame pointed to by `src` or an error, - * which can be checked with ZSTD_isError(). - * Suitable to pass to ZSTD_decompress() or similar functions. - */ -size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize); - -/*-************************************* - * Decompressed size functions - **************************************/ -/** - * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header - * @src: It should point to the start of a zstd encoded frame. - * @srcSize: The size of the source buffer. It must be at least as large as the - * frame header. `ZSTD_frameHeaderSize_max` is always large enough. - * - * Return: The frame content size stored in the frame header if known. - * `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the - * frame header. `ZSTD_CONTENTSIZE_ERROR` on invalid input.
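A matching decompression sketch: one complete frame held in src, with windowSize taken from prior frame-header validation and wksp at least ZSTD_DStreamWorkspaceBound(windowSize) bytes. Names are illustrative.

static ssize_t stream_decompress(void *dst, size_t dstCapacity,
                                 const void *src, size_t srcSize,
                                 size_t windowSize,
                                 void *wksp, size_t wkspSize)
{
        ZSTD_DStream *zds = ZSTD_initDStream(windowSize, wksp, wkspSize);
        ZSTD_inBuffer in = { src, srcSize, 0 };
        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
        size_t ret;

        if (!zds)
                return -EIO;
        do {
                ret = ZSTD_decompressStream(zds, &out, &in);
                if (ZSTD_isError(ret))
                        return -EIO;
                if (ret != 0 && out.pos == out.size)
                        return -ENOSPC; /* dst too small */
                if (ret != 0 && in.pos == in.size)
                        return -EIO; /* truncated frame */
        } while (ret != 0); /* 0: frame decoded and fully flushed */
        return out.pos;
}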
- */ -unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize); - -/** - * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames - * @src: It should point to the start of a series of zstd encoded and/or - * skippable frames. - * @srcSize: The exact size of the series of frames. - * - * If any zstd encoded frame in the series doesn't have the frame content size - * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned. But frame content size is always - * set when using ZSTD_compress(). The decompressed size can be very large. - * If the source is untrusted, the decompressed size could be wrong or - * intentionally modified. Always ensure the result fits within the - * application's authorized limits. ZSTD_findDecompressedSize() handles multiple - * frames, and so it must traverse the input to read each frame header. This is - * efficient as most of the data is skipped, however it does mean that all frame - * data must be present and valid. - * - * Return: Decompressed size of all the data contained in the frames if known. - * `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown. - * `ZSTD_CONTENTSIZE_ERROR` if an error occurred. - */ -unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize); - -/*-************************************* - * Advanced compression functions - **************************************/ -/** - * ZSTD_checkCParams() - ensure parameter values remain within authorized range - * @cParams: The zstd compression parameters. - * - * Return: Zero or an error, which can be checked using ZSTD_isError(). - */ -size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams); - -/** - * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize - * @srcSize: Optionally the estimated source size, or zero if unknown. - * @dictSize: Optionally the estimated dictionary size, or zero if unknown. - * - * Return: The optimized parameters. - */ -ZSTD_compressionParameters ZSTD_adjustCParams( - ZSTD_compressionParameters cParams, unsigned long long srcSize, - size_t dictSize); - -/*--- Advanced decompression functions ---*/ - -/** - * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame - * @buffer: The source buffer to check. - * @size: The size of the source buffer, must be at least 4 bytes. - * - * Return: True iff the buffer starts with a zstd or skippable frame identifier. - */ -unsigned int ZSTD_isFrame(const void *buffer, size_t size); - -/** - * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary - * @dict: The dictionary buffer. - * @dictSize: The size of the dictionary buffer. - * - * Return: The dictionary id stored within the dictionary or 0 if the - * dictionary is not a zstd dictionary. If it returns 0 the - * dictionary can still be loaded as a content-only dictionary. - */ -unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize); - -/** - * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict - * @ddict: The ddict to find the id of. - * - * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not - * a zstd dictionary. If it returns 0 `ddict` will be loaded as a - * content-only dictionary. - */ -unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict); - -/** - * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame - * @src: Source buffer. It must be a zstd encoded frame. - * @srcSize: The size of the source buffer. 
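Because the warning above matters most for untrusted input, here is a small sketch of bounding an allocation with ZSTD_getFrameContentSize(); the limit is application-chosen and the helper name is invented.

/* Accept a frame only if its declared content size is known and bounded. */
static int frame_size_ok(const void *src, size_t srcSize, size_t limit,
                         size_t *contentSize)
{
        unsigned long long const size = ZSTD_getFrameContentSize(src, srcSize);

        if (size == ZSTD_CONTENTSIZE_ERROR)
                return -EINVAL; /* not a valid zstd frame header */
        if (size == ZSTD_CONTENTSIZE_UNKNOWN || size > limit)
                return -E2BIG;  /* unknown, or over the authorized limit */
        *contentSize = (size_t)size;
        return 0;
}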
It must be at least as large as the - * frame header. `ZSTD_frameHeaderSize_max` is always large enough. - * - * Return: The dictionary id required to decompress the frame stored within - * `src` or 0 if the dictionary id could not be decoded. It can return - * 0 if the frame does not require a dictionary, the dictionary id - * wasn't stored in the frame, `src` is not a zstd frame, or `srcSize` - * is too small. - */ -unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize); - -/** - * struct ZSTD_frameParams - zstd frame parameters stored in the frame header - * @frameContentSize: The frame content size, or 0 if not present. - * @windowSize: The window size, or 0 if the frame is a skippable frame. - * @dictID: The dictionary id, or 0 if not present. - * @checksumFlag: Whether a checksum was used. - */ -typedef struct { - unsigned long long frameContentSize; - unsigned int windowSize; - unsigned int dictID; - unsigned int checksumFlag; -} ZSTD_frameParams; - -/** - * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame - * @fparamsPtr: On success the frame parameters are written here. - * @src: The source buffer. It must point to a zstd or skippable frame. - * @srcSize: The size of the source buffer. `ZSTD_frameHeaderSize_max` is - * always large enough to succeed. - * - * Return: 0 on success. If more data is required it returns how many bytes - * must be provided to make forward progress. Otherwise it returns - * an error, which can be checked using ZSTD_isError(). - */ -size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src, - size_t srcSize); - -/*-***************************************************************************** - * Buffer-less and synchronous inner streaming functions - * - * This is an advanced API, giving full control over buffer management, for - * users who need direct control over memory. - * But it's also a complex one, with many restrictions (documented below). - * Prefer using the normal streaming API for an easier experience. - ******************************************************************************/ - -/*-***************************************************************************** - * Buffer-less streaming compression (synchronous mode) - * - * A ZSTD_CCtx object is required to track streaming operations. - * Use ZSTD_initCCtx() to initialize a context. - * ZSTD_CCtx object can be re-used multiple times within successive compression - * operations. - * - * Start by initializing a context. - * Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary - * compression, - * or ZSTD_compressBegin_advanced(), for finer parameter control. - * It's also possible to duplicate a reference context which has already been - * initialized, using ZSTD_copyCCtx(). - * - * Then, consume your input using ZSTD_compressContinue(). - * There are some important considerations to keep in mind when using this - * advanced function: - * - ZSTD_compressContinue() has no internal buffer. It uses the externally - * provided buffer only. - * - Interface is synchronous: input is consumed entirely and produces one or - * more compressed blocks. - * - Caller must ensure there is enough space in `dst` to store compressed data - * under worst case scenario. Worst case evaluation is provided by - * ZSTD_compressBound(). - * ZSTD_compressContinue() doesn't guarantee recovery after a failed - * compression.
- * - ZSTD_compressContinue() presumes prior input ***is still accessible and - * unmodified*** (up to maximum distance size, see WindowLog). - * It remembers all previous contiguous blocks, plus one separated memory - * segment (which can itself consist of multiple contiguous blocks). - * - ZSTD_compressContinue() detects that prior input has been overwritten when - * `src` buffer overlaps. In which case, it will "discard" the relevant memory - * section from its history. - * - * Finish a frame with ZSTD_compressEnd(), which will write the last block(s) - * and optional checksum. It's possible to use srcSize==0, in which case, it - * will write a final empty block to end the frame. Without the last block mark, - * frames will be considered unfinished (corrupted) by decoders. - * - * `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress a new - * frame. - ******************************************************************************/ - -/*===== Buffer-less streaming compression functions =====*/ -size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel); -size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, - size_t dictSize, int compressionLevel); -size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, - size_t dictSize, ZSTD_parameters params, - unsigned long long pledgedSrcSize); -size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx, - unsigned long long pledgedSrcSize); -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, - unsigned long long pledgedSrcSize); -size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, - const void *src, size_t srcSize); -size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, - const void *src, size_t srcSize); - - - -/*-***************************************************************************** - * Buffer-less streaming decompression (synchronous mode) - * - * A ZSTD_DCtx object is required to track streaming operations. - * Use ZSTD_initDCtx() to initialize a context. - * A ZSTD_DCtx object can be re-used multiple times. - * - * The first typical operation is to retrieve frame parameters, using - * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provides - * important information to correctly decode the frame, such as the minimum - * rolling buffer size to allocate to decompress data (`windowSize`), and the - * dictionary ID used. - * Note: content size is optional; it may not be present. 0 means unknown. - * Note that these values could be wrong, either because of data malformation, - * or because an attacker is spoofing deliberate false information. As a - * consequence, check that values remain within valid application range, - * especially `windowSize`, before allocation. Each application can set its own - * limit, depending on local restrictions. For extended interoperability, it is - * recommended to support at least 8 MB. - * Frame parameters are extracted from the beginning of the compressed frame. - * The data fragment must be large enough to ensure successful decoding, typically - * `ZSTD_frameHeaderSize_max` bytes. - * Result: 0: successful decoding, the `ZSTD_frameParams` structure is filled. - * >0: `srcSize` is too small, provide at least this many bytes. - * errorCode, which can be tested using ZSTD_isError(). - * - * Start decompression, with ZSTD_decompressBegin() or - * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared - * context, using ZSTD_copyDCtx().
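A sketch of that header check on untrusted data. ZSTD_MAX_WINDOW is an application-chosen cap, set here to the 8 MB minimum the text above recommends supporting; the macro and helper names are invented.

#define ZSTD_MAX_WINDOW (8 * 1024 * 1024) /* illustrative limit */

static int check_frame_header(const void *src, size_t srcSize,
                              unsigned int *windowSize)
{
        ZSTD_frameParams params;
        size_t const ret = ZSTD_getFrameParams(&params, src, srcSize);

        if (ZSTD_isError(ret))
                return -EINVAL; /* malformed frame header */
        if (ret > 0)
                return -EAGAIN; /* provide at least `ret` bytes and retry */
        if (params.windowSize > ZSTD_MAX_WINDOW)
                return -E2BIG;  /* refuse excessive rolling-buffer demands */
        *windowSize = params.windowSize;
        return 0;
}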
- * - * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() - * alternately. - * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' - * to ZSTD_decompressContinue(). - * ZSTD_decompressContinue() requires this _exact_ number of bytes, or it will - * fail. - * - * The result of ZSTD_decompressContinue() is the number of bytes regenerated - * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an - * error; it just means ZSTD_decompressContinue() has decoded some metadata - * item. It can also be an error code, which can be tested with ZSTD_isError(). - * - * ZSTD_decompressContinue() needs previous data blocks during decompression, up - * to `windowSize`. They should preferably be located contiguously, prior to the - * current block. Alternatively, a round buffer of sufficient size is also - * possible. The sufficient size is determined by the frame parameters. - * ZSTD_decompressContinue() is very sensitive to contiguity; if two blocks do - * not follow each other, make sure that either the compressor breaks contiguity - * at the same place, or that the previous contiguous segment is large enough to - * properly handle the maximum back-reference distance. - * - * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero - * (this loop is sketched below). The context can then be reset to start a new - * decompression. - * - * Note: it is possible to know whether the next input to provide is a header or - * a block, using ZSTD_nextInputType(). This information is not required to - * properly decode a frame. - * - * == Special case: skippable frames == - * - * Skippable frames allow the integration of user-defined data into a flow of - * concatenated frames. Skippable frames will be ignored (skipped) by a - * decompressor. The format of skippable frames is as follows: - * a) Skippable frame ID - 4 Bytes, little-endian format, any value from - * 0x184D2A50 to 0x184D2A5F - * b) Frame Size - 4 Bytes, little-endian format, unsigned 32-bit value - * c) Frame Content - any content (User Data) of length equal to Frame Size - * For skippable frames ZSTD_decompressContinue() always returns 0. - * For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowSize==0, - * which means that the frame is skippable. - * Note: If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might - * actually be a zstd encoded frame with no content. For purposes of - * decompression, it is valid in both cases to skip the frame using - * ZSTD_findFrameCompressedSize() to find its size in bytes. - * ZSTD_getFrameParams() also returns the skippable frame size as - * fparamsPtr->frameContentSize. - ******************************************************************************/ - -/*===== Buffer-less streaming decompression functions =====*/ -size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx); -size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict, - size_t dictSize); -void ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx); -size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx); -size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, - const void *src, size_t srcSize); -typedef enum { - ZSTDnit_frameHeader, - ZSTDnit_blockHeader, - ZSTDnit_block, - ZSTDnit_lastBlock, - ZSTDnit_checksum, - ZSTDnit_skippableFrame -} ZSTD_nextInputType_e; -ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx); - -/*-***************************************************************************** - * Block functions - * - * Block functions produce and decode raw zstd blocks, without frame metadata.
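Picking up the decompression loop described above, here is a minimal, hypothetical sketch; example_fill() stands in for an application-provided reader that delivers exactly n compressed bytes, and window-buffer management is omitted for brevity:

/* App-provided: copies exactly n compressed bytes into buf. */
void example_fill(void *buf, size_t n);

/* Decode one frame with the buffer-less API; `dst` must satisfy the
 * contiguity and windowSize rules described above. Returns the number
 * of regenerated bytes, or an error code per ZSTD_isError(). */
static size_t example_decompress_frame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
				       void *inBuf /* >= ZSTD_frameHeaderSize_max bytes */)
{
	char *op = (char *)dst;

	for (;;) {
		size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
		size_t regenerated;

		if (toRead == 0)
			return (size_t)(op - (char *)dst); /* frame fully decoded */
		example_fill(inBuf, toRead); /* exactly toRead bytes, no more, no less */
		regenerated = ZSTD_decompressContinue(dctx, op,
						      dstCapacity - (size_t)(op - (char *)dst),
						      inBuf, toRead);
		if (ZSTD_isError(regenerated))
			return regenerated;
		op += regenerated; /* may be 0 when only a metadata item was decoded */
	}
}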
- * Frame metadata cost is typically ~18 bytes, which can be non-negligible for - * very small blocks (< 100 bytes). The user must keep track of the information - * required to regenerate the data, such as the compressed and content sizes. - * - * A few rules to respect: - * - Compressing and decompressing require a context structure - * + Use ZSTD_initCCtx() and ZSTD_initDCtx() - * - It is necessary to initialize the context before starting - * + compression : ZSTD_compressBegin() - * + decompression : ZSTD_decompressBegin() - * + the _usingDict() variants are also allowed - * + copyCCtx() and copyDCtx() work too - * - Block size is limited; it must be <= ZSTD_getBlockSizeMax() - * + If you need to compress more, cut the data into multiple blocks - * + Consider using the regular ZSTD_compress() instead, as frame metadata - * costs become negligible when the source size is large. - * - When a block is considered not compressible enough, ZSTD_compressBlock() - * returns zero. In that case, nothing is produced into `dst`. - * + The user must test for this outcome and deal directly with the - * uncompressed data - * + ZSTD_decompressBlock() does not accept uncompressed data as input! - * + In the case of multiple successive blocks, the decoder must be informed - * that an uncompressed block exists, so that it can maintain a correct - * history. Use ZSTD_insertBlock() in such a case. - ******************************************************************************/ - -/* Define for static allocation */ -#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024) -/*===== Raw zstd block functions =====*/ -size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx); -size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, - const void *src, size_t srcSize); -size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, - const void *src, size_t srcSize); -size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart, - size_t blockSize); - -#endif /* ZSTD_H */ diff --git a/contrib/linux-kernel/kernelize.sh b/contrib/linux-kernel/kernelize.sh deleted file mode 100755 index 21aa2ecdcd1..00000000000 --- a/contrib/linux-kernel/kernelize.sh +++ /dev/null @@ -1,110 +0,0 @@ -#!/bin/sh -set -e - -# Constants -SED_COMMANDS="commands.tmp" -CLANG_FORMAT="clang-format-3.9" -INCLUDE='include/linux/' -LIB='lib/zstd/' -SPACES=' ' -TAB=$'\t' -TMP="replacements.tmp" - -function prompt() { - while true; do - read -p "$1 [Y/n]" yn - case $yn in - '' ) yes='yes'; break;; - [Yy]* ) yes='yes'; break;; - [Nn]* ) yes=''; break;; - * ) echo "Please answer yes or no.";; - esac -done -} - -function check_not_present() { - grep "$1" $INCLUDE*.h ${LIB}*.{h,c} && exit 1 || true -} - -function check_not_present_in_file() { - grep "$1" "$2" && exit 1 || true -} - -function check_present_in_file() { - grep "$1" "$2" > /dev/null 2> /dev/null || exit 1 -} - -echo "Files: " $INCLUDE*.h $LIB*.{h,c} - -prompt "Do you wish to replace 4 spaces with a tab?" -if [ ! -z "$yes" ] -then - # Check files for existing tabs - grep "$TAB" $INCLUDE*.h $LIB*.{h,c} && exit 1 || true - # Replace the first tab on every line - sed -i '' "s/^$SPACES/$TAB/" $INCLUDE*.h $LIB*.{h,c} - - # Execute once and then execute as long as replacements are happening - more_work="yes" - while [ ! -z "$more_work" ] - do - rm -f $TMP - # Replaces $SPACES that directly follow a $TAB with a $TAB. - # $TMP will be non-empty if any replacements took place. - sed -i '' "s/$TAB$SPACES/$TAB$TAB/w $TMP" $INCLUDE*.h $LIB*.{h,c} - more_work=$(cat "$TMP") - done - rm -f $TMP -fi - -prompt "Do you wish to replace '{ ' with a tab?"
-if [ ! -z "$yes" ] -then - sed -i '' "s/$TAB{ /$TAB{$TAB/g" $INCLUDE*.h $LIB*.{h,c} -fi - -rm -f $SED_COMMANDS -cat > $SED_COMMANDS <= sizeof(bitD->bitContainer), otherwise @return will be an error code. -* -* bits are first added to a local register. -* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems. -* Writing data into memory is an explicit operation, performed by the flushBits function. -* Hence keep track how many bits are potentially stored into local register to avoid register overflow. -* After a flushBits, a maximum of 7 bits might still be stored into local register. -* -* Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers. -* -* Last operation is to close the bitStream. -* The function returns the final size of CStream in bytes. -* If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable) -*/ - -/*-******************************************** -* bitStream decoding API (read backward) -**********************************************/ -typedef struct { - size_t bitContainer; - unsigned bitsConsumed; - const char *ptr; - const char *start; -} BIT_DStream_t; - -typedef enum { - BIT_DStream_unfinished = 0, - BIT_DStream_endOfBuffer = 1, - BIT_DStream_completed = 2, - BIT_DStream_overflow = 3 -} BIT_DStream_status; /* result of BIT_reloadDStream() */ -/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ - -ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize); -ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, unsigned nbBits); -ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD); -ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *bitD); - -/* Start by invoking BIT_initDStream(). -* A chunk of the bitStream is then stored into a local register. -* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t). -* You can then retrieve bitFields stored into the local register, **in reverse order**. -* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method. -* A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished. -* Otherwise, it can be less than that, so proceed accordingly. -* Checking if DStream has reached its end can be performed with BIT_endOfDStream(). 
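As an illustration of the reverse-read contract just described, a hypothetical sketch (it assumes placement in the same translation unit as these ZSTD_STATIC helpers; the field widths and the example_ name are invented for the example):

/* The value added last with BIT_addBits() is the first one returned
 * by BIT_readBits(): the bitstream behaves like a LIFO stack. */
static int example_read_two_fields(const void *srcBuffer, size_t srcSize, U32 *a, U32 *b)
{
	BIT_DStream_t bitD;

	if (ERR_isError(BIT_initDStream(&bitD, srcBuffer, srcSize)))
		return -1; /* srcSize must be the exact stream size */
	*b = (U32)BIT_readBits(&bitD, 5);  /* added last, read first */
	*a = (U32)BIT_readBits(&bitD, 11); /* added first, read last */
	BIT_reloadDStream(&bitD);          /* refill the local register from memory */
	return BIT_endOfDStream(&bitD) ? 0 : -1; /* 0 only if every bit was consumed */
}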
-*/ - -/*-**************************************** -* unsafe API -******************************************/ -ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits); -/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */ - -ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC); -/* unsafe version; does not check buffer overflow */ - -ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, unsigned nbBits); -/* faster, but works only if nbBits >= 1 */ - -/*-************************************************************** -* Internal functions -****************************************************************/ -ZSTD_STATIC unsigned BIT_highbit32(register U32 val) { return 31 - __builtin_clz(val); } - -/*===== Local Constants =====*/ -static const unsigned BIT_mask[] = {0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, - 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, - 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF}; /* up to 26 bits */ - -/*-************************************************************** -* bitStream encoding -****************************************************************/ -/*! BIT_initCStream() : - * `dstCapacity` must be > sizeof(void*) - * @return : 0 if success, - otherwise an error code (can be tested using ERR_isError() ) */ -ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *startPtr, size_t dstCapacity) -{ - bitC->bitContainer = 0; - bitC->bitPos = 0; - bitC->startPtr = (char *)startPtr; - bitC->ptr = bitC->startPtr; - bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->ptr); - if (dstCapacity <= sizeof(bitC->ptr)) - return ERROR(dstSize_tooSmall); - return 0; -} - -/*! BIT_addBits() : - can add up to 26 bits into `bitC`. - Does not check for register overflow ! */ -ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits) -{ - bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos; - bitC->bitPos += nbBits; -} - -/*! BIT_addBitsFast() : - * works only if `value` is _clean_, meaning all high bits above nbBits are 0 */ -ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits) -{ - bitC->bitContainer |= value << bitC->bitPos; - bitC->bitPos += nbBits; -} - -/*! BIT_flushBitsFast() : - * unsafe version; does not check buffer overflow */ -ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC) -{ - size_t const nbBytes = bitC->bitPos >> 3; - ZSTD_writeLEST(bitC->ptr, bitC->bitContainer); - bitC->ptr += nbBytes; - bitC->bitPos &= 7; - bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */ -} - -/*! BIT_flushBits() : - * safe version; check for buffer overflow, and prevents it. - * note : does not signal buffer overflow. This will be revealed later on using BIT_closeCStream() */ -ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC) -{ - size_t const nbBytes = bitC->bitPos >> 3; - ZSTD_writeLEST(bitC->ptr, bitC->bitContainer); - bitC->ptr += nbBytes; - if (bitC->ptr > bitC->endPtr) - bitC->ptr = bitC->endPtr; - bitC->bitPos &= 7; - bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */ -} - -/*! 
BIT_closeCStream() : - * @return : size of CStream, in bytes, - or 0 if it could not fit into dstBuffer */ -ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC) -{ - BIT_addBitsFast(bitC, 1, 1); /* endMark */ - BIT_flushBits(bitC); - - if (bitC->ptr >= bitC->endPtr) - return 0; /* doesn't fit within authorized budget : cancel */ - - return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); -} - -/*-******************************************************** -* bitStream decoding -**********************************************************/ -/*! BIT_initDStream() : -* Initialize a BIT_DStream_t. -* `bitD` : a pointer to an already allocated BIT_DStream_t structure. -* `srcSize` must be the *exact* size of the bitStream, in bytes. -* @return : size of stream (== srcSize) or an errorCode if a problem is detected -*/ -ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize) -{ - if (srcSize < 1) { - memset(bitD, 0, sizeof(*bitD)); - return ERROR(srcSize_wrong); - } - - if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */ - bitD->start = (const char *)srcBuffer; - bitD->ptr = (const char *)srcBuffer + srcSize - sizeof(bitD->bitContainer); - bitD->bitContainer = ZSTD_readLEST(bitD->ptr); - { - BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1]; - bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */ - if (lastByte == 0) - return ERROR(GENERIC); /* endMark not present */ - } - } else { - bitD->start = (const char *)srcBuffer; - bitD->ptr = bitD->start; - bitD->bitContainer = *(const BYTE *)(bitD->start); - switch (srcSize) { - case 7: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[6]) << (sizeof(bitD->bitContainer) * 8 - 16); - case 6: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[5]) << (sizeof(bitD->bitContainer) * 8 - 24); - case 5: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[4]) << (sizeof(bitD->bitContainer) * 8 - 32); - case 4: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[3]) << 24; - case 3: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[2]) << 16; - case 2: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[1]) << 8; - default:; - } - { - BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1]; - bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; - if (lastByte == 0) - return ERROR(GENERIC); /* endMark not present */ - } - bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize) * 8; - } - - return srcSize; -} - -ZSTD_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) { return bitContainer >> start; } - -ZSTD_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) { return (bitContainer >> start) & BIT_mask[nbBits]; } - -ZSTD_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) { return bitContainer & BIT_mask[nbBits]; } - -/*! BIT_lookBits() : - * Provides next n bits from local register. - * local register is not modified. - * On 32-bits, maxNbBits==24. - * On 64-bits, maxNbBits==56. - * @return : value extracted - */ -ZSTD_STATIC size_t BIT_lookBits(const BIT_DStream_t *bitD, U32 nbBits) -{ - U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1; - return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask - nbBits) & bitMask); -} - -/*! 
BIT_lookBitsFast() : -* unsafe version; only works only if nbBits >= 1 */ -ZSTD_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t *bitD, U32 nbBits) -{ - U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1; - return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask + 1) - nbBits) & bitMask); -} - -ZSTD_STATIC void BIT_skipBits(BIT_DStream_t *bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; } - -/*! BIT_readBits() : - * Read (consume) next n bits from local register and update. - * Pay attention to not read more than nbBits contained into local register. - * @return : extracted value. - */ -ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, U32 nbBits) -{ - size_t const value = BIT_lookBits(bitD, nbBits); - BIT_skipBits(bitD, nbBits); - return value; -} - -/*! BIT_readBitsFast() : -* unsafe version; only works only if nbBits >= 1 */ -ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, U32 nbBits) -{ - size_t const value = BIT_lookBitsFast(bitD, nbBits); - BIT_skipBits(bitD, nbBits); - return value; -} - -/*! BIT_reloadDStream() : -* Refill `bitD` from buffer previously set in BIT_initDStream() . -* This function is safe, it guarantees it will not read beyond src buffer. -* @return : status of `BIT_DStream_t` internal register. - if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */ -ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD) -{ - if (bitD->bitsConsumed > (sizeof(bitD->bitContainer) * 8)) /* should not happen => corruption detected */ - return BIT_DStream_overflow; - - if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) { - bitD->ptr -= bitD->bitsConsumed >> 3; - bitD->bitsConsumed &= 7; - bitD->bitContainer = ZSTD_readLEST(bitD->ptr); - return BIT_DStream_unfinished; - } - if (bitD->ptr == bitD->start) { - if (bitD->bitsConsumed < sizeof(bitD->bitContainer) * 8) - return BIT_DStream_endOfBuffer; - return BIT_DStream_completed; - } - { - U32 nbBytes = bitD->bitsConsumed >> 3; - BIT_DStream_status result = BIT_DStream_unfinished; - if (bitD->ptr - nbBytes < bitD->start) { - nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ - result = BIT_DStream_endOfBuffer; - } - bitD->ptr -= nbBytes; - bitD->bitsConsumed -= nbBytes * 8; - bitD->bitContainer = ZSTD_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ - return result; - } -} - -/*! BIT_endOfDStream() : -* @return Tells if DStream has exactly reached its end (all bits consumed). -*/ -ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *DStream) -{ - return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer) * 8)); -} - -#endif /* BITSTREAM_H_MODULE */ diff --git a/contrib/linux-kernel/lib/zstd/compress.c b/contrib/linux-kernel/lib/zstd/compress.c deleted file mode 100644 index 43535b8db55..00000000000 --- a/contrib/linux-kernel/lib/zstd/compress.c +++ /dev/null @@ -1,3482 +0,0 @@ -/** - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of https://github.com/facebook/zstd. - * - * This program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License version 2 as published by the - * Free Software Foundation. 
This program is dual-licensed; you may select - * either version 2 of the GNU General Public License ("GPL") or BSD license - * ("BSD"). - */ - -/*-************************************* -* Dependencies -***************************************/ -#include "fse.h" -#include "huf.h" -#include "mem.h" -#include "zstd_internal.h" /* includes zstd.h */ -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/string.h> /* memset */ - -/*-************************************* -* Constants -***************************************/ -static const U32 g_searchStrength = 8; /* control skip over incompressible data */ -#define HASH_READ_SIZE 8 -typedef enum { ZSTDcs_created = 0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e; - -/*-************************************* -* Helper functions -***************************************/ -size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; } - -/*-************************************* -* Sequence storage -***************************************/ -static void ZSTD_resetSeqStore(seqStore_t *ssPtr) -{ - ssPtr->lit = ssPtr->litStart; - ssPtr->sequences = ssPtr->sequencesStart; - ssPtr->longLengthID = 0; -} - -/*-************************************* -* Context memory management -***************************************/ -struct ZSTD_CCtx_s { - const BYTE *nextSrc; /* next block here to continue on curr prefix */ - const BYTE *base; /* All regular indexes relative to this position */ - const BYTE *dictBase; /* extDict indexes relative to this position */ - U32 dictLimit; /* below that point, need extDict */ - U32 lowLimit; /* below that point, no more data */ - U32 nextToUpdate; /* index from which to continue dictionary update */ - U32 nextToUpdate3; /* index from which to continue dictionary update */ - U32 hashLog3; /* dispatch table : larger == faster, more memory */ - U32 loadedDictEnd; /* index of end of dictionary */ - U32 forceWindow; /* force back-references to respect limit of 1<<wLog, even for dictionary */ - U32 forceRawDict; /* Force loading dictionary in "content-only" mode (no header analysis) */ - ZSTD_compressionStage_e stage; - U32 rep[ZSTD_REP_NUM]; - U32 repToConfirm[ZSTD_REP_NUM]; - U32 dictID; - ZSTD_parameters params; - void *workSpace; - size_t workSpaceSize; - size_t blockSize; - U64 frameContentSize; - struct xxh64_state xxhState; - ZSTD_customMem customMem; - - seqStore_t seqStore; /* sequences storage ptrs */ - U32 *hashTable; - U32 *hashTable3; - U32 *chainTable; - HUF_CElt *hufTable; - U32 flagStaticTables; - HUF_repeat flagStaticHufTable; - FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)]; - FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)]; - FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)]; - unsigned tmpCounters[HUF_COMPRESS_WORKSPACE_SIZE_U32]; -}; - -size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams) -{ - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog); - U32 const divider = (cParams.searchLength == 3) ? 3 : 4; - size_t const maxNbSeq = blockSize / divider; - size_t const tokenSpace = blockSize + 11 * maxNbSeq; - size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog); - size_t const hSize = ((size_t)1) << cParams.hashLog; - U32 const hashLog3 = (cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog); - size_t const h3Size = ((size_t)1) << hashLog3; - size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); - size_t const optSpace = - ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) + (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t)); - size_t const workspaceSize = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace + - (((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btopt2)) ?
optSpace : 0); - - return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_CCtx)) + ZSTD_ALIGN(workspaceSize); -} - -static ZSTD_CCtx *ZSTD_createCCtx_advanced(ZSTD_customMem customMem) -{ - ZSTD_CCtx *cctx; - if (!customMem.customAlloc || !customMem.customFree) - return NULL; - cctx = (ZSTD_CCtx *)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem); - if (!cctx) - return NULL; - memset(cctx, 0, sizeof(ZSTD_CCtx)); - cctx->customMem = customMem; - return cctx; -} - -ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize) -{ - ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); - ZSTD_CCtx *cctx = ZSTD_createCCtx_advanced(stackMem); - if (cctx) { - cctx->workSpace = ZSTD_stackAllocAll(cctx->customMem.opaque, &cctx->workSpaceSize); - } - return cctx; -} - -size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx) -{ - if (cctx == NULL) - return 0; /* support free on NULL */ - ZSTD_free(cctx->workSpace, cctx->customMem); - ZSTD_free(cctx, cctx->customMem); - return 0; /* reserved as a potential error code in the future */ -} - -const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx) /* hidden interface */ { return &(ctx->seqStore); } - -static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx *cctx) { return cctx->params; } - -/** ZSTD_checkParams() : - ensure param values remain within authorized range. - @return : 0, or an error code if one value is beyond authorized range */ -size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) -{ -#define CLAMPCHECK(val, min, max) \ - { \ - if ((val < min) | (val > max)) \ - return ERROR(compressionParameter_unsupported); \ - } - CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX); - CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX); - CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); - CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX); - CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX); - CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX); - if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2) - return ERROR(compressionParameter_unsupported); - return 0; -} - -/** ZSTD_cycleLog() : - * condition for correct operation : hashLog > 1 */ -static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat) -{ - U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2); - return hashLog - btScale; -} - -/** ZSTD_adjustCParams() : - optimize `cPar` for a given input (`srcSize` and `dictSize`). - mostly downsizing to reduce memory consumption and initialization. - Both `srcSize` and `dictSize` are optional (use 0 if unknown), - but if both are 0, no optimization can be done. - Note : cPar is considered validated at this stage. Use ZSTD_checkParams() to ensure that. */ -ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize) -{ - if (srcSize + dictSize == 0) - return cPar; /* no size information available : no adjustment */ - - /* resize params, to use less memory when necessary */ - { - U32 const minSrcSize = (srcSize == 0) ? 
500 : 0; - U64 const rSize = srcSize + dictSize + minSrcSize; - if (rSize < ((U64)1 << ZSTD_WINDOWLOG_MAX)) { - U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1); - if (cPar.windowLog > srcLog) - cPar.windowLog = srcLog; - } - } - if (cPar.hashLog > cPar.windowLog) - cPar.hashLog = cPar.windowLog; - { - U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); - if (cycleLog > cPar.windowLog) - cPar.chainLog -= (cycleLog - cPar.windowLog); - } - - if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) - cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */ - - return cPar; -} - -static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2) -{ - return (param1.cParams.hashLog == param2.cParams.hashLog) & (param1.cParams.chainLog == param2.cParams.chainLog) & - (param1.cParams.strategy == param2.cParams.strategy) & ((param1.cParams.searchLength == 3) == (param2.cParams.searchLength == 3)); -} - -/*! ZSTD_continueCCtx() : - reuse CCtx without reset (note : requires no dictionary) */ -static size_t ZSTD_continueCCtx(ZSTD_CCtx *cctx, ZSTD_parameters params, U64 frameContentSize) -{ - U32 const end = (U32)(cctx->nextSrc - cctx->base); - cctx->params = params; - cctx->frameContentSize = frameContentSize; - cctx->lowLimit = end; - cctx->dictLimit = end; - cctx->nextToUpdate = end + 1; - cctx->stage = ZSTDcs_init; - cctx->dictID = 0; - cctx->loadedDictEnd = 0; - { - int i; - for (i = 0; i < ZSTD_REP_NUM; i++) - cctx->rep[i] = repStartValue[i]; - } - cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */ - xxh64_reset(&cctx->xxhState, 0); - return 0; -} - -typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e; - -/*! ZSTD_resetCCtx_advanced() : - note : `params` must be validated */ -static size_t ZSTD_resetCCtx_advanced(ZSTD_CCtx *zc, ZSTD_parameters params, U64 frameContentSize, ZSTD_compResetPolicy_e const crp) -{ - if (crp == ZSTDcrp_continue) - if (ZSTD_equivalentParams(params, zc->params)) { - zc->flagStaticTables = 0; - zc->flagStaticHufTable = HUF_repeat_none; - return ZSTD_continueCCtx(zc, params, frameContentSize); - } - - { - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog); - U32 const divider = (params.cParams.searchLength == 3) ? 3 : 4; - size_t const maxNbSeq = blockSize / divider; - size_t const tokenSpace = blockSize + 11 * maxNbSeq; - size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog); - size_t const hSize = ((size_t)1) << params.cParams.hashLog; - U32 const hashLog3 = (params.cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog); - size_t const h3Size = ((size_t)1) << hashLog3; - size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); - void *ptr; - - /* Check if workSpace is large enough, alloc a new one if needed */ - { - size_t const optSpace = ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) + - (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t)); - size_t const neededSpace = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace + - (((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? 
optSpace : 0); - if (zc->workSpaceSize < neededSpace) { - ZSTD_free(zc->workSpace, zc->customMem); - zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem); - if (zc->workSpace == NULL) - return ERROR(memory_allocation); - zc->workSpaceSize = neededSpace; - } - } - - if (crp != ZSTDcrp_noMemset) - memset(zc->workSpace, 0, tableSpace); /* reset tables only */ - xxh64_reset(&zc->xxhState, 0); - zc->hashLog3 = hashLog3; - zc->hashTable = (U32 *)(zc->workSpace); - zc->chainTable = zc->hashTable + hSize; - zc->hashTable3 = zc->chainTable + chainSize; - ptr = zc->hashTable3 + h3Size; - zc->hufTable = (HUF_CElt *)ptr; - zc->flagStaticTables = 0; - zc->flagStaticHufTable = HUF_repeat_none; - ptr = ((U32 *)ptr) + 256; /* note : HUF_CElt* is incomplete type, size is simulated using U32 */ - - zc->nextToUpdate = 1; - zc->nextSrc = NULL; - zc->base = NULL; - zc->dictBase = NULL; - zc->dictLimit = 0; - zc->lowLimit = 0; - zc->params = params; - zc->blockSize = blockSize; - zc->frameContentSize = frameContentSize; - { - int i; - for (i = 0; i < ZSTD_REP_NUM; i++) - zc->rep[i] = repStartValue[i]; - } - - if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) { - zc->seqStore.litFreq = (U32 *)ptr; - zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1 << Litbits); - zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL + 1); - zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML + 1); - ptr = zc->seqStore.offCodeFreq + (MaxOff + 1); - zc->seqStore.matchTable = (ZSTD_match_t *)ptr; - ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM + 1; - zc->seqStore.priceTable = (ZSTD_optimal_t *)ptr; - ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM + 1; - zc->seqStore.litLengthSum = 0; - } - zc->seqStore.sequencesStart = (seqDef *)ptr; - ptr = zc->seqStore.sequencesStart + maxNbSeq; - zc->seqStore.llCode = (BYTE *)ptr; - zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq; - zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq; - zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq; - - zc->stage = ZSTDcs_init; - zc->dictID = 0; - zc->loadedDictEnd = 0; - - return 0; - } -} - -/* ZSTD_invalidateRepCodes() : - * ensures next compression will not use repcodes from previous block. - * Note : only works with regular variant; - * do not use with extDict variant ! */ -void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx) -{ - int i; - for (i = 0; i < ZSTD_REP_NUM; i++) - cctx->rep[i] = 0; -} - -/*! ZSTD_copyCCtx() : -* Duplicate an existing context `srcCCtx` into another one `dstCCtx`. -* Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). -* @return : 0, or an error code */ -size_t ZSTD_copyCCtx(ZSTD_CCtx *dstCCtx, const ZSTD_CCtx *srcCCtx, unsigned long long pledgedSrcSize) -{ - if (srcCCtx->stage != ZSTDcs_init) - return ERROR(stage_wrong); - - memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem)); - { - ZSTD_parameters params = srcCCtx->params; - params.fParams.contentSizeFlag = (pledgedSrcSize > 0); - ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset); - } - - /* copy tables */ - { - size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 
0 : (1 << srcCCtx->params.cParams.chainLog); - size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog; - size_t const h3Size = (size_t)1 << srcCCtx->hashLog3; - size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); - memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace); - } - - /* copy dictionary offsets */ - dstCCtx->nextToUpdate = srcCCtx->nextToUpdate; - dstCCtx->nextToUpdate3 = srcCCtx->nextToUpdate3; - dstCCtx->nextSrc = srcCCtx->nextSrc; - dstCCtx->base = srcCCtx->base; - dstCCtx->dictBase = srcCCtx->dictBase; - dstCCtx->dictLimit = srcCCtx->dictLimit; - dstCCtx->lowLimit = srcCCtx->lowLimit; - dstCCtx->loadedDictEnd = srcCCtx->loadedDictEnd; - dstCCtx->dictID = srcCCtx->dictID; - - /* copy entropy tables */ - dstCCtx->flagStaticTables = srcCCtx->flagStaticTables; - dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable; - if (srcCCtx->flagStaticTables) { - memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable)); - memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable)); - memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable)); - } - if (srcCCtx->flagStaticHufTable) { - memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256 * 4); - } - - return 0; -} - -/*! ZSTD_reduceTable() : -* reduce table indexes by `reducerValue` */ -static void ZSTD_reduceTable(U32 *const table, U32 const size, U32 const reducerValue) -{ - U32 u; - for (u = 0; u < size; u++) { - if (table[u] < reducerValue) - table[u] = 0; - else - table[u] -= reducerValue; - } -} - -/*! ZSTD_reduceIndex() : -* rescale all indexes to avoid future overflow (indexes are U32) */ -static void ZSTD_reduceIndex(ZSTD_CCtx *zc, const U32 reducerValue) -{ - { - U32 const hSize = 1 << zc->params.cParams.hashLog; - ZSTD_reduceTable(zc->hashTable, hSize, reducerValue); - } - - { - U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog); - ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue); - } - - { - U32 const h3Size = (zc->hashLog3) ? 
1 << zc->hashLog3 : 0; - ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue); - } -} - -/*-******************************************************* -* Block entropic compression -*********************************************************/ - -/* See doc/zstd_compression_format.md for detailed format description */ - -size_t ZSTD_noCompressBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize) -{ - if (srcSize + ZSTD_blockHeaderSize > dstCapacity) - return ERROR(dstSize_tooSmall); - memcpy((BYTE *)dst + ZSTD_blockHeaderSize, src, srcSize); - ZSTD_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw); - return ZSTD_blockHeaderSize + srcSize; -} - -static size_t ZSTD_noCompressLiterals(void *dst, size_t dstCapacity, const void *src, size_t srcSize) -{ - BYTE *const ostart = (BYTE * const)dst; - U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095); - - if (srcSize + flSize > dstCapacity) - return ERROR(dstSize_tooSmall); - - switch (flSize) { - case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_basic + (srcSize << 3)); break; - case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_basic + (1 << 2) + (srcSize << 4))); break; - default: /*note : should not be necessary : flSize is within {1,2,3} */ - case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_basic + (3 << 2) + (srcSize << 4))); break; - } - - memcpy(ostart + flSize, src, srcSize); - return srcSize + flSize; -} - -static size_t ZSTD_compressRleLiteralsBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize) -{ - BYTE *const ostart = (BYTE * const)dst; - U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095); - - (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */ - - switch (flSize) { - case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_rle + (srcSize << 3)); break; - case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_rle + (1 << 2) + (srcSize << 4))); break; - default: /*note : should not be necessary : flSize is necessarily within {1,2,3} */ - case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_rle + (3 << 2) + (srcSize << 4))); break; - } - - ostart[flSize] = *(const BYTE *)src; - return flSize + 1; -} - -static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; } - -static size_t ZSTD_compressLiterals(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -{ - size_t const minGain = ZSTD_minGain(srcSize); - size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); - BYTE *const ostart = (BYTE *)dst; - U32 singleStream = srcSize < 256; - symbolEncodingType_e hType = set_compressed; - size_t cLitSize; - -/* small ? don't even attempt compression (speed opt) */ -#define LITERAL_NOENTROPY 63 - { - size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY; - if (srcSize <= minLitSize) - return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - } - - if (dstCapacity < lhSize + 1) - return ERROR(dstSize_tooSmall); /* not enough space for compression */ - { - HUF_repeat repeat = zc->flagStaticHufTable; - int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0; - if (repeat == HUF_repeat_valid && lhSize == 3) - singleStream = 1; - cLitSize = singleStream ? 
HUF_compress1X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters, - sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat) - : HUF_compress4X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters, - sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat); - if (repeat != HUF_repeat_none) { - hType = set_repeat; - } /* reused the existing table */ - else { - zc->flagStaticHufTable = HUF_repeat_check; - } /* now have a table to reuse */ - } - - if ((cLitSize == 0) | (cLitSize >= srcSize - minGain)) { - zc->flagStaticHufTable = HUF_repeat_none; - return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - } - if (cLitSize == 1) { - zc->flagStaticHufTable = HUF_repeat_none; - return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); - } - - /* Build header */ - switch (lhSize) { - case 3: /* 2 - 2 - 10 - 10 */ - { - U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 14); - ZSTD_writeLE24(ostart, lhc); - break; - } - case 4: /* 2 - 2 - 14 - 14 */ - { - U32 const lhc = hType + (2 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 18); - ZSTD_writeLE32(ostart, lhc); - break; - } - default: /* should not be necessary, lhSize is only {3,4,5} */ - case 5: /* 2 - 2 - 18 - 18 */ - { - U32 const lhc = hType + (3 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 22); - ZSTD_writeLE32(ostart, lhc); - ostart[4] = (BYTE)(cLitSize >> 10); - break; - } - } - return lhSize + cLitSize; -} - -static const BYTE LL_Code[64] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18, - 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, - 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24}; - -static const BYTE ML_Code[128] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38, - 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42}; - -void ZSTD_seqToCodes(const seqStore_t *seqStorePtr) -{ - BYTE const LL_deltaCode = 19; - BYTE const ML_deltaCode = 36; - const seqDef *const sequences = seqStorePtr->sequencesStart; - BYTE *const llCodeTable = seqStorePtr->llCode; - BYTE *const ofCodeTable = seqStorePtr->ofCode; - BYTE *const mlCodeTable = seqStorePtr->mlCode; - U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - U32 u; - for (u = 0; u < nbSeq; u++) { - U32 const llv = sequences[u].litLength; - U32 const mlv = sequences[u].matchLength; - llCodeTable[u] = (llv > 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv]; - ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset); - mlCodeTable[u] = (mlv > 127) ? 
(BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv]; - } - if (seqStorePtr->longLengthID == 1) - llCodeTable[seqStorePtr->longLengthPos] = MaxLL; - if (seqStorePtr->longLengthID == 2) - mlCodeTable[seqStorePtr->longLengthPos] = MaxML; -} - -ZSTD_STATIC size_t ZSTD_compressSequences_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity) -{ - const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN; - const seqStore_t *seqStorePtr = &(zc->seqStore); - FSE_CTable *CTable_LitLength = zc->litlengthCTable; - FSE_CTable *CTable_OffsetBits = zc->offcodeCTable; - FSE_CTable *CTable_MatchLength = zc->matchlengthCTable; - U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */ - const seqDef *const sequences = seqStorePtr->sequencesStart; - const BYTE *const ofCodeTable = seqStorePtr->ofCode; - const BYTE *const llCodeTable = seqStorePtr->llCode; - const BYTE *const mlCodeTable = seqStorePtr->mlCode; - BYTE *const ostart = (BYTE *)dst; - BYTE *const oend = ostart + dstCapacity; - BYTE *op = ostart; - size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; - BYTE *seqHead; - - U32 *count; - S16 *norm; - U32 *workspace; - size_t workspaceSize = sizeof(zc->tmpCounters); - { - size_t spaceUsed32 = 0; - count = (U32 *)zc->tmpCounters + spaceUsed32; - spaceUsed32 += MaxSeq + 1; - norm = (S16 *)((U32 *)zc->tmpCounters + spaceUsed32); - spaceUsed32 += ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2; - - workspace = (U32 *)zc->tmpCounters + spaceUsed32; - workspaceSize -= (spaceUsed32 << 2); - } - - /* Compress literals */ - { - const BYTE *const literals = seqStorePtr->litStart; - size_t const litSize = seqStorePtr->lit - literals; - size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize); - if (ZSTD_isError(cSize)) - return cSize; - op += cSize; - } - - /* Sequences Header */ - if ((oend - op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */) - return ERROR(dstSize_tooSmall); - if (nbSeq < 0x7F) - *op++ = (BYTE)nbSeq; - else if (nbSeq < LONGNBSEQ) - op[0] = (BYTE)((nbSeq >> 8) + 0x80), op[1] = (BYTE)nbSeq, op += 2; - else - op[0] = 0xFF, ZSTD_writeLE16(op + 1, (U16)(nbSeq - LONGNBSEQ)), op += 3; - if (nbSeq == 0) - return op - ostart; - - /* seqHead : flags for FSE encoding type */ - seqHead = op++; - -#define MIN_SEQ_FOR_DYNAMIC_FSE 64 -#define MAX_SEQ_FOR_STATIC_FSE 1000 - - /* convert length/distances into codes */ - ZSTD_seqToCodes(seqStorePtr); - - /* CTable for Literal Lengths */ - { - U32 max = MaxLL; - size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace); - if ((mostFrequent == nbSeq) && (nbSeq > 2)) { - *op++ = llCodeTable[0]; - FSE_buildCTable_rle(CTable_LitLength, (BYTE)max); - LLtype = set_rle; - } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { - LLtype = set_repeat; - } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog - 1)))) { - FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, workspace, workspaceSize); - LLtype = set_basic; - } else { - size_t nbSeq_1 = nbSeq; - const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max); - if (count[llCodeTable[nbSeq - 1]] > 1) { - count[llCodeTable[nbSeq - 1]]--; - nbSeq_1--; - } - FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); - { - size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */ - if (FSE_isError(NCountSize)) - return NCountSize; - op += NCountSize; - } - 
FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, workspace, workspaceSize); - LLtype = set_compressed; - } - } - - /* CTable for Offsets */ - { - U32 max = MaxOff; - size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace); - if ((mostFrequent == nbSeq) && (nbSeq > 2)) { - *op++ = ofCodeTable[0]; - FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max); - Offtype = set_rle; - } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { - Offtype = set_repeat; - } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog - 1)))) { - FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, workspace, workspaceSize); - Offtype = set_basic; - } else { - size_t nbSeq_1 = nbSeq; - const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max); - if (count[ofCodeTable[nbSeq - 1]] > 1) { - count[ofCodeTable[nbSeq - 1]]--; - nbSeq_1--; - } - FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); - { - size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */ - if (FSE_isError(NCountSize)) - return NCountSize; - op += NCountSize; - } - FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, workspace, workspaceSize); - Offtype = set_compressed; - } - } - - /* CTable for MatchLengths */ - { - U32 max = MaxML; - size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace); - if ((mostFrequent == nbSeq) && (nbSeq > 2)) { - *op++ = *mlCodeTable; - FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max); - MLtype = set_rle; - } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { - MLtype = set_repeat; - } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog - 1)))) { - FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, workspace, workspaceSize); - MLtype = set_basic; - } else { - size_t nbSeq_1 = nbSeq; - const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max); - if (count[mlCodeTable[nbSeq - 1]] > 1) { - count[mlCodeTable[nbSeq - 1]]--; - nbSeq_1--; - } - FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); - { - size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */ - if (FSE_isError(NCountSize)) - return NCountSize; - op += NCountSize; - } - FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, workspace, workspaceSize); - MLtype = set_compressed; - } - } - - *seqHead = (BYTE)((LLtype << 6) + (Offtype << 4) + (MLtype << 2)); - zc->flagStaticTables = 0; - - /* Encoding Sequences */ - { - BIT_CStream_t blockStream; - FSE_CState_t stateMatchLength; - FSE_CState_t stateOffsetBits; - FSE_CState_t stateLitLength; - - CHECK_E(BIT_initCStream(&blockStream, op, oend - op), dstSize_tooSmall); /* not enough space remaining */ - - /* first symbols */ - FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]); - FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq - 1]); - FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]); - BIT_addBits(&blockStream, sequences[nbSeq - 1].litLength, LL_bits[llCodeTable[nbSeq - 1]]); - if (ZSTD_32bits()) - BIT_flushBits(&blockStream); - BIT_addBits(&blockStream, sequences[nbSeq - 1].matchLength, ML_bits[mlCodeTable[nbSeq - 1]]); - if (ZSTD_32bits()) - BIT_flushBits(&blockStream); - if (longOffsets) { - U32 const ofBits = ofCodeTable[nbSeq - 1]; - int const extraBits = ofBits - 
MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1); - if (extraBits) { - BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, extraBits); - BIT_flushBits(&blockStream); - } - BIT_addBits(&blockStream, sequences[nbSeq - 1].offset >> extraBits, ofBits - extraBits); - } else { - BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, ofCodeTable[nbSeq - 1]); - } - BIT_flushBits(&blockStream); - - { - size_t n; - for (n = nbSeq - 2; n < nbSeq; n--) { /* intentional underflow */ - BYTE const llCode = llCodeTable[n]; - BYTE const ofCode = ofCodeTable[n]; - BYTE const mlCode = mlCodeTable[n]; - U32 const llBits = LL_bits[llCode]; - U32 const ofBits = ofCode; /* 32b*/ /* 64b*/ - U32 const mlBits = ML_bits[mlCode]; - /* (7)*/ /* (7)*/ - FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */ - FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */ - if (ZSTD_32bits()) - BIT_flushBits(&blockStream); /* (7)*/ - FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */ - if (ZSTD_32bits() || (ofBits + mlBits + llBits >= 64 - 7 - (LLFSELog + MLFSELog + OffFSELog))) - BIT_flushBits(&blockStream); /* (7)*/ - BIT_addBits(&blockStream, sequences[n].litLength, llBits); - if (ZSTD_32bits() && ((llBits + mlBits) > 24)) - BIT_flushBits(&blockStream); - BIT_addBits(&blockStream, sequences[n].matchLength, mlBits); - if (ZSTD_32bits()) - BIT_flushBits(&blockStream); /* (7)*/ - if (longOffsets) { - int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1); - if (extraBits) { - BIT_addBits(&blockStream, sequences[n].offset, extraBits); - BIT_flushBits(&blockStream); /* (7)*/ - } - BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */ - } else { - BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */ - } - BIT_flushBits(&blockStream); /* (7)*/ - } - } - - FSE_flushCState(&blockStream, &stateMatchLength); - FSE_flushCState(&blockStream, &stateOffsetBits); - FSE_flushCState(&blockStream, &stateLitLength); - - { - size_t const streamSize = BIT_closeCStream(&blockStream); - if (streamSize == 0) - return ERROR(dstSize_tooSmall); /* not enough space */ - op += streamSize; - } - } - return op - ostart; -} - -ZSTD_STATIC size_t ZSTD_compressSequences(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, size_t srcSize) -{ - size_t const cSize = ZSTD_compressSequences_internal(zc, dst, dstCapacity); - size_t const minGain = ZSTD_minGain(srcSize); - size_t const maxCSize = srcSize - minGain; - /* If the srcSize <= dstCapacity, then there is enough space to write a - * raw uncompressed block. Since we ran out of space, the block must not - * be compressible, so fall back to a raw uncompressed block. - */ - int const uncompressibleError = cSize == ERROR(dstSize_tooSmall) && srcSize <= dstCapacity; - int i; - - if (ZSTD_isError(cSize) && !uncompressibleError) - return cSize; - if (cSize >= maxCSize || uncompressibleError) { - zc->flagStaticHufTable = HUF_repeat_none; - return 0; - } - /* confirm repcodes */ - for (i = 0; i < ZSTD_REP_NUM; i++) - zc->rep[i] = zc->repToConfirm[i]; - return cSize; -} - -/*! ZSTD_storeSeq() : - Store a sequence (literal length, literals, offset code and match length code) into seqStore_t. - `offsetCode` : distance to match, or 0 == repCode. 
- `matchCode` : matchLength - MINMATCH -*/ -ZSTD_STATIC void ZSTD_storeSeq(seqStore_t *seqStorePtr, size_t litLength, const void *literals, U32 offsetCode, size_t matchCode) -{ - /* copy Literals */ - ZSTD_wildcopy(seqStorePtr->lit, literals, litLength); - seqStorePtr->lit += litLength; - - /* literal Length */ - if (litLength > 0xFFFF) { - seqStorePtr->longLengthID = 1; - seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - } - seqStorePtr->sequences[0].litLength = (U16)litLength; - - /* match offset */ - seqStorePtr->sequences[0].offset = offsetCode + 1; - - /* match Length */ - if (matchCode > 0xFFFF) { - seqStorePtr->longLengthID = 2; - seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - } - seqStorePtr->sequences[0].matchLength = (U16)matchCode; - - seqStorePtr->sequences++; -} - -/*-************************************* -* Match length counter -***************************************/ -static unsigned ZSTD_NbCommonBytes(register size_t val) -{ - if (ZSTD_isLittleEndian()) { - if (ZSTD_64bits()) { - return (__builtin_ctzll((U64)val) >> 3); - } else { /* 32 bits */ - return (__builtin_ctz((U32)val) >> 3); - } - } else { /* Big Endian CPU */ - if (ZSTD_64bits()) { - return (__builtin_clzll(val) >> 3); - } else { /* 32 bits */ - return (__builtin_clz((U32)val) >> 3); - } - } -} - -static size_t ZSTD_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *const pInLimit) -{ - const BYTE *const pStart = pIn; - const BYTE *const pInLoopLimit = pInLimit - (sizeof(size_t) - 1); - - while (pIn < pInLoopLimit) { - size_t const diff = ZSTD_readST(pMatch) ^ ZSTD_readST(pIn); - if (!diff) { - pIn += sizeof(size_t); - pMatch += sizeof(size_t); - continue; - } - pIn += ZSTD_NbCommonBytes(diff); - return (size_t)(pIn - pStart); - } - if (ZSTD_64bits()) - if ((pIn < (pInLimit - 3)) && (ZSTD_read32(pMatch) == ZSTD_read32(pIn))) { - pIn += 4; - pMatch += 4; - } - if ((pIn < (pInLimit - 1)) && (ZSTD_read16(pMatch) == ZSTD_read16(pIn))) { - pIn += 2; - pMatch += 2; - } - if ((pIn < pInLimit) && (*pMatch == *pIn)) - pIn++; - return (size_t)(pIn - pStart); -} - -/** ZSTD_count_2segments() : -* can count match length with `ip` & `match` in 2 different segments. 
-* convention : on reaching mEnd, match count continue starting from iStart -*/ -static size_t ZSTD_count_2segments(const BYTE *ip, const BYTE *match, const BYTE *iEnd, const BYTE *mEnd, const BYTE *iStart) -{ - const BYTE *const vEnd = MIN(ip + (mEnd - match), iEnd); - size_t const matchLength = ZSTD_count(ip, match, vEnd); - if (match + matchLength != mEnd) - return matchLength; - return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd); -} - -/*-************************************* -* Hashes -***************************************/ -static const U32 prime3bytes = 506832829U; -static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32 - 24)) * prime3bytes) >> (32 - h); } -ZSTD_STATIC size_t ZSTD_hash3Ptr(const void *ptr, U32 h) { return ZSTD_hash3(ZSTD_readLE32(ptr), h); } /* only in zstd_opt.h */ - -static const U32 prime4bytes = 2654435761U; -static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32 - h); } -static size_t ZSTD_hash4Ptr(const void *ptr, U32 h) { return ZSTD_hash4(ZSTD_read32(ptr), h); } - -static const U64 prime5bytes = 889523592379ULL; -static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64 - 40)) * prime5bytes) >> (64 - h)); } -static size_t ZSTD_hash5Ptr(const void *p, U32 h) { return ZSTD_hash5(ZSTD_readLE64(p), h); } - -static const U64 prime6bytes = 227718039650203ULL; -static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64 - 48)) * prime6bytes) >> (64 - h)); } -static size_t ZSTD_hash6Ptr(const void *p, U32 h) { return ZSTD_hash6(ZSTD_readLE64(p), h); } - -static const U64 prime7bytes = 58295818150454627ULL; -static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64 - 56)) * prime7bytes) >> (64 - h)); } -static size_t ZSTD_hash7Ptr(const void *p, U32 h) { return ZSTD_hash7(ZSTD_readLE64(p), h); } - -static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; -static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u)*prime8bytes) >> (64 - h)); } -static size_t ZSTD_hash8Ptr(const void *p, U32 h) { return ZSTD_hash8(ZSTD_readLE64(p), h); } - -static size_t ZSTD_hashPtr(const void *p, U32 hBits, U32 mls) -{ - switch (mls) { - // case 3: return ZSTD_hash3Ptr(p, hBits); - default: - case 4: return ZSTD_hash4Ptr(p, hBits); - case 5: return ZSTD_hash5Ptr(p, hBits); - case 6: return ZSTD_hash6Ptr(p, hBits); - case 7: return ZSTD_hash7Ptr(p, hBits); - case 8: return ZSTD_hash8Ptr(p, hBits); - } -} - -/*-************************************* -* Fast Scan -***************************************/ -static void ZSTD_fillHashTable(ZSTD_CCtx *zc, const void *end, const U32 mls) -{ - U32 *const hashTable = zc->hashTable; - U32 const hBits = zc->params.cParams.hashLog; - const BYTE *const base = zc->base; - const BYTE *ip = base + zc->nextToUpdate; - const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE; - const size_t fastHashFillStep = 3; - - while (ip <= iend) { - hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base); - ip += fastHashFillStep; - } -} - -FORCE_INLINE -void ZSTD_compressBlock_fast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls) -{ - U32 *const hashTable = cctx->hashTable; - U32 const hBits = cctx->params.cParams.hashLog; - seqStore_t *seqStorePtr = &(cctx->seqStore); - const BYTE *const base = cctx->base; - const BYTE *const istart = (const BYTE *)src; - const BYTE *ip = istart; - const BYTE *anchor = istart; - const U32 lowestIndex = cctx->dictLimit; - const BYTE *const lowest = base + lowestIndex; - const BYTE *const iend = istart + srcSize; - const BYTE *const ilimit = 
iend - HASH_READ_SIZE; - U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1]; - U32 offsetSaved = 0; - - /* init */ - ip += (ip == lowest); - { - U32 const maxRep = (U32)(ip - lowest); - if (offset_2 > maxRep) - offsetSaved = offset_2, offset_2 = 0; - if (offset_1 > maxRep) - offsetSaved = offset_1, offset_1 = 0; - } - - /* Main Search Loop */ - while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ - size_t mLength; - size_t const h = ZSTD_hashPtr(ip, hBits, mls); - U32 const curr = (U32)(ip - base); - U32 const matchIndex = hashTable[h]; - const BYTE *match = base + matchIndex; - hashTable[h] = curr; /* update hash table */ - - if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { - mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4; - ip++; - ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH); - } else { - U32 offset; - if ((matchIndex <= lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) { - ip += ((ip - anchor) >> g_searchStrength) + 1; - continue; - } - mLength = ZSTD_count(ip + 4, match + 4, iend) + 4; - offset = (U32)(ip - match); - while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) { - ip--; - match--; - mLength++; - } /* catch up */ - offset_2 = offset_1; - offset_1 = offset; - - ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH); - } - - /* match found */ - ip += mLength; - anchor = ip; - - if (ip <= ilimit) { - /* Fill Table */ - hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; /* here because curr+2 could be > iend-8 */ - hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base); - /* check immediate repcode */ - while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) { - /* store sequence */ - size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4; - { - U32 const tmpOff = offset_2; - offset_2 = offset_1; - offset_1 = tmpOff; - } /* swap offset_2 <=> offset_1 */ - hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base); - ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH); - ip += rLength; - anchor = ip; - continue; /* faster when present ... (?) */ - } - } - } - - /* save reps for next block */ - cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; - cctx->repToConfirm[1] = offset_2 ? 
offset_2 : offsetSaved; - - /* Last Literals */ - { - size_t const lastLLSize = iend - anchor; - memcpy(seqStorePtr->lit, anchor, lastLLSize); - seqStorePtr->lit += lastLLSize; - } -} - -static void ZSTD_compressBlock_fast(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -{ - const U32 mls = ctx->params.cParams.searchLength; - switch (mls) { - default: /* includes case 3 */ - case 4: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return; - case 5: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return; - case 6: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return; - case 7: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return; - } -} - -static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls) -{ - U32 *hashTable = ctx->hashTable; - const U32 hBits = ctx->params.cParams.hashLog; - seqStore_t *seqStorePtr = &(ctx->seqStore); - const BYTE *const base = ctx->base; - const BYTE *const dictBase = ctx->dictBase; - const BYTE *const istart = (const BYTE *)src; - const BYTE *ip = istart; - const BYTE *anchor = istart; - const U32 lowestIndex = ctx->lowLimit; - const BYTE *const dictStart = dictBase + lowestIndex; - const U32 dictLimit = ctx->dictLimit; - const BYTE *const lowPrefixPtr = base + dictLimit; - const BYTE *const dictEnd = dictBase + dictLimit; - const BYTE *const iend = istart + srcSize; - const BYTE *const ilimit = iend - 8; - U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1]; - - /* Search Loop */ - while (ip < ilimit) { /* < instead of <=, because (ip+1) */ - const size_t h = ZSTD_hashPtr(ip, hBits, mls); - const U32 matchIndex = hashTable[h]; - const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base; - const BYTE *match = matchBase + matchIndex; - const U32 curr = (U32)(ip - base); - const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */ - const BYTE *repBase = repIndex < dictLimit ? dictBase : base; - const BYTE *repMatch = repBase + repIndex; - size_t mLength; - hashTable[h] = curr; /* update hash table */ - - if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) && - (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) { - const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend; - mLength = ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32; - ip++; - ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH); - } else { - if ((matchIndex < lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) { - ip += ((ip - anchor) >> g_searchStrength) + 1; - continue; - } - { - const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend; - const BYTE *lowMatchPtr = matchIndex < dictLimit ? 
dictStart : lowPrefixPtr; - U32 offset; - mLength = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32; - while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) { - ip--; - match--; - mLength++; - } /* catch up */ - offset = curr - matchIndex; - offset_2 = offset_1; - offset_1 = offset; - ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH); - } - } - - /* found a match : store it */ - ip += mLength; - anchor = ip; - - if (ip <= ilimit) { - /* Fill Table */ - hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; - hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base); - /* check immediate repcode */ - while (ip <= ilimit) { - U32 const curr2 = (U32)(ip - base); - U32 const repIndex2 = curr2 - offset_2; - const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2; - if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ - && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) { - const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend; - size_t repLength2 = - ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32; - U32 tmpOffset = offset_2; - offset_2 = offset_1; - offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH); - hashTable[ZSTD_hashPtr(ip, hBits, mls)] = curr2; - ip += repLength2; - anchor = ip; - continue; - } - break; - } - } - } - - /* save reps for next block */ - ctx->repToConfirm[0] = offset_1; - ctx->repToConfirm[1] = offset_2; - - /* Last Literals */ - { - size_t const lastLLSize = iend - anchor; - memcpy(seqStorePtr->lit, anchor, lastLLSize); - seqStorePtr->lit += lastLLSize; - } -} - -static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -{ - U32 const mls = ctx->params.cParams.searchLength; - switch (mls) { - default: /* includes case 3 */ - case 4: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return; - case 5: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return; - case 6: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return; - case 7: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return; - } -} - -/*-************************************* -* Double Fast -***************************************/ -static void ZSTD_fillDoubleHashTable(ZSTD_CCtx *cctx, const void *end, const U32 mls) -{ - U32 *const hashLarge = cctx->hashTable; - U32 const hBitsL = cctx->params.cParams.hashLog; - U32 *const hashSmall = cctx->chainTable; - U32 const hBitsS = cctx->params.cParams.chainLog; - const BYTE *const base = cctx->base; - const BYTE *ip = base + cctx->nextToUpdate; - const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE; - const size_t fastHashFillStep = 3; - - while (ip <= iend) { - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base); - hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base); - ip += fastHashFillStep; - } -} - -FORCE_INLINE -void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls) -{ - U32 *const hashLong = cctx->hashTable; - const U32 hBitsL = cctx->params.cParams.hashLog; - U32 *const hashSmall = cctx->chainTable; - const U32 hBitsS = cctx->params.cParams.chainLog; - seqStore_t *seqStorePtr = &(cctx->seqStore); - const BYTE *const base = cctx->base; - const 
BYTE *const istart = (const BYTE *)src; - const BYTE *ip = istart; - const BYTE *anchor = istart; - const U32 lowestIndex = cctx->dictLimit; - const BYTE *const lowest = base + lowestIndex; - const BYTE *const iend = istart + srcSize; - const BYTE *const ilimit = iend - HASH_READ_SIZE; - U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1]; - U32 offsetSaved = 0; - - /* init */ - ip += (ip == lowest); - { - U32 const maxRep = (U32)(ip - lowest); - if (offset_2 > maxRep) - offsetSaved = offset_2, offset_2 = 0; - if (offset_1 > maxRep) - offsetSaved = offset_1, offset_1 = 0; - } - - /* Main Search Loop */ - while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ - size_t mLength; - size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); - size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); - U32 const curr = (U32)(ip - base); - U32 const matchIndexL = hashLong[h2]; - U32 const matchIndexS = hashSmall[h]; - const BYTE *matchLong = base + matchIndexL; - const BYTE *match = base + matchIndexS; - hashLong[h2] = hashSmall[h] = curr; /* update hash tables */ - - if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { /* note : by construction, offset_1 <= curr */ - mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4; - ip++; - ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH); - } else { - U32 offset; - if ((matchIndexL > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) { - mLength = ZSTD_count(ip + 8, matchLong + 8, iend) + 8; - offset = (U32)(ip - matchLong); - while (((ip > anchor) & (matchLong > lowest)) && (ip[-1] == matchLong[-1])) { - ip--; - matchLong--; - mLength++; - } /* catch up */ - } else if ((matchIndexS > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) { - size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8); - U32 const matchIndex3 = hashLong[h3]; - const BYTE *match3 = base + matchIndex3; - hashLong[h3] = curr + 1; - if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) { - mLength = ZSTD_count(ip + 9, match3 + 8, iend) + 8; - ip++; - offset = (U32)(ip - match3); - while (((ip > anchor) & (match3 > lowest)) && (ip[-1] == match3[-1])) { - ip--; - match3--; - mLength++; - } /* catch up */ - } else { - mLength = ZSTD_count(ip + 4, match + 4, iend) + 4; - offset = (U32)(ip - match); - while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) { - ip--; - match--; - mLength++; - } /* catch up */ - } - } else { - ip += ((ip - anchor) >> g_searchStrength) + 1; - continue; - } - - offset_2 = offset_1; - offset_1 = offset; - - ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH); - } - - /* match found */ - ip += mLength; - anchor = ip; - - if (ip <= ilimit) { - /* Fill Table */ - hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = - curr + 2; /* here because curr+2 could be > iend-8 */ - hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base); - - /* check immediate repcode */ - while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) { - /* store sequence */ - size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4; - { - U32 const tmpOff = offset_2; - offset_2 = offset_1; - offset_1 = tmpOff; - } /* swap offset_2 <=> offset_1 */ - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base); - hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base); - 
ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH); - ip += rLength; - anchor = ip; - continue; /* faster when present ... (?) */ - } - } - } - - /* save reps for next block */ - cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; - cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved; - - /* Last Literals */ - { - size_t const lastLLSize = iend - anchor; - memcpy(seqStorePtr->lit, anchor, lastLLSize); - seqStorePtr->lit += lastLLSize; - } -} - -static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -{ - const U32 mls = ctx->params.cParams.searchLength; - switch (mls) { - default: /* includes case 3 */ - case 4: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return; - case 5: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return; - case 6: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return; - case 7: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return; - } -} - -static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls) -{ - U32 *const hashLong = ctx->hashTable; - U32 const hBitsL = ctx->params.cParams.hashLog; - U32 *const hashSmall = ctx->chainTable; - U32 const hBitsS = ctx->params.cParams.chainLog; - seqStore_t *seqStorePtr = &(ctx->seqStore); - const BYTE *const base = ctx->base; - const BYTE *const dictBase = ctx->dictBase; - const BYTE *const istart = (const BYTE *)src; - const BYTE *ip = istart; - const BYTE *anchor = istart; - const U32 lowestIndex = ctx->lowLimit; - const BYTE *const dictStart = dictBase + lowestIndex; - const U32 dictLimit = ctx->dictLimit; - const BYTE *const lowPrefixPtr = base + dictLimit; - const BYTE *const dictEnd = dictBase + dictLimit; - const BYTE *const iend = istart + srcSize; - const BYTE *const ilimit = iend - 8; - U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1]; - - /* Search Loop */ - while (ip < ilimit) { /* < instead of <=, because (ip+1) */ - const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls); - const U32 matchIndex = hashSmall[hSmall]; - const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base; - const BYTE *match = matchBase + matchIndex; - - const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8); - const U32 matchLongIndex = hashLong[hLong]; - const BYTE *matchLongBase = matchLongIndex < dictLimit ? dictBase : base; - const BYTE *matchLong = matchLongBase + matchLongIndex; - - const U32 curr = (U32)(ip - base); - const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */ - const BYTE *repBase = repIndex < dictLimit ? dictBase : base; - const BYTE *repMatch = repBase + repIndex; - size_t mLength; - hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */ - - if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) && - (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) { - const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend; - mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, lowPrefixPtr) + 4; - ip++; - ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH); - } else { - if ((matchLongIndex > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) { - const BYTE *matchEnd = matchLongIndex < dictLimit ? dictEnd : iend; - const BYTE *lowMatchPtr = matchLongIndex < dictLimit ? 
dictStart : lowPrefixPtr; - U32 offset; - mLength = ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, lowPrefixPtr) + 8; - offset = curr - matchLongIndex; - while (((ip > anchor) & (matchLong > lowMatchPtr)) && (ip[-1] == matchLong[-1])) { - ip--; - matchLong--; - mLength++; - } /* catch up */ - offset_2 = offset_1; - offset_1 = offset; - ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH); - - } else if ((matchIndex > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) { - size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8); - U32 const matchIndex3 = hashLong[h3]; - const BYTE *const match3Base = matchIndex3 < dictLimit ? dictBase : base; - const BYTE *match3 = match3Base + matchIndex3; - U32 offset; - hashLong[h3] = curr + 1; - if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) { - const BYTE *matchEnd = matchIndex3 < dictLimit ? dictEnd : iend; - const BYTE *lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr; - mLength = ZSTD_count_2segments(ip + 9, match3 + 8, iend, matchEnd, lowPrefixPtr) + 8; - ip++; - offset = curr + 1 - matchIndex3; - while (((ip > anchor) & (match3 > lowMatchPtr)) && (ip[-1] == match3[-1])) { - ip--; - match3--; - mLength++; - } /* catch up */ - } else { - const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend; - const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr; - mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, lowPrefixPtr) + 4; - offset = curr - matchIndex; - while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) { - ip--; - match--; - mLength++; - } /* catch up */ - } - offset_2 = offset_1; - offset_1 = offset; - ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH); - - } else { - ip += ((ip - anchor) >> g_searchStrength) + 1; - continue; - } - } - - /* found a match : store it */ - ip += mLength; - anchor = ip; - - if (ip <= ilimit) { - /* Fill Table */ - hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = curr + 2; - hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = curr + 2; - hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base); - hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (U32)(ip - 2 - base); - /* check immediate repcode */ - while (ip <= ilimit) { - U32 const curr2 = (U32)(ip - base); - U32 const repIndex2 = curr2 - offset_2; - const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2; - if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ - && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) { - const BYTE *const repEnd2 = repIndex2 < dictLimit ? 
dictEnd : iend; - size_t const repLength2 = - ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32; - U32 tmpOffset = offset_2; - offset_2 = offset_1; - offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH); - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = curr2; - hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = curr2; - ip += repLength2; - anchor = ip; - continue; - } - break; - } - } - } - - /* save reps for next block */ - ctx->repToConfirm[0] = offset_1; - ctx->repToConfirm[1] = offset_2; - - /* Last Literals */ - { - size_t const lastLLSize = iend - anchor; - memcpy(seqStorePtr->lit, anchor, lastLLSize); - seqStorePtr->lit += lastLLSize; - } -} - -static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -{ - U32 const mls = ctx->params.cParams.searchLength; - switch (mls) { - default: /* includes case 3 */ - case 4: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return; - case 5: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return; - case 6: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return; - case 7: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return; - } -} - -/*-************************************* -* Binary Tree search -***************************************/ -/** ZSTD_insertBt1() : add one or multiple positions to tree. -* ip : assumed <= iend-8 . -* @return : nb of positions added */ -static U32 ZSTD_insertBt1(ZSTD_CCtx *zc, const BYTE *const ip, const U32 mls, const BYTE *const iend, U32 nbCompares, U32 extDict) -{ - U32 *const hashTable = zc->hashTable; - U32 const hashLog = zc->params.cParams.hashLog; - size_t const h = ZSTD_hashPtr(ip, hashLog, mls); - U32 *const bt = zc->chainTable; - U32 const btLog = zc->params.cParams.chainLog - 1; - U32 const btMask = (1 << btLog) - 1; - U32 matchIndex = hashTable[h]; - size_t commonLengthSmaller = 0, commonLengthLarger = 0; - const BYTE *const base = zc->base; - const BYTE *const dictBase = zc->dictBase; - const U32 dictLimit = zc->dictLimit; - const BYTE *const dictEnd = dictBase + dictLimit; - const BYTE *const prefixStart = base + dictLimit; - const BYTE *match; - const U32 curr = (U32)(ip - base); - const U32 btLow = btMask >= curr ? 
0 : curr - btMask; - U32 *smallerPtr = bt + 2 * (curr & btMask); - U32 *largerPtr = smallerPtr + 1; - U32 dummy32; /* to be nullified at the end */ - U32 const windowLow = zc->lowLimit; - U32 matchEndIdx = curr + 8; - size_t bestLength = 8; - - hashTable[h] = curr; /* Update Hash Table */ - - while (nbCompares-- && (matchIndex > windowLow)) { - U32 *const nextPtr = bt + 2 * (matchIndex & btMask); - size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ - - if ((!extDict) || (matchIndex + matchLength >= dictLimit)) { - match = base + matchIndex; - if (match[matchLength] == ip[matchLength]) - matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1; - } else { - match = dictBase + matchIndex; - matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart); - if (matchIndex + matchLength >= dictLimit) - match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ - } - - if (matchLength > bestLength) { - bestLength = matchLength; - if (matchLength > matchEndIdx - matchIndex) - matchEndIdx = matchIndex + (U32)matchLength; - } - - if (ip + matchLength == iend) /* equal : no way to know if inf or sup */ - break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt the tree */ - - if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */ - /* match is smaller than curr */ - *smallerPtr = matchIndex; /* update smaller idx */ - commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ - if (matchIndex <= btLow) { - smallerPtr = &dummy32; - break; - } /* beyond tree size, stop the search */ - smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */ - matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */ - } else { - /* match is larger than curr */ - *largerPtr = matchIndex; - commonLengthLarger = matchLength; - if (matchIndex <= btLow) { - largerPtr = &dummy32; - break; - } /* beyond tree size, stop the search */ - largerPtr = nextPtr; - matchIndex = nextPtr[0]; - } - } - - *smallerPtr = *largerPtr = 0; - if (bestLength > 384) - return MIN(192, (U32)(bestLength - 384)); /* speed optimization */ - if (matchEndIdx > curr + 8) - return matchEndIdx - curr - 8; - return 1; -} - -static size_t ZSTD_insertBtAndFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, size_t *offsetPtr, U32 nbCompares, const U32 mls, - U32 extDict) -{ - U32 *const hashTable = zc->hashTable; - U32 const hashLog = zc->params.cParams.hashLog; - size_t const h = ZSTD_hashPtr(ip, hashLog, mls); - U32 *const bt = zc->chainTable; - U32 const btLog = zc->params.cParams.chainLog - 1; - U32 const btMask = (1 << btLog) - 1; - U32 matchIndex = hashTable[h]; - size_t commonLengthSmaller = 0, commonLengthLarger = 0; - const BYTE *const base = zc->base; - const BYTE *const dictBase = zc->dictBase; - const U32 dictLimit = zc->dictLimit; - const BYTE *const dictEnd = dictBase + dictLimit; - const BYTE *const prefixStart = base + dictLimit; - const U32 curr = (U32)(ip - base); - const U32 btLow = btMask >= curr ? 
0 : curr - btMask; - const U32 windowLow = zc->lowLimit; - U32 *smallerPtr = bt + 2 * (curr & btMask); - U32 *largerPtr = bt + 2 * (curr & btMask) + 1; - U32 matchEndIdx = curr + 8; - U32 dummy32; /* to be nullified at the end */ - size_t bestLength = 0; - - hashTable[h] = curr; /* Update Hash Table */ - - while (nbCompares-- && (matchIndex > windowLow)) { - U32 *const nextPtr = bt + 2 * (matchIndex & btMask); - size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ - const BYTE *match; - - if ((!extDict) || (matchIndex + matchLength >= dictLimit)) { - match = base + matchIndex; - if (match[matchLength] == ip[matchLength]) - matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1; - } else { - match = dictBase + matchIndex; - matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart); - if (matchIndex + matchLength >= dictLimit) - match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ - } - - if (matchLength > bestLength) { - if (matchLength > matchEndIdx - matchIndex) - matchEndIdx = matchIndex + (U32)matchLength; - if ((4 * (int)(matchLength - bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)offsetPtr[0] + 1))) - bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex; - if (ip + matchLength == iend) /* equal : no way to know if inf or sup */ - break; /* drop, to guarantee consistency (miss a little bit of compression) */ - } - - if (match[matchLength] < ip[matchLength]) { - /* match is smaller than curr */ - *smallerPtr = matchIndex; /* update smaller idx */ - commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ - if (matchIndex <= btLow) { - smallerPtr = &dummy32; - break; - } /* beyond tree size, stop the search */ - smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */ - matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */ - } else { - /* match is larger than curr */ - *largerPtr = matchIndex; - commonLengthLarger = matchLength; - if (matchIndex <= btLow) { - largerPtr = &dummy32; - break; - } /* beyond tree size, stop the search */ - largerPtr = nextPtr; - matchIndex = nextPtr[0]; - } - } - - *smallerPtr = *largerPtr = 0; - - zc->nextToUpdate = (matchEndIdx > curr + 8) ? 
matchEndIdx - 8 : curr + 1; - return bestLength; -} - -static void ZSTD_updateTree(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls) -{ - const BYTE *const base = zc->base; - const U32 target = (U32)(ip - base); - U32 idx = zc->nextToUpdate; - - while (idx < target) - idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 0); -} - -/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ -static size_t ZSTD_BtFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls) -{ - if (ip < zc->base + zc->nextToUpdate) - return 0; /* skipped area */ - ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls); - return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0); -} - -static size_t ZSTD_BtFindBestMatch_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */ - const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 matchLengthSearch) -{ - switch (matchLengthSearch) { - default: /* includes case 3 */ - case 4: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4); - case 5: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5); - case 7: - case 6: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6); - } -} - -static void ZSTD_updateTree_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls) -{ - const BYTE *const base = zc->base; - const U32 target = (U32)(ip - base); - U32 idx = zc->nextToUpdate; - - while (idx < target) - idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 1); -} - -/** Tree updater, providing best match */ -static size_t ZSTD_BtFindBestMatch_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, - const U32 mls) -{ - if (ip < zc->base + zc->nextToUpdate) - return 0; /* skipped area */ - ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls); - return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1); -} - -static size_t ZSTD_BtFindBestMatch_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */ - const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, - const U32 matchLengthSearch) -{ - switch (matchLengthSearch) { - default: /* includes case 3 */ - case 4: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4); - case 5: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5); - case 7: - case 6: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6); - } -} - -/* ********************************* -* Hash Chain -***********************************/ -#define NEXT_IN_CHAIN(d, mask) chainTable[(d)&mask] - -/* Update chains up to ip (excluded) - Assumption : always within prefix (i.e. 
not within extDict) */ -FORCE_INLINE -U32 ZSTD_insertAndFindFirstIndex(ZSTD_CCtx *zc, const BYTE *ip, U32 mls) -{ - U32 *const hashTable = zc->hashTable; - const U32 hashLog = zc->params.cParams.hashLog; - U32 *const chainTable = zc->chainTable; - const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1; - const BYTE *const base = zc->base; - const U32 target = (U32)(ip - base); - U32 idx = zc->nextToUpdate; - - while (idx < target) { /* catch up */ - size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); - NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; - hashTable[h] = idx; - idx++; - } - - zc->nextToUpdate = target; - return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; -} - -/* inlining is important to hardwire a hot branch (template emulation) */ -FORCE_INLINE -size_t ZSTD_HcFindBestMatch_generic(ZSTD_CCtx *zc, /* Index table will be updated */ - const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls, - const U32 extDict) -{ - U32 *const chainTable = zc->chainTable; - const U32 chainSize = (1 << zc->params.cParams.chainLog); - const U32 chainMask = chainSize - 1; - const BYTE *const base = zc->base; - const BYTE *const dictBase = zc->dictBase; - const U32 dictLimit = zc->dictLimit; - const BYTE *const prefixStart = base + dictLimit; - const BYTE *const dictEnd = dictBase + dictLimit; - const U32 lowLimit = zc->lowLimit; - const U32 curr = (U32)(ip - base); - const U32 minChain = curr > chainSize ? curr - chainSize : 0; - int nbAttempts = maxNbAttempts; - size_t ml = EQUAL_READ32 - 1; - - /* HC4 match finder */ - U32 matchIndex = ZSTD_insertAndFindFirstIndex(zc, ip, mls); - - for (; (matchIndex > lowLimit) & (nbAttempts > 0); nbAttempts--) { - const BYTE *match; - size_t currMl = 0; - if ((!extDict) || matchIndex >= dictLimit) { - match = base + matchIndex; - if (match[ml] == ip[ml]) /* potentially better */ - currMl = ZSTD_count(ip, match, iLimit); - } else { - match = dictBase + matchIndex; - if (ZSTD_read32(match) == ZSTD_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ - currMl = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32; - } - - /* save best solution */ - if (currMl > ml) { - ml = currMl; - *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE; - if (ip + currMl == iLimit) - break; /* best possible, and avoid read overflow*/ - } - - if (matchIndex <= minChain) - break; - matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); - } - - return ml; -} - -FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, - const U32 matchLengthSearch) -{ - switch (matchLengthSearch) { - default: /* includes case 3 */ - case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0); - case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0); - case 7: - case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0); - } -} - -FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, - const U32 matchLengthSearch) -{ - switch (matchLengthSearch) { - default: /* includes case 3 */ - case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1); - case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1); - case 7: - 
case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1); - } -} - -/* ******************************* -* Common parser - lazy strategy -*********************************/ -FORCE_INLINE -void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth) -{ - seqStore_t *seqStorePtr = &(ctx->seqStore); - const BYTE *const istart = (const BYTE *)src; - const BYTE *ip = istart; - const BYTE *anchor = istart; - const BYTE *const iend = istart + srcSize; - const BYTE *const ilimit = iend - 8; - const BYTE *const base = ctx->base + ctx->dictLimit; - - U32 const maxSearches = 1 << ctx->params.cParams.searchLog; - U32 const mls = ctx->params.cParams.searchLength; - - typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch); - searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS; - U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset = 0; - - /* init */ - ip += (ip == base); - ctx->nextToUpdate3 = ctx->nextToUpdate; - { - U32 const maxRep = (U32)(ip - base); - if (offset_2 > maxRep) - savedOffset = offset_2, offset_2 = 0; - if (offset_1 > maxRep) - savedOffset = offset_1, offset_1 = 0; - } - - /* Match Loop */ - while (ip < ilimit) { - size_t matchLength = 0; - size_t offset = 0; - const BYTE *start = ip + 1; - - /* check repCode */ - if ((offset_1 > 0) & (ZSTD_read32(ip + 1) == ZSTD_read32(ip + 1 - offset_1))) { - /* repcode : we take it */ - matchLength = ZSTD_count(ip + 1 + EQUAL_READ32, ip + 1 + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32; - if (depth == 0) - goto _storeSequence; - } - - /* first search (depth 0) */ - { - size_t offsetFound = 99999999; - size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); - if (ml2 > matchLength) - matchLength = ml2, start = ip, offset = offsetFound; - } - - if (matchLength < EQUAL_READ32) { - ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */ - continue; - } - - /* let's try to find a better solution */ - if (depth >= 1) - while (ip < ilimit) { - ip++; - if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) { - size_t const mlRep = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32; - int const gain2 = (int)(mlRep * 3); - int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1); - if ((mlRep >= EQUAL_READ32) && (gain2 > gain1)) - matchLength = mlRep, offset = 0, start = ip; - } - { - size_t offset2 = 99999999; - size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); - int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */ - int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4); - if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { - matchLength = ml2, offset = offset2, start = ip; - continue; /* search a better one */ - } - } - - /* let's find an even better one */ - if ((depth == 2) && (ip < ilimit)) { - ip++; - if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) { - size_t const ml2 = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32; - int const gain2 = (int)(ml2 * 4); - int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1); - if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) - matchLength = ml2, offset = 0, start 
= ip; - } - { - size_t offset2 = 99999999; - size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); - int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */ - int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7); - if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { - matchLength = ml2, offset = offset2, start = ip; - continue; - } - } - } - break; /* nothing found : store previous solution */ - } - - /* NOTE: - * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior. - * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which - * overflows the pointer, which is undefined behavior. - */ - /* catch up */ - if (offset) { - while ((start > anchor) && (start > base + offset - ZSTD_REP_MOVE) && - (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1])) /* only search for offset within prefix */ - { - start--; - matchLength++; - } - offset_2 = offset_1; - offset_1 = (U32)(offset - ZSTD_REP_MOVE); - } - - /* store sequence */ -_storeSequence: - { - size_t const litLength = start - anchor; - ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH); - anchor = ip = start + matchLength; - } - - /* check immediate repcode */ - while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) { - /* store sequence */ - matchLength = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_2, iend) + EQUAL_READ32; - offset = offset_2; - offset_2 = offset_1; - offset_1 = (U32)offset; /* swap repcodes */ - ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH); - ip += matchLength; - anchor = ip; - continue; /* faster when present ... (?) */ - } - } - - /* Save reps for next block */ - ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset; - ctx->repToConfirm[1] = offset_2 ? 
offset_2 : savedOffset; - - /* Last Literals */ - { - size_t const lastLLSize = iend - anchor; - memcpy(seqStorePtr->lit, anchor, lastLLSize); - seqStorePtr->lit += lastLLSize; - } -} - -static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); } - -static void ZSTD_compressBlock_lazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); } - -static void ZSTD_compressBlock_lazy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); } - -static void ZSTD_compressBlock_greedy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); } - -FORCE_INLINE -void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth) -{ - seqStore_t *seqStorePtr = &(ctx->seqStore); - const BYTE *const istart = (const BYTE *)src; - const BYTE *ip = istart; - const BYTE *anchor = istart; - const BYTE *const iend = istart + srcSize; - const BYTE *const ilimit = iend - 8; - const BYTE *const base = ctx->base; - const U32 dictLimit = ctx->dictLimit; - const U32 lowestIndex = ctx->lowLimit; - const BYTE *const prefixStart = base + dictLimit; - const BYTE *const dictBase = ctx->dictBase; - const BYTE *const dictEnd = dictBase + dictLimit; - const BYTE *const dictStart = dictBase + ctx->lowLimit; - - const U32 maxSearches = 1 << ctx->params.cParams.searchLog; - const U32 mls = ctx->params.cParams.searchLength; - - typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch); - searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS; - - U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1]; - - /* init */ - ctx->nextToUpdate3 = ctx->nextToUpdate; - ip += (ip == prefixStart); - - /* Match Loop */ - while (ip < ilimit) { - size_t matchLength = 0; - size_t offset = 0; - const BYTE *start = ip + 1; - U32 curr = (U32)(ip - base); - - /* check repCode */ - { - const U32 repIndex = (U32)(curr + 1 - offset_1); - const BYTE *const repBase = repIndex < dictLimit ? dictBase : base; - const BYTE *const repMatch = repBase + repIndex; - if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ - if (ZSTD_read32(ip + 1) == ZSTD_read32(repMatch)) { - /* repcode detected we should take it */ - const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend; - matchLength = - ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32; - if (depth == 0) - goto _storeSequence; - } - } - - /* first search (depth 0) */ - { - size_t offsetFound = 99999999; - size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); - if (ml2 > matchLength) - matchLength = ml2, start = ip, offset = offsetFound; - } - - if (matchLength < EQUAL_READ32) { - ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */ - continue; - } - - /* let's try to find a better solution */ - if (depth >= 1) - while (ip < ilimit) { - ip++; - curr++; - /* check repCode */ - if (offset) { - const U32 repIndex = (U32)(curr - offset_1); - const BYTE *const repBase = repIndex < dictLimit ? 
dictBase : base; - const BYTE *const repMatch = repBase + repIndex; - if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ - if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) { - /* repcode detected */ - const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend; - size_t const repLength = - ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + - EQUAL_READ32; - int const gain2 = (int)(repLength * 3); - int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1); - if ((repLength >= EQUAL_READ32) && (gain2 > gain1)) - matchLength = repLength, offset = 0, start = ip; - } - } - - /* search match, depth 1 */ - { - size_t offset2 = 99999999; - size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); - int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */ - int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4); - if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { - matchLength = ml2, offset = offset2, start = ip; - continue; /* search a better one */ - } - } - - /* let's find an even better one */ - if ((depth == 2) && (ip < ilimit)) { - ip++; - curr++; - /* check repCode */ - if (offset) { - const U32 repIndex = (U32)(curr - offset_1); - const BYTE *const repBase = repIndex < dictLimit ? dictBase : base; - const BYTE *const repMatch = repBase + repIndex; - if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ - if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) { - /* repcode detected */ - const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend; - size_t repLength = ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, - repEnd, prefixStart) + - EQUAL_READ32; - int gain2 = (int)(repLength * 4); - int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1); - if ((repLength >= EQUAL_READ32) && (gain2 > gain1)) - matchLength = repLength, offset = 0, start = ip; - } - } - - /* search match, depth 2 */ - { - size_t offset2 = 99999999; - size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); - int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */ - int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7); - if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { - matchLength = ml2, offset = offset2, start = ip; - continue; - } - } - } - break; /* nothing found : store previous solution */ - } - - /* catch up */ - if (offset) { - U32 const matchIndex = (U32)((start - base) - (offset - ZSTD_REP_MOVE)); - const BYTE *match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex; - const BYTE *const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart; - while ((start > anchor) && (match > mStart) && (start[-1] == match[-1])) { - start--; - match--; - matchLength++; - } /* catch up */ - offset_2 = offset_1; - offset_1 = (U32)(offset - ZSTD_REP_MOVE); - } - - /* store sequence */ - _storeSequence : { - size_t const litLength = start - anchor; - ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH); - anchor = ip = start + matchLength; - } - - /* check immediate repcode */ - while (ip <= ilimit) { - const U32 repIndex = (U32)((ip - base) - offset_2); - const BYTE *const repBase = repIndex < dictLimit ? 
dictBase : base; - const BYTE *const repMatch = repBase + repIndex; - if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ - if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) { - /* repcode detected we should take it */ - const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend; - matchLength = - ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32; - offset = offset_2; - offset_2 = offset_1; - offset_1 = (U32)offset; /* swap offset history */ - ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH); - ip += matchLength; - anchor = ip; - continue; /* faster when present ... (?) */ - } - break; - } - } - - /* Save reps for next block */ - ctx->repToConfirm[0] = offset_1; - ctx->repToConfirm[1] = offset_2; - - /* Last Literals */ - { - size_t const lastLLSize = iend - anchor; - memcpy(seqStorePtr->lit, anchor, lastLLSize); - seqStorePtr->lit += lastLLSize; - } -} - -void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); } - -static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -{ - ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1); -} - -static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -{ - ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2); -} - -static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -{ - ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2); -} - -/* The optimal parser */ -#include "zstd_opt.h" - -static void ZSTD_compressBlock_btopt(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -{ -#ifdef ZSTD_OPT_H_91842398743 - ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0); -#else - (void)ctx; - (void)src; - (void)srcSize; - return; -#endif -} - -static void ZSTD_compressBlock_btopt2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -{ -#ifdef ZSTD_OPT_H_91842398743 - ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1); -#else - (void)ctx; - (void)src; - (void)srcSize; - return; -#endif -} - -static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -{ -#ifdef ZSTD_OPT_H_91842398743 - ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0); -#else - (void)ctx; - (void)src; - (void)srcSize; - return; -#endif -} - -static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) -{ -#ifdef ZSTD_OPT_H_91842398743 - ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1); -#else - (void)ctx; - (void)src; - (void)srcSize; - return; -#endif -} - -typedef void (*ZSTD_blockCompressor)(ZSTD_CCtx *ctx, const void *src, size_t srcSize); - -static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict) -{ - static const ZSTD_blockCompressor blockCompressor[2][8] = { - {ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, - ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2}, - {ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict, - ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict}}; - - return blockCompressor[extDict][(U32)strat]; -} 
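The table above is the single point where a ZSTD_strategy value is bound to a block compressor: row 0 serves the contiguous single-segment case, row 1 is selected when part of the window lives in an external dictionary segment (the caller passes zc->lowLimit < zc->dictLimit), and the strategy enum value itself is the column index. Below is a minimal standalone sketch of the same table-dispatch idea; demo_fast, demo_lazy, and the extDict variants are hypothetical placeholders for illustration only, not functions from this file.

/* Sketch only : strategy-indexed dispatch, mirroring blockCompressor[extDict][(U32)strat] above. */
#include <stdio.h>

typedef void (*demo_blockCompressor)(const void *src, size_t srcSize);

static void demo_fast(const void *src, size_t srcSize)         { (void)src; printf("fast: %zu bytes\n", srcSize); }
static void demo_lazy(const void *src, size_t srcSize)         { (void)src; printf("lazy: %zu bytes\n", srcSize); }
static void demo_fast_extDict(const void *src, size_t srcSize) { (void)src; printf("fast+extDict: %zu bytes\n", srcSize); }
static void demo_lazy_extDict(const void *src, size_t srcSize) { (void)src; printf("lazy+extDict: %zu bytes\n", srcSize); }

int main(void)
{
	/* [extDict][strategy] : the compressor is resolved once per block,
	 * so the per-position search loop carries no strategy branches. */
	static demo_blockCompressor const table[2][2] = {
		{demo_fast, demo_lazy},
		{demo_fast_extDict, demo_lazy_extDict},
	};
	const char src[] = "example input";
	int const extDict = 0;    /* 1 when the window spans an old segment */
	unsigned const strat = 1; /* strategy enum value selects the column */
	table[extDict][strat](src, sizeof(src));
	return 0;
}

Resolving the function pointer once per block keeps the hot loops free of per-byte strategy checks, which is the same motivation behind the FORCE_INLINE "template emulation" used earlier to specialize the generic loops on mls.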
- -static size_t ZSTD_compressBlock_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -{ - ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit); - const BYTE *const base = zc->base; - const BYTE *const istart = (const BYTE *)src; - const U32 curr = (U32)(istart - base); - if (srcSize < MIN_CBLOCK_SIZE + ZSTD_blockHeaderSize + 1) - return 0; /* don't even attempt compression below a certain srcSize */ - ZSTD_resetSeqStore(&(zc->seqStore)); - if (curr > zc->nextToUpdate + 384) - zc->nextToUpdate = curr - MIN(192, (U32)(curr - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */ - blockCompressor(zc, src, srcSize); - return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize); -} - -/*! ZSTD_compress_generic() : -* Compress a chunk of data into one or multiple blocks. -* All blocks will be terminated, all input will be consumed. -* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. -* Frame is supposed already started (header already produced) -* @return : compressed size, or an error code -*/ -static size_t ZSTD_compress_generic(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastFrameChunk) -{ - size_t blockSize = cctx->blockSize; - size_t remaining = srcSize; - const BYTE *ip = (const BYTE *)src; - BYTE *const ostart = (BYTE *)dst; - BYTE *op = ostart; - U32 const maxDist = 1 << cctx->params.cParams.windowLog; - - if (cctx->params.fParams.checksumFlag && srcSize) - xxh64_update(&cctx->xxhState, src, srcSize); - - while (remaining) { - U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); - size_t cSize; - - if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE) - return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */ - if (remaining < blockSize) - blockSize = remaining; - - /* preemptive overflow correction */ - if (cctx->lowLimit > (3U << 29)) { - U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1; - U32 const curr = (U32)(ip - cctx->base); - U32 const newCurr = (curr & cycleMask) + (1 << cctx->params.cParams.windowLog); - U32 const correction = curr - newCurr; - ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30); - ZSTD_reduceIndex(cctx, correction); - cctx->base += correction; - cctx->dictBase += correction; - cctx->lowLimit -= correction; - cctx->dictLimit -= correction; - if (cctx->nextToUpdate < correction) - cctx->nextToUpdate = 0; - else - cctx->nextToUpdate -= correction; - } - - if ((U32)(ip + blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) { - /* enforce maxDist */ - U32 const newLowLimit = (U32)(ip + blockSize - cctx->base) - maxDist; - if (cctx->lowLimit < newLowLimit) - cctx->lowLimit = newLowLimit; - if (cctx->dictLimit < cctx->lowLimit) - cctx->dictLimit = cctx->lowLimit; - } - - cSize = ZSTD_compressBlock_internal(cctx, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, ip, blockSize); - if (ZSTD_isError(cSize)) - return cSize; - - if (cSize == 0) { /* block is not compressible */ - U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw) << 1) + (U32)(blockSize << 3); - if (blockSize + ZSTD_blockHeaderSize > dstCapacity) - return ERROR(dstSize_tooSmall); - ZSTD_writeLE32(op, cBlockHeader24); /* no pb, 4th byte will be overwritten */ - memcpy(op + ZSTD_blockHeaderSize, ip, blockSize); - cSize = ZSTD_blockHeaderSize + blockSize; 
- } else { - U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed) << 1) + (U32)(cSize << 3); - ZSTD_writeLE24(op, cBlockHeader24); - cSize += ZSTD_blockHeaderSize; - } - - remaining -= blockSize; - dstCapacity -= cSize; - ip += blockSize; - op += cSize; - } - - if (lastFrameChunk && (op > ostart)) - cctx->stage = ZSTDcs_ending; - return op - ostart; -} - -static size_t ZSTD_writeFrameHeader(void *dst, size_t dstCapacity, ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID) -{ - BYTE *const op = (BYTE *)dst; - U32 const dictIDSizeCode = (dictID > 0) + (dictID >= 256) + (dictID >= 65536); /* 0-3 */ - U32 const checksumFlag = params.fParams.checksumFlag > 0; - U32 const windowSize = 1U << params.cParams.windowLog; - U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize); - BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3); - U32 const fcsCode = - params.fParams.contentSizeFlag ? (pledgedSrcSize >= 256) + (pledgedSrcSize >= 65536 + 256) + (pledgedSrcSize >= 0xFFFFFFFFU) : 0; /* 0-3 */ - BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6)); - size_t pos; - - if (dstCapacity < ZSTD_frameHeaderSize_max) - return ERROR(dstSize_tooSmall); - - ZSTD_writeLE32(dst, ZSTD_MAGICNUMBER); - op[4] = frameHeaderDescriptionByte; - pos = 5; - if (!singleSegment) - op[pos++] = windowLogByte; - switch (dictIDSizeCode) { - default: /* impossible */ - case 0: break; - case 1: - op[pos] = (BYTE)(dictID); - pos++; - break; - case 2: - ZSTD_writeLE16(op + pos, (U16)dictID); - pos += 2; - break; - case 3: - ZSTD_writeLE32(op + pos, dictID); - pos += 4; - break; - } - switch (fcsCode) { - default: /* impossible */ - case 0: - if (singleSegment) - op[pos++] = (BYTE)(pledgedSrcSize); - break; - case 1: - ZSTD_writeLE16(op + pos, (U16)(pledgedSrcSize - 256)); - pos += 2; - break; - case 2: - ZSTD_writeLE32(op + pos, (U32)(pledgedSrcSize)); - pos += 4; - break; - case 3: - ZSTD_writeLE64(op + pos, (U64)(pledgedSrcSize)); - pos += 8; - break; - } - return pos; -} - -static size_t ZSTD_compressContinue_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 frame, U32 lastFrameChunk) -{ - const BYTE *const ip = (const BYTE *)src; - size_t fhSize = 0; - - if (cctx->stage == ZSTDcs_created) - return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */ - - if (frame && (cctx->stage == ZSTDcs_init)) { - fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID); - if (ZSTD_isError(fhSize)) - return fhSize; - dstCapacity -= fhSize; - dst = (char *)dst + fhSize; - cctx->stage = ZSTDcs_ongoing; - } - - /* Check if blocks follow each other */ - if (src != cctx->nextSrc) { - /* not contiguous */ - ptrdiff_t const delta = cctx->nextSrc - ip; - cctx->lowLimit = cctx->dictLimit; - cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base); - cctx->dictBase = cctx->base; - cctx->base -= delta; - cctx->nextToUpdate = cctx->dictLimit; - if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE) - cctx->lowLimit = cctx->dictLimit; /* too small extDict */ - } - - /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */ - if ((ip + srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) { - ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase; - U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? 
cctx->dictLimit : (U32)highInputIdx; - cctx->lowLimit = lowLimitMax; - } - - cctx->nextSrc = ip + srcSize; - - if (srcSize) { - size_t const cSize = frame ? ZSTD_compress_generic(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) - : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize); - if (ZSTD_isError(cSize)) - return cSize; - return cSize + fhSize; - } else - return fhSize; -} - -size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -{ - return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0); -} - -size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx) { return MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog); } - -size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -{ - size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx); - if (srcSize > blockSizeMax) - return ERROR(srcSize_wrong); - return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0); -} - -/*! ZSTD_loadDictionaryContent() : - * @return : 0, or an error code - */ -static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx *zc, const void *src, size_t srcSize) -{ - const BYTE *const ip = (const BYTE *)src; - const BYTE *const iend = ip + srcSize; - - /* input becomes curr prefix */ - zc->lowLimit = zc->dictLimit; - zc->dictLimit = (U32)(zc->nextSrc - zc->base); - zc->dictBase = zc->base; - zc->base += ip - zc->nextSrc; - zc->nextToUpdate = zc->dictLimit; - zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base); - - zc->nextSrc = iend; - if (srcSize <= HASH_READ_SIZE) - return 0; - - switch (zc->params.cParams.strategy) { - case ZSTD_fast: ZSTD_fillHashTable(zc, iend, zc->params.cParams.searchLength); break; - - case ZSTD_dfast: ZSTD_fillDoubleHashTable(zc, iend, zc->params.cParams.searchLength); break; - - case ZSTD_greedy: - case ZSTD_lazy: - case ZSTD_lazy2: - if (srcSize >= HASH_READ_SIZE) - ZSTD_insertAndFindFirstIndex(zc, iend - HASH_READ_SIZE, zc->params.cParams.searchLength); - break; - - case ZSTD_btlazy2: - case ZSTD_btopt: - case ZSTD_btopt2: - if (srcSize >= HASH_READ_SIZE) - ZSTD_updateTree(zc, iend - HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength); - break; - - default: - return ERROR(GENERIC); /* strategy doesn't exist; impossible */ - } - - zc->nextToUpdate = (U32)(iend - zc->base); - return 0; -} - -/* Dictionaries that assign zero probability to symbols that show up causes problems - when FSE encoding. Refuse dictionaries that assign zero probability to symbols - that we may encounter during compression. - NOTE: This behavior is not standard and could be improved in the future. */ -static size_t ZSTD_checkDictNCount(short *normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) -{ - U32 s; - if (dictMaxSymbolValue < maxSymbolValue) - return ERROR(dictionary_corrupted); - for (s = 0; s <= maxSymbolValue; ++s) { - if (normalizedCounter[s] == 0) - return ERROR(dictionary_corrupted); - } - return 0; -} - -/* Dictionary format : - * See : - * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format - */ -/*! 
ZSTD_loadZstdDictionary() : - * @return : 0, or an error code - * assumptions : magic number supposed already checked - * dictSize supposed > 8 - */ -static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize) -{ - const BYTE *dictPtr = (const BYTE *)dict; - const BYTE *const dictEnd = dictPtr + dictSize; - short offcodeNCount[MaxOff + 1]; - unsigned offcodeMaxValue = MaxOff; - - dictPtr += 4; /* skip magic number */ - cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 : ZSTD_readLE32(dictPtr); - dictPtr += 4; - - { - size_t const hufHeaderSize = HUF_readCTable_wksp(cctx->hufTable, 255, dictPtr, dictEnd - dictPtr, cctx->tmpCounters, sizeof(cctx->tmpCounters)); - if (HUF_isError(hufHeaderSize)) - return ERROR(dictionary_corrupted); - dictPtr += hufHeaderSize; - } - - { - unsigned offcodeLog; - size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr); - if (FSE_isError(offcodeHeaderSize)) - return ERROR(dictionary_corrupted); - if (offcodeLog > OffFSELog) - return ERROR(dictionary_corrupted); - /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */ - CHECK_E(FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)), - dictionary_corrupted); - dictPtr += offcodeHeaderSize; - } - - { - short matchlengthNCount[MaxML + 1]; - unsigned matchlengthMaxValue = MaxML, matchlengthLog; - size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr); - if (FSE_isError(matchlengthHeaderSize)) - return ERROR(dictionary_corrupted); - if (matchlengthLog > MLFSELog) - return ERROR(dictionary_corrupted); - /* Every match length code must have non-zero probability */ - CHECK_F(ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML)); - CHECK_E( - FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)), - dictionary_corrupted); - dictPtr += matchlengthHeaderSize; - } - - { - short litlengthNCount[MaxLL + 1]; - unsigned litlengthMaxValue = MaxLL, litlengthLog; - size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr); - if (FSE_isError(litlengthHeaderSize)) - return ERROR(dictionary_corrupted); - if (litlengthLog > LLFSELog) - return ERROR(dictionary_corrupted); - /* Every literal length code must have non-zero probability */ - CHECK_F(ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL)); - CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)), - dictionary_corrupted); - dictPtr += litlengthHeaderSize; - } - - if (dictPtr + 12 > dictEnd) - return ERROR(dictionary_corrupted); - cctx->rep[0] = ZSTD_readLE32(dictPtr + 0); - cctx->rep[1] = ZSTD_readLE32(dictPtr + 4); - cctx->rep[2] = ZSTD_readLE32(dictPtr + 8); - dictPtr += 12; - - { - size_t const dictContentSize = (size_t)(dictEnd - dictPtr); - U32 offcodeMax = MaxOff; - if (dictContentSize <= ((U32)-1) - 128 KB) { - U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ - offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */ - } - /* All offset values <= dictContentSize + 128 KB must be representable */ - 
CHECK_F(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff))); - /* All repCodes must be <= dictContentSize and != 0*/ - { - U32 u; - for (u = 0; u < 3; u++) { - if (cctx->rep[u] == 0) - return ERROR(dictionary_corrupted); - if (cctx->rep[u] > dictContentSize) - return ERROR(dictionary_corrupted); - } - } - - cctx->flagStaticTables = 1; - cctx->flagStaticHufTable = HUF_repeat_valid; - return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize); - } -} - -/** ZSTD_compress_insertDictionary() : -* @return : 0, or an error code */ -static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize) -{ - if ((dict == NULL) || (dictSize <= 8)) - return 0; - - /* dict as pure content */ - if ((ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict)) - return ZSTD_loadDictionaryContent(cctx, dict, dictSize); - - /* dict as zstd dictionary */ - return ZSTD_loadZstdDictionary(cctx, dict, dictSize); -} - -/*! ZSTD_compressBegin_internal() : -* @return : 0, or an error code */ -static size_t ZSTD_compressBegin_internal(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, U64 pledgedSrcSize) -{ - ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue; - CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp)); - return ZSTD_compress_insertDictionary(cctx, dict, dictSize); -} - -/*! ZSTD_compressBegin_advanced() : -* @return : 0, or an error code */ -size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize) -{ - /* compression parameters verification and optimization */ - CHECK_F(ZSTD_checkCParams(params.cParams)); - return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize); -} - -size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, int compressionLevel) -{ - ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize); - return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0); -} - -size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); } - -/*! ZSTD_writeEpilogue() : -* Ends a frame. 
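-* (Editor's note, not in the original : the epilogue consists of one final empty
-* raw block flagged "last", plus a 4-byte checksum - the low 32 bits of the
-* frame's XXH64 digest - when fParams.checksumFlag is set.)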
-* @return : nb of bytes written into dst (or an error code) */ -static size_t ZSTD_writeEpilogue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity) -{ - BYTE *const ostart = (BYTE *)dst; - BYTE *op = ostart; - size_t fhSize = 0; - - if (cctx->stage == ZSTDcs_created) - return ERROR(stage_wrong); /* init missing */ - - /* special case : empty frame */ - if (cctx->stage == ZSTDcs_init) { - fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0); - if (ZSTD_isError(fhSize)) - return fhSize; - dstCapacity -= fhSize; - op += fhSize; - cctx->stage = ZSTDcs_ongoing; - } - - if (cctx->stage != ZSTDcs_ending) { - /* write one last empty block, make it the "last" block */ - U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw) << 1) + 0; - if (dstCapacity < 4) - return ERROR(dstSize_tooSmall); - ZSTD_writeLE32(op, cBlockHeader24); - op += ZSTD_blockHeaderSize; - dstCapacity -= ZSTD_blockHeaderSize; - } - - if (cctx->params.fParams.checksumFlag) { - U32 const checksum = (U32)xxh64_digest(&cctx->xxhState); - if (dstCapacity < 4) - return ERROR(dstSize_tooSmall); - ZSTD_writeLE32(op, checksum); - op += 4; - } - - cctx->stage = ZSTDcs_created; /* return to "created but no init" status */ - return op - ostart; -} - -size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -{ - size_t endResult; - size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1); - if (ZSTD_isError(cSize)) - return cSize; - endResult = ZSTD_writeEpilogue(cctx, (char *)dst + cSize, dstCapacity - cSize); - if (ZSTD_isError(endResult)) - return endResult; - return cSize + endResult; -} - -static size_t ZSTD_compress_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, - ZSTD_parameters params) -{ - CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize)); - return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); -} - -size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, - ZSTD_parameters params) -{ - return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params); -} - -size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, ZSTD_parameters params) -{ - return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, NULL, 0, params); -} - -/* ===== Dictionary API ===== */ - -struct ZSTD_CDict_s { - void *dictBuffer; - const void *dictContent; - size_t dictContentSize; - ZSTD_CCtx *refContext; -}; /* typedef'd to ZSTD_CDict within "zstd.h" */ - -size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams) { return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CDict)); } - -static ZSTD_CDict *ZSTD_createCDict_advanced(const void *dictBuffer, size_t dictSize, unsigned byReference, ZSTD_parameters params, ZSTD_customMem customMem) -{ - if (!customMem.customAlloc || !customMem.customFree) - return NULL; - - { - ZSTD_CDict *const cdict = (ZSTD_CDict *)ZSTD_malloc(sizeof(ZSTD_CDict), customMem); - ZSTD_CCtx *const cctx = ZSTD_createCCtx_advanced(customMem); - - if (!cdict || !cctx) { - ZSTD_free(cdict, customMem); - ZSTD_freeCCtx(cctx); - return NULL; - } - - if ((byReference) || (!dictBuffer) || (!dictSize)) { - cdict->dictBuffer = NULL; - cdict->dictContent = dictBuffer; - } else { - void *const internalBuffer = ZSTD_malloc(dictSize, customMem); - if
(!internalBuffer) { - ZSTD_free(cctx, customMem); - ZSTD_free(cdict, customMem); - return NULL; - } - memcpy(internalBuffer, dictBuffer, dictSize); - cdict->dictBuffer = internalBuffer; - cdict->dictContent = internalBuffer; - } - - { - size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0); - if (ZSTD_isError(errorCode)) { - ZSTD_free(cdict->dictBuffer, customMem); - ZSTD_free(cdict, customMem); - ZSTD_freeCCtx(cctx); - return NULL; - } - } - - cdict->refContext = cctx; - cdict->dictContentSize = dictSize; - return cdict; - } -} - -ZSTD_CDict *ZSTD_initCDict(const void *dict, size_t dictSize, ZSTD_parameters params, void *workspace, size_t workspaceSize) -{ - ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); - return ZSTD_createCDict_advanced(dict, dictSize, 1, params, stackMem); -} - -size_t ZSTD_freeCDict(ZSTD_CDict *cdict) -{ - if (cdict == NULL) - return 0; /* support free on NULL */ - { - ZSTD_customMem const cMem = cdict->refContext->customMem; - ZSTD_freeCCtx(cdict->refContext); - ZSTD_free(cdict->dictBuffer, cMem); - ZSTD_free(cdict, cMem); - return 0; - } -} - -static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict *cdict) { return ZSTD_getParamsFromCCtx(cdict->refContext); } - -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize) -{ - if (cdict->dictContentSize) - CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize)) - else { - ZSTD_parameters params = cdict->refContext->params; - params.fParams.contentSizeFlag = (pledgedSrcSize > 0); - CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize)); - } - return 0; -} - -/*! ZSTD_compress_usingCDict() : -* Compression using a digested Dictionary. -* Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. 
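-* Illustrative usage (editor's sketch, not part of the original source ; assumes
-* cctx, cdict and the dst/src buffers were set up with the init functions above) :
-*     size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
-*     if (ZSTD_isError(cSize)) return cSize;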
-* Note that compression level is decided during dictionary creation */ -size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict) -{ - CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize)); - - if (cdict->refContext->params.fParams.contentSizeFlag == 1) { - cctx->params.fParams.contentSizeFlag = 1; - cctx->frameContentSize = srcSize; - } else { - cctx->params.fParams.contentSizeFlag = 0; - } - - return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); -} - -/* ****************************************************************** -* Streaming -********************************************************************/ - -typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage; - -struct ZSTD_CStream_s { - ZSTD_CCtx *cctx; - ZSTD_CDict *cdictLocal; - const ZSTD_CDict *cdict; - char *inBuff; - size_t inBuffSize; - size_t inToCompress; - size_t inBuffPos; - size_t inBuffTarget; - size_t blockSize; - char *outBuff; - size_t outBuffSize; - size_t outBuffContentSize; - size_t outBuffFlushedSize; - ZSTD_cStreamStage stage; - U32 checksum; - U32 frameEnded; - U64 pledgedSrcSize; - U64 inputProcessed; - ZSTD_parameters params; - ZSTD_customMem customMem; -}; /* typedef'd to ZSTD_CStream within "zstd.h" */ - -size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams) -{ - size_t const inBuffSize = (size_t)1 << cParams.windowLog; - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, inBuffSize); - size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1; - - return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize); -} - -ZSTD_CStream *ZSTD_createCStream_advanced(ZSTD_customMem customMem) -{ - ZSTD_CStream *zcs; - - if (!customMem.customAlloc || !customMem.customFree) - return NULL; - - zcs = (ZSTD_CStream *)ZSTD_malloc(sizeof(ZSTD_CStream), customMem); - if (zcs == NULL) - return NULL; - memset(zcs, 0, sizeof(ZSTD_CStream)); - memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem)); - zcs->cctx = ZSTD_createCCtx_advanced(customMem); - if (zcs->cctx == NULL) { - ZSTD_freeCStream(zcs); - return NULL; - } - return zcs; -} - -size_t ZSTD_freeCStream(ZSTD_CStream *zcs) -{ - if (zcs == NULL) - return 0; /* support free on NULL */ - { - ZSTD_customMem const cMem = zcs->customMem; - ZSTD_freeCCtx(zcs->cctx); - zcs->cctx = NULL; - ZSTD_freeCDict(zcs->cdictLocal); - zcs->cdictLocal = NULL; - ZSTD_free(zcs->inBuff, cMem); - zcs->inBuff = NULL; - ZSTD_free(zcs->outBuff, cMem); - zcs->outBuff = NULL; - ZSTD_free(zcs, cMem); - return 0; - } -} - -/*====== Initialization ======*/ - -size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; } -size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */; } - -static size_t ZSTD_resetCStream_internal(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize) -{ - if (zcs->inBuffSize == 0) - return ERROR(stage_wrong); /* zcs has not been init at least once => can't reset */ - - if (zcs->cdict) - CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize)) - else - CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize)); - - zcs->inToCompress = 0; - zcs->inBuffPos = 0; - zcs->inBuffTarget = zcs->blockSize; - zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; - zcs->stage = zcss_load; - zcs->frameEnded = 0; - zcs->pledgedSrcSize = 
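- /* editor's note (not in the original) : a pledge of 0 means "source size unknown" ;
-    a non-zero pledge is enforced later by ZSTD_endStream(), which fails with
-    srcSize_wrong if the total input processed differs from it */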
pledgedSrcSize; - zcs->inputProcessed = 0; - return 0; /* ready to go */ -} - -size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize) -{ - - zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0); - - return ZSTD_resetCStream_internal(zcs, pledgedSrcSize); -} - -static size_t ZSTD_initCStream_advanced(ZSTD_CStream *zcs, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize) -{ - /* allocate buffers */ - { - size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog; - if (zcs->inBuffSize < neededInBuffSize) { - zcs->inBuffSize = neededInBuffSize; - ZSTD_free(zcs->inBuff, zcs->customMem); - zcs->inBuff = (char *)ZSTD_malloc(neededInBuffSize, zcs->customMem); - if (zcs->inBuff == NULL) - return ERROR(memory_allocation); - } - zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize); - } - if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize) + 1) { - zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize) + 1; - ZSTD_free(zcs->outBuff, zcs->customMem); - zcs->outBuff = (char *)ZSTD_malloc(zcs->outBuffSize, zcs->customMem); - if (zcs->outBuff == NULL) - return ERROR(memory_allocation); - } - - if (dict && dictSize >= 8) { - ZSTD_freeCDict(zcs->cdictLocal); - zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem); - if (zcs->cdictLocal == NULL) - return ERROR(memory_allocation); - zcs->cdict = zcs->cdictLocal; - } else - zcs->cdict = NULL; - - zcs->checksum = params.fParams.checksumFlag > 0; - zcs->params = params; - - return ZSTD_resetCStream_internal(zcs, pledgedSrcSize); -} - -ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize) -{ - ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); - ZSTD_CStream *const zcs = ZSTD_createCStream_advanced(stackMem); - if (zcs) { - size_t const code = ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize); - if (ZSTD_isError(code)) { - return NULL; - } - } - return zcs; -} - -ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize) -{ - ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict); - ZSTD_CStream *const zcs = ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize); - if (zcs) { - zcs->cdict = cdict; - if (ZSTD_isError(ZSTD_resetCStream_internal(zcs, pledgedSrcSize))) { - return NULL; - } - } - return zcs; -} - -/*====== Compression ======*/ - -typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e; - -ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize) -{ - size_t const length = MIN(dstCapacity, srcSize); - memcpy(dst, src, length); - return length; -} - -static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t *dstCapacityPtr, const void *src, size_t *srcSizePtr, ZSTD_flush_e const flush) -{ - U32 someMoreWork = 1; - const char *const istart = (const char *)src; - const char *const iend = istart + *srcSizePtr; - const char *ip = istart; - char *const ostart = (char *)dst; - char *const oend = ostart + *dstCapacityPtr; - char *op = ostart; - - while (someMoreWork) { - switch (zcs->stage) { - case zcss_init: - return ERROR(init_missing); /* call ZBUFF_compressInit() first ! 
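-    (editor's note : ZBUFF_compressInit is the legacy name of this initializer ;
-    in the API of this file, set the stream up with ZSTD_initCStream() first)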
*/ - - case zcss_load: - /* complete inBuffer */ - { - size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; - size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend - ip); - zcs->inBuffPos += loaded; - ip += loaded; - if ((zcs->inBuffPos == zcs->inToCompress) || (!flush && (toLoad != loaded))) { - someMoreWork = 0; - break; /* not enough input to get a full block : stop there, wait for more */ - } - } - /* compress curr block (note : this stage cannot be stopped in the middle) */ - { - void *cDst; - size_t cSize; - size_t const iSize = zcs->inBuffPos - zcs->inToCompress; - size_t oSize = oend - op; - if (oSize >= ZSTD_compressBound(iSize)) - cDst = op; /* compress directly into output buffer (avoid flush stage) */ - else - cDst = zcs->outBuff, oSize = zcs->outBuffSize; - cSize = (flush == zsf_end) ? ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) - : ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize); - if (ZSTD_isError(cSize)) - return cSize; - if (flush == zsf_end) - zcs->frameEnded = 1; - /* prepare next block */ - zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize; - if (zcs->inBuffTarget > zcs->inBuffSize) - zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; /* note : inBuffSize >= blockSize */ - zcs->inToCompress = zcs->inBuffPos; - if (cDst == op) { - op += cSize; - break; - } /* no need to flush */ - zcs->outBuffContentSize = cSize; - zcs->outBuffFlushedSize = 0; - zcs->stage = zcss_flush; /* pass-through to flush stage */ - } - - case zcss_flush: { - size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; - size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); - op += flushed; - zcs->outBuffFlushedSize += flushed; - if (toFlush != flushed) { - someMoreWork = 0; - break; - } /* dst too small to store flushed data : stop there */ - zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; - zcs->stage = zcss_load; - break; - } - - case zcss_final: - someMoreWork = 0; /* do nothing */ - break; - - default: - return ERROR(GENERIC); /* impossible */ - } - } - - *srcSizePtr = ip - istart; - *dstCapacityPtr = op - ostart; - zcs->inputProcessed += *srcSizePtr; - if (zcs->frameEnded) - return 0; - { - size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos; - if (hintInSize == 0) - hintInSize = zcs->blockSize; - return hintInSize; - } -} - -size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, ZSTD_inBuffer *input) -{ - size_t sizeRead = input->size - input->pos; - size_t sizeWritten = output->size - output->pos; - size_t const result = - ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, (const char *)(input->src) + input->pos, &sizeRead, zsf_gather); - input->pos += sizeRead; - output->pos += sizeWritten; - return result; -} - -/*====== Finalize ======*/ - -/*! 
ZSTD_flushStream() : -* @return : amount of data remaining to flush */ -size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output) -{ - size_t srcSize = 0; - size_t sizeWritten = output->size - output->pos; - size_t const result = ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, &srcSize, - &srcSize, /* use a valid src address instead of NULL */ - zsf_flush); - output->pos += sizeWritten; - if (ZSTD_isError(result)) - return result; - return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */ -} - -size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output) -{ - BYTE *const ostart = (BYTE *)(output->dst) + output->pos; - BYTE *const oend = (BYTE *)(output->dst) + output->size; - BYTE *op = ostart; - - if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize)) - return ERROR(srcSize_wrong); /* pledgedSrcSize not respected */ - - if (zcs->stage != zcss_final) { - /* flush whatever remains */ - size_t srcSize = 0; - size_t sizeWritten = output->size - output->pos; - size_t const notEnded = - ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end); /* use a valid src address instead of NULL */ - size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; - op += sizeWritten; - if (remainingToFlush) { - output->pos += sizeWritten; - return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4); - } - /* create epilogue */ - zcs->stage = zcss_final; - zcs->outBuffContentSize = !notEnded ? 0 : ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL, - 0); /* write epilogue, including final empty block, into outBuff */ - } - - /* flush epilogue */ - { - size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; - size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); - op += flushed; - zcs->outBuffFlushedSize += flushed; - output->pos += op - ostart; - if (toFlush == flushed) - zcs->stage = zcss_init; /* end reached */ - return toFlush - flushed; - } -} - -/*-===== Pre-defined compression levels =====-*/ - -#define ZSTD_DEFAULT_CLEVEL 1 -#define ZSTD_MAX_CLEVEL 22 -int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } - -static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL + 1] = { - { - /* "default" */ - /* W, C, H, S, L, TL, strat */ - {18, 12, 12, 1, 7, 16, ZSTD_fast}, /* level 0 - never used */ - {19, 13, 14, 1, 7, 16, ZSTD_fast}, /* level 1 */ - {19, 15, 16, 1, 6, 16, ZSTD_fast}, /* level 2 */ - {20, 16, 17, 1, 5, 16, ZSTD_dfast}, /* level 3.*/ - {20, 18, 18, 1, 5, 16, ZSTD_dfast}, /* level 4.*/ - {20, 15, 18, 3, 5, 16, ZSTD_greedy}, /* level 5 */ - {21, 16, 19, 2, 5, 16, ZSTD_lazy}, /* level 6 */ - {21, 17, 20, 3, 5, 16, ZSTD_lazy}, /* level 7 */ - {21, 18, 20, 3, 5, 16, ZSTD_lazy2}, /* level 8 */ - {21, 20, 20, 3, 5, 16, ZSTD_lazy2}, /* level 9 */ - {21, 19, 21, 4, 5, 16, ZSTD_lazy2}, /* level 10 */ - {22, 20, 22, 4, 5, 16, ZSTD_lazy2}, /* level 11 */ - {22, 20, 22, 5, 5, 16, ZSTD_lazy2}, /* level 12 */ - {22, 21, 22, 5, 5, 16, ZSTD_lazy2}, /* level 13 */ - {22, 21, 22, 6, 5, 16, ZSTD_lazy2}, /* level 14 */ - {22, 21, 21, 5, 5, 16, ZSTD_btlazy2}, /* level 15 */ - {23, 22, 22, 5, 5, 16, ZSTD_btlazy2}, /* level 16 */ - {23, 21, 22, 4, 5, 24, ZSTD_btopt}, /* level 17 */ - {23, 23, 22, 6, 5, 32, ZSTD_btopt}, /* level 18 */ - {23, 23, 22, 6, 3, 48, ZSTD_btopt}, /* level 19 */ - {25, 25, 23, 7, 3, 64, ZSTD_btopt2}, /* level 20 */ 
- {26, 26, 23, 7, 3, 256, ZSTD_btopt2}, /* level 21 */ - {27, 27, 25, 9, 3, 512, ZSTD_btopt2}, /* level 22 */ - }, - { - /* for srcSize <= 256 KB */ - /* W, C, H, S, L, T, strat */ - {0, 0, 0, 0, 0, 0, ZSTD_fast}, /* level 0 - not used */ - {18, 13, 14, 1, 6, 8, ZSTD_fast}, /* level 1 */ - {18, 14, 13, 1, 5, 8, ZSTD_dfast}, /* level 2 */ - {18, 16, 15, 1, 5, 8, ZSTD_dfast}, /* level 3 */ - {18, 15, 17, 1, 5, 8, ZSTD_greedy}, /* level 4.*/ - {18, 16, 17, 4, 5, 8, ZSTD_greedy}, /* level 5.*/ - {18, 16, 17, 3, 5, 8, ZSTD_lazy}, /* level 6.*/ - {18, 17, 17, 4, 4, 8, ZSTD_lazy}, /* level 7 */ - {18, 17, 17, 4, 4, 8, ZSTD_lazy2}, /* level 8 */ - {18, 17, 17, 5, 4, 8, ZSTD_lazy2}, /* level 9 */ - {18, 17, 17, 6, 4, 8, ZSTD_lazy2}, /* level 10 */ - {18, 18, 17, 6, 4, 8, ZSTD_lazy2}, /* level 11.*/ - {18, 18, 17, 7, 4, 8, ZSTD_lazy2}, /* level 12.*/ - {18, 19, 17, 6, 4, 8, ZSTD_btlazy2}, /* level 13 */ - {18, 18, 18, 4, 4, 16, ZSTD_btopt}, /* level 14.*/ - {18, 18, 18, 4, 3, 16, ZSTD_btopt}, /* level 15.*/ - {18, 19, 18, 6, 3, 32, ZSTD_btopt}, /* level 16.*/ - {18, 19, 18, 8, 3, 64, ZSTD_btopt}, /* level 17.*/ - {18, 19, 18, 9, 3, 128, ZSTD_btopt}, /* level 18.*/ - {18, 19, 18, 10, 3, 256, ZSTD_btopt}, /* level 19.*/ - {18, 19, 18, 11, 3, 512, ZSTD_btopt2}, /* level 20.*/ - {18, 19, 18, 12, 3, 512, ZSTD_btopt2}, /* level 21.*/ - {18, 19, 18, 13, 3, 512, ZSTD_btopt2}, /* level 22.*/ - }, - { - /* for srcSize <= 128 KB */ - /* W, C, H, S, L, T, strat */ - {17, 12, 12, 1, 7, 8, ZSTD_fast}, /* level 0 - not used */ - {17, 12, 13, 1, 6, 8, ZSTD_fast}, /* level 1 */ - {17, 13, 16, 1, 5, 8, ZSTD_fast}, /* level 2 */ - {17, 16, 16, 2, 5, 8, ZSTD_dfast}, /* level 3 */ - {17, 13, 15, 3, 4, 8, ZSTD_greedy}, /* level 4 */ - {17, 15, 17, 4, 4, 8, ZSTD_greedy}, /* level 5 */ - {17, 16, 17, 3, 4, 8, ZSTD_lazy}, /* level 6 */ - {17, 15, 17, 4, 4, 8, ZSTD_lazy2}, /* level 7 */ - {17, 17, 17, 4, 4, 8, ZSTD_lazy2}, /* level 8 */ - {17, 17, 17, 5, 4, 8, ZSTD_lazy2}, /* level 9 */ - {17, 17, 17, 6, 4, 8, ZSTD_lazy2}, /* level 10 */ - {17, 17, 17, 7, 4, 8, ZSTD_lazy2}, /* level 11 */ - {17, 17, 17, 8, 4, 8, ZSTD_lazy2}, /* level 12 */ - {17, 18, 17, 6, 4, 8, ZSTD_btlazy2}, /* level 13.*/ - {17, 17, 17, 7, 3, 8, ZSTD_btopt}, /* level 14.*/ - {17, 17, 17, 7, 3, 16, ZSTD_btopt}, /* level 15.*/ - {17, 18, 17, 7, 3, 32, ZSTD_btopt}, /* level 16.*/ - {17, 18, 17, 7, 3, 64, ZSTD_btopt}, /* level 17.*/ - {17, 18, 17, 7, 3, 256, ZSTD_btopt}, /* level 18.*/ - {17, 18, 17, 8, 3, 256, ZSTD_btopt}, /* level 19.*/ - {17, 18, 17, 9, 3, 256, ZSTD_btopt2}, /* level 20.*/ - {17, 18, 17, 10, 3, 256, ZSTD_btopt2}, /* level 21.*/ - {17, 18, 17, 11, 3, 512, ZSTD_btopt2}, /* level 22.*/ - }, - { - /* for srcSize <= 16 KB */ - /* W, C, H, S, L, T, strat */ - {14, 12, 12, 1, 7, 6, ZSTD_fast}, /* level 0 - not used */ - {14, 14, 14, 1, 6, 6, ZSTD_fast}, /* level 1 */ - {14, 14, 14, 1, 4, 6, ZSTD_fast}, /* level 2 */ - {14, 14, 14, 1, 4, 6, ZSTD_dfast}, /* level 3.*/ - {14, 14, 14, 4, 4, 6, ZSTD_greedy}, /* level 4.*/ - {14, 14, 14, 3, 4, 6, ZSTD_lazy}, /* level 5.*/ - {14, 14, 14, 4, 4, 6, ZSTD_lazy2}, /* level 6 */ - {14, 14, 14, 5, 4, 6, ZSTD_lazy2}, /* level 7 */ - {14, 14, 14, 6, 4, 6, ZSTD_lazy2}, /* level 8.*/ - {14, 15, 14, 6, 4, 6, ZSTD_btlazy2}, /* level 9.*/ - {14, 15, 14, 3, 3, 6, ZSTD_btopt}, /* level 10.*/ - {14, 15, 14, 6, 3, 8, ZSTD_btopt}, /* level 11.*/ - {14, 15, 14, 6, 3, 16, ZSTD_btopt}, /* level 12.*/ - {14, 15, 14, 6, 3, 24, ZSTD_btopt}, /* level 13.*/ - {14, 15, 15, 6, 3, 48, ZSTD_btopt}, /* level 14.*/ - {14, 15, 15, 6, 
3, 64, ZSTD_btopt}, /* level 15.*/ - {14, 15, 15, 6, 3, 96, ZSTD_btopt}, /* level 16.*/ - {14, 15, 15, 6, 3, 128, ZSTD_btopt}, /* level 17.*/ - {14, 15, 15, 6, 3, 256, ZSTD_btopt}, /* level 18.*/ - {14, 15, 15, 7, 3, 256, ZSTD_btopt}, /* level 19.*/ - {14, 15, 15, 8, 3, 256, ZSTD_btopt2}, /* level 20.*/ - {14, 15, 15, 9, 3, 256, ZSTD_btopt2}, /* level 21.*/ - {14, 15, 15, 10, 3, 256, ZSTD_btopt2}, /* level 22.*/ - }, -}; - -/*! ZSTD_getCParams() : -* @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`. -* Size values are optional, provide 0 if not known or unused */ -ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize) -{ - ZSTD_compressionParameters cp; - size_t const addedSize = srcSize ? 0 : 500; - U64 const rSize = srcSize + dictSize ? srcSize + dictSize + addedSize : (U64)-1; - U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */ - if (compressionLevel <= 0) - compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default; no negative compressionLevel yet */ - if (compressionLevel > ZSTD_MAX_CLEVEL) - compressionLevel = ZSTD_MAX_CLEVEL; - cp = ZSTD_defaultCParameters[tableID][compressionLevel]; - if (ZSTD_32bits()) { /* auto-correction, for 32-bits mode */ - if (cp.windowLog > ZSTD_WINDOWLOG_MAX) - cp.windowLog = ZSTD_WINDOWLOG_MAX; - if (cp.chainLog > ZSTD_CHAINLOG_MAX) - cp.chainLog = ZSTD_CHAINLOG_MAX; - if (cp.hashLog > ZSTD_HASHLOG_MAX) - cp.hashLog = ZSTD_HASHLOG_MAX; - } - cp = ZSTD_adjustCParams(cp, srcSize, dictSize); - return cp; -} - -/*! ZSTD_getParams() : -* same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`). 
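-* Illustrative usage (editor's sketch, not part of the original source) :
-*     ZSTD_parameters const params = ZSTD_getParams(3, srcSize, 0);
-*     (compression level 3, srcSize known in advance, no dictionary)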
-* All fields of `ZSTD_frameParameters` are set to default (0) */ -ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize) -{ - ZSTD_parameters params; - ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize); - memset(&params, 0, sizeof(params)); - params.cParams = cParams; - return params; -} - -EXPORT_SYMBOL(ZSTD_maxCLevel); -EXPORT_SYMBOL(ZSTD_compressBound); - -EXPORT_SYMBOL(ZSTD_CCtxWorkspaceBound); -EXPORT_SYMBOL(ZSTD_initCCtx); -EXPORT_SYMBOL(ZSTD_compressCCtx); -EXPORT_SYMBOL(ZSTD_compress_usingDict); - -EXPORT_SYMBOL(ZSTD_CDictWorkspaceBound); -EXPORT_SYMBOL(ZSTD_initCDict); -EXPORT_SYMBOL(ZSTD_compress_usingCDict); - -EXPORT_SYMBOL(ZSTD_CStreamWorkspaceBound); -EXPORT_SYMBOL(ZSTD_initCStream); -EXPORT_SYMBOL(ZSTD_initCStream_usingCDict); -EXPORT_SYMBOL(ZSTD_resetCStream); -EXPORT_SYMBOL(ZSTD_compressStream); -EXPORT_SYMBOL(ZSTD_flushStream); -EXPORT_SYMBOL(ZSTD_endStream); -EXPORT_SYMBOL(ZSTD_CStreamInSize); -EXPORT_SYMBOL(ZSTD_CStreamOutSize); - -EXPORT_SYMBOL(ZSTD_getCParams); -EXPORT_SYMBOL(ZSTD_getParams); -EXPORT_SYMBOL(ZSTD_checkCParams); -EXPORT_SYMBOL(ZSTD_adjustCParams); - -EXPORT_SYMBOL(ZSTD_compressBegin); -EXPORT_SYMBOL(ZSTD_compressBegin_usingDict); -EXPORT_SYMBOL(ZSTD_compressBegin_advanced); -EXPORT_SYMBOL(ZSTD_copyCCtx); -EXPORT_SYMBOL(ZSTD_compressBegin_usingCDict); -EXPORT_SYMBOL(ZSTD_compressContinue); -EXPORT_SYMBOL(ZSTD_compressEnd); - -EXPORT_SYMBOL(ZSTD_getBlockSizeMax); -EXPORT_SYMBOL(ZSTD_compressBlock); - -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_DESCRIPTION("Zstd Compressor"); diff --git a/contrib/linux-kernel/lib/zstd/decompress.c b/contrib/linux-kernel/lib/zstd/decompress.c deleted file mode 100644 index 72df4828dc8..00000000000 --- a/contrib/linux-kernel/lib/zstd/decompress.c +++ /dev/null @@ -1,2526 +0,0 @@ -/** - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of https://github.com/facebook/zstd. - * - * This program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License version 2 as published by the - * Free Software Foundation. This program is dual-licensed; you may select - * either version 2 of the GNU General Public License ("GPL") or BSD license - * ("BSD"). - */ - -/* *************************************************************** -* Tuning parameters -*****************************************************************/ -/*! -* MAXWINDOWSIZE_DEFAULT : -* maximum window size accepted by DStream, by default. -* Frames requiring more memory will be rejected.
-*/ -#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT -#define ZSTD_MAXWINDOWSIZE_DEFAULT ((1 << ZSTD_WINDOWLOG_MAX) + 1) /* defined within zstd.h */ -#endif - -/*-******************************************************* -* Dependencies -*********************************************************/ -#include "fse.h" -#include "huf.h" -#include "mem.h" /* low level memory routines */ -#include "zstd_internal.h" -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/string.h> /* memcpy, memmove, memset */ - -#define ZSTD_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0) - -/*-************************************* -* Macros -***************************************/ -#define ZSTD_isError ERR_isError /* for inlining */ -#define FSE_isError ERR_isError -#define HUF_isError ERR_isError - -/*_******************************************************* -* Memory operations -**********************************************************/ -static void ZSTD_copy4(void *dst, const void *src) { memcpy(dst, src, 4); } - -/*-************************************************************* -* Context management -***************************************************************/ -typedef enum { - ZSTDds_getFrameHeaderSize, - ZSTDds_decodeFrameHeader, - ZSTDds_decodeBlockHeader, - ZSTDds_decompressBlock, - ZSTDds_decompressLastBlock, - ZSTDds_checkChecksum, - ZSTDds_decodeSkippableHeader, - ZSTDds_skipFrame -} ZSTD_dStage; - -typedef struct { - FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)]; - FSE_DTable OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)]; - FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)]; - HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */ - U64 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32 / 2]; - U32 rep[ZSTD_REP_NUM]; -} ZSTD_entropyTables_t; - -struct ZSTD_DCtx_s { - const FSE_DTable *LLTptr; - const FSE_DTable *MLTptr; - const FSE_DTable *OFTptr; - const HUF_DTable *HUFptr; - ZSTD_entropyTables_t entropy; - const void *previousDstEnd; /* detect continuity */ - const void *base; /* start of curr segment */ - const void *vBase; /* virtual start of previous segment if it was just before curr one */ - const void *dictEnd; /* end of previous segment */ - size_t expected; - ZSTD_frameParams fParams; - blockType_e bType; /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */ - ZSTD_dStage stage; - U32 litEntropy; - U32 fseEntropy; - struct xxh64_state xxhState; - size_t headerSize; - U32 dictID; - const BYTE *litPtr; - ZSTD_customMem customMem; - size_t litSize; - size_t rleSize; - BYTE litBuffer[ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH]; - BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; -}; /* typedef'd to ZSTD_DCtx within "zstd.h" */ - -size_t ZSTD_DCtxWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DCtx)); } - -size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx) -{ - dctx->expected = ZSTD_frameHeaderSize_prefix; - dctx->stage = ZSTDds_getFrameHeaderSize; - dctx->previousDstEnd = NULL; - dctx->base = NULL; - dctx->vBase = NULL; - dctx->dictEnd = NULL; - dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ - dctx->litEntropy = dctx->fseEntropy = 0; - dctx->dictID = 0; - ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue)); - memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ - dctx->LLTptr = dctx->entropy.LLTable; - dctx->MLTptr = dctx->entropy.MLTable; - dctx->OFTptr = dctx->entropy.OFTable; - dctx->HUFptr = dctx->entropy.hufTable; - return
0; -} - -ZSTD_DCtx *ZSTD_createDCtx_advanced(ZSTD_customMem customMem) -{ - ZSTD_DCtx *dctx; - - if (!customMem.customAlloc || !customMem.customFree) - return NULL; - - dctx = (ZSTD_DCtx *)ZSTD_malloc(sizeof(ZSTD_DCtx), customMem); - if (!dctx) - return NULL; - memcpy(&dctx->customMem, &customMem, sizeof(customMem)); - ZSTD_decompressBegin(dctx); - return dctx; -} - -ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize) -{ - ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); - return ZSTD_createDCtx_advanced(stackMem); -} - -size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx) -{ - if (dctx == NULL) - return 0; /* support free on NULL */ - ZSTD_free(dctx, dctx->customMem); - return 0; /* reserved as a potential error code in the future */ -} - -void ZSTD_copyDCtx(ZSTD_DCtx *dstDCtx, const ZSTD_DCtx *srcDCtx) -{ - size_t const workSpaceSize = (ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH) + ZSTD_frameHeaderSize_max; - memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize); /* no need to copy workspace */ -} - -static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict); - -/*-************************************************************* -* Decompression section -***************************************************************/ - -/*! ZSTD_isFrame() : - * Tells if the content of `buffer` starts with a valid Frame Identifier. - * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. - * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. - * Note 3 : Skippable Frame Identifiers are considered valid. */ -unsigned ZSTD_isFrame(const void *buffer, size_t size) -{ - if (size < 4) - return 0; - { - U32 const magic = ZSTD_readLE32(buffer); - if (magic == ZSTD_MAGICNUMBER) - return 1; - if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) - return 1; - } - return 0; -} - -/** ZSTD_frameHeaderSize() : -* srcSize must be >= ZSTD_frameHeaderSize_prefix. -* @return : size of the Frame Header */ -static size_t ZSTD_frameHeaderSize(const void *src, size_t srcSize) -{ - if (srcSize < ZSTD_frameHeaderSize_prefix) - return ERROR(srcSize_wrong); - { - BYTE const fhd = ((const BYTE *)src)[4]; - U32 const dictID = fhd & 3; - U32 const singleSegment = (fhd >> 5) & 1; - U32 const fcsId = fhd >> 6; - return ZSTD_frameHeaderSize_prefix + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + (singleSegment && !fcsId); - } -} - -/** ZSTD_getFrameParams() : -* decode Frame Header, or require larger `srcSize`. 
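-* Illustrative usage (editor's sketch, not part of the original source) :
-*     ZSTD_frameParams fParams;
-*     size_t const r = ZSTD_getFrameParams(&fParams, src, srcSize);
-*     (interpret r according to the return contract below)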
-* @return : 0, `fparamsPtr` is correctly filled, -* >0, `srcSize` is too small, result is expected `srcSize`, -* or an error code, which can be tested using ZSTD_isError() */ -size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src, size_t srcSize) -{ - const BYTE *ip = (const BYTE *)src; - - if (srcSize < ZSTD_frameHeaderSize_prefix) - return ZSTD_frameHeaderSize_prefix; - if (ZSTD_readLE32(src) != ZSTD_MAGICNUMBER) { - if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { - if (srcSize < ZSTD_skippableHeaderSize) - return ZSTD_skippableHeaderSize; /* magic number + skippable frame length */ - memset(fparamsPtr, 0, sizeof(*fparamsPtr)); - fparamsPtr->frameContentSize = ZSTD_readLE32((const char *)src + 4); - fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */ - return 0; - } - return ERROR(prefix_unknown); - } - - /* ensure there is enough `srcSize` to fully read/decode frame header */ - { - size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize); - if (srcSize < fhsize) - return fhsize; - } - - { - BYTE const fhdByte = ip[4]; - size_t pos = 5; - U32 const dictIDSizeCode = fhdByte & 3; - U32 const checksumFlag = (fhdByte >> 2) & 1; - U32 const singleSegment = (fhdByte >> 5) & 1; - U32 const fcsID = fhdByte >> 6; - U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; - U32 windowSize = 0; - U32 dictID = 0; - U64 frameContentSize = 0; - if ((fhdByte & 0x08) != 0) - return ERROR(frameParameter_unsupported); /* reserved bits, which must be zero */ - if (!singleSegment) { - BYTE const wlByte = ip[pos++]; - U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN; - if (windowLog > ZSTD_WINDOWLOG_MAX) - return ERROR(frameParameter_windowTooLarge); /* avoids issue with 1 << windowLog */ - windowSize = (1U << windowLog); - windowSize += (windowSize >> 3) * (wlByte & 7); - } - - switch (dictIDSizeCode) { - default: /* impossible */ - case 0: break; - case 1: - dictID = ip[pos]; - pos++; - break; - case 2: - dictID = ZSTD_readLE16(ip + pos); - pos += 2; - break; - case 3: - dictID = ZSTD_readLE32(ip + pos); - pos += 4; - break; - } - switch (fcsID) { - default: /* impossible */ - case 0: - if (singleSegment) - frameContentSize = ip[pos]; - break; - case 1: frameContentSize = ZSTD_readLE16(ip + pos) + 256; break; - case 2: frameContentSize = ZSTD_readLE32(ip + pos); break; - case 3: frameContentSize = ZSTD_readLE64(ip + pos); break; - } - if (!windowSize) - windowSize = (U32)frameContentSize; - if (windowSize > windowSizeMax) - return ERROR(frameParameter_windowTooLarge); - fparamsPtr->frameContentSize = frameContentSize; - fparamsPtr->windowSize = windowSize; - fparamsPtr->dictID = dictID; - fparamsPtr->checksumFlag = checksumFlag; - } - return 0; -} - -/** ZSTD_getFrameContentSize() : -* compatible with legacy mode -* @return : decompressed size of the single frame pointed to by `src` if known, otherwise -* - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined -* - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g.
invalid magic number, srcSize too small) */ -unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize) -{ - { - ZSTD_frameParams fParams; - if (ZSTD_getFrameParams(&fParams, src, srcSize) != 0) - return ZSTD_CONTENTSIZE_ERROR; - if (fParams.windowSize == 0) { - /* Either skippable or empty frame, size == 0 either way */ - return 0; - } else if (fParams.frameContentSize != 0) { - return fParams.frameContentSize; - } else { - return ZSTD_CONTENTSIZE_UNKNOWN; - } - } -} - -/** ZSTD_findDecompressedSize() : - * compatible with legacy mode - * `srcSize` must be the exact length of some number of ZSTD compressed and/or - * skippable frames - * @return : decompressed size of the frames contained */ -unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize) -{ - { - unsigned long long totalDstSize = 0; - while (srcSize >= ZSTD_frameHeaderSize_prefix) { - const U32 magicNumber = ZSTD_readLE32(src); - - if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { - size_t skippableSize; - if (srcSize < ZSTD_skippableHeaderSize) - return ERROR(srcSize_wrong); - skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize; - if (srcSize < skippableSize) { - return ZSTD_CONTENTSIZE_ERROR; - } - - src = (const BYTE *)src + skippableSize; - srcSize -= skippableSize; - continue; - } - - { - unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); - if (ret >= ZSTD_CONTENTSIZE_ERROR) - return ret; - - /* check for overflow */ - if (totalDstSize + ret < totalDstSize) - return ZSTD_CONTENTSIZE_ERROR; - totalDstSize += ret; - } - { - size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); - if (ZSTD_isError(frameSrcSize)) { - return ZSTD_CONTENTSIZE_ERROR; - } - - src = (const BYTE *)src + frameSrcSize; - srcSize -= frameSrcSize; - } - } - - if (srcSize) { - return ZSTD_CONTENTSIZE_ERROR; - } - - return totalDstSize; - } -} - -/** ZSTD_decodeFrameHeader() : -* `headerSize` must be the size provided by ZSTD_frameHeaderSize(). -* @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ -static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx *dctx, const void *src, size_t headerSize) -{ - size_t const result = ZSTD_getFrameParams(&(dctx->fParams), src, headerSize); - if (ZSTD_isError(result)) - return result; /* invalid header */ - if (result > 0) - return ERROR(srcSize_wrong); /* headerSize too small */ - if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) - return ERROR(dictionary_wrong); - if (dctx->fParams.checksumFlag) - xxh64_reset(&dctx->xxhState, 0); - return 0; -} - -typedef struct { - blockType_e blockType; - U32 lastBlock; - U32 origSize; -} blockProperties_t; - -/*! 
ZSTD_getcBlockSize() : -* Provides the size of compressed block from block header `src` */ -size_t ZSTD_getcBlockSize(const void *src, size_t srcSize, blockProperties_t *bpPtr) -{ - if (srcSize < ZSTD_blockHeaderSize) - return ERROR(srcSize_wrong); - { - U32 const cBlockHeader = ZSTD_readLE24(src); - U32 const cSize = cBlockHeader >> 3; - bpPtr->lastBlock = cBlockHeader & 1; - bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3); - bpPtr->origSize = cSize; /* only useful for RLE */ - if (bpPtr->blockType == bt_rle) - return 1; - if (bpPtr->blockType == bt_reserved) - return ERROR(corruption_detected); - return cSize; - } -} - -static size_t ZSTD_copyRawBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize) -{ - if (srcSize > dstCapacity) - return ERROR(dstSize_tooSmall); - memcpy(dst, src, srcSize); - return srcSize; -} - -static size_t ZSTD_setRleBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize, size_t regenSize) -{ - if (srcSize != 1) - return ERROR(srcSize_wrong); - if (regenSize > dstCapacity) - return ERROR(dstSize_tooSmall); - memset(dst, *(const BYTE *)src, regenSize); - return regenSize; -} - -/*! ZSTD_decodeLiteralsBlock() : - @return : nb of bytes read from src (< srcSize ) */ -size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx *dctx, const void *src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ -{ - if (srcSize < MIN_CBLOCK_SIZE) - return ERROR(corruption_detected); - - { - const BYTE *const istart = (const BYTE *)src; - symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); - - switch (litEncType) { - case set_repeat: - if (dctx->litEntropy == 0) - return ERROR(dictionary_corrupted); - /* fall-through */ - case set_compressed: - if (srcSize < 5) - return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */ - { - size_t lhSize, litSize, litCSize; - U32 singleStream = 0; - U32 const lhlCode = (istart[0] >> 2) & 3; - U32 const lhc = ZSTD_readLE32(istart); - switch (lhlCode) { - case 0: - case 1: - default: /* note : default is impossible, since lhlCode into [0..3] */ - /* 2 - 2 - 10 - 10 */ - singleStream = !lhlCode; - lhSize = 3; - litSize = (lhc >> 4) & 0x3FF; - litCSize = (lhc >> 14) & 0x3FF; - break; - case 2: - /* 2 - 2 - 14 - 14 */ - lhSize = 4; - litSize = (lhc >> 4) & 0x3FFF; - litCSize = lhc >> 18; - break; - case 3: - /* 2 - 2 - 18 - 18 */ - lhSize = 5; - litSize = (lhc >> 4) & 0x3FFFF; - litCSize = (lhc >> 22) + (istart[4] << 10); - break; - } - if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX) - return ERROR(corruption_detected); - if (litCSize + lhSize > srcSize) - return ERROR(corruption_detected); - - if (HUF_isError( - (litEncType == set_repeat) - ? (singleStream ? HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr) - : HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr)) - : (singleStream - ? 
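- /* editor's note (not in the original) : this expression selects one of four
-    decoders ; set_repeat reuses the previous table through dctx->HUFptr while
-    set_compressed builds a fresh one into entropy.hufTable, each in a
-    single-stream or four-stream variant */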
HUF_decompress1X2_DCtx_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize, - dctx->entropy.workspace, sizeof(dctx->entropy.workspace)) - : HUF_decompress4X_hufOnly_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize, - dctx->entropy.workspace, sizeof(dctx->entropy.workspace))))) - return ERROR(corruption_detected); - - dctx->litPtr = dctx->litBuffer; - dctx->litSize = litSize; - dctx->litEntropy = 1; - if (litEncType == set_compressed) - dctx->HUFptr = dctx->entropy.hufTable; - memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); - return litCSize + lhSize; - } - - case set_basic: { - size_t litSize, lhSize; - U32 const lhlCode = ((istart[0]) >> 2) & 3; - switch (lhlCode) { - case 0: - case 2: - default: /* note : default is impossible, since lhlCode into [0..3] */ - lhSize = 1; - litSize = istart[0] >> 3; - break; - case 1: - lhSize = 2; - litSize = ZSTD_readLE16(istart) >> 4; - break; - case 3: - lhSize = 3; - litSize = ZSTD_readLE24(istart) >> 4; - break; - } - - if (lhSize + litSize + WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ - if (litSize + lhSize > srcSize) - return ERROR(corruption_detected); - memcpy(dctx->litBuffer, istart + lhSize, litSize); - dctx->litPtr = dctx->litBuffer; - dctx->litSize = litSize; - memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); - return lhSize + litSize; - } - /* direct reference into compressed stream */ - dctx->litPtr = istart + lhSize; - dctx->litSize = litSize; - return lhSize + litSize; - } - - case set_rle: { - U32 const lhlCode = ((istart[0]) >> 2) & 3; - size_t litSize, lhSize; - switch (lhlCode) { - case 0: - case 2: - default: /* note : default is impossible, since lhlCode into [0..3] */ - lhSize = 1; - litSize = istart[0] >> 3; - break; - case 1: - lhSize = 2; - litSize = ZSTD_readLE16(istart) >> 4; - break; - case 3: - lhSize = 3; - litSize = ZSTD_readLE24(istart) >> 4; - if (srcSize < 4) - return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */ - break; - } - if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX) - return ERROR(corruption_detected); - memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH); - dctx->litPtr = dctx->litBuffer; - dctx->litSize = litSize; - return lhSize + 1; - } - default: - return ERROR(corruption_detected); /* impossible */ - } - } -} - -typedef union { - FSE_decode_t realData; - U32 alignedBy4; -} FSE_decode_t4; - -static const FSE_decode_t4 LL_defaultDTable[(1 << LL_DEFAULTNORMLOG) + 1] = { - {{LL_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */ - {{0, 0, 4}}, /* 0 : base, symbol, bits */ - {{16, 0, 4}}, - {{32, 1, 5}}, - {{0, 3, 5}}, - {{0, 4, 5}}, - {{0, 6, 5}}, - {{0, 7, 5}}, - {{0, 9, 5}}, - {{0, 10, 5}}, - {{0, 12, 5}}, - {{0, 14, 6}}, - {{0, 16, 5}}, - {{0, 18, 5}}, - {{0, 19, 5}}, - {{0, 21, 5}}, - {{0, 22, 5}}, - {{0, 24, 5}}, - {{32, 25, 5}}, - {{0, 26, 5}}, - {{0, 27, 6}}, - {{0, 29, 6}}, - {{0, 31, 6}}, - {{32, 0, 4}}, - {{0, 1, 4}}, - {{0, 2, 5}}, - {{32, 4, 5}}, - {{0, 5, 5}}, - {{32, 7, 5}}, - {{0, 8, 5}}, - {{32, 10, 5}}, - {{0, 11, 5}}, - {{0, 13, 6}}, - {{32, 16, 5}}, - {{0, 17, 5}}, - {{32, 19, 5}}, - {{0, 20, 5}}, - {{32, 22, 5}}, - {{0, 23, 5}}, - {{0, 25, 4}}, - {{16, 25, 4}}, - {{32, 26, 5}}, - {{0, 28, 6}}, - {{0, 30, 6}}, - {{48, 0, 4}}, - {{16, 1, 4}}, - {{32, 2, 5}}, - {{32, 3, 5}}, - {{32, 5, 5}}, - {{32, 6, 5}}, - {{32, 8, 5}}, - {{32, 9, 5}}, - {{32, 11, 5}}, - {{32, 12, 5}}, - {{0, 15, 6}}, 
- {{32, 17, 5}}, - {{32, 18, 5}}, - {{32, 20, 5}}, - {{32, 21, 5}}, - {{32, 23, 5}}, - {{32, 24, 5}}, - {{0, 35, 6}}, - {{0, 34, 6}}, - {{0, 33, 6}}, - {{0, 32, 6}}, -}; /* LL_defaultDTable */ - -static const FSE_decode_t4 ML_defaultDTable[(1 << ML_DEFAULTNORMLOG) + 1] = { - {{ML_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */ - {{0, 0, 6}}, /* 0 : base, symbol, bits */ - {{0, 1, 4}}, - {{32, 2, 5}}, - {{0, 3, 5}}, - {{0, 5, 5}}, - {{0, 6, 5}}, - {{0, 8, 5}}, - {{0, 10, 6}}, - {{0, 13, 6}}, - {{0, 16, 6}}, - {{0, 19, 6}}, - {{0, 22, 6}}, - {{0, 25, 6}}, - {{0, 28, 6}}, - {{0, 31, 6}}, - {{0, 33, 6}}, - {{0, 35, 6}}, - {{0, 37, 6}}, - {{0, 39, 6}}, - {{0, 41, 6}}, - {{0, 43, 6}}, - {{0, 45, 6}}, - {{16, 1, 4}}, - {{0, 2, 4}}, - {{32, 3, 5}}, - {{0, 4, 5}}, - {{32, 6, 5}}, - {{0, 7, 5}}, - {{0, 9, 6}}, - {{0, 12, 6}}, - {{0, 15, 6}}, - {{0, 18, 6}}, - {{0, 21, 6}}, - {{0, 24, 6}}, - {{0, 27, 6}}, - {{0, 30, 6}}, - {{0, 32, 6}}, - {{0, 34, 6}}, - {{0, 36, 6}}, - {{0, 38, 6}}, - {{0, 40, 6}}, - {{0, 42, 6}}, - {{0, 44, 6}}, - {{32, 1, 4}}, - {{48, 1, 4}}, - {{16, 2, 4}}, - {{32, 4, 5}}, - {{32, 5, 5}}, - {{32, 7, 5}}, - {{32, 8, 5}}, - {{0, 11, 6}}, - {{0, 14, 6}}, - {{0, 17, 6}}, - {{0, 20, 6}}, - {{0, 23, 6}}, - {{0, 26, 6}}, - {{0, 29, 6}}, - {{0, 52, 6}}, - {{0, 51, 6}}, - {{0, 50, 6}}, - {{0, 49, 6}}, - {{0, 48, 6}}, - {{0, 47, 6}}, - {{0, 46, 6}}, -}; /* ML_defaultDTable */ - -static const FSE_decode_t4 OF_defaultDTable[(1 << OF_DEFAULTNORMLOG) + 1] = { - {{OF_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */ - {{0, 0, 5}}, /* 0 : base, symbol, bits */ - {{0, 6, 4}}, - {{0, 9, 5}}, - {{0, 15, 5}}, - {{0, 21, 5}}, - {{0, 3, 5}}, - {{0, 7, 4}}, - {{0, 12, 5}}, - {{0, 18, 5}}, - {{0, 23, 5}}, - {{0, 5, 5}}, - {{0, 8, 4}}, - {{0, 14, 5}}, - {{0, 20, 5}}, - {{0, 2, 5}}, - {{16, 7, 4}}, - {{0, 11, 5}}, - {{0, 17, 5}}, - {{0, 22, 5}}, - {{0, 4, 5}}, - {{16, 8, 4}}, - {{0, 13, 5}}, - {{0, 19, 5}}, - {{0, 1, 5}}, - {{16, 6, 4}}, - {{0, 10, 5}}, - {{0, 16, 5}}, - {{0, 28, 5}}, - {{0, 27, 5}}, - {{0, 26, 5}}, - {{0, 25, 5}}, - {{0, 24, 5}}, -}; /* OF_defaultDTable */ - -/*! 
ZSTD_buildSeqTable() : - @return : nb bytes read from src, - or an error code if it fails, testable with ZSTD_isError() -*/ -static size_t ZSTD_buildSeqTable(FSE_DTable *DTableSpace, const FSE_DTable **DTablePtr, symbolEncodingType_e type, U32 max, U32 maxLog, const void *src, - size_t srcSize, const FSE_decode_t4 *defaultTable, U32 flagRepeatTable, void *workspace, size_t workspaceSize) -{ - const void *const tmpPtr = defaultTable; /* bypass strict aliasing */ - switch (type) { - case set_rle: - if (!srcSize) - return ERROR(srcSize_wrong); - if ((*(const BYTE *)src) > max) - return ERROR(corruption_detected); - FSE_buildDTable_rle(DTableSpace, *(const BYTE *)src); - *DTablePtr = DTableSpace; - return 1; - case set_basic: *DTablePtr = (const FSE_DTable *)tmpPtr; return 0; - case set_repeat: - if (!flagRepeatTable) - return ERROR(corruption_detected); - return 0; - default: /* impossible */ - case set_compressed: { - U32 tableLog; - S16 *norm = (S16 *)workspace; - size_t const spaceUsed32 = ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2; - - if ((spaceUsed32 << 2) > workspaceSize) - return ERROR(GENERIC); - workspace = (U32 *)workspace + spaceUsed32; - workspaceSize -= (spaceUsed32 << 2); - { - size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); - if (FSE_isError(headerSize)) - return ERROR(corruption_detected); - if (tableLog > maxLog) - return ERROR(corruption_detected); - FSE_buildDTable_wksp(DTableSpace, norm, max, tableLog, workspace, workspaceSize); - *DTablePtr = DTableSpace; - return headerSize; - } - } - } -} - -size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx *dctx, int *nbSeqPtr, const void *src, size_t srcSize) -{ - const BYTE *const istart = (const BYTE *const)src; - const BYTE *const iend = istart + srcSize; - const BYTE *ip = istart; - - /* check */ - if (srcSize < MIN_SEQUENCES_SIZE) - return ERROR(srcSize_wrong); - - /* SeqHead */ - { - int nbSeq = *ip++; - if (!nbSeq) { - *nbSeqPtr = 0; - return 1; - } - if (nbSeq > 0x7F) { - if (nbSeq == 0xFF) { - if (ip + 2 > iend) - return ERROR(srcSize_wrong); - nbSeq = ZSTD_readLE16(ip) + LONGNBSEQ, ip += 2; - } else { - if (ip >= iend) - return ERROR(srcSize_wrong); - nbSeq = ((nbSeq - 0x80) << 8) + *ip++; - } - } - *nbSeqPtr = nbSeq; - } - - /* FSE table descriptors */ - if (ip + 4 > iend) - return ERROR(srcSize_wrong); /* minimum possible size */ - { - symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); - symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); - symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); - ip++; - - /* Build DTables */ - { - size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, LLtype, MaxLL, LLFSELog, ip, iend - ip, - LL_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace)); - if (ZSTD_isError(llhSize)) - return ERROR(corruption_detected); - ip += llhSize; - } - { - size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, OFtype, MaxOff, OffFSELog, ip, iend - ip, - OF_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace)); - if (ZSTD_isError(ofhSize)) - return ERROR(corruption_detected); - ip += ofhSize; - } - { - size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, MLtype, MaxML, MLFSELog, ip, iend - ip, - ML_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace)); - if (ZSTD_isError(mlhSize)) - return ERROR(corruption_detected); - ip += 
mlhSize; - } - } - - return ip - istart; -} - -typedef struct { - size_t litLength; - size_t matchLength; - size_t offset; - const BYTE *match; -} seq_t; - -typedef struct { - BIT_DStream_t DStream; - FSE_DState_t stateLL; - FSE_DState_t stateOffb; - FSE_DState_t stateML; - size_t prevOffset[ZSTD_REP_NUM]; - const BYTE *base; - size_t pos; - uPtrDiff gotoDict; -} seqState_t; - -FORCE_NOINLINE -size_t ZSTD_execSequenceLast7(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base, - const BYTE *const vBase, const BYTE *const dictEnd) -{ - BYTE *const oLitEnd = op + sequence.litLength; - size_t const sequenceLength = sequence.litLength + sequence.matchLength; - BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ - BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH; - const BYTE *const iLitEnd = *litPtr + sequence.litLength; - const BYTE *match = oLitEnd - sequence.offset; - - /* check */ - if (oMatchEnd > oend) - return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ - if (iLitEnd > litLimit) - return ERROR(corruption_detected); /* over-read beyond lit buffer */ - if (oLitEnd <= oend_w) - return ERROR(GENERIC); /* Precondition */ - - /* copy literals */ - if (op < oend_w) { - ZSTD_wildcopy(op, *litPtr, oend_w - op); - *litPtr += oend_w - op; - op = oend_w; - } - while (op < oLitEnd) - *op++ = *(*litPtr)++; - - /* copy Match */ - if (sequence.offset > (size_t)(oLitEnd - base)) { - /* offset beyond prefix */ - if (sequence.offset > (size_t)(oLitEnd - vBase)) - return ERROR(corruption_detected); - match = dictEnd - (base - match); - if (match + sequence.matchLength <= dictEnd) { - memmove(oLitEnd, match, sequence.matchLength); - return sequenceLength; - } - /* span extDict & currPrefixSegment */ - { - size_t const length1 = dictEnd - match; - memmove(oLitEnd, match, length1); - op = oLitEnd + length1; - sequence.matchLength -= length1; - match = base; - } - } - while (op < oMatchEnd) - *op++ = *match++; - return sequenceLength; -} - -static seq_t ZSTD_decodeSequence(seqState_t *seqState) -{ - seq_t seq; - - U32 const llCode = FSE_peekSymbol(&seqState->stateLL); - U32 const mlCode = FSE_peekSymbol(&seqState->stateML); - U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */ - - U32 const llBits = LL_bits[llCode]; - U32 const mlBits = ML_bits[mlCode]; - U32 const ofBits = ofCode; - U32 const totalBits = llBits + mlBits + ofBits; - - static const U32 LL_base[MaxLL + 1] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, - 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000}; - - static const U32 ML_base[MaxML + 1] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, - 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41, - 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003}; - - static const U32 OF_base[MaxOff + 1] = {0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD, - 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, - 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD}; - - /* sequence */ - { - size_t offset; - if (!ofCode) - offset = 0; - else { - offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ - if (ZSTD_32bits()) - 
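- /* editor's worked example (not in the original) : ofCode 5 reads ofBits = 5
-    extra bits on top of OF_base[5] = 0x1D = 29, so it covers match offsets
-    29 through 60 ; OF_base[6] = 0x3D = 61 continues the range */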
BIT_reloadDStream(&seqState->DStream); - } - - if (ofCode <= 1) { - offset += (llCode == 0); - if (offset) { - size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; - temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ - if (offset != 1) - seqState->prevOffset[2] = seqState->prevOffset[1]; - seqState->prevOffset[1] = seqState->prevOffset[0]; - seqState->prevOffset[0] = offset = temp; - } else { - offset = seqState->prevOffset[0]; - } - } else { - seqState->prevOffset[2] = seqState->prevOffset[1]; - seqState->prevOffset[1] = seqState->prevOffset[0]; - seqState->prevOffset[0] = offset; - } - seq.offset = offset; - } - - seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */ - if (ZSTD_32bits() && (mlBits + llBits > 24)) - BIT_reloadDStream(&seqState->DStream); - - seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */ - if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog))) - BIT_reloadDStream(&seqState->DStream); - - /* ANS state update */ - FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ - FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ - if (ZSTD_32bits()) - BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ - FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ - - seq.match = NULL; - - return seq; -} - -FORCE_INLINE -size_t ZSTD_execSequence(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base, - const BYTE *const vBase, const BYTE *const dictEnd) -{ - BYTE *const oLitEnd = op + sequence.litLength; - size_t const sequenceLength = sequence.litLength + sequence.matchLength; - BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ - BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH; - const BYTE *const iLitEnd = *litPtr + sequence.litLength; - const BYTE *match = oLitEnd - sequence.offset; - - /* check */ - if (oMatchEnd > oend) - return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ - if (iLitEnd > litLimit) - return ERROR(corruption_detected); /* over-read beyond lit buffer */ - if (oLitEnd > oend_w) - return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd); - - /* copy Literals */ - ZSTD_copy8(op, *litPtr); - if (sequence.litLength > 8) - ZSTD_wildcopy(op + 8, (*litPtr) + 8, - sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ - op = oLitEnd; - *litPtr = iLitEnd; /* update for next sequence */ - - /* copy Match */ - if (sequence.offset > (size_t)(oLitEnd - base)) { - /* offset beyond prefix */ - if (sequence.offset > (size_t)(oLitEnd - vBase)) - return ERROR(corruption_detected); - match = dictEnd + (match - base); - if (match + sequence.matchLength <= dictEnd) { - memmove(oLitEnd, match, sequence.matchLength); - return sequenceLength; - } - /* span extDict & currPrefixSegment */ - { - size_t const length1 = dictEnd - match; - memmove(oLitEnd, match, length1); - op = oLitEnd + length1; - sequence.matchLength -= length1; - match = base; - if (op > oend_w || sequence.matchLength < MINMATCH) { - U32 i; - for (i = 0; i < sequence.matchLength; ++i) - op[i] = match[i]; - return sequenceLength; - } - } - } - /* Requirement: op <= oend_w && 
sequence.matchLength >= MINMATCH */ - - /* match within prefix */ - if (sequence.offset < 8) { - /* close range match, overlap */ - static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ - static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */ - int const sub2 = dec64table[sequence.offset]; - op[0] = match[0]; - op[1] = match[1]; - op[2] = match[2]; - op[3] = match[3]; - match += dec32table[sequence.offset]; - ZSTD_copy4(op + 4, match); - match -= sub2; - } else { - ZSTD_copy8(op, match); - } - op += 8; - match += 8; - - if (oMatchEnd > oend - (16 - MINMATCH)) { - if (op < oend_w) { - ZSTD_wildcopy(op, match, oend_w - op); - match += oend_w - op; - op = oend_w; - } - while (op < oMatchEnd) - *op++ = *match++; - } else { - ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */ - } - return sequenceLength; -} - -static size_t ZSTD_decompressSequences(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize) -{ - const BYTE *ip = (const BYTE *)seqStart; - const BYTE *const iend = ip + seqSize; - BYTE *const ostart = (BYTE * const)dst; - BYTE *const oend = ostart + maxDstSize; - BYTE *op = ostart; - const BYTE *litPtr = dctx->litPtr; - const BYTE *const litEnd = litPtr + dctx->litSize; - const BYTE *const base = (const BYTE *)(dctx->base); - const BYTE *const vBase = (const BYTE *)(dctx->vBase); - const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd); - int nbSeq; - - /* Build Decoding Tables */ - { - size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); - if (ZSTD_isError(seqHSize)) - return seqHSize; - ip += seqHSize; - } - - /* Regen sequences */ - if (nbSeq) { - seqState_t seqState; - dctx->fseEntropy = 1; - { - U32 i; - for (i = 0; i < ZSTD_REP_NUM; i++) - seqState.prevOffset[i] = dctx->entropy.rep[i]; - } - CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected); - FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); - FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); - FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); - - for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq;) { - nbSeq--; - { - seq_t const sequence = ZSTD_decodeSequence(&seqState); - size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd); - if (ZSTD_isError(oneSeqSize)) - return oneSeqSize; - op += oneSeqSize; - } - } - - /* check if reached exact end */ - if (nbSeq) - return ERROR(corruption_detected); - /* save reps for next block */ - { - U32 i; - for (i = 0; i < ZSTD_REP_NUM; i++) - dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); - } - } - - /* last literal segment */ - { - size_t const lastLLSize = litEnd - litPtr; - if (lastLLSize > (size_t)(oend - op)) - return ERROR(dstSize_tooSmall); - memcpy(op, litPtr, lastLLSize); - op += lastLLSize; - } - - return op - ostart; -} - -FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t *seqState, int const longOffsets) -{ - seq_t seq; - - U32 const llCode = FSE_peekSymbol(&seqState->stateLL); - U32 const mlCode = FSE_peekSymbol(&seqState->stateML); - U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */ - - U32 const llBits = LL_bits[llCode]; - U32 const mlBits = ML_bits[mlCode]; - U32 const ofBits = ofCode; - U32 const totalBits = llBits + mlBits + ofBits; - - static const U32 LL_base[MaxLL + 1] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15, 16, 18, - 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000}; - - static const U32 ML_base[MaxML + 1] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, - 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41, - 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003}; - - static const U32 OF_base[MaxOff + 1] = {0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD, - 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, - 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD}; - - /* sequence */ - { - size_t offset; - if (!ofCode) - offset = 0; - else { - if (longOffsets) { - int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN); - offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); - if (ZSTD_32bits() || extraBits) - BIT_reloadDStream(&seqState->DStream); - if (extraBits) - offset += BIT_readBitsFast(&seqState->DStream, extraBits); - } else { - offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ - if (ZSTD_32bits()) - BIT_reloadDStream(&seqState->DStream); - } - } - - if (ofCode <= 1) { - offset += (llCode == 0); - if (offset) { - size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; - temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ - if (offset != 1) - seqState->prevOffset[2] = seqState->prevOffset[1]; - seqState->prevOffset[1] = seqState->prevOffset[0]; - seqState->prevOffset[0] = offset = temp; - } else { - offset = seqState->prevOffset[0]; - } - } else { - seqState->prevOffset[2] = seqState->prevOffset[1]; - seqState->prevOffset[1] = seqState->prevOffset[0]; - seqState->prevOffset[0] = offset; - } - seq.offset = offset; - } - - seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */ - if (ZSTD_32bits() && (mlBits + llBits > 24)) - BIT_reloadDStream(&seqState->DStream); - - seq.litLength = LL_base[llCode] + ((llCode > 15) ? 
BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */ - if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog))) - BIT_reloadDStream(&seqState->DStream); - - { - size_t const pos = seqState->pos + seq.litLength; - seq.match = seqState->base + pos - seq.offset; /* single memory segment */ - if (seq.offset > pos) - seq.match += seqState->gotoDict; /* separate memory segment */ - seqState->pos = pos + seq.matchLength; - } - - /* ANS state update */ - FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ - FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ - if (ZSTD_32bits()) - BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ - FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ - - return seq; -} - -static seq_t ZSTD_decodeSequenceLong(seqState_t *seqState, unsigned const windowSize) -{ - if (ZSTD_highbit32(windowSize) > STREAM_ACCUMULATOR_MIN) { - return ZSTD_decodeSequenceLong_generic(seqState, 1); - } else { - return ZSTD_decodeSequenceLong_generic(seqState, 0); - } -} - -FORCE_INLINE -size_t ZSTD_execSequenceLong(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base, - const BYTE *const vBase, const BYTE *const dictEnd) -{ - BYTE *const oLitEnd = op + sequence.litLength; - size_t const sequenceLength = sequence.litLength + sequence.matchLength; - BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ - BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH; - const BYTE *const iLitEnd = *litPtr + sequence.litLength; - const BYTE *match = sequence.match; - - /* check */ - if (oMatchEnd > oend) - return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ - if (iLitEnd > litLimit) - return ERROR(corruption_detected); /* over-read beyond lit buffer */ - if (oLitEnd > oend_w) - return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd); - - /* copy Literals */ - ZSTD_copy8(op, *litPtr); - if (sequence.litLength > 8) - ZSTD_wildcopy(op + 8, (*litPtr) + 8, - sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ - op = oLitEnd; - *litPtr = iLitEnd; /* update for next sequence */ - - /* copy Match */ - if (sequence.offset > (size_t)(oLitEnd - base)) { - /* offset beyond prefix */ - if (sequence.offset > (size_t)(oLitEnd - vBase)) - return ERROR(corruption_detected); - if (match + sequence.matchLength <= dictEnd) { - memmove(oLitEnd, match, sequence.matchLength); - return sequenceLength; - } - /* span extDict & currPrefixSegment */ - { - size_t const length1 = dictEnd - match; - memmove(oLitEnd, match, length1); - op = oLitEnd + length1; - sequence.matchLength -= length1; - match = base; - if (op > oend_w || sequence.matchLength < MINMATCH) { - U32 i; - for (i = 0; i < sequence.matchLength; ++i) - op[i] = match[i]; - return sequenceLength; - } - } - } - /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */ - - /* match within prefix */ - if (sequence.offset < 8) { - /* close range match, overlap */ - static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ - static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */ - int const sub2 = dec64table[sequence.offset]; - op[0] = match[0]; - op[1] = match[1]; - op[2] = match[2]; - op[3] = match[3]; - match += dec32table[sequence.offset]; - ZSTD_copy4(op + 4, match); - 
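/* at this point the first 8 output bytes have been produced with overlap-safe steps ; the `match -= sub2` fixup below leaves (op - match) at least 8, and still a multiple of the offset, so the remaining copies can safely advance 8 bytes at a time (editor's annotation) */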
match -= sub2; - } else { - ZSTD_copy8(op, match); - } - op += 8; - match += 8; - - if (oMatchEnd > oend - (16 - MINMATCH)) { - if (op < oend_w) { - ZSTD_wildcopy(op, match, oend_w - op); - match += oend_w - op; - op = oend_w; - } - while (op < oMatchEnd) - *op++ = *match++; - } else { - ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */ - } - return sequenceLength; -} - -static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize) -{ - const BYTE *ip = (const BYTE *)seqStart; - const BYTE *const iend = ip + seqSize; - BYTE *const ostart = (BYTE * const)dst; - BYTE *const oend = ostart + maxDstSize; - BYTE *op = ostart; - const BYTE *litPtr = dctx->litPtr; - const BYTE *const litEnd = litPtr + dctx->litSize; - const BYTE *const base = (const BYTE *)(dctx->base); - const BYTE *const vBase = (const BYTE *)(dctx->vBase); - const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd); - unsigned const windowSize = dctx->fParams.windowSize; - int nbSeq; - - /* Build Decoding Tables */ - { - size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); - if (ZSTD_isError(seqHSize)) - return seqHSize; - ip += seqHSize; - } - - /* Regen sequences */ - if (nbSeq) { -#define STORED_SEQS 4 -#define STOSEQ_MASK (STORED_SEQS - 1) -#define ADVANCED_SEQS 4 - seq_t *sequences = (seq_t *)dctx->entropy.workspace; - int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS); - seqState_t seqState; - int seqNb; - ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.workspace) >= sizeof(seq_t) * STORED_SEQS); - dctx->fseEntropy = 1; - { - U32 i; - for (i = 0; i < ZSTD_REP_NUM; i++) - seqState.prevOffset[i] = dctx->entropy.rep[i]; - } - seqState.base = base; - seqState.pos = (size_t)(op - base); - seqState.gotoDict = (uPtrDiff)dictEnd - (uPtrDiff)base; /* cast to avoid undefined behaviour */ - CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected); - FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); - FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); - FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); - - /* prepare in advance */ - for (seqNb = 0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNb < seqAdvance; seqNb++) { - sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, windowSize); - } - if (seqNb < seqAdvance) - return ERROR(corruption_detected); - - /* decode and decompress */ - for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb < nbSeq; seqNb++) { - seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, windowSize); - size_t const oneSeqSize = - ZSTD_execSequenceLong(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd); - if (ZSTD_isError(oneSeqSize)) - return oneSeqSize; - ZSTD_PREFETCH(sequence.match); - sequences[seqNb & STOSEQ_MASK] = sequence; - op += oneSeqSize; - } - if (seqNb < nbSeq) - return ERROR(corruption_detected); - - /* finish queue */ - seqNb -= seqAdvance; - for (; seqNb < nbSeq; seqNb++) { - size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd); - if (ZSTD_isError(oneSeqSize)) - return oneSeqSize; - op += oneSeqSize; - } - - /* save reps for next block */ - { - U32 i; - for (i = 0; i < ZSTD_REP_NUM; i++) - dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); - } - } - - /* last literal segment */ - { - size_t const lastLLSize = 
litEnd - litPtr; - if (lastLLSize > (size_t)(oend - op)) - return ERROR(dstSize_tooSmall); - memcpy(op, litPtr, lastLLSize); - op += lastLLSize; - } - - return op - ostart; -} - -static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -{ /* blockType == blockCompressed */ - const BYTE *ip = (const BYTE *)src; - - if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX) - return ERROR(srcSize_wrong); - - /* Decode literals section */ - { - size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); - if (ZSTD_isError(litCSize)) - return litCSize; - ip += litCSize; - srcSize -= litCSize; - } - if (sizeof(size_t) > 4) /* do not enable prefetching on 32-bits x86, as it's performance detrimental */ - /* likely because of register pressure */ - /* if that's the correct cause, then 32-bits ARM should be affected differently */ - /* it would be good to test this on ARM real hardware, to see if prefetch version improves speed */ - if (dctx->fParams.windowSize > (1 << 23)) - return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize); - return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize); -} - -static void ZSTD_checkContinuity(ZSTD_DCtx *dctx, const void *dst) -{ - if (dst != dctx->previousDstEnd) { /* not contiguous */ - dctx->dictEnd = dctx->previousDstEnd; - dctx->vBase = (const char *)dst - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base)); - dctx->base = dst; - dctx->previousDstEnd = dst; - } -} - -size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -{ - size_t dSize; - ZSTD_checkContinuity(dctx, dst); - dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); - dctx->previousDstEnd = (char *)dst + dSize; - return dSize; -} - -/** ZSTD_insertBlock() : - insert `src` block into `dctx` history. Useful to track uncompressed blocks. 
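A minimal sketch of the raw-block path these two calls support (an editor's illustration, not part of the original comment ; `blockIsCompressed`, `blockSrc` and `blockSize` come from the caller's own block framing, and `op`/`oend` track the output buffer) :

    if (blockIsCompressed) {
        size_t const r = ZSTD_decompressBlock(dctx, op, (size_t)(oend - op), blockSrc, blockSize);
        if (ZSTD_isError(r))
            return r;
        op += r;
    } else {
        memcpy(op, blockSrc, blockSize);
        ZSTD_insertBlock(dctx, op, blockSize);
        op += blockSize;
    }

The memcpy stores the uncompressed block, and ZSTD_insertBlock() then registers it in the decoder's history so that matches in later blocks may reference it.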
*/ -size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart, size_t blockSize) -{ - ZSTD_checkContinuity(dctx, blockStart); - dctx->previousDstEnd = (const char *)blockStart + blockSize; - return blockSize; -} - -size_t ZSTD_generateNxBytes(void *dst, size_t dstCapacity, BYTE byte, size_t length) -{ - if (length > dstCapacity) - return ERROR(dstSize_tooSmall); - memset(dst, byte, length); - return length; -} - -/** ZSTD_findFrameCompressedSize() : - * compatible with legacy mode - * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame - * `srcSize` must be at least as large as the frame contained - * @return : the compressed size of the frame starting at `src` */ -size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize) -{ - if (srcSize >= ZSTD_skippableHeaderSize && (ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { - return ZSTD_skippableHeaderSize + ZSTD_readLE32((const BYTE *)src + 4); - } else { - const BYTE *ip = (const BYTE *)src; - const BYTE *const ipstart = ip; - size_t remainingSize = srcSize; - ZSTD_frameParams fParams; - - size_t const headerSize = ZSTD_frameHeaderSize(ip, remainingSize); - if (ZSTD_isError(headerSize)) - return headerSize; - - /* Frame Header */ - { - size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize); - if (ZSTD_isError(ret)) - return ret; - if (ret > 0) - return ERROR(srcSize_wrong); - } - - ip += headerSize; - remainingSize -= headerSize; - - /* Loop on each block */ - while (1) { - blockProperties_t blockProperties; - size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); - if (ZSTD_isError(cBlockSize)) - return cBlockSize; - - if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) - return ERROR(srcSize_wrong); - - ip += ZSTD_blockHeaderSize + cBlockSize; - remainingSize -= ZSTD_blockHeaderSize + cBlockSize; - - if (blockProperties.lastBlock) - break; - } - - if (fParams.checksumFlag) { /* Frame content checksum */ - if (remainingSize < 4) - return ERROR(srcSize_wrong); - ip += 4; - remainingSize -= 4; - } - - return ip - ipstart; - } -} - -/*! 
ZSTD_decompressFrame() : -* @dctx must be properly initialized */ -static size_t ZSTD_decompressFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void **srcPtr, size_t *srcSizePtr) -{ - const BYTE *ip = (const BYTE *)(*srcPtr); - BYTE *const ostart = (BYTE * const)dst; - BYTE *const oend = ostart + dstCapacity; - BYTE *op = ostart; - size_t remainingSize = *srcSizePtr; - - /* check */ - if (remainingSize < ZSTD_frameHeaderSize_min + ZSTD_blockHeaderSize) - return ERROR(srcSize_wrong); - - /* Frame Header */ - { - size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix); - if (ZSTD_isError(frameHeaderSize)) - return frameHeaderSize; - if (remainingSize < frameHeaderSize + ZSTD_blockHeaderSize) - return ERROR(srcSize_wrong); - CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize)); - ip += frameHeaderSize; - remainingSize -= frameHeaderSize; - } - - /* Loop on each block */ - while (1) { - size_t decodedSize; - blockProperties_t blockProperties; - size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); - if (ZSTD_isError(cBlockSize)) - return cBlockSize; - - ip += ZSTD_blockHeaderSize; - remainingSize -= ZSTD_blockHeaderSize; - if (cBlockSize > remainingSize) - return ERROR(srcSize_wrong); - - switch (blockProperties.blockType) { - case bt_compressed: decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend - op, ip, cBlockSize); break; - case bt_raw: decodedSize = ZSTD_copyRawBlock(op, oend - op, ip, cBlockSize); break; - case bt_rle: decodedSize = ZSTD_generateNxBytes(op, oend - op, *ip, blockProperties.origSize); break; - case bt_reserved: - default: return ERROR(corruption_detected); - } - - if (ZSTD_isError(decodedSize)) - return decodedSize; - if (dctx->fParams.checksumFlag) - xxh64_update(&dctx->xxhState, op, decodedSize); - op += decodedSize; - ip += cBlockSize; - remainingSize -= cBlockSize; - if (blockProperties.lastBlock) - break; - } - - if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */ - U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState); - U32 checkRead; - if (remainingSize < 4) - return ERROR(checksum_wrong); - checkRead = ZSTD_readLE32(ip); - if (checkRead != checkCalc) - return ERROR(checksum_wrong); - ip += 4; - remainingSize -= 4; - } - - /* Allow caller to get size read */ - *srcPtr = ip; - *srcSizePtr = remainingSize; - return op - ostart; -} - -static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict); -static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict); - -static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, - const ZSTD_DDict *ddict) -{ - void *const dststart = dst; - - if (ddict) { - if (dict) { - /* programmer error, these two cases should be mutually exclusive */ - return ERROR(GENERIC); - } - - dict = ZSTD_DDictDictContent(ddict); - dictSize = ZSTD_DDictDictSize(ddict); - } - - while (srcSize >= ZSTD_frameHeaderSize_prefix) { - U32 magicNumber; - - magicNumber = ZSTD_readLE32(src); - if (magicNumber != ZSTD_MAGICNUMBER) { - if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { - size_t skippableSize; - if (srcSize < ZSTD_skippableHeaderSize) - return ERROR(srcSize_wrong); - skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize; - if (srcSize < skippableSize) { - return ERROR(srcSize_wrong); - } - - src = (const BYTE *)src + skippableSize; - srcSize -= skippableSize; - continue; - } else { - return 
ERROR(prefix_unknown); - } - } - - if (ddict) { - /* we were called from ZSTD_decompress_usingDDict */ - ZSTD_refDDict(dctx, ddict); - } else { - /* this will initialize correctly with no dict if dict == NULL, so - * use this in all cases but ddict */ - CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize)); - } - ZSTD_checkContinuity(dctx, dst); - - { - const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize); - if (ZSTD_isError(res)) - return res; - /* don't need to bounds check this, ZSTD_decompressFrame will have - * already */ - dst = (BYTE *)dst + res; - dstCapacity -= res; - } - } - - if (srcSize) - return ERROR(srcSize_wrong); /* input not entirely consumed */ - - return (BYTE *)dst - (BYTE *)dststart; -} - -size_t ZSTD_decompress_usingDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize) -{ - return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL); -} - -size_t ZSTD_decompressDCtx(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -{ - return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0); -} - -/*-************************************** -* Advanced Streaming Decompression API -* Bufferless and synchronous -****************************************/ -size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx) { return dctx->expected; } - -ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx) -{ - switch (dctx->stage) { - default: /* should not happen */ - case ZSTDds_getFrameHeaderSize: - case ZSTDds_decodeFrameHeader: return ZSTDnit_frameHeader; - case ZSTDds_decodeBlockHeader: return ZSTDnit_blockHeader; - case ZSTDds_decompressBlock: return ZSTDnit_block; - case ZSTDds_decompressLastBlock: return ZSTDnit_lastBlock; - case ZSTDds_checkChecksum: return ZSTDnit_checksum; - case ZSTDds_decodeSkippableHeader: - case ZSTDds_skipFrame: return ZSTDnit_skippableFrame; - } -} - -int ZSTD_isSkipFrame(ZSTD_DCtx *dctx) { return dctx->stage == ZSTDds_skipFrame; } /* for zbuff */ - -/** ZSTD_decompressContinue() : -* @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity) -* or an error code, which can be tested using ZSTD_isError() */ -size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) -{ - /* Sanity check */ - if (srcSize != dctx->expected) - return ERROR(srcSize_wrong); - if (dstCapacity) - ZSTD_checkContinuity(dctx, dst); - - switch (dctx->stage) { - case ZSTDds_getFrameHeaderSize: - if (srcSize != ZSTD_frameHeaderSize_prefix) - return ERROR(srcSize_wrong); /* impossible */ - if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ - memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix); - dctx->expected = ZSTD_skippableHeaderSize - ZSTD_frameHeaderSize_prefix; /* magic number + skippable frame length */ - dctx->stage = ZSTDds_decodeSkippableHeader; - return 0; - } - dctx->headerSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix); - if (ZSTD_isError(dctx->headerSize)) - return dctx->headerSize; - memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix); - if (dctx->headerSize > ZSTD_frameHeaderSize_prefix) { - dctx->expected = dctx->headerSize - ZSTD_frameHeaderSize_prefix; - dctx->stage = ZSTDds_decodeFrameHeader; - return 0; - } - dctx->expected = 0; /* not necessary to copy more */ - - case ZSTDds_decodeFrameHeader: - memcpy(dctx->headerBuffer + 
ZSTD_frameHeaderSize_prefix, src, dctx->expected); - CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize)); - dctx->expected = ZSTD_blockHeaderSize; - dctx->stage = ZSTDds_decodeBlockHeader; - return 0; - - case ZSTDds_decodeBlockHeader: { - blockProperties_t bp; - size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); - if (ZSTD_isError(cBlockSize)) - return cBlockSize; - dctx->expected = cBlockSize; - dctx->bType = bp.blockType; - dctx->rleSize = bp.origSize; - if (cBlockSize) { - dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock; - return 0; - } - /* empty block */ - if (bp.lastBlock) { - if (dctx->fParams.checksumFlag) { - dctx->expected = 4; - dctx->stage = ZSTDds_checkChecksum; - } else { - dctx->expected = 0; /* end of frame */ - dctx->stage = ZSTDds_getFrameHeaderSize; - } - } else { - dctx->expected = 3; /* go directly to next header */ - dctx->stage = ZSTDds_decodeBlockHeader; - } - return 0; - } - case ZSTDds_decompressLastBlock: - case ZSTDds_decompressBlock: { - size_t rSize; - switch (dctx->bType) { - case bt_compressed: rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); break; - case bt_raw: rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); break; - case bt_rle: rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize); break; - case bt_reserved: /* should never happen */ - default: return ERROR(corruption_detected); - } - if (ZSTD_isError(rSize)) - return rSize; - if (dctx->fParams.checksumFlag) - xxh64_update(&dctx->xxhState, dst, rSize); - - if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */ - if (dctx->fParams.checksumFlag) { /* another round for frame checksum */ - dctx->expected = 4; - dctx->stage = ZSTDds_checkChecksum; - } else { - dctx->expected = 0; /* ends here */ - dctx->stage = ZSTDds_getFrameHeaderSize; - } - } else { - dctx->stage = ZSTDds_decodeBlockHeader; - dctx->expected = ZSTD_blockHeaderSize; - dctx->previousDstEnd = (char *)dst + rSize; - } - return rSize; - } - case ZSTDds_checkChecksum: { - U32 const h32 = (U32)xxh64_digest(&dctx->xxhState); - U32 const check32 = ZSTD_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */ - if (check32 != h32) - return ERROR(checksum_wrong); - dctx->expected = 0; - dctx->stage = ZSTDds_getFrameHeaderSize; - return 0; - } - case ZSTDds_decodeSkippableHeader: { - memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); - dctx->expected = ZSTD_readLE32(dctx->headerBuffer + 4); - dctx->stage = ZSTDds_skipFrame; - return 0; - } - case ZSTDds_skipFrame: { - dctx->expected = 0; - dctx->stage = ZSTDds_getFrameHeaderSize; - return 0; - } - default: - return ERROR(GENERIC); /* impossible */ - } -} - -static size_t ZSTD_refDictContent(ZSTD_DCtx *dctx, const void *dict, size_t dictSize) -{ - dctx->dictEnd = dctx->previousDstEnd; - dctx->vBase = (const char *)dict - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base)); - dctx->base = dict; - dctx->previousDstEnd = (const char *)dict + dictSize; - return 0; -} - -/* ZSTD_loadEntropy() : - * dict : must point at beginning of a valid zstd dictionary - * @return : size of entropy tables read */ -static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t *entropy, const void *const dict, size_t const dictSize) -{ - const BYTE *dictPtr = (const BYTE *)dict; - const BYTE *const dictEnd = dictPtr + dictSize; - - if (dictSize <= 8) - return ERROR(dictionary_corrupted); - dictPtr += 8; /* skip header = magic + 
dictID */ - - { - size_t const hSize = HUF_readDTableX4_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, entropy->workspace, sizeof(entropy->workspace)); - if (HUF_isError(hSize)) - return ERROR(dictionary_corrupted); - dictPtr += hSize; - } - - { - short offcodeNCount[MaxOff + 1]; - U32 offcodeMaxValue = MaxOff, offcodeLog; - size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr); - if (FSE_isError(offcodeHeaderSize)) - return ERROR(dictionary_corrupted); - if (offcodeLog > OffFSELog) - return ERROR(dictionary_corrupted); - CHECK_E(FSE_buildDTable_wksp(entropy->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted); - dictPtr += offcodeHeaderSize; - } - - { - short matchlengthNCount[MaxML + 1]; - unsigned matchlengthMaxValue = MaxML, matchlengthLog; - size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr); - if (FSE_isError(matchlengthHeaderSize)) - return ERROR(dictionary_corrupted); - if (matchlengthLog > MLFSELog) - return ERROR(dictionary_corrupted); - CHECK_E(FSE_buildDTable_wksp(entropy->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted); - dictPtr += matchlengthHeaderSize; - } - - { - short litlengthNCount[MaxLL + 1]; - unsigned litlengthMaxValue = MaxLL, litlengthLog; - size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr); - if (FSE_isError(litlengthHeaderSize)) - return ERROR(dictionary_corrupted); - if (litlengthLog > LLFSELog) - return ERROR(dictionary_corrupted); - CHECK_E(FSE_buildDTable_wksp(entropy->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted); - dictPtr += litlengthHeaderSize; - } - - if (dictPtr + 12 > dictEnd) - return ERROR(dictionary_corrupted); - { - int i; - size_t const dictContentSize = (size_t)(dictEnd - (dictPtr + 12)); - for (i = 0; i < 3; i++) { - U32 const rep = ZSTD_readLE32(dictPtr); - dictPtr += 4; - if (rep == 0 || rep >= dictContentSize) - return ERROR(dictionary_corrupted); - entropy->rep[i] = rep; - } - } - - return dictPtr - (const BYTE *)dict; -} - -static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx *dctx, const void *dict, size_t dictSize) -{ - if (dictSize < 8) - return ZSTD_refDictContent(dctx, dict, dictSize); - { - U32 const magic = ZSTD_readLE32(dict); - if (magic != ZSTD_DICT_MAGIC) { - return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */ - } - } - dctx->dictID = ZSTD_readLE32((const char *)dict + 4); - - /* load entropy tables */ - { - size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize); - if (ZSTD_isError(eSize)) - return ERROR(dictionary_corrupted); - dict = (const char *)dict + eSize; - dictSize -= eSize; - } - dctx->litEntropy = dctx->fseEntropy = 1; - - /* reference dictionary content */ - return ZSTD_refDictContent(dctx, dict, dictSize); -} - -size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict, size_t dictSize) -{ - CHECK_F(ZSTD_decompressBegin(dctx)); - if (dict && dictSize) - CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted); - return 0; -} - -/* ====== ZSTD_DDict ====== */ - -struct ZSTD_DDict_s { - void *dictBuffer; - const void *dictContent; - size_t dictSize; - 
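/* the entropy tables below are decoded once, when the DDict is created, then shared read-only by every DCtx that references this dictionary (see ZSTD_refDDict()) */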
ZSTD_entropyTables_t entropy; - U32 dictID; - U32 entropyPresent; - ZSTD_customMem cMem; -}; /* typedef'd to ZSTD_DDict within "zstd.h" */ - -size_t ZSTD_DDictWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DDict)); } - -static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict) { return ddict->dictContent; } - -static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict) { return ddict->dictSize; } - -static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict) -{ - ZSTD_decompressBegin(dstDCtx); /* init */ - if (ddict) { /* support refDDict on NULL */ - dstDCtx->dictID = ddict->dictID; - dstDCtx->base = ddict->dictContent; - dstDCtx->vBase = ddict->dictContent; - dstDCtx->dictEnd = (const BYTE *)ddict->dictContent + ddict->dictSize; - dstDCtx->previousDstEnd = dstDCtx->dictEnd; - if (ddict->entropyPresent) { - dstDCtx->litEntropy = 1; - dstDCtx->fseEntropy = 1; - dstDCtx->LLTptr = ddict->entropy.LLTable; - dstDCtx->MLTptr = ddict->entropy.MLTable; - dstDCtx->OFTptr = ddict->entropy.OFTable; - dstDCtx->HUFptr = ddict->entropy.hufTable; - dstDCtx->entropy.rep[0] = ddict->entropy.rep[0]; - dstDCtx->entropy.rep[1] = ddict->entropy.rep[1]; - dstDCtx->entropy.rep[2] = ddict->entropy.rep[2]; - } else { - dstDCtx->litEntropy = 0; - dstDCtx->fseEntropy = 0; - } - } -} - -static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict *ddict) -{ - ddict->dictID = 0; - ddict->entropyPresent = 0; - if (ddict->dictSize < 8) - return 0; - { - U32 const magic = ZSTD_readLE32(ddict->dictContent); - if (magic != ZSTD_DICT_MAGIC) - return 0; /* pure content mode */ - } - ddict->dictID = ZSTD_readLE32((const char *)ddict->dictContent + 4); - - /* load entropy tables */ - CHECK_E(ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted); - ddict->entropyPresent = 1; - return 0; -} - -static ZSTD_DDict *ZSTD_createDDict_advanced(const void *dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem) -{ - if (!customMem.customAlloc || !customMem.customFree) - return NULL; - - { - ZSTD_DDict *const ddict = (ZSTD_DDict *)ZSTD_malloc(sizeof(ZSTD_DDict), customMem); - if (!ddict) - return NULL; - ddict->cMem = customMem; - - if ((byReference) || (!dict) || (!dictSize)) { - ddict->dictBuffer = NULL; - ddict->dictContent = dict; - } else { - void *const internalBuffer = ZSTD_malloc(dictSize, customMem); - if (!internalBuffer) { - ZSTD_freeDDict(ddict); - return NULL; - } - memcpy(internalBuffer, dict, dictSize); - ddict->dictBuffer = internalBuffer; - ddict->dictContent = internalBuffer; - } - ddict->dictSize = dictSize; - ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ - /* parse dictionary content */ - { - size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict); - if (ZSTD_isError(errorCode)) { - ZSTD_freeDDict(ddict); - return NULL; - } - } - - return ddict; - } -} - -/*! ZSTD_initDDict() : -* Create a digested dictionary, to start decompression without startup delay. -* `dict` content is copied inside DDict. 
-* In this kernel port, however, ZSTD_createDDict_advanced is called with byReference = 1, so the content is referenced rather than copied : `dict` must therefore remain valid for the whole lifetime of the `ZSTD_DDict` */
-ZSTD_DDict *ZSTD_initDDict(const void *dict, size_t dictSize, void *workspace, size_t workspaceSize) -{ - ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); - return ZSTD_createDDict_advanced(dict, dictSize, 1, stackMem); -} -
-size_t ZSTD_freeDDict(ZSTD_DDict *ddict) -{ - if (ddict == NULL) - return 0; /* support free on NULL */ - { - ZSTD_customMem const cMem = ddict->cMem; - ZSTD_free(ddict->dictBuffer, cMem); - ZSTD_free(ddict, cMem); - return 0; - } -} -
-/*! ZSTD_getDictID_fromDict() : - * Provides the dictID stored within dictionary. - * If @return == 0, the dictionary is not conformant with Zstandard specification. - * It can still be loaded, but as a content-only dictionary. */ -unsigned ZSTD_getDictID_fromDict(const void *dict, size_t dictSize) -{ - if (dictSize < 8) - return 0; - if (ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC) - return 0; - return ZSTD_readLE32((const char *)dict + 4); -} -
-/*! ZSTD_getDictID_fromDDict() : - * Provides the dictID of the dictionary loaded into `ddict`. - * If @return == 0, the dictionary is not conformant with Zstandard specification, or empty. - * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ -unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict) -{ - if (ddict == NULL) - return 0; - return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize); -} -
-/*! ZSTD_getDictID_fromFrame() : - * Provides the dictID required to decompress the frame stored within `src`. - * If @return == 0, the dictID could not be decoded. - * This can happen for one of the following reasons : - * - The frame does not require a dictionary to be decoded (most common case). - * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information. - * Note : this use case also happens when using a non-conformant dictionary. - * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`). - * - This is not a Zstandard frame. - * To identify the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. */ -unsigned ZSTD_getDictID_fromFrame(const void *src, size_t srcSize) -{ - ZSTD_frameParams zfp = {0, 0, 0, 0}; - size_t const hError = ZSTD_getFrameParams(&zfp, src, srcSize); - if (ZSTD_isError(hError)) - return 0; - return zfp.dictID; -} -
-/*! ZSTD_decompress_usingDDict() : -* Decompression using a pre-digested dictionary. -* Use the dictionary without significant overhead. 
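A minimal usage sketch (an editor's illustration, not part of the original file ; error handling is elided, `dctx` is assumed already initialized, e.g. via ZSTD_initDCtx(), and `dictBuf`/`dictSize` plus the frame in `src`/`srcSize` are supplied by the caller) :

    size_t const wkspSize = ZSTD_DDictWorkspaceBound();
    void *const wksp = kmalloc(wkspSize, GFP_KERNEL);
    ZSTD_DDict *const ddict = ZSTD_initDDict(dictBuf, dictSize, wksp, wkspSize);
    size_t const dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ddict);

Any allocator that yields wkspSize bytes works in place of kmalloc. Check dSize with ZSTD_isError() before trusting the output ; the same ddict can then be reused, with no reload cost, for every further frame compressed with this dictionary.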
*/ -size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_DDict *ddict) -{ - /* pass content and size in case legacy frames are encountered */ - return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, NULL, 0, ddict); -} - -/*===================================== -* Streaming decompression -*====================================*/ - -typedef enum { zdss_init, zdss_loadHeader, zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage; - -/* *** Resource management *** */ -struct ZSTD_DStream_s { - ZSTD_DCtx *dctx; - ZSTD_DDict *ddictLocal; - const ZSTD_DDict *ddict; - ZSTD_frameParams fParams; - ZSTD_dStreamStage stage; - char *inBuff; - size_t inBuffSize; - size_t inPos; - size_t maxWindowSize; - char *outBuff; - size_t outBuffSize; - size_t outStart; - size_t outEnd; - size_t blockSize; - BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; /* tmp buffer to store frame header */ - size_t lhSize; - ZSTD_customMem customMem; - void *legacyContext; - U32 previousLegacyVersion; - U32 legacyVersion; - U32 hostageByte; -}; /* typedef'd to ZSTD_DStream within "zstd.h" */ - -size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize) -{ - size_t const blockSize = MIN(maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); - size_t const inBuffSize = blockSize; - size_t const outBuffSize = maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2; - return ZSTD_DCtxWorkspaceBound() + ZSTD_ALIGN(sizeof(ZSTD_DStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize); -} - -static ZSTD_DStream *ZSTD_createDStream_advanced(ZSTD_customMem customMem) -{ - ZSTD_DStream *zds; - - if (!customMem.customAlloc || !customMem.customFree) - return NULL; - - zds = (ZSTD_DStream *)ZSTD_malloc(sizeof(ZSTD_DStream), customMem); - if (zds == NULL) - return NULL; - memset(zds, 0, sizeof(ZSTD_DStream)); - memcpy(&zds->customMem, &customMem, sizeof(ZSTD_customMem)); - zds->dctx = ZSTD_createDCtx_advanced(customMem); - if (zds->dctx == NULL) { - ZSTD_freeDStream(zds); - return NULL; - } - zds->stage = zdss_init; - zds->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; - return zds; -} - -ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace, size_t workspaceSize) -{ - ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); - ZSTD_DStream *zds = ZSTD_createDStream_advanced(stackMem); - if (!zds) { - return NULL; - } - - zds->maxWindowSize = maxWindowSize; - zds->stage = zdss_loadHeader; - zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; - ZSTD_freeDDict(zds->ddictLocal); - zds->ddictLocal = NULL; - zds->ddict = zds->ddictLocal; - zds->legacyVersion = 0; - zds->hostageByte = 0; - - { - size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); - size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2; - - zds->inBuff = (char *)ZSTD_malloc(blockSize, zds->customMem); - zds->inBuffSize = blockSize; - zds->outBuff = (char *)ZSTD_malloc(neededOutSize, zds->customMem); - zds->outBuffSize = neededOutSize; - if (zds->inBuff == NULL || zds->outBuff == NULL) { - ZSTD_freeDStream(zds); - return NULL; - } - } - return zds; -} - -ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize, const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize) -{ - ZSTD_DStream *zds = ZSTD_initDStream(maxWindowSize, workspace, workspaceSize); - if (zds) { - zds->ddict = ddict; - } - return zds; -} - -size_t ZSTD_freeDStream(ZSTD_DStream *zds) -{ - if (zds == NULL) - return 0; /* support free on null */ 
- { - ZSTD_customMem const cMem = zds->customMem; - ZSTD_freeDCtx(zds->dctx); - zds->dctx = NULL; - ZSTD_freeDDict(zds->ddictLocal); - zds->ddictLocal = NULL; - ZSTD_free(zds->inBuff, cMem); - zds->inBuff = NULL; - ZSTD_free(zds->outBuff, cMem); - zds->outBuff = NULL; - ZSTD_free(zds, cMem); - return 0; - } -} - -/* *** Initialization *** */ - -size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX + ZSTD_blockHeaderSize; } -size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; } - -size_t ZSTD_resetDStream(ZSTD_DStream *zds) -{ - zds->stage = zdss_loadHeader; - zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; - zds->legacyVersion = 0; - zds->hostageByte = 0; - return ZSTD_frameHeaderSize_prefix; -} - -/* ***** Decompression ***** */ - -ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize) -{ - size_t const length = MIN(dstCapacity, srcSize); - memcpy(dst, src, length); - return length; -} - -size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inBuffer *input) -{ - const char *const istart = (const char *)(input->src) + input->pos; - const char *const iend = (const char *)(input->src) + input->size; - const char *ip = istart; - char *const ostart = (char *)(output->dst) + output->pos; - char *const oend = (char *)(output->dst) + output->size; - char *op = ostart; - U32 someMoreWork = 1; - - while (someMoreWork) { - switch (zds->stage) { - case zdss_init: - ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */ - /* fall-through */ - - case zdss_loadHeader: { - size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize); - if (ZSTD_isError(hSize)) - return hSize; - if (hSize != 0) { /* need more input */ - size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */ - if (toLoad > (size_t)(iend - ip)) { /* not enough input to load full header */ - memcpy(zds->headerBuffer + zds->lhSize, ip, iend - ip); - zds->lhSize += iend - ip; - input->pos = input->size; - return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) + - ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ - } - memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); - zds->lhSize = hSize; - ip += toLoad; - break; - } - - /* check for single-pass mode opportunity */ - if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */ - && (U64)(size_t)(oend - op) >= zds->fParams.frameContentSize) { - size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend - istart); - if (cSize <= (size_t)(iend - istart)) { - size_t const decompressedSize = ZSTD_decompress_usingDDict(zds->dctx, op, oend - op, istart, cSize, zds->ddict); - if (ZSTD_isError(decompressedSize)) - return decompressedSize; - ip = istart + cSize; - op += decompressedSize; - zds->dctx->expected = 0; - zds->stage = zdss_init; - someMoreWork = 0; - break; - } - } - - /* Consume header */ - ZSTD_refDDict(zds->dctx, zds->ddict); - { - size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */ - CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size)); - { - size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); - CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer + h1Size, h2Size)); - } - } - - zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN); - if (zds->fParams.windowSize > zds->maxWindowSize) - return 
ERROR(frameParameter_windowTooLarge); - - /* Buffers are preallocated, but double check */ - { - size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); - size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2; - if (zds->inBuffSize < blockSize) { - return ERROR(GENERIC); - } - if (zds->outBuffSize < neededOutSize) { - return ERROR(GENERIC); - } - zds->blockSize = blockSize; - } - zds->stage = zdss_read; - } - /* pass-through */ - - case zdss_read: { - size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); - if (neededInSize == 0) { /* end of frame */ - zds->stage = zdss_init; - someMoreWork = 0; - break; - } - if ((size_t)(iend - ip) >= neededInSize) { /* decode directly from src */ - const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx); - size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart, - (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart), ip, neededInSize); - if (ZSTD_isError(decodedSize)) - return decodedSize; - ip += neededInSize; - if (!decodedSize && !isSkipFrame) - break; /* this was just a header */ - zds->outEnd = zds->outStart + decodedSize; - zds->stage = zdss_flush; - break; - } - if (ip == iend) { - someMoreWork = 0; - break; - } /* no more input */ - zds->stage = zdss_load; - /* pass-through */ - } - - case zdss_load: { - size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); - size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */ - size_t loadedSize; - if (toLoad > zds->inBuffSize - zds->inPos) - return ERROR(corruption_detected); /* should never happen */ - loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend - ip); - ip += loadedSize; - zds->inPos += loadedSize; - if (loadedSize < toLoad) { - someMoreWork = 0; - break; - } /* not enough input, wait for more */ - - /* decode loaded input */ - { - const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx); - size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart, - zds->inBuff, neededInSize); - if (ZSTD_isError(decodedSize)) - return decodedSize; - zds->inPos = 0; /* input is consumed */ - if (!decodedSize && !isSkipFrame) { - zds->stage = zdss_read; - break; - } /* this was just a header */ - zds->outEnd = zds->outStart + decodedSize; - zds->stage = zdss_flush; - /* pass-through */ - } - } - - case zdss_flush: { - size_t const toFlushSize = zds->outEnd - zds->outStart; - size_t const flushedSize = ZSTD_limitCopy(op, oend - op, zds->outBuff + zds->outStart, toFlushSize); - op += flushedSize; - zds->outStart += flushedSize; - if (flushedSize == toFlushSize) { /* flush completed */ - zds->stage = zdss_read; - if (zds->outStart + zds->blockSize > zds->outBuffSize) - zds->outStart = zds->outEnd = 0; - break; - } - /* cannot complete flush */ - someMoreWork = 0; - break; - } - default: - return ERROR(GENERIC); /* impossible */ - } - } - - /* result */ - input->pos += (size_t)(ip - istart); - output->pos += (size_t)(op - ostart); - { - size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx); - if (!nextSrcSizeHint) { /* frame fully decoded */ - if (zds->outEnd == zds->outStart) { /* output fully flushed */ - if (zds->hostageByte) { - if (input->pos >= input->size) { - zds->stage = zdss_read; - return 1; - } /* can't release hostage (not present) */ - input->pos++; /* release hostage */ - } - return 0; - } - if (!zds->hostageByte) { /* output not fully flushed; keep 
last byte as hostage; will be released when all output is flushed */ - input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */ - zds->hostageByte = 1; - } - return 1; - } - nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds->dctx) == ZSTDnit_block); /* preload header of next block */ - if (zds->inPos > nextSrcSizeHint) - return ERROR(GENERIC); /* should never happen */ - nextSrcSizeHint -= zds->inPos; /* already loaded*/ - return nextSrcSizeHint; - } -} - -EXPORT_SYMBOL(ZSTD_DCtxWorkspaceBound); -EXPORT_SYMBOL(ZSTD_initDCtx); -EXPORT_SYMBOL(ZSTD_decompressDCtx); -EXPORT_SYMBOL(ZSTD_decompress_usingDict); - -EXPORT_SYMBOL(ZSTD_DDictWorkspaceBound); -EXPORT_SYMBOL(ZSTD_initDDict); -EXPORT_SYMBOL(ZSTD_decompress_usingDDict); - -EXPORT_SYMBOL(ZSTD_DStreamWorkspaceBound); -EXPORT_SYMBOL(ZSTD_initDStream); -EXPORT_SYMBOL(ZSTD_initDStream_usingDDict); -EXPORT_SYMBOL(ZSTD_resetDStream); -EXPORT_SYMBOL(ZSTD_decompressStream); -EXPORT_SYMBOL(ZSTD_DStreamInSize); -EXPORT_SYMBOL(ZSTD_DStreamOutSize); - -EXPORT_SYMBOL(ZSTD_findFrameCompressedSize); -EXPORT_SYMBOL(ZSTD_getFrameContentSize); -EXPORT_SYMBOL(ZSTD_findDecompressedSize); - -EXPORT_SYMBOL(ZSTD_isFrame); -EXPORT_SYMBOL(ZSTD_getDictID_fromDict); -EXPORT_SYMBOL(ZSTD_getDictID_fromDDict); -EXPORT_SYMBOL(ZSTD_getDictID_fromFrame); - -EXPORT_SYMBOL(ZSTD_getFrameParams); -EXPORT_SYMBOL(ZSTD_decompressBegin); -EXPORT_SYMBOL(ZSTD_decompressBegin_usingDict); -EXPORT_SYMBOL(ZSTD_copyDCtx); -EXPORT_SYMBOL(ZSTD_nextSrcSizeToDecompress); -EXPORT_SYMBOL(ZSTD_decompressContinue); -EXPORT_SYMBOL(ZSTD_nextInputType); - -EXPORT_SYMBOL(ZSTD_decompressBlock); -EXPORT_SYMBOL(ZSTD_insertBlock); - -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_DESCRIPTION("Zstd Decompressor"); diff --git a/contrib/linux-kernel/lib/zstd/entropy_common.c b/contrib/linux-kernel/lib/zstd/entropy_common.c deleted file mode 100644 index 2b0a643c32c..00000000000 --- a/contrib/linux-kernel/lib/zstd/entropy_common.c +++ /dev/null @@ -1,243 +0,0 @@ -/* - * Common functions of New Generation Entropy library - * Copyright (C) 2016, Yann Collet. - * - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - * This program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License version 2 as published by the - * Free Software Foundation. This program is dual-licensed; you may select - * either version 2 of the GNU General Public License ("GPL") or BSD license - * ("BSD"). - * - * You can contact the author at : - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - */ - -/* ************************************* -* Dependencies -***************************************/ -#include "error_private.h" /* ERR_*, ERROR */ -#include "fse.h" -#include "huf.h" -#include "mem.h" - -/*=== Version ===*/ -unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; } - -/*=== Error Management ===*/ -unsigned FSE_isError(size_t code) { return ERR_isError(code); } - -unsigned HUF_isError(size_t code) { return ERR_isError(code); } - -/*-************************************************************** -* FSE NCount encoding-decoding -****************************************************************/ -size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSVPtr, unsigned *tableLogPtr, const void *headerBuffer, size_t hbSize) -{ - const BYTE *const istart = (const BYTE *)headerBuffer; - const BYTE *const iend = istart + hbSize; - const BYTE *ip = istart; - int nbBits; - int remaining; - int threshold; - U32 bitStream; - int bitCount; - unsigned charnum = 0; - int previous0 = 0; - - if (hbSize < 4) - return ERROR(srcSize_wrong); - bitStream = ZSTD_readLE32(ip); - nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */ - if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) - return ERROR(tableLog_tooLarge); - bitStream >>= 4; - bitCount = 4; - *tableLogPtr = nbBits; - remaining = (1 << nbBits) + 1; - threshold = 1 << nbBits; - nbBits++; - - while ((remaining > 1) & (charnum <= *maxSVPtr)) { - if (previous0) { - unsigned n0 = charnum; - while ((bitStream & 0xFFFF) == 0xFFFF) { - n0 += 24; - if (ip < iend - 5) { - ip += 2; - bitStream = ZSTD_readLE32(ip) >> bitCount; - } else { - bitStream >>= 16; - bitCount += 16; - } - } - while ((bitStream & 3) == 3) { - n0 += 3; - bitStream >>= 2; - bitCount += 2; - } - n0 += bitStream & 3; - bitCount += 2; - if (n0 > *maxSVPtr) - return ERROR(maxSymbolValue_tooSmall); - while (charnum < n0) - normalizedCounter[charnum++] = 0; - if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) { - ip += bitCount >> 3; - bitCount &= 7; - bitStream = ZSTD_readLE32(ip) >> bitCount; - } else { - bitStream >>= 2; - } - } - { - int const max = (2 * threshold - 1) - remaining; - int count; - - if ((bitStream & (threshold - 1)) < (U32)max) { - count = bitStream & (threshold - 1); - bitCount += nbBits - 1; - } else { - count = bitStream & (2 * threshold - 1); - if (count >= threshold) - count -= max; - bitCount += nbBits; - } - - count--; /* extra accuracy */ - remaining -= count < 0 ? 
-count : count; /* -1 means +1 */ - normalizedCounter[charnum++] = (short)count; - previous0 = !count; - while (remaining < threshold) { - nbBits--; - threshold >>= 1; - } - - if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) { - ip += bitCount >> 3; - bitCount &= 7; - } else { - bitCount -= (int)(8 * (iend - 4 - ip)); - ip = iend - 4; - } - bitStream = ZSTD_readLE32(ip) >> (bitCount & 31); - } - } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */ - if (remaining != 1) - return ERROR(corruption_detected); - if (bitCount > 32) - return ERROR(corruption_detected); - *maxSVPtr = charnum - 1; - - ip += (bitCount + 7) >> 3; - return ip - istart; -} - -/*! HUF_readStats() : - Read compact Huffman tree, saved by HUF_writeCTable(). - `huffWeight` is destination buffer. - `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32. - @return : size read from `src` , or an error Code . - Note : Needed by HUF_readCTable() and HUF_readDTableX?() . -*/ -size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, void *workspace, size_t workspaceSize) -{ - U32 weightTotal; - const BYTE *ip = (const BYTE *)src; - size_t iSize; - size_t oSize; - - if (!srcSize) - return ERROR(srcSize_wrong); - iSize = ip[0]; - /* memset(huffWeight, 0, hwSize); */ /* is not necessary, even though some analyzer complain ... */ - - if (iSize >= 128) { /* special header */ - oSize = iSize - 127; - iSize = ((oSize + 1) / 2); - if (iSize + 1 > srcSize) - return ERROR(srcSize_wrong); - if (oSize >= hwSize) - return ERROR(corruption_detected); - ip += 1; - { - U32 n; - for (n = 0; n < oSize; n += 2) { - huffWeight[n] = ip[n / 2] >> 4; - huffWeight[n + 1] = ip[n / 2] & 15; - } - } - } else { /* header compressed with FSE (normal case) */ - if (iSize + 1 > srcSize) - return ERROR(srcSize_wrong); - oSize = FSE_decompress_wksp(huffWeight, hwSize - 1, ip + 1, iSize, 6, workspace, workspaceSize); /* max (hwSize-1) values decoded, as last one is implied */ - if (FSE_isError(oSize)) - return oSize; - } - - /* collect weight stats */ - memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32)); - weightTotal = 0; - { - U32 n; - for (n = 0; n < oSize; n++) { - if (huffWeight[n] >= HUF_TABLELOG_MAX) - return ERROR(corruption_detected); - rankStats[huffWeight[n]]++; - weightTotal += (1 << huffWeight[n]) >> 1; - } - } - if (weightTotal == 0) - return ERROR(corruption_detected); - - /* get last non-null symbol weight (implied, total must be 2^n) */ - { - U32 const tableLog = BIT_highbit32(weightTotal) + 1; - if (tableLog > HUF_TABLELOG_MAX) - return ERROR(corruption_detected); - *tableLogPtr = tableLog; - /* determine last weight */ - { - U32 const total = 1 << tableLog; - U32 const rest = total - weightTotal; - U32 const verif = 1 << BIT_highbit32(rest); - U32 const lastWeight = BIT_highbit32(rest) + 1; - if (verif != rest) - return ERROR(corruption_detected); /* last value must be a clean power of 2 */ - huffWeight[oSize] = (BYTE)lastWeight; - rankStats[lastWeight]++; - } - } - - /* check tree construction validity */ - if ((rankStats[1] < 2) || (rankStats[1] & 1)) - return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ - - /* results */ - *nbSymbolsPtr = (U32)(oSize + 1); - return iSize + 1; -} diff --git a/contrib/linux-kernel/lib/zstd/error_private.h b/contrib/linux-kernel/lib/zstd/error_private.h deleted file mode 100644 index 2062ff05acd..00000000000 --- 
a/contrib/linux-kernel/lib/zstd/error_private.h +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of https://github.com/facebook/zstd. - * - * This program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License version 2 as published by the - * Free Software Foundation. This program is dual-licensed; you may select - * either version 2 of the GNU General Public License ("GPL") or BSD license - * ("BSD"). - */ - -/* Note : this module is expected to remain private, do not expose it */ - -#ifndef ERROR_H_MODULE -#define ERROR_H_MODULE - -/* **************************************** -* Dependencies -******************************************/ -#include /* size_t */ -#include /* enum list */ - -/* **************************************** -* Compiler-specific -******************************************/ -#define ERR_STATIC static __attribute__((unused)) - -/*-**************************************** -* Customization (error_public.h) -******************************************/ -typedef ZSTD_ErrorCode ERR_enum; -#define PREFIX(name) ZSTD_error_##name - -/*-**************************************** -* Error codes handling -******************************************/ -#define ERROR(name) ((size_t)-PREFIX(name)) - -ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } - -ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) -{ - if (!ERR_isError(code)) - return (ERR_enum)0; - return (ERR_enum)(0 - code); -} - -#endif /* ERROR_H_MODULE */ diff --git a/contrib/linux-kernel/lib/zstd/fse.h b/contrib/linux-kernel/lib/zstd/fse.h deleted file mode 100644 index 7460ab04b19..00000000000 --- a/contrib/linux-kernel/lib/zstd/fse.h +++ /dev/null @@ -1,575 +0,0 @@ -/* - * FSE : Finite State Entropy codec - * Public Prototypes declaration - * Copyright (C) 2013-2016, Yann Collet. - * - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - * This program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License version 2 as published by the - * Free Software Foundation. This program is dual-licensed; you may select - * either version 2 of the GNU General Public License ("GPL") or BSD license - * ("BSD"). - * - * You can contact the author at : - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - */ -#ifndef FSE_H -#define FSE_H - -/*-***************************************** -* Dependencies -******************************************/ -#include /* size_t, ptrdiff_t */ - -/*-***************************************** -* FSE_PUBLIC_API : control library symbols visibility -******************************************/ -#define FSE_PUBLIC_API - -/*------ Version ------*/ -#define FSE_VERSION_MAJOR 0 -#define FSE_VERSION_MINOR 9 -#define FSE_VERSION_RELEASE 0 - -#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE -#define FSE_QUOTE(str) #str -#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str) -#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION) - -#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR * 100 * 100 + FSE_VERSION_MINOR * 100 + FSE_VERSION_RELEASE) -FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ - -/*-***************************************** -* Tool functions -******************************************/ -FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */ - -/* Error Management */ -FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */ - -/*-***************************************** -* FSE detailed API -******************************************/ -/*! -FSE_compress() does the following: -1. count symbol occurrence from source[] into table count[] -2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog) -3. save normalized counters to memory buffer using writeNCount() -4. build encoding table 'CTable' from normalized counters -5. encode the data stream using encoding table 'CTable' - -FSE_decompress() does the following: -1. read normalized counters with readNCount() -2. build decoding table 'DTable' from normalized counters -3. decode the data stream using decoding table 'DTable' - -The following API allows targeting specific sub-functions for advanced tasks. -For example, it's possible to compress several blocks using the same 'CTable', -or to save and provide normalized distribution using external method. -*/ - -/* *** COMPRESSION *** */ -/*! FSE_optimalTableLog(): - dynamically downsize 'tableLog' when conditions are met. - It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. - @return : recommended tableLog (necessarily <= 'maxTableLog') */ -FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); - -/*! FSE_normalizeCount(): - normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) - 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). - @return : tableLog, - or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t srcSize, unsigned maxSymbolValue); - -/*! FSE_NCountWriteBound(): - Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'. 
- Typically useful for allocation purpose. */ -FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog); - -/*! FSE_writeNCount(): - Compactly save 'normalizedCounter' into 'buffer'. - @return : size of the compressed table, - or an errorCode, which can be tested using FSE_isError(). */ -FSE_PUBLIC_API size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); - -/*! Constructor and Destructor of FSE_CTable. - Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ -typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */ - -/*! FSE_compress_usingCTable(): - Compress `src` using `ct` into `dst` which must be already allocated. - @return : size of compressed data (<= `dstCapacity`), - or 0 if compressed data could not fit into `dst`, - or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_compress_usingCTable(void *dst, size_t dstCapacity, const void *src, size_t srcSize, const FSE_CTable *ct); - -/*! -Tutorial : ----------- -The first step is to count all symbols. FSE_count() does this job very fast. -Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells. -'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0] -maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value) -FSE_count() will return the number of occurrence of the most frequent symbol. -This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility. -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). - -The next step is to normalize the frequencies. -FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'. -It also guarantees a minimum of 1 to any Symbol with frequency >= 1. -You can use 'tableLog'==0 to mean "use default tableLog value". -If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(), -which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default"). - -The result of FSE_normalizeCount() will be saved into a table, -called 'normalizedCounter', which is a table of signed short. -'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells. -The return value is tableLog if everything proceeded as expected. -It is 0 if there is a single symbol within distribution. -If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()). - -'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount(). -'buffer' must be already allocated. -For guaranteed success, buffer size must be at least FSE_headerBound(). -The result of the function is the number of bytes written into 'buffer'. -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small). - -'normalizedCounter' can then be used to create the compression table 'CTable'. -The space required by 'CTable' must be already allocated, using FSE_createCTable(). -You can then use FSE_buildCTable() to fill 'CTable'. -If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()). 
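Taken together, the steps above, plus the FSE_compress_usingCTable() call described in the next paragraph, form a short pipeline. Below is a minimal, hedged sketch of that pipeline using the _wksp variants and bound macros declared later in this header; it assumes a hosted test build linked against these translation units, and the function name is illustrative, not part of the library.

#include <string.h>
#include "fse.h"

/* Sketch : count -> normalize -> writeNCount -> buildCTable -> encode.
 * Returns total compressed size (header + bitstream), 0 if the input is
 * not compressible this way, or an error code (test with FSE_isError()). */
static size_t fse_compress_sketch(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
    unsigned count[FSE_MAX_SYMBOL_VALUE + 1];
    short norm[FSE_MAX_SYMBOL_VALUE + 1];
    FSE_CTable ct[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
    unsigned wksp[2048];    /* generous scratch for FSE_buildCTable_wksp() */
    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;   /* bytes always fit; updated below */
    unsigned char *const op = (unsigned char *)dst;
    size_t hSize, cSize, errcod;

    /* 1. histogram; maxSymbolValue is refined to the real maximum */
    {   size_t const maxCount = FSE_count_simple(count, &maxSymbolValue, src, srcSize);
        if (maxCount == srcSize) return 0;   /* single-symbol input : RLE territory */
    }

    /* 2. normalize counters so that they sum to 2^tableLog */
    {   unsigned const tableLog = FSE_optimalTableLog(0 /* default */, srcSize, maxSymbolValue);
        errcod = FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue);
        if (FSE_isError(errcod)) return errcod;

        /* 3. save the normalized counters as the NCount header */
        hSize = FSE_writeNCount(op, dstCapacity, norm, maxSymbolValue, tableLog);
        if (FSE_isError(hSize)) return hSize;

        /* 4. build the encoding table */
        errcod = FSE_buildCTable_wksp(ct, norm, maxSymbolValue, tableLog, wksp, sizeof(wksp));
        if (FSE_isError(errcod)) return errcod;
    }

    /* 5. encode the payload right after the header */
    cSize = FSE_compress_usingCTable(op + hSize, dstCapacity - hSize, src, srcSize, ct);
    if (FSE_isError(cSize) || cSize == 0) return cSize;
    return hSize + cSize;
}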
- -'CTable' can then be used to compress 'src', with FSE_compress_usingCTable(). -Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize' -The function returns the size of compressed data (without header), necessarily <= `dstCapacity`. -If it returns '0', compressed data could not fit into 'dst'. -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). -*/ - -/* *** DECOMPRESSION *** */ - -/*! FSE_readNCount(): - Read compactly saved 'normalizedCounter' from 'rBuffer'. - @return : size read from 'rBuffer', - or an errorCode, which can be tested using FSE_isError(). - maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ -FSE_PUBLIC_API size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSymbolValuePtr, unsigned *tableLogPtr, const void *rBuffer, size_t rBuffSize); - -/*! Constructor and Destructor of FSE_DTable. - Note that its size depends on 'tableLog' */ -typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ - -/*! FSE_buildDTable(): - Builds 'dt', which must be already allocated, using FSE_createDTable(). - return : 0, or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize); - -/*! FSE_decompress_usingDTable(): - Decompress compressed source `cSrc` of size `cSrcSize` using `dt` - into `dst` which must be already allocated. - @return : size of regenerated data (necessarily <= `dstCapacity`), - or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt); - -/*! -Tutorial : ----------- -(Note : these functions only decompress FSE-compressed blocks. - If block is uncompressed, use memcpy() instead - If block is a single repeated byte, use memset() instead ) - -The first step is to obtain the normalized frequencies of symbols. -This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount(). -'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short. -In practice, that means it's necessary to know 'maxSymbolValue' beforehand, -or size the table to handle worst case situations (typically 256). -FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'. -The result of FSE_readNCount() is the number of bytes read from 'rBuffer'. -Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that. -If there is an error, the function will return an error code, which can be tested using FSE_isError(). - -The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'. -This is performed by the function FSE_buildDTable(). -The space required by 'FSE_DTable' must be already allocated using FSE_createDTable(). -If there is an error, the function will return an error code, which can be tested using FSE_isError(). - -`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable(). -`cSrcSize` must be strictly correct, otherwise decompression will fail. -FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`). -If there is an error, the function will return an error code, which can be tested using FSE_isError(). 
(ex: dst buffer too small) -*/ - -/* *** Dependency *** */ -#include "bitstream.h" - -/* ***************************************** -* Static allocation -*******************************************/ -/* FSE buffer bounds */ -#define FSE_NCOUNTBOUND 512 -#define FSE_BLOCKBOUND(size) (size + (size >> 7)) -#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ - -/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */ -#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1 << (maxTableLog - 1)) + ((maxSymbolValue + 1) * 2)) -#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1 << maxTableLog)) - -/* ***************************************** -* FSE advanced API -*******************************************/ -/* FSE_count_wksp() : - * Same as FSE_count(), but using an externally provided scratch buffer. - * `workSpace` size must be table of >= `1024` unsigned - */ -size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace); - -/* FSE_countFast_wksp() : - * Same as FSE_countFast(), but using an externally provided scratch buffer. - * `workSpace` must be a table of minimum `1024` unsigned - */ -size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize, unsigned *workSpace); - -/*! FSE_count_simple - * Same as FSE_countFast(), but does not use any additional memory (not even on stack). - * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`). -*/ -size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize); - -unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus); -/**< same as FSE_optimalTableLog(), which used `minus==2` */ - -size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits); -/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */ - -size_t FSE_buildCTable_rle(FSE_CTable *ct, unsigned char symbolValue); -/**< build a fake FSE_CTable, designed to compress always the same symbolValue */ - -/* FSE_buildCTable_wksp() : - * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). - * `wkspSize` must be >= `(1<= BIT_DStream_completed - -When it's done, verify decompression is fully completed, by checking both DStream and the relevant states. -Checking if DStream has reached its end is performed by : - BIT_endOfDStream(&DStream); -Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible. 
- FSE_endOfDState(&DState); -*/ - -/* ***************************************** -* FSE unsafe API -*******************************************/ -static unsigned char FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD); -/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */ - -/* ***************************************** -* Implementation of inlined functions -*******************************************/ -typedef struct { - int deltaFindState; - U32 deltaNbBits; -} FSE_symbolCompressionTransform; /* total 8 bytes */ - -ZSTD_STATIC void FSE_initCState(FSE_CState_t *statePtr, const FSE_CTable *ct) -{ - const void *ptr = ct; - const U16 *u16ptr = (const U16 *)ptr; - const U32 tableLog = ZSTD_read16(ptr); - statePtr->value = (ptrdiff_t)1 << tableLog; - statePtr->stateTable = u16ptr + 2; - statePtr->symbolTT = ((const U32 *)ct + 1 + (tableLog ? (1 << (tableLog - 1)) : 1)); - statePtr->stateLog = tableLog; -} - -/*! FSE_initCState2() : -* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read) -* uses the smallest state value possible, saving the cost of this symbol */ -ZSTD_STATIC void FSE_initCState2(FSE_CState_t *statePtr, const FSE_CTable *ct, U32 symbol) -{ - FSE_initCState(statePtr, ct); - { - const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol]; - const U16 *stateTable = (const U16 *)(statePtr->stateTable); - U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1 << 15)) >> 16); - statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits; - statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; - } -} - -ZSTD_STATIC void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *statePtr, U32 symbol) -{ - const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol]; - const U16 *const stateTable = (const U16 *)(statePtr->stateTable); - U32 nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16); - BIT_addBits(bitC, statePtr->value, nbBitsOut); - statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; -} - -ZSTD_STATIC void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *statePtr) -{ - BIT_addBits(bitC, statePtr->value, statePtr->stateLog); - BIT_flushBits(bitC); -} - -/* ====== Decompression ====== */ - -typedef struct { - U16 tableLog; - U16 fastMode; -} FSE_DTableHeader; /* sizeof U32 */ - -typedef struct { - unsigned short newState; - unsigned char symbol; - unsigned char nbBits; -} FSE_decode_t; /* size == U32 */ - -ZSTD_STATIC void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt) -{ - const void *ptr = dt; - const FSE_DTableHeader *const DTableH = (const FSE_DTableHeader *)ptr; - DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); - BIT_reloadDStream(bitD); - DStatePtr->table = dt + 1; -} - -ZSTD_STATIC BYTE FSE_peekSymbol(const FSE_DState_t *DStatePtr) -{ - FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state]; - return DInfo.symbol; -} - -ZSTD_STATIC void FSE_updateState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD) -{ - FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state]; - U32 const nbBits = DInfo.nbBits; - size_t const lowBits = BIT_readBits(bitD, nbBits); - DStatePtr->state = DInfo.newState + lowBits; -} - -ZSTD_STATIC BYTE FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD) -{ 
- FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state]; - U32 const nbBits = DInfo.nbBits; - BYTE const symbol = DInfo.symbol; - size_t const lowBits = BIT_readBits(bitD, nbBits); - - DStatePtr->state = DInfo.newState + lowBits; - return symbol; -} - -/*! FSE_decodeSymbolFast() : - unsafe, only works if no symbol has a probability > 50% */ -ZSTD_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD) -{ - FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state]; - U32 const nbBits = DInfo.nbBits; - BYTE const symbol = DInfo.symbol; - size_t const lowBits = BIT_readBitsFast(bitD, nbBits); - - DStatePtr->state = DInfo.newState + lowBits; - return symbol; -} - -ZSTD_STATIC unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr) { return DStatePtr->state == 0; } - -/* ************************************************************** -* Tuning parameters -****************************************************************/ -/*!MEMORY_USAGE : -* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) -* Increasing memory usage improves compression ratio -* Reduced memory usage can improve speed, due to cache effect -* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ -#ifndef FSE_MAX_MEMORY_USAGE -#define FSE_MAX_MEMORY_USAGE 14 -#endif -#ifndef FSE_DEFAULT_MEMORY_USAGE -#define FSE_DEFAULT_MEMORY_USAGE 13 -#endif - -/*!FSE_MAX_SYMBOL_VALUE : -* Maximum symbol value authorized. -* Required for proper stack allocation */ -#ifndef FSE_MAX_SYMBOL_VALUE -#define FSE_MAX_SYMBOL_VALUE 255 -#endif - -/* ************************************************************** -* template functions type & suffix -****************************************************************/ -#define FSE_FUNCTION_TYPE BYTE -#define FSE_FUNCTION_EXTENSION -#define FSE_DECODE_TYPE FSE_decode_t - -/* *************************************************************** -* Constants -*****************************************************************/ -#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE - 2) -#define FSE_MAX_TABLESIZE (1U << FSE_MAX_TABLELOG) -#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE - 1) -#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE - 2) -#define FSE_MIN_TABLELOG 5 - -#define FSE_TABLELOG_ABSOLUTE_MAX 15 -#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX -#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" -#endif - -#define FSE_TABLESTEP(tableSize) ((tableSize >> 1) + (tableSize >> 3) + 3) - -#endif /* FSE_H */ diff --git a/contrib/linux-kernel/lib/zstd/fse_compress.c b/contrib/linux-kernel/lib/zstd/fse_compress.c deleted file mode 100644 index 0fe468edf6f..00000000000 --- a/contrib/linux-kernel/lib/zstd/fse_compress.c +++ /dev/null @@ -1,795 +0,0 @@ -/* - * FSE : Finite State Entropy encoder - * Copyright (C) 2013-2015, Yann Collet. - * - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License version 2 as published by the - * Free Software Foundation. This program is dual-licensed; you may select - * either version 2 of the GNU General Public License ("GPL") or BSD license - * ("BSD"). - * - * You can contact the author at : - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - */ - -/* ************************************************************** -* Compiler specifics -****************************************************************/ -#define FORCE_INLINE static __always_inline - -/* ************************************************************** -* Includes -****************************************************************/ -#include "bitstream.h" -#include "fse.h" -#include -#include -#include -#include /* memcpy, memset */ - -/* ************************************************************** -* Error Management -****************************************************************/ -#define FSE_STATIC_ASSERT(c) \ - { \ - enum { FSE_static_assert = 1 / (int)(!!(c)) }; \ - } /* use only *after* variable declarations */ - -/* ************************************************************** -* Templates -****************************************************************/ -/* - designed to be included - for type-specific functions (template emulation in C) - Objective is to write these functions only once, for improved maintenance -*/ - -/* safety checks */ -#ifndef FSE_FUNCTION_EXTENSION -#error "FSE_FUNCTION_EXTENSION must be defined" -#endif -#ifndef FSE_FUNCTION_TYPE -#error "FSE_FUNCTION_TYPE must be defined" -#endif - -/* Function names */ -#define FSE_CAT(X, Y) X##Y -#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y) -#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y) - -/* Function templates */ - -/* FSE_buildCTable_wksp() : - * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). 
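Before the implementation below, it can help to see the workspace requirement worked out. The following hedged sizing helper mirrors the function's own spaceUsed32 accounting: a cumul array of (FSE_MAX_SYMBOL_VALUE + 2) U32 cells (the worst case is used regardless of the actual maxSymbolValue), followed by a U32-aligned symbol table of 2^tableLog bytes. The helper name is illustrative.

/* Worst-case scratch (in bytes) consumed by FSE_buildCTable_wksp(),
 * following the spaceUsed32 arithmetic in the body below. */
static size_t fse_buildCTable_wksp_size(unsigned tableLog)
{
    size_t const cumulBytes    = ((size_t)FSE_MAX_SYMBOL_VALUE + 2) * sizeof(unsigned);
    size_t const symbolBytes   = (size_t)1 << tableLog;          /* FSE_FUNCTION_TYPE == BYTE */
    size_t const symbolAligned = (symbolBytes + 3) & (size_t)~3; /* round up to U32 */
    return cumulBytes + symbolAligned;
}
/* e.g. tableLog = 12 : 257*4 + 4096 = 5124 bytes, i.e. 1281 U32 cells */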
- * wkspSize should be sized to handle worst case situation, which is `1<> 1 : 1); - FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT); - U32 const step = FSE_TABLESTEP(tableSize); - U32 highThreshold = tableSize - 1; - - U32 *cumul; - FSE_FUNCTION_TYPE *tableSymbol; - size_t spaceUsed32 = 0; - - cumul = (U32 *)workspace + spaceUsed32; - spaceUsed32 += FSE_MAX_SYMBOL_VALUE + 2; - tableSymbol = (FSE_FUNCTION_TYPE *)((U32 *)workspace + spaceUsed32); - spaceUsed32 += ALIGN(sizeof(FSE_FUNCTION_TYPE) * ((size_t)1 << tableLog), sizeof(U32)) >> 2; - - if ((spaceUsed32 << 2) > workspaceSize) - return ERROR(tableLog_tooLarge); - workspace = (U32 *)workspace + spaceUsed32; - workspaceSize -= (spaceUsed32 << 2); - - /* CTable header */ - tableU16[-2] = (U16)tableLog; - tableU16[-1] = (U16)maxSymbolValue; - - /* For explanations on how to distribute symbol values over the table : - * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ - - /* symbol start positions */ - { - U32 u; - cumul[0] = 0; - for (u = 1; u <= maxSymbolValue + 1; u++) { - if (normalizedCounter[u - 1] == -1) { /* Low proba symbol */ - cumul[u] = cumul[u - 1] + 1; - tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u - 1); - } else { - cumul[u] = cumul[u - 1] + normalizedCounter[u - 1]; - } - } - cumul[maxSymbolValue + 1] = tableSize + 1; - } - - /* Spread symbols */ - { - U32 position = 0; - U32 symbol; - for (symbol = 0; symbol <= maxSymbolValue; symbol++) { - int nbOccurrences; - for (nbOccurrences = 0; nbOccurrences < normalizedCounter[symbol]; nbOccurrences++) { - tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol; - position = (position + step) & tableMask; - while (position > highThreshold) - position = (position + step) & tableMask; /* Low proba area */ - } - } - - if (position != 0) - return ERROR(GENERIC); /* Must have gone through all positions */ - } - - /* Build table */ - { - U32 u; - for (u = 0; u < tableSize; u++) { - FSE_FUNCTION_TYPE s = tableSymbol[u]; /* note : static analyzer may not understand tableSymbol is properly initialized */ - tableU16[cumul[s]++] = (U16)(tableSize + u); /* TableU16 : sorted by symbol order; gives next state value */ - } - } - - /* Build Symbol Transformation Table */ - { - unsigned total = 0; - unsigned s; - for (s = 0; s <= maxSymbolValue; s++) { - switch (normalizedCounter[s]) { - case 0: break; - - case -1: - case 1: - symbolTT[s].deltaNbBits = (tableLog << 16) - (1 << tableLog); - symbolTT[s].deltaFindState = total - 1; - total++; - break; - default: { - U32 const maxBitsOut = tableLog - BIT_highbit32(normalizedCounter[s] - 1); - U32 const minStatePlus = normalizedCounter[s] << maxBitsOut; - symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus; - symbolTT[s].deltaFindState = total - normalizedCounter[s]; - total += normalizedCounter[s]; - } - } - } - } - - return 0; -} - -/*-************************************************************** -* FSE NCount encoding-decoding -****************************************************************/ -size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog) -{ - size_t const maxHeaderSize = (((maxSymbolValue + 1) * tableLog) >> 3) + 3; - return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? 
use default */ -} - -static size_t FSE_writeNCount_generic(void *header, size_t headerBufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, - unsigned writeIsSafe) -{ - BYTE *const ostart = (BYTE *)header; - BYTE *out = ostart; - BYTE *const oend = ostart + headerBufferSize; - int nbBits; - const int tableSize = 1 << tableLog; - int remaining; - int threshold; - U32 bitStream; - int bitCount; - unsigned charnum = 0; - int previous0 = 0; - - bitStream = 0; - bitCount = 0; - /* Table Size */ - bitStream += (tableLog - FSE_MIN_TABLELOG) << bitCount; - bitCount += 4; - - /* Init */ - remaining = tableSize + 1; /* +1 for extra accuracy */ - threshold = tableSize; - nbBits = tableLog + 1; - - while (remaining > 1) { /* stops at 1 */ - if (previous0) { - unsigned start = charnum; - while (!normalizedCounter[charnum]) - charnum++; - while (charnum >= start + 24) { - start += 24; - bitStream += 0xFFFFU << bitCount; - if ((!writeIsSafe) && (out > oend - 2)) - return ERROR(dstSize_tooSmall); /* Buffer overflow */ - out[0] = (BYTE)bitStream; - out[1] = (BYTE)(bitStream >> 8); - out += 2; - bitStream >>= 16; - } - while (charnum >= start + 3) { - start += 3; - bitStream += 3 << bitCount; - bitCount += 2; - } - bitStream += (charnum - start) << bitCount; - bitCount += 2; - if (bitCount > 16) { - if ((!writeIsSafe) && (out > oend - 2)) - return ERROR(dstSize_tooSmall); /* Buffer overflow */ - out[0] = (BYTE)bitStream; - out[1] = (BYTE)(bitStream >> 8); - out += 2; - bitStream >>= 16; - bitCount -= 16; - } - } - { - int count = normalizedCounter[charnum++]; - int const max = (2 * threshold - 1) - remaining; - remaining -= count < 0 ? -count : count; - count++; /* +1 for extra accuracy */ - if (count >= threshold) - count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */ - bitStream += count << bitCount; - bitCount += nbBits; - bitCount -= (count < max); - previous0 = (count == 1); - if (remaining < 1) - return ERROR(GENERIC); - while (remaining < threshold) - nbBits--, threshold >>= 1; - } - if (bitCount > 16) { - if ((!writeIsSafe) && (out > oend - 2)) - return ERROR(dstSize_tooSmall); /* Buffer overflow */ - out[0] = (BYTE)bitStream; - out[1] = (BYTE)(bitStream >> 8); - out += 2; - bitStream >>= 16; - bitCount -= 16; - } - } - - /* flush remaining bitStream */ - if ((!writeIsSafe) && (out > oend - 2)) - return ERROR(dstSize_tooSmall); /* Buffer overflow */ - out[0] = (BYTE)bitStream; - out[1] = (BYTE)(bitStream >> 8); - out += (bitCount + 7) / 8; - - if (charnum > maxSymbolValue + 1) - return ERROR(GENERIC); - - return (out - ostart); -} - -size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) -{ - if (tableLog > FSE_MAX_TABLELOG) - return ERROR(tableLog_tooLarge); /* Unsupported */ - if (tableLog < FSE_MIN_TABLELOG) - return ERROR(GENERIC); /* Unsupported */ - - if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog)) - return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0); - - return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1); -} - -/*-************************************************************** -* Counting histogram -****************************************************************/ -/*! FSE_count_simple - This function counts byte values within `src`, and store the histogram into table `count`. - It doesn't use any additional memory. 
- But this function is unsafe : it doesn't check that all values within `src` can fit into `count`. - For this reason, prefer using a table `count` with 256 elements. - @return : count of most numerous element -*/ -size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize) -{ - const BYTE *ip = (const BYTE *)src; - const BYTE *const end = ip + srcSize; - unsigned maxSymbolValue = *maxSymbolValuePtr; - unsigned max = 0; - - memset(count, 0, (maxSymbolValue + 1) * sizeof(*count)); - if (srcSize == 0) { - *maxSymbolValuePtr = 0; - return 0; - } - - while (ip < end) - count[*ip++]++; - - while (!count[maxSymbolValue]) - maxSymbolValue--; - *maxSymbolValuePtr = maxSymbolValue; - - { - U32 s; - for (s = 0; s <= maxSymbolValue; s++) - if (count[s] > max) - max = count[s]; - } - - return (size_t)max; -} - -/* FSE_count_parallel_wksp() : - * Same as FSE_count_parallel(), but using an externally provided scratch buffer. - * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`` */ -static size_t FSE_count_parallel_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned checkMax, - unsigned *const workSpace) -{ - const BYTE *ip = (const BYTE *)source; - const BYTE *const iend = ip + sourceSize; - unsigned maxSymbolValue = *maxSymbolValuePtr; - unsigned max = 0; - U32 *const Counting1 = workSpace; - U32 *const Counting2 = Counting1 + 256; - U32 *const Counting3 = Counting2 + 256; - U32 *const Counting4 = Counting3 + 256; - - memset(Counting1, 0, 4 * 256 * sizeof(unsigned)); - - /* safety checks */ - if (!sourceSize) { - memset(count, 0, maxSymbolValue + 1); - *maxSymbolValuePtr = 0; - return 0; - } - if (!maxSymbolValue) - maxSymbolValue = 255; /* 0 == default */ - - /* by stripes of 16 bytes */ - { - U32 cached = ZSTD_read32(ip); - ip += 4; - while (ip < iend - 15) { - U32 c = cached; - cached = ZSTD_read32(ip); - ip += 4; - Counting1[(BYTE)c]++; - Counting2[(BYTE)(c >> 8)]++; - Counting3[(BYTE)(c >> 16)]++; - Counting4[c >> 24]++; - c = cached; - cached = ZSTD_read32(ip); - ip += 4; - Counting1[(BYTE)c]++; - Counting2[(BYTE)(c >> 8)]++; - Counting3[(BYTE)(c >> 16)]++; - Counting4[c >> 24]++; - c = cached; - cached = ZSTD_read32(ip); - ip += 4; - Counting1[(BYTE)c]++; - Counting2[(BYTE)(c >> 8)]++; - Counting3[(BYTE)(c >> 16)]++; - Counting4[c >> 24]++; - c = cached; - cached = ZSTD_read32(ip); - ip += 4; - Counting1[(BYTE)c]++; - Counting2[(BYTE)(c >> 8)]++; - Counting3[(BYTE)(c >> 16)]++; - Counting4[c >> 24]++; - } - ip -= 4; - } - - /* finish last symbols */ - while (ip < iend) - Counting1[*ip++]++; - - if (checkMax) { /* verify stats will fit into destination table */ - U32 s; - for (s = 255; s > maxSymbolValue; s--) { - Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s]; - if (Counting1[s]) - return ERROR(maxSymbolValue_tooSmall); - } - } - - { - U32 s; - for (s = 0; s <= maxSymbolValue; s++) { - count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s]; - if (count[s] > max) - max = count[s]; - } - } - - while (!count[maxSymbolValue]) - maxSymbolValue--; - *maxSymbolValuePtr = maxSymbolValue; - return (size_t)max; -} - -/* FSE_countFast_wksp() : - * Same as FSE_countFast(), but using an externally provided scratch buffer. 
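The stripe-counting trick used above is easier to see in isolation. Here is a simplified, self-contained rendering of the idea (byte-at-a-time rather than the 16-byte unrolled loop, and without the checkMax pass); it illustrates the technique and is not the kernel code itself.

#include <stddef.h>

/* Four independent histograms break the load/increment/store dependency
 * chain on runs of identical bytes; they are merged once at the end. */
static void histogram4(unsigned count[256], const unsigned char *src, size_t srcSize)
{
    unsigned c1[256] = {0}, c2[256] = {0}, c3[256] = {0}, c4[256] = {0};
    size_t i;
    for (i = 0; i + 4 <= srcSize; i += 4) {
        c1[src[i + 0]]++;
        c2[src[i + 1]]++;
        c3[src[i + 2]]++;
        c4[src[i + 3]]++;
    }
    for (; i < srcSize; i++)
        c1[src[i]]++;                                   /* tail */
    {   size_t s;
        for (s = 0; s < 256; s++)
            count[s] = c1[s] + c2[s] + c3[s] + c4[s];   /* merge */
    }
}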
- * `workSpace` size must be table of >= `1024` unsigned */ -size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace) -{ - if (sourceSize < 1500) - return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize); - return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace); -} - -/* FSE_count_wksp() : - * Same as FSE_count(), but using an externally provided scratch buffer. - * `workSpace` size must be table of >= `1024` unsigned */ -size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace) -{ - if (*maxSymbolValuePtr < 255) - return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace); - *maxSymbolValuePtr = 255; - return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace); -} - -/*-************************************************************** -* FSE Compression Code -****************************************************************/ -/*! FSE_sizeof_CTable() : - FSE_CTable is a variable size structure which contains : - `U16 tableLog;` - `U16 maxSymbolValue;` - `U16 nextStateNumber[1 << tableLog];` // This size is variable - `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];` // This size is variable -Allocation is manual (C standard does not support variable-size structures). -*/ -size_t FSE_sizeof_CTable(unsigned maxSymbolValue, unsigned tableLog) -{ - if (tableLog > FSE_MAX_TABLELOG) - return ERROR(tableLog_tooLarge); - return FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue) * sizeof(U32); -} - -/* provides the minimum logSize to safely represent a distribution */ -static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) -{ - U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1; - U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2; - U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols; - return minBits; -} - -unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus) -{ - U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus; - U32 tableLog = maxTableLog; - U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue); - if (tableLog == 0) - tableLog = FSE_DEFAULT_TABLELOG; - if (maxBitsSrc < tableLog) - tableLog = maxBitsSrc; /* Accuracy can be reduced */ - if (minBits > tableLog) - tableLog = minBits; /* Need a minimum to safely represent all symbol values */ - if (tableLog < FSE_MIN_TABLELOG) - tableLog = FSE_MIN_TABLELOG; - if (tableLog > FSE_MAX_TABLELOG) - tableLog = FSE_MAX_TABLELOG; - return tableLog; -} - -unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) -{ - return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2); -} - -/* Secondary normalization method. - To be used when primary method fails. 
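To make the tableLog heuristics above concrete, here is the arithmetic worked for a typical case: 64 KB of full-byte-alphabet input, with the caller passing maxTableLog = 0.

/* FSE_optimalTableLog(0, 65536, 255), i.e. minus == 2 :
 *   minBitsSrc     = BIT_highbit32(65535) + 1 = 15 + 1 = 16
 *   minBitsSymbols = BIT_highbit32(255)   + 2 =  7 + 2 =  9
 *   minBits        = min(16, 9)                        =  9
 *   maxBitsSrc     = BIT_highbit32(65535) - 2          = 13
 *   tableLog       = FSE_DEFAULT_TABLELOG              = 11  (0 means default)
 * 11 <= maxBitsSrc, 11 >= minBits, and 11 lies inside
 * [FSE_MIN_TABLELOG = 5, FSE_MAX_TABLELOG = 12], so the result is 11. */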
*/ - -static size_t FSE_normalizeM2(short *norm, U32 tableLog, const unsigned *count, size_t total, U32 maxSymbolValue) -{ - short const NOT_YET_ASSIGNED = -2; - U32 s; - U32 distributed = 0; - U32 ToDistribute; - - /* Init */ - U32 const lowThreshold = (U32)(total >> tableLog); - U32 lowOne = (U32)((total * 3) >> (tableLog + 1)); - - for (s = 0; s <= maxSymbolValue; s++) { - if (count[s] == 0) { - norm[s] = 0; - continue; - } - if (count[s] <= lowThreshold) { - norm[s] = -1; - distributed++; - total -= count[s]; - continue; - } - if (count[s] <= lowOne) { - norm[s] = 1; - distributed++; - total -= count[s]; - continue; - } - - norm[s] = NOT_YET_ASSIGNED; - } - ToDistribute = (1 << tableLog) - distributed; - - if ((total / ToDistribute) > lowOne) { - /* risk of rounding to zero */ - lowOne = (U32)((total * 3) / (ToDistribute * 2)); - for (s = 0; s <= maxSymbolValue; s++) { - if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) { - norm[s] = 1; - distributed++; - total -= count[s]; - continue; - } - } - ToDistribute = (1 << tableLog) - distributed; - } - - if (distributed == maxSymbolValue + 1) { - /* all values are pretty poor; - probably incompressible data (should have already been detected); - find max, then give all remaining points to max */ - U32 maxV = 0, maxC = 0; - for (s = 0; s <= maxSymbolValue; s++) - if (count[s] > maxC) - maxV = s, maxC = count[s]; - norm[maxV] += (short)ToDistribute; - return 0; - } - - if (total == 0) { - /* all of the symbols were low enough for the lowOne or lowThreshold */ - for (s = 0; ToDistribute > 0; s = (s + 1) % (maxSymbolValue + 1)) - if (norm[s] > 0) - ToDistribute--, norm[s]++; - return 0; - } - - { - U64 const vStepLog = 62 - tableLog; - U64 const mid = (1ULL << (vStepLog - 1)) - 1; - U64 const rStep = div_u64((((U64)1 << vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */ - U64 tmpTotal = mid; - for (s = 0; s <= maxSymbolValue; s++) { - if (norm[s] == NOT_YET_ASSIGNED) { - U64 const end = tmpTotal + (count[s] * rStep); - U32 const sStart = (U32)(tmpTotal >> vStepLog); - U32 const sEnd = (U32)(end >> vStepLog); - U32 const weight = sEnd - sStart; - if (weight < 1) - return ERROR(GENERIC); - norm[s] = (short)weight; - tmpTotal = end; - } - } - } - - return 0; -} - -size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t total, unsigned maxSymbolValue) -{ - /* Sanity checks */ - if (tableLog == 0) - tableLog = FSE_DEFAULT_TABLELOG; - if (tableLog < FSE_MIN_TABLELOG) - return ERROR(GENERIC); /* Unsupported size */ - if (tableLog > FSE_MAX_TABLELOG) - return ERROR(tableLog_tooLarge); /* Unsupported size */ - if (tableLog < FSE_minTableLog(total, maxSymbolValue)) - return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */ - - { - U32 const rtbTable[] = {0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}; - U64 const scale = 62 - tableLog; - U64 const step = div_u64((U64)1 << 62, (U32)total); /* <== here, one division ! 
*/ - U64 const vStep = 1ULL << (scale - 20); - int stillToDistribute = 1 << tableLog; - unsigned s; - unsigned largest = 0; - short largestP = 0; - U32 lowThreshold = (U32)(total >> tableLog); - - for (s = 0; s <= maxSymbolValue; s++) { - if (count[s] == total) - return 0; /* rle special case */ - if (count[s] == 0) { - normalizedCounter[s] = 0; - continue; - } - if (count[s] <= lowThreshold) { - normalizedCounter[s] = -1; - stillToDistribute--; - } else { - short proba = (short)((count[s] * step) >> scale); - if (proba < 8) { - U64 restToBeat = vStep * rtbTable[proba]; - proba += (count[s] * step) - ((U64)proba << scale) > restToBeat; - } - if (proba > largestP) - largestP = proba, largest = s; - normalizedCounter[s] = proba; - stillToDistribute -= proba; - } - } - if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) { - /* corner case, need another normalization method */ - size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue); - if (FSE_isError(errorCode)) - return errorCode; - } else - normalizedCounter[largest] += (short)stillToDistribute; - } - - return tableLog; -} - -/* fake FSE_CTable, for raw (uncompressed) input */ -size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits) -{ - const unsigned tableSize = 1 << nbBits; - const unsigned tableMask = tableSize - 1; - const unsigned maxSymbolValue = tableMask; - void *const ptr = ct; - U16 *const tableU16 = ((U16 *)ptr) + 2; - void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableSize >> 1); /* assumption : tableLog >= 1 */ - FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT); - unsigned s; - - /* Sanity checks */ - if (nbBits < 1) - return ERROR(GENERIC); /* min size */ - - /* header */ - tableU16[-2] = (U16)nbBits; - tableU16[-1] = (U16)maxSymbolValue; - - /* Build table */ - for (s = 0; s < tableSize; s++) - tableU16[s] = (U16)(tableSize + s); - - /* Build Symbol Transformation Table */ - { - const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits); - for (s = 0; s <= maxSymbolValue; s++) { - symbolTT[s].deltaNbBits = deltaNbBits; - symbolTT[s].deltaFindState = s - 1; - } - } - - return 0; -} - -/* fake FSE_CTable, for rle input (always same symbol) */ -size_t FSE_buildCTable_rle(FSE_CTable *ct, BYTE symbolValue) -{ - void *ptr = ct; - U16 *tableU16 = ((U16 *)ptr) + 2; - void *FSCTptr = (U32 *)ptr + 2; - FSE_symbolCompressionTransform *symbolTT = (FSE_symbolCompressionTransform *)FSCTptr; - - /* header */ - tableU16[-2] = (U16)0; - tableU16[-1] = (U16)symbolValue; - - /* Build table */ - tableU16[0] = 0; - tableU16[1] = 0; /* just in case */ - - /* Build Symbol Transformation Table */ - symbolTT[symbolValue].deltaNbBits = 0; - symbolTT[symbolValue].deltaFindState = 0; - - return 0; -} - -static size_t FSE_compress_usingCTable_generic(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct, const unsigned fast) -{ - const BYTE *const istart = (const BYTE *)src; - const BYTE *const iend = istart + srcSize; - const BYTE *ip = iend; - - BIT_CStream_t bitC; - FSE_CState_t CState1, CState2; - - /* init */ - if (srcSize <= 2) - return 0; - { - size_t const initError = BIT_initCStream(&bitC, dst, dstSize); - if (FSE_isError(initError)) - return 0; /* not enough space available to write a bitstream */ - } - -#define FSE_FLUSHBITS(s) (fast ? 
BIT_flushBitsFast(s) : BIT_flushBits(s)) - - if (srcSize & 1) { - FSE_initCState2(&CState1, ct, *--ip); - FSE_initCState2(&CState2, ct, *--ip); - FSE_encodeSymbol(&bitC, &CState1, *--ip); - FSE_FLUSHBITS(&bitC); - } else { - FSE_initCState2(&CState2, ct, *--ip); - FSE_initCState2(&CState1, ct, *--ip); - } - - /* join to mod 4 */ - srcSize -= 2; - if ((sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) && (srcSize & 2)) { /* test bit 2 */ - FSE_encodeSymbol(&bitC, &CState2, *--ip); - FSE_encodeSymbol(&bitC, &CState1, *--ip); - FSE_FLUSHBITS(&bitC); - } - - /* 2 or 4 encoding per loop */ - while (ip > istart) { - - FSE_encodeSymbol(&bitC, &CState2, *--ip); - - if (sizeof(bitC.bitContainer) * 8 < FSE_MAX_TABLELOG * 2 + 7) /* this test must be static */ - FSE_FLUSHBITS(&bitC); - - FSE_encodeSymbol(&bitC, &CState1, *--ip); - - if (sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) { /* this test must be static */ - FSE_encodeSymbol(&bitC, &CState2, *--ip); - FSE_encodeSymbol(&bitC, &CState1, *--ip); - } - - FSE_FLUSHBITS(&bitC); - } - - FSE_flushCState(&bitC, &CState2); - FSE_flushCState(&bitC, &CState1); - return BIT_closeCStream(&bitC); -} - -size_t FSE_compress_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct) -{ - unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize)); - - if (fast) - return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1); - else - return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0); -} - -size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); } diff --git a/contrib/linux-kernel/lib/zstd/fse_decompress.c b/contrib/linux-kernel/lib/zstd/fse_decompress.c deleted file mode 100644 index a84300e5a01..00000000000 --- a/contrib/linux-kernel/lib/zstd/fse_decompress.c +++ /dev/null @@ -1,332 +0,0 @@ -/* - * FSE : Finite State Entropy decoder - * Copyright (C) 2013-2015, Yann Collet. - * - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License version 2 as published by the - * Free Software Foundation. 
This program is dual-licensed; you may select - * either version 2 of the GNU General Public License ("GPL") or BSD license - * ("BSD"). - * - * You can contact the author at : - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - */ - -/* ************************************************************** -* Compiler specifics -****************************************************************/ -#define FORCE_INLINE static __always_inline - -/* ************************************************************** -* Includes -****************************************************************/ -#include "bitstream.h" -#include "fse.h" -#include -#include -#include /* memcpy, memset */ - -/* ************************************************************** -* Error Management -****************************************************************/ -#define FSE_isError ERR_isError -#define FSE_STATIC_ASSERT(c) \ - { \ - enum { FSE_static_assert = 1 / (int)(!!(c)) }; \ - } /* use only *after* variable declarations */ - -/* check and forward error code */ -#define CHECK_F(f) \ - { \ - size_t const e = f; \ - if (FSE_isError(e)) \ - return e; \ - } - -/* ************************************************************** -* Templates -****************************************************************/ -/* - designed to be included - for type-specific functions (template emulation in C) - Objective is to write these functions only once, for improved maintenance -*/ - -/* safety checks */ -#ifndef FSE_FUNCTION_EXTENSION -#error "FSE_FUNCTION_EXTENSION must be defined" -#endif -#ifndef FSE_FUNCTION_TYPE -#error "FSE_FUNCTION_TYPE must be defined" -#endif - -/* Function names */ -#define FSE_CAT(X, Y) X##Y -#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y) -#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y) - -/* Function templates */ - -size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize) -{ - void *const tdPtr = dt + 1; /* because *dt is unsigned, 32-bits aligned on 32-bits */ - FSE_DECODE_TYPE *const tableDecode = (FSE_DECODE_TYPE *)(tdPtr); - U16 *symbolNext = (U16 *)workspace; - - U32 const maxSV1 = maxSymbolValue + 1; - U32 const tableSize = 1 << tableLog; - U32 highThreshold = tableSize - 1; - - /* Sanity Checks */ - if (workspaceSize < sizeof(U16) * (FSE_MAX_SYMBOL_VALUE + 1)) - return ERROR(tableLog_tooLarge); - if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) - return ERROR(maxSymbolValue_tooLarge); - if (tableLog > FSE_MAX_TABLELOG) - return ERROR(tableLog_tooLarge); - - /* Init, lay down lowprob symbols */ - { - FSE_DTableHeader DTableH; - DTableH.tableLog = (U16)tableLog; - DTableH.fastMode = 1; - { - S16 const largeLimit = (S16)(1 << (tableLog - 1)); - U32 s; - for (s = 0; s < maxSV1; s++) { - if (normalizedCounter[s] == -1) { - tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s; - symbolNext[s] = 1; - } else { - if (normalizedCounter[s] >= largeLimit) - DTableH.fastMode = 0; - symbolNext[s] = normalizedCounter[s]; - } - } - } - memcpy(dt, &DTableH, sizeof(DTableH)); - } - - /* Spread symbols */ - { - U32 const tableMask = tableSize - 1; - U32 const step = FSE_TABLESTEP(tableSize); - U32 s, position = 0; - for (s = 0; s < maxSV1; s++) { - int i; - for (i = 0; i < normalizedCounter[s]; i++) { - tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s; - position = (position + step) & tableMask; - while (position > highThreshold) - position = (position + step) & tableMask; /* lowprob area */ - } 
- } - if (position != 0) - return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ - } - - /* Build Decoding table */ - { - U32 u; - for (u = 0; u < tableSize; u++) { - FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol); - U16 nextState = symbolNext[symbol]++; - tableDecode[u].nbBits = (BYTE)(tableLog - BIT_highbit32((U32)nextState)); - tableDecode[u].newState = (U16)((nextState << tableDecode[u].nbBits) - tableSize); - } - } - - return 0; -} - -/*-******************************************************* -* Decompression (Byte symbols) -*********************************************************/ -size_t FSE_buildDTable_rle(FSE_DTable *dt, BYTE symbolValue) -{ - void *ptr = dt; - FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr; - void *dPtr = dt + 1; - FSE_decode_t *const cell = (FSE_decode_t *)dPtr; - - DTableH->tableLog = 0; - DTableH->fastMode = 0; - - cell->newState = 0; - cell->symbol = symbolValue; - cell->nbBits = 0; - - return 0; -} - -size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits) -{ - void *ptr = dt; - FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr; - void *dPtr = dt + 1; - FSE_decode_t *const dinfo = (FSE_decode_t *)dPtr; - const unsigned tableSize = 1 << nbBits; - const unsigned tableMask = tableSize - 1; - const unsigned maxSV1 = tableMask + 1; - unsigned s; - - /* Sanity checks */ - if (nbBits < 1) - return ERROR(GENERIC); /* min size */ - - /* Build Decoding Table */ - DTableH->tableLog = (U16)nbBits; - DTableH->fastMode = 1; - for (s = 0; s < maxSV1; s++) { - dinfo[s].newState = 0; - dinfo[s].symbol = (BYTE)s; - dinfo[s].nbBits = (BYTE)nbBits; - } - - return 0; -} - -FORCE_INLINE size_t FSE_decompress_usingDTable_generic(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt, - const unsigned fast) -{ - BYTE *const ostart = (BYTE *)dst; - BYTE *op = ostart; - BYTE *const omax = op + maxDstSize; - BYTE *const olimit = omax - 3; - - BIT_DStream_t bitD; - FSE_DState_t state1; - FSE_DState_t state2; - - /* Init */ - CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize)); - - FSE_initDState(&state1, &bitD, dt); - FSE_initDState(&state2, &bitD, dt); - -#define FSE_GETSYMBOL(statePtr) fast ? 
FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) - - /* 4 symbols per loop */ - for (; (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished) & (op < olimit); op += 4) { - op[0] = FSE_GETSYMBOL(&state1); - - if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */ - BIT_reloadDStream(&bitD); - - op[1] = FSE_GETSYMBOL(&state2); - - if (FSE_MAX_TABLELOG * 4 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */ - { - if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { - op += 2; - break; - } - } - - op[2] = FSE_GETSYMBOL(&state1); - - if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */ - BIT_reloadDStream(&bitD); - - op[3] = FSE_GETSYMBOL(&state2); - } - - /* tail */ - /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ - while (1) { - if (op > (omax - 2)) - return ERROR(dstSize_tooSmall); - *op++ = FSE_GETSYMBOL(&state1); - if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) { - *op++ = FSE_GETSYMBOL(&state2); - break; - } - - if (op > (omax - 2)) - return ERROR(dstSize_tooSmall); - *op++ = FSE_GETSYMBOL(&state2); - if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) { - *op++ = FSE_GETSYMBOL(&state1); - break; - } - } - - return op - ostart; -} - -size_t FSE_decompress_usingDTable(void *dst, size_t originalSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt) -{ - const void *ptr = dt; - const FSE_DTableHeader *DTableH = (const FSE_DTableHeader *)ptr; - const U32 fastMode = DTableH->fastMode; - - /* select fast mode (static) */ - if (fastMode) - return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); - return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); -} - -size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize) -{ - const BYTE *const istart = (const BYTE *)cSrc; - const BYTE *ip = istart; - unsigned tableLog; - unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; - size_t NCountLength; - - FSE_DTable *dt; - short *counting; - size_t spaceUsed32 = 0; - - FSE_STATIC_ASSERT(sizeof(FSE_DTable) == sizeof(U32)); - - dt = (FSE_DTable *)((U32 *)workspace + spaceUsed32); - spaceUsed32 += FSE_DTABLE_SIZE_U32(maxLog); - counting = (short *)((U32 *)workspace + spaceUsed32); - spaceUsed32 += ALIGN(sizeof(short) * (FSE_MAX_SYMBOL_VALUE + 1), sizeof(U32)) >> 2; - - if ((spaceUsed32 << 2) > workspaceSize) - return ERROR(tableLog_tooLarge); - workspace = (U32 *)workspace + spaceUsed32; - workspaceSize -= (spaceUsed32 << 2); - - /* normal FSE decoding mode */ - NCountLength = FSE_readNCount(counting, &maxSymbolValue, &tableLog, istart, cSrcSize); - if (FSE_isError(NCountLength)) - return NCountLength; - // if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size; supposed to be already checked in NCountLength, only remaining - // case : NCountLength==cSrcSize */ - if (tableLog > maxLog) - return ERROR(tableLog_tooLarge); - ip += NCountLength; - cSrcSize -= NCountLength; - - CHECK_F(FSE_buildDTable_wksp(dt, counting, maxSymbolValue, tableLog, workspace, workspaceSize)); - - return FSE_decompress_usingDTable(dst, dstCapacity, ip, cSrcSize, dt); /* always return, even if it is an error code */ -} diff --git a/contrib/linux-kernel/lib/zstd/huf.h b/contrib/linux-kernel/lib/zstd/huf.h deleted file mode 100644 index 923218d12e2..00000000000 --- 
a/contrib/linux-kernel/lib/zstd/huf.h +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Huffman coder, part of New Generation Entropy library - * header file - * Copyright (C) 2013-2016, Yann Collet. - * - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License version 2 as published by the - * Free Software Foundation. This program is dual-licensed; you may select - * either version 2 of the GNU General Public License ("GPL") or BSD license - * ("BSD"). - * - * You can contact the author at : - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - */ -#ifndef HUF_H_298734234 -#define HUF_H_298734234 - -/* *** Dependencies *** */ -#include /* size_t */ - -/* *** Tool functions *** */ -#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ -size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ - -/* Error Management */ -unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ - -/* *** Advanced function *** */ - -/** HUF_compress4X_wksp() : -* Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */ -size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, - size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */ - -/* *** Dependencies *** */ -#include "mem.h" /* U32 */ - -/* *** Constants *** */ -#define HUF_TABLELOG_MAX 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ -#define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */ -#define HUF_SYMBOLVALUE_MAX 255 - -#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ -#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX) -#error "HUF_TABLELOG_MAX is too large !" 
-#endif - -/* **************************************** -* Static allocation -******************************************/ -/* HUF buffer bounds */ -#define HUF_CTABLEBOUND 129 -#define HUF_BLOCKBOUND(size) (size + (size >> 8) + 8) /* only true if incompressible pre-filtered with fast heuristic */ -#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ - -/* static allocation of HUF's Compression Table */ -#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \ - U32 name##hb[maxSymbolValue + 1]; \ - void *name##hv = &(name##hb); \ - HUF_CElt *name = (HUF_CElt *)(name##hv) /* no final ; */ - -/* static allocation of HUF's DTable */ -typedef U32 HUF_DTable; -#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1 << (maxTableLog))) -#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = {((U32)((maxTableLog)-1) * 0x01000001)} -#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = {((U32)(maxTableLog)*0x01000001)} - -/* The workspace must have alignment at least 4 and be at least this large */ -#define HUF_COMPRESS_WORKSPACE_SIZE (6 << 10) -#define HUF_COMPRESS_WORKSPACE_SIZE_U32 (HUF_COMPRESS_WORKSPACE_SIZE / sizeof(U32)) - -/* The workspace must have alignment at least 4 and be at least this large */ -#define HUF_DECOMPRESS_WORKSPACE_SIZE (3 << 10) -#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) - -/* **************************************** -* Advanced decompression functions -******************************************/ -size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize); /**< decodes RLE and uncompressed */ -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, - size_t workspaceSize); /**< considers RLE and uncompressed as errors */ -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, - size_t workspaceSize); /**< single-symbol decoder */ -size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, - size_t workspaceSize); /**< double-symbols decoder */ - -/* **************************************** -* HUF detailed API -******************************************/ -/*! -HUF_compress() does the following: -1. count symbol occurrence from source[] into table count[] using FSE_count() -2. (optional) refine tableLog using HUF_optimalTableLog() -3. build Huffman table from count using HUF_buildCTable() -4. save Huffman table to memory buffer using HUF_writeCTable_wksp() -5. encode the data stream using HUF_compress4X_usingCTable() - -The following API allows targeting specific sub-functions for advanced tasks. -For example, it's possible to compress several blocks using the same 'CTable', -or to save and regenerate 'CTable' using external methods. 
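
A minimal sketch of that advanced sequence (illustrative only : `input`,
`inputSize`, `out`, `outCapacity` and the single shared `wksp` buffer are
hypothetical, error checks are elided, and step 1 is shown with
FSE_count_wksp(), the workspace variant used by HUF_compress_internal() below) :

    U32 count[HUF_SYMBOLVALUE_MAX + 1];
    U32 wksp[HUF_COMPRESS_WORKSPACE_SIZE_U32];
    HUF_CREATE_STATIC_CTABLE(ctable, HUF_SYMBOLVALUE_MAX);
    unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    unsigned tableLog = HUF_TABLELOG_DEFAULT;
    BYTE *op = (BYTE *)out;

    FSE_count_wksp(count, &maxSymbolValue, (const BYTE *)input, inputSize, wksp);   (1)
    tableLog = HUF_optimalTableLog(tableLog, inputSize, maxSymbolValue);            (2)
    tableLog = (unsigned)HUF_buildCTable_wksp(ctable, count, maxSymbolValue,
                                              tableLog, wksp, sizeof(wksp));        (3)
    op += HUF_writeCTable_wksp(op, outCapacity, ctable, maxSymbolValue,
                               tableLog, wksp, sizeof(wksp));                       (4)
    HUF_compress4X_usingCTable(op, outCapacity - (size_t)(op - (BYTE *)out),
                               input, inputSize, ctable);                           (5)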
-*/ -/* FSE_count() : find it within "fse.h" */ -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); -typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */ -size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, unsigned maxSymbolValue, unsigned huffLog, void *workspace, size_t workspaceSize); -size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable); - -typedef enum { - HUF_repeat_none, /**< Cannot use the previous table */ - HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, - 4}X_repeat */ - HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */ -} HUF_repeat; -/** HUF_compress4X_repeat() : -* Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. -* If it uses hufTable it does not modify hufTable or repeat. -* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. -* If preferRepeat then the old table will always be used if valid. */ -size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, - size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, - int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */ - -/** HUF_buildCTable_wksp() : - * Same as HUF_buildCTable(), but using externally allocated scratch buffer. - * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned. - */ -size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize); - -/*! HUF_readStats() : - Read compact Huffman tree, saved by HUF_writeCTable(). - `huffWeight` is destination buffer. - @return : size read from `src` , or an error Code . - Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */ -size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, - void *workspace, size_t workspaceSize); - -/** HUF_readCTable() : -* Loading a CTable saved with HUF_writeCTable() */ -size_t HUF_readCTable_wksp(HUF_CElt *CTable, unsigned maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize); - -/* -HUF_decompress() does the following: -1. select the decompression algorithm (X2, X4) based on pre-computed heuristics -2. build Huffman table from save, using HUF_readDTableXn() -3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable -*/ - -/** HUF_selectDecoder() : -* Tells which decoder is likely to decode faster, -* based on a set of pre-determined metrics. -* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 . 
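* Illustrative dispatch, as a caller might write it (hypothetical `dst`,
* `dstSize`, `cSrc`, `cSrcSize`; no error handling) :
*
*     HUF_CREATE_STATIC_DTABLEX4(dctx, HUF_TABLELOG_MAX);
*     U32 wksp[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
*     size_t const r = HUF_selectDecoder(dstSize, cSrcSize)
*            ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, wksp, sizeof(wksp))
*            : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, wksp, sizeof(wksp));
*
* This is essentially the selection that HUF_decompress4X_DCtx_wksp() performs internally.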
-* Assumption : 0 < cSrcSize < dstSize <= 128 KB */
-U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize);
-
-size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
-size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
-
-size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
-size_t HUF_decompress4X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
-size_t HUF_decompress4X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
-
-/* single stream variants */
-
-size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
-		size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
-size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable);
-/** HUF_compress1X_repeat() :
-* Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
-* If it uses hufTable it does not modify hufTable or repeat.
-* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
-* If preferRepeat then the old table will always be used if valid. */
-size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
-		size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat,
-		int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
-
-size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize);
-size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
-		size_t workspaceSize); /**< single-symbol decoder */
-size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
-		size_t workspaceSize); /**< double-symbols decoder */
-
-size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize,
-		const HUF_DTable *DTable); /**< automatic selection of single or double symbol decoder, based on DTable */
-size_t HUF_decompress1X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
-size_t HUF_decompress1X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
-
-#endif /* HUF_H_298734234 */
diff --git a/contrib/linux-kernel/lib/zstd/huf_compress.c b/contrib/linux-kernel/lib/zstd/huf_compress.c
deleted file mode 100644
index 40055a7016e..00000000000
--- a/contrib/linux-kernel/lib/zstd/huf_compress.c
+++ /dev/null
@@ -1,770 +0,0 @@
-/*
- * Huffman encoder, part of New Generation Entropy library
- * Copyright (C) 2013-2016, Yann Collet.
- * - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License version 2 as published by the - * Free Software Foundation. This program is dual-licensed; you may select - * either version 2 of the GNU General Public License ("GPL") or BSD license - * ("BSD"). - * - * You can contact the author at : - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - */ - -/* ************************************************************** -* Includes -****************************************************************/ -#include "bitstream.h" -#include "fse.h" /* header compression */ -#include "huf.h" -#include -#include /* memcpy, memset */ - -/* ************************************************************** -* Error Management -****************************************************************/ -#define HUF_STATIC_ASSERT(c) \ - { \ - enum { HUF_static_assert = 1 / (int)(!!(c)) }; \ - } /* use only *after* variable declarations */ -#define CHECK_V_F(e, f) \ - size_t const e = f; \ - if (ERR_isError(e)) \ - return f -#define CHECK_F(f) \ - { \ - CHECK_V_F(_var_err__, f); \ - } - -/* ************************************************************** -* Utils -****************************************************************/ -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) -{ - return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); -} - -/* ******************************************************* -* HUF : Huffman block compression -*********************************************************/ -/* HUF_compressWeights() : - * Same as FSE_compress(), but dedicated to huff0's weights compression. - * The use case needs much less stack memory. - * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX. 
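 * Note 2 : a weight is a code length counted from the short side :
 * weight = huffLog + 1 - nbBits for a coded symbol, 0 for an unused one
 * (see bitsToWeight[] in HUF_writeCTable_wksp() below). For example, with
 * huffLog == 11, a symbol coded in 11 bits has weight 1, and a symbol coded
 * in 4 bits has weight 8; frequent symbols thus get large weights, and the
 * weight table is itself small and highly repetitive, which is why this
 * dedicated FSE pass pays off.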
- */ -#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 -size_t HUF_compressWeights_wksp(void *dst, size_t dstSize, const void *weightTable, size_t wtSize, void *workspace, size_t workspaceSize) -{ - BYTE *const ostart = (BYTE *)dst; - BYTE *op = ostart; - BYTE *const oend = ostart + dstSize; - - U32 maxSymbolValue = HUF_TABLELOG_MAX; - U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; - - FSE_CTable *CTable; - U32 *count; - S16 *norm; - size_t spaceUsed32 = 0; - - HUF_STATIC_ASSERT(sizeof(FSE_CTable) == sizeof(U32)); - - CTable = (FSE_CTable *)((U32 *)workspace + spaceUsed32); - spaceUsed32 += FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX); - count = (U32 *)workspace + spaceUsed32; - spaceUsed32 += HUF_TABLELOG_MAX + 1; - norm = (S16 *)((U32 *)workspace + spaceUsed32); - spaceUsed32 += ALIGN(sizeof(S16) * (HUF_TABLELOG_MAX + 1), sizeof(U32)) >> 2; - - if ((spaceUsed32 << 2) > workspaceSize) - return ERROR(tableLog_tooLarge); - workspace = (U32 *)workspace + spaceUsed32; - workspaceSize -= (spaceUsed32 << 2); - - /* init conditions */ - if (wtSize <= 1) - return 0; /* Not compressible */ - - /* Scan input and build symbol stats */ - { - CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize)); - if (maxCount == wtSize) - return 1; /* only a single symbol in src : rle */ - if (maxCount == 1) - return 0; /* each symbol present maximum once => not compressible */ - } - - tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); - CHECK_F(FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue)); - - /* Write table description header */ - { - CHECK_V_F(hSize, FSE_writeNCount(op, oend - op, norm, maxSymbolValue, tableLog)); - op += hSize; - } - - /* Compress */ - CHECK_F(FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, workspace, workspaceSize)); - { - CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable)); - if (cSize == 0) - return 0; /* not enough space for compressed data */ - op += cSize; - } - - return op - ostart; -} - -struct HUF_CElt_s { - U16 val; - BYTE nbBits; -}; /* typedef'd to HUF_CElt within "huf.h" */ - -/*! HUF_writeCTable_wksp() : - `CTable` : Huffman tree to save, using huf representation. 
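  Output layout, as implemented below : the first byte is a mode marker.
  A value < 128 is the size of an FSE-compressed weight stream that follows;
  a value >= 128 announces raw 4-bit weights, packed two per byte, for
  symbols 0..maxSymbolValue-1 (the last weight is left implicit, for the
  reader to rebuild from the Kraft sum).
  Worked example (hypothetical weights w0..w3, maxSymbolValue == 4) :
  op[0] = 128 + (4-1) = 0x83, op[1] = (w0<<4)+w1, op[2] = (w2<<4)+w3,
  total = ((4+1)/2)+1 = 3 bytes.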
- @return : size of saved CTable */ -size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, U32 maxSymbolValue, U32 huffLog, void *workspace, size_t workspaceSize) -{ - BYTE *op = (BYTE *)dst; - U32 n; - - BYTE *bitsToWeight; - BYTE *huffWeight; - size_t spaceUsed32 = 0; - - bitsToWeight = (BYTE *)((U32 *)workspace + spaceUsed32); - spaceUsed32 += ALIGN(HUF_TABLELOG_MAX + 1, sizeof(U32)) >> 2; - huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32); - spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX, sizeof(U32)) >> 2; - - if ((spaceUsed32 << 2) > workspaceSize) - return ERROR(tableLog_tooLarge); - workspace = (U32 *)workspace + spaceUsed32; - workspaceSize -= (spaceUsed32 << 2); - - /* check conditions */ - if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) - return ERROR(maxSymbolValue_tooLarge); - - /* convert to weight */ - bitsToWeight[0] = 0; - for (n = 1; n < huffLog + 1; n++) - bitsToWeight[n] = (BYTE)(huffLog + 1 - n); - for (n = 0; n < maxSymbolValue; n++) - huffWeight[n] = bitsToWeight[CTable[n].nbBits]; - - /* attempt weights compression by FSE */ - { - CHECK_V_F(hSize, HUF_compressWeights_wksp(op + 1, maxDstSize - 1, huffWeight, maxSymbolValue, workspace, workspaceSize)); - if ((hSize > 1) & (hSize < maxSymbolValue / 2)) { /* FSE compressed */ - op[0] = (BYTE)hSize; - return hSize + 1; - } - } - - /* write raw values as 4-bits (max : 15) */ - if (maxSymbolValue > (256 - 128)) - return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */ - if (((maxSymbolValue + 1) / 2) + 1 > maxDstSize) - return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */ - op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue - 1)); - huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ - for (n = 0; n < maxSymbolValue; n += 2) - op[(n / 2) + 1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n + 1]); - return ((maxSymbolValue + 1) / 2) + 1; -} - -size_t HUF_readCTable_wksp(HUF_CElt *CTable, U32 maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize) -{ - U32 *rankVal; - BYTE *huffWeight; - U32 tableLog = 0; - U32 nbSymbols = 0; - size_t readSize; - size_t spaceUsed32 = 0; - - rankVal = (U32 *)workspace + spaceUsed32; - spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1; - huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32); - spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; - - if ((spaceUsed32 << 2) > workspaceSize) - return ERROR(tableLog_tooLarge); - workspace = (U32 *)workspace + spaceUsed32; - workspaceSize -= (spaceUsed32 << 2); - - /* get symbol weights */ - readSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize); - if (ERR_isError(readSize)) - return readSize; - - /* check result */ - if (tableLog > HUF_TABLELOG_MAX) - return ERROR(tableLog_tooLarge); - if (nbSymbols > maxSymbolValue + 1) - return ERROR(maxSymbolValue_tooSmall); - - /* Prepare base value per rank */ - { - U32 n, nextRankStart = 0; - for (n = 1; n <= tableLog; n++) { - U32 curr = nextRankStart; - nextRankStart += (rankVal[n] << (n - 1)); - rankVal[n] = curr; - } - } - - /* fill nbBits */ - { - U32 n; - for (n = 0; n < nbSymbols; n++) { - const U32 w = huffWeight[n]; - CTable[n].nbBits = (BYTE)(tableLog + 1 - w); - } - } - - /* fill val */ - { - U16 nbPerRank[HUF_TABLELOG_MAX + 2] = {0}; /* support w=0=>n=tableLog+1 */ - U16 valPerRank[HUF_TABLELOG_MAX + 2] = {0}; - { - U32 n; - for (n = 0; n < nbSymbols; n++) - 
nbPerRank[CTable[n].nbBits]++;
-		}
-		/* determine starting value per rank */
-		valPerRank[tableLog + 1] = 0; /* for w==0 */
-		{
-			U16 min = 0;
-			U32 n;
-			for (n = tableLog; n > 0; n--) { /* start at n=tablelog <-> w=1 */
-				valPerRank[n] = min; /* get starting value within each rank */
-				min += nbPerRank[n];
-				min >>= 1;
-			}
-		}
-		/* assign value within rank, symbol order */
-		{
-			U32 n;
-			for (n = 0; n <= maxSymbolValue; n++)
-				CTable[n].val = valPerRank[CTable[n].nbBits]++;
-		}
-	}
-
-	return readSize;
-}
-
-typedef struct nodeElt_s {
-	U32 count;
-	U16 parent;
-	BYTE byte;
-	BYTE nbBits;
-} nodeElt;
-
-static U32 HUF_setMaxHeight(nodeElt *huffNode, U32 lastNonNull, U32 maxNbBits)
-{
-	const U32 largestBits = huffNode[lastNonNull].nbBits;
-	if (largestBits <= maxNbBits)
-		return largestBits; /* early exit : no elt > maxNbBits */
-
-	/* there are several too large elements (at least >= 2) */
-	{
-		int totalCost = 0;
-		const U32 baseCost = 1 << (largestBits - maxNbBits);
-		U32 n = lastNonNull;
-
-		while (huffNode[n].nbBits > maxNbBits) {
-			totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
-			huffNode[n].nbBits = (BYTE)maxNbBits;
-			n--;
-		} /* n stops at huffNode[n].nbBits <= maxNbBits */
-		while (huffNode[n].nbBits == maxNbBits)
-			n--; /* n ends at index of smallest symbol using < maxNbBits */
-
-		/* renorm totalCost */
-		totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */
-
-		/* repay normalized cost */
-		{
-			U32 const noSymbol = 0xF0F0F0F0;
-			U32 rankLast[HUF_TABLELOG_MAX + 2];
-			int pos;
-
-			/* Get pos of last (smallest) symbol per rank */
-			memset(rankLast, 0xF0, sizeof(rankLast));
-			{
-				U32 currNbBits = maxNbBits;
-				for (pos = n; pos >= 0; pos--) {
-					if (huffNode[pos].nbBits >= currNbBits)
-						continue;
-					currNbBits = huffNode[pos].nbBits; /* < maxNbBits */
-					rankLast[maxNbBits - currNbBits] = pos;
-				}
-			}
-
-			while (totalCost > 0) {
-				U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
-				for (; nBitsToDecrease > 1; nBitsToDecrease--) {
-					U32 highPos = rankLast[nBitsToDecrease];
-					U32 lowPos = rankLast[nBitsToDecrease - 1];
-					if (highPos == noSymbol)
-						continue;
-					if (lowPos == noSymbol)
-						break;
-					{
-						U32 const highTotal = huffNode[highPos].count;
-						U32 const lowTotal = 2 * huffNode[lowPos].count;
-						if (highTotal <= lowTotal)
-							break;
-					}
-				}
-				/* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !)
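				   For intuition : after the renorm above, totalCost counts over-allocation in
				   units of one maxNbBits-long codeword slot; each pass of this while (totalCost > 0)
				   loop repays 1 << (nBitsToDecrease - 1) such units by lengthening, by one bit,
				   the code of the last symbol of rank nBitsToDecrease.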
				*/
-				/* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
-				while ((nBitsToDecrease <= HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
-					nBitsToDecrease++;
-				totalCost -= 1 << (nBitsToDecrease - 1);
-				if (rankLast[nBitsToDecrease - 1] == noSymbol)
-					rankLast[nBitsToDecrease - 1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */
-				huffNode[rankLast[nBitsToDecrease]].nbBits++;
-				if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
-					rankLast[nBitsToDecrease] = noSymbol;
-				else {
-					rankLast[nBitsToDecrease]--;
-					if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits - nBitsToDecrease)
-						rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
-				}
-			} /* while (totalCost > 0) */
-
-			while (totalCost < 0) { /* Sometimes, cost correction overshoots */
-				if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0
-								  (using maxNbBits) */
-					while (huffNode[n].nbBits == maxNbBits)
-						n--;
-					huffNode[n + 1].nbBits--;
-					rankLast[1] = n + 1;
-					totalCost++;
-					continue;
-				}
-				huffNode[rankLast[1] + 1].nbBits--;
-				rankLast[1]++;
-				totalCost++;
-			}
-		}
-	} /* there are several too large elements (at least >= 2) */
-
-	return maxNbBits;
-}
-
-typedef struct {
-	U32 base;
-	U32 curr;
-} rankPos;
-
-static void HUF_sort(nodeElt *huffNode, const U32 *count, U32 maxSymbolValue)
-{
-	rankPos rank[32];
-	U32 n;
-
-	memset(rank, 0, sizeof(rank));
-	for (n = 0; n <= maxSymbolValue; n++) {
-		U32 r = BIT_highbit32(count[n] + 1);
-		rank[r].base++;
-	}
-	for (n = 30; n > 0; n--)
-		rank[n - 1].base += rank[n].base;
-	for (n = 0; n < 32; n++)
-		rank[n].curr = rank[n].base;
-	for (n = 0; n <= maxSymbolValue; n++) {
-		U32 const c = count[n];
-		U32 const r = BIT_highbit32(c + 1) + 1;
-		U32 pos = rank[r].curr++;
-		while ((pos > rank[r].base) && (c > huffNode[pos - 1].count))
-			huffNode[pos] = huffNode[pos - 1], pos--;
-		huffNode[pos].count = c;
-		huffNode[pos].byte = (BYTE)n;
-	}
-}
-
-/** HUF_buildCTable_wksp() :
- * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
- * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
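 * A minimal call sketch (illustrative : `count` is a hypothetical histogram
 * over symbols 0..maxSymbolValue, and error checks are elided) :
 *
 *     U32 wksp[1024];
 *     HUF_CREATE_STATIC_CTABLE(ctable, HUF_SYMBOLVALUE_MAX);
 *     size_t const maxBits = HUF_buildCTable_wksp(ctable, count, maxSymbolValue,
 *                                                 HUF_TABLELOG_DEFAULT, wksp, sizeof(wksp));
 *
 * On success, the returned value is the largest code length actually assigned
 * (<= HUF_TABLELOG_MAX), which is the huffLog to pass to HUF_writeCTable_wksp().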
- */
-#define STARTNODE (HUF_SYMBOLVALUE_MAX + 1)
-typedef nodeElt huffNodeTable[2 * HUF_SYMBOLVALUE_MAX + 1 + 1];
-size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize)
-{
-	nodeElt *const huffNode0 = (nodeElt *)workSpace;
-	nodeElt *const huffNode = huffNode0 + 1;
-	U32 n, nonNullRank;
-	int lowS, lowN;
-	U16 nodeNb = STARTNODE;
-	U32 nodeRoot;
-
-	/* safety checks */
-	if (wkspSize < sizeof(huffNodeTable))
-		return ERROR(GENERIC); /* workSpace is not large enough */
-	if (maxNbBits == 0)
-		maxNbBits = HUF_TABLELOG_DEFAULT;
-	if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
-		return ERROR(GENERIC);
-	memset(huffNode0, 0, sizeof(huffNodeTable));
-
-	/* sort, decreasing order */
-	HUF_sort(huffNode, count, maxSymbolValue);
-
-	/* init for parents */
-	nonNullRank = maxSymbolValue;
-	while (huffNode[nonNullRank].count == 0)
-		nonNullRank--;
-	lowS = nonNullRank;
-	nodeRoot = nodeNb + lowS - 1;
-	lowN = nodeNb;
-	huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS - 1].count;
-	huffNode[lowS].parent = huffNode[lowS - 1].parent = nodeNb;
-	nodeNb++;
-	lowS -= 2;
-	for (n = nodeNb; n <= nodeRoot; n++)
-		huffNode[n].count = (U32)(1U << 30);
-	huffNode0[0].count = (U32)(1U << 31); /* fake entry, strong barrier */
-
-	/* create parents */
-	while (nodeNb <= nodeRoot) {
-		U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
-		U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
-		huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
-		huffNode[n1].parent = huffNode[n2].parent = nodeNb;
-		nodeNb++;
-	}
-
-	/* distribute weights (unlimited tree height) */
-	huffNode[nodeRoot].nbBits = 0;
-	for (n = nodeRoot - 1; n >= STARTNODE; n--)
-		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1;
-	for (n = 0; n <= nonNullRank; n++)
-		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1;
-
-	/* enforce maxTableLog */
-	maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);
-
-	/* fill result into tree (val, nbBits) */
-	{
-		U16 nbPerRank[HUF_TABLELOG_MAX + 1] = {0};
-		U16 valPerRank[HUF_TABLELOG_MAX + 1] = {0};
-		if (maxNbBits > HUF_TABLELOG_MAX)
-			return ERROR(GENERIC); /* check fit into table */
-		for (n = 0; n <= nonNullRank; n++)
-			nbPerRank[huffNode[n].nbBits]++;
-		/* determine starting value per rank */
-		{
-			U16 min = 0;
-			for (n = maxNbBits; n > 0; n--) {
-				valPerRank[n] = min; /* get starting value within each rank */
-				min += nbPerRank[n];
-				min >>= 1;
-			}
-		}
-		for (n = 0; n <= maxSymbolValue; n++)
-			tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
-		for (n = 0; n <= maxSymbolValue; n++)
-			tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */
-	}
-
-	return maxNbBits;
-}
-
-static size_t HUF_estimateCompressedSize(HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
-{
-	size_t nbBits = 0;
-	int s;
-	for (s = 0; s <= (int)maxSymbolValue; ++s) {
-		nbBits += CTable[s].nbBits * count[s];
-	}
-	return nbBits >> 3;
-}
-
-static int HUF_validateCTable(const HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
-{
-	int bad = 0;
-	int s;
-	for (s = 0; s <= (int)maxSymbolValue; ++s) {
-		bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
-	}
-	return !bad;
-}
-
-static void HUF_encodeSymbol(BIT_CStream_t *bitCPtr, U32 symbol, const HUF_CElt *CTable)
-{
-	BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
-}
-
-size_t
HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } - -#define HUF_FLUSHBITS(s) BIT_flushBits(s) - -#define HUF_FLUSHBITS_1(stream) \ - if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 2 + 7) \ - HUF_FLUSHBITS(stream) - -#define HUF_FLUSHBITS_2(stream) \ - if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 4 + 7) \ - HUF_FLUSHBITS(stream) - -size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable) -{ - const BYTE *ip = (const BYTE *)src; - BYTE *const ostart = (BYTE *)dst; - BYTE *const oend = ostart + dstSize; - BYTE *op = ostart; - size_t n; - BIT_CStream_t bitC; - - /* init */ - if (dstSize < 8) - return 0; /* not enough space to compress */ - { - size_t const initErr = BIT_initCStream(&bitC, op, oend - op); - if (HUF_isError(initErr)) - return 0; - } - - n = srcSize & ~3; /* join to mod 4 */ - switch (srcSize & 3) { - case 3: HUF_encodeSymbol(&bitC, ip[n + 2], CTable); HUF_FLUSHBITS_2(&bitC); - case 2: HUF_encodeSymbol(&bitC, ip[n + 1], CTable); HUF_FLUSHBITS_1(&bitC); - case 1: HUF_encodeSymbol(&bitC, ip[n + 0], CTable); HUF_FLUSHBITS(&bitC); - case 0: - default:; - } - - for (; n > 0; n -= 4) { /* note : n&3==0 at this stage */ - HUF_encodeSymbol(&bitC, ip[n - 1], CTable); - HUF_FLUSHBITS_1(&bitC); - HUF_encodeSymbol(&bitC, ip[n - 2], CTable); - HUF_FLUSHBITS_2(&bitC); - HUF_encodeSymbol(&bitC, ip[n - 3], CTable); - HUF_FLUSHBITS_1(&bitC); - HUF_encodeSymbol(&bitC, ip[n - 4], CTable); - HUF_FLUSHBITS(&bitC); - } - - return BIT_closeCStream(&bitC); -} - -size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable) -{ - size_t const segmentSize = (srcSize + 3) / 4; /* first 3 segments */ - const BYTE *ip = (const BYTE *)src; - const BYTE *const iend = ip + srcSize; - BYTE *const ostart = (BYTE *)dst; - BYTE *const oend = ostart + dstSize; - BYTE *op = ostart; - - if (dstSize < 6 + 1 + 1 + 1 + 8) - return 0; /* minimum space to compress successfully */ - if (srcSize < 12) - return 0; /* no saving possible : too small input */ - op += 6; /* jumpTable */ - - { - CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable)); - if (cSize == 0) - return 0; - ZSTD_writeLE16(ostart, (U16)cSize); - op += cSize; - } - - ip += segmentSize; - { - CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable)); - if (cSize == 0) - return 0; - ZSTD_writeLE16(ostart + 2, (U16)cSize); - op += cSize; - } - - ip += segmentSize; - { - CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable)); - if (cSize == 0) - return 0; - ZSTD_writeLE16(ostart + 4, (U16)cSize); - op += cSize; - } - - ip += segmentSize; - { - CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, iend - ip, CTable)); - if (cSize == 0) - return 0; - op += cSize; - } - - return op - ostart; -} - -static size_t HUF_compressCTable_internal(BYTE *const ostart, BYTE *op, BYTE *const oend, const void *src, size_t srcSize, unsigned singleStream, - const HUF_CElt *CTable) -{ - size_t const cSize = - singleStream ? 
HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) : HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable);
-	if (HUF_isError(cSize)) {
-		return cSize;
-	}
-	if (cSize == 0) {
-		return 0;
-	} /* incompressible */
-	op += cSize;
-	/* check compressibility */
-	if ((size_t)(op - ostart) >= srcSize - 1) {
-		return 0;
-	}
-	return op - ostart;
-}
-
-/* `workSpace` must be a table of at least 1024 unsigned */
-static size_t HUF_compress_internal(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog,
-		unsigned singleStream, void *workSpace, size_t wkspSize, HUF_CElt *oldHufTable, HUF_repeat *repeat, int preferRepeat)
-{
-	BYTE *const ostart = (BYTE *)dst;
-	BYTE *const oend = ostart + dstSize;
-	BYTE *op = ostart;
-
-	U32 *count;
-	size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1);
-	HUF_CElt *CTable;
-	size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1);
-
-	/* checks & inits */
-	if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize)
-		return ERROR(GENERIC);
-	if (!srcSize)
-		return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */
-	if (!dstSize)
-		return 0; /* cannot fit within dst budget */
-	if (srcSize > HUF_BLOCKSIZE_MAX)
-		return ERROR(srcSize_wrong); /* curr block size limit */
-	if (huffLog > HUF_TABLELOG_MAX)
-		return ERROR(tableLog_tooLarge);
-	if (!maxSymbolValue)
-		maxSymbolValue = HUF_SYMBOLVALUE_MAX;
-	if (!huffLog)
-		huffLog = HUF_TABLELOG_DEFAULT;
-
-	count = (U32 *)workSpace;
-	workSpace = (BYTE *)workSpace + countSize;
-	wkspSize -= countSize;
-	CTable = (HUF_CElt *)workSpace;
-	workSpace = (BYTE *)workSpace + CTableSize;
-	wkspSize -= CTableSize;
-
-	/* Heuristic : If we don't need to check the validity of the old table, use the old table for small inputs */
-	if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
-		return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
-	}
-
-	/* Scan input and build symbol stats */
-	{
-		CHECK_V_F(largest, FSE_count_wksp(count, &maxSymbolValue, (const BYTE *)src, srcSize, (U32 *)workSpace));
-		if (largest == srcSize) {
-			*ostart = ((const BYTE *)src)[0];
-			return 1;
-		} /* single symbol, rle */
-		if (largest <= (srcSize >> 7) + 1)
-			return 0; /* Fast heuristic : not compressible enough */
-	}
-
-	/* Check validity of previous table */
-	if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) {
-		*repeat = HUF_repeat_none;
-	}
-	/* Heuristic : use existing table for small inputs */
-	if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
-		return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
-	}
-
-	/* Build Huffman Tree */
-	huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
-	{
-		CHECK_V_F(maxBits, HUF_buildCTable_wksp(CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize));
-		huffLog = (U32)maxBits;
-		/* Zero the unused symbols so we can check it for validity */
-		memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt));
-	}
-
-	/* Write table description header */
-	{
-		CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, CTable, maxSymbolValue, huffLog, workSpace, wkspSize));
-		/* Check if using the previous table will be beneficial */
-		if (repeat && *repeat != HUF_repeat_none) {
-			size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue);
-			size_t const newSize =
HUF_estimateCompressedSize(CTable, count, maxSymbolValue); - if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { - return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); - } - } - /* Use the new table */ - if (hSize + 12ul >= srcSize) { - return 0; - } - op += hSize; - if (repeat) { - *repeat = HUF_repeat_none; - } - if (oldHufTable) { - memcpy(oldHufTable, CTable, CTableSize); - } /* Save the new table */ - } - return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable); -} - -size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace, - size_t wkspSize) -{ - return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0); -} - -size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace, - size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat) -{ - return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat, - preferRepeat); -} - -size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace, - size_t wkspSize) -{ - return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0); -} - -size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace, - size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat) -{ - return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat, - preferRepeat); -} diff --git a/contrib/linux-kernel/lib/zstd/huf_decompress.c b/contrib/linux-kernel/lib/zstd/huf_decompress.c deleted file mode 100644 index 6526482047d..00000000000 --- a/contrib/linux-kernel/lib/zstd/huf_decompress.c +++ /dev/null @@ -1,960 +0,0 @@ -/* - * Huffman decoder, part of New Generation Entropy library - * Copyright (C) 2013-2016, Yann Collet. - * - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License version 2 as published by the - * Free Software Foundation. This program is dual-licensed; you may select - * either version 2 of the GNU General Public License ("GPL") or BSD license - * ("BSD"). - * - * You can contact the author at : - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - */ - -/* ************************************************************** -* Compiler specifics -****************************************************************/ -#define FORCE_INLINE static __always_inline - -/* ************************************************************** -* Dependencies -****************************************************************/ -#include "bitstream.h" /* BIT_* */ -#include "fse.h" /* header compression */ -#include "huf.h" -#include -#include -#include /* memcpy, memset */ - -/* ************************************************************** -* Error Management -****************************************************************/ -#define HUF_STATIC_ASSERT(c) \ - { \ - enum { HUF_static_assert = 1 / (int)(!!(c)) }; \ - } /* use only *after* variable declarations */ - -/*-***************************/ -/* generic DTableDesc */ -/*-***************************/ - -typedef struct { - BYTE maxTableLog; - BYTE tableType; - BYTE tableLog; - BYTE reserved; -} DTableDesc; - -static DTableDesc HUF_getDTableDesc(const HUF_DTable *table) -{ - DTableDesc dtd; - memcpy(&dtd, table, sizeof(dtd)); - return dtd; -} - -/*-***************************/ -/* single-symbol decoding */ -/*-***************************/ - -typedef struct { - BYTE byte; - BYTE nbBits; -} HUF_DEltX2; /* single-symbol decoding */ - -size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize) -{ - U32 tableLog = 0; - U32 nbSymbols = 0; - size_t iSize; - void *const dtPtr = DTable + 1; - HUF_DEltX2 *const dt = (HUF_DEltX2 *)dtPtr; - - U32 *rankVal; - BYTE *huffWeight; - size_t spaceUsed32 = 0; - - rankVal = (U32 *)workspace + spaceUsed32; - spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1; - huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32); - spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; - - if ((spaceUsed32 << 2) > workspaceSize) - return ERROR(tableLog_tooLarge); - workspace = (U32 *)workspace + spaceUsed32; - workspaceSize -= (spaceUsed32 << 2); - - HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable)); - /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... 
*/ - - iSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize); - if (HUF_isError(iSize)) - return iSize; - - /* Table header */ - { - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (tableLog > (U32)(dtd.maxTableLog + 1)) - return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */ - dtd.tableType = 0; - dtd.tableLog = (BYTE)tableLog; - memcpy(DTable, &dtd, sizeof(dtd)); - } - - /* Calculate starting value for each rank */ - { - U32 n, nextRankStart = 0; - for (n = 1; n < tableLog + 1; n++) { - U32 const curr = nextRankStart; - nextRankStart += (rankVal[n] << (n - 1)); - rankVal[n] = curr; - } - } - - /* fill DTable */ - { - U32 n; - for (n = 0; n < nbSymbols; n++) { - U32 const w = huffWeight[n]; - U32 const length = (1 << w) >> 1; - U32 u; - HUF_DEltX2 D; - D.byte = (BYTE)n; - D.nbBits = (BYTE)(tableLog + 1 - w); - for (u = rankVal[w]; u < rankVal[w] + length; u++) - dt[u] = D; - rankVal[w] += length; - } - } - - return iSize; -} - -static BYTE HUF_decodeSymbolX2(BIT_DStream_t *Dstream, const HUF_DEltX2 *dt, const U32 dtLog) -{ - size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ - BYTE const c = dt[val].byte; - BIT_skipBits(Dstream, dt[val].nbBits); - return c; -} - -#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog) - -#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ - if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \ - HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) - -#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ - if (ZSTD_64bits()) \ - HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) - -FORCE_INLINE size_t HUF_decodeStreamX2(BYTE *p, BIT_DStream_t *const bitDPtr, BYTE *const pEnd, const HUF_DEltX2 *const dt, const U32 dtLog) -{ - BYTE *const pStart = p; - - /* up to 4 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd - 4)) { - HUF_DECODE_SYMBOLX2_2(p, bitDPtr); - HUF_DECODE_SYMBOLX2_1(p, bitDPtr); - HUF_DECODE_SYMBOLX2_2(p, bitDPtr); - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - } - - /* closer to the end */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd)) - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - - /* no more data to retrieve from bitstream, hence no need to reload */ - while (p < pEnd) - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - - return pEnd - pStart; -} - -static size_t HUF_decompress1X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -{ - BYTE *op = (BYTE *)dst; - BYTE *const oend = op + dstSize; - const void *dtPtr = DTable + 1; - const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr; - BIT_DStream_t bitD; - DTableDesc const dtd = HUF_getDTableDesc(DTable); - U32 const dtLog = dtd.tableLog; - - { - size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); - if (HUF_isError(errorCode)) - return errorCode; - } - - HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog); - - /* check */ - if (!BIT_endOfDStream(&bitD)) - return ERROR(corruption_detected); - - return dstSize; -} - -size_t HUF_decompress1X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 0) - return ERROR(GENERIC); - return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); -} - -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t 
cSrcSize, void *workspace, size_t workspaceSize) -{ - const BYTE *ip = (const BYTE *)cSrc; - - size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize); - if (HUF_isError(hSize)) - return hSize; - if (hSize >= cSrcSize) - return ERROR(srcSize_wrong); - ip += hSize; - cSrcSize -= hSize; - - return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx); -} - -static size_t HUF_decompress4X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -{ - /* Check */ - if (cSrcSize < 10) - return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ - - { - const BYTE *const istart = (const BYTE *)cSrc; - BYTE *const ostart = (BYTE *)dst; - BYTE *const oend = ostart + dstSize; - const void *const dtPtr = DTable + 1; - const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr; - - /* Init */ - BIT_DStream_t bitD1; - BIT_DStream_t bitD2; - BIT_DStream_t bitD3; - BIT_DStream_t bitD4; - size_t const length1 = ZSTD_readLE16(istart); - size_t const length2 = ZSTD_readLE16(istart + 2); - size_t const length3 = ZSTD_readLE16(istart + 4); - size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); - const BYTE *const istart1 = istart + 6; /* jumpTable */ - const BYTE *const istart2 = istart1 + length1; - const BYTE *const istart3 = istart2 + length2; - const BYTE *const istart4 = istart3 + length3; - const size_t segmentSize = (dstSize + 3) / 4; - BYTE *const opStart2 = ostart + segmentSize; - BYTE *const opStart3 = opStart2 + segmentSize; - BYTE *const opStart4 = opStart3 + segmentSize; - BYTE *op1 = ostart; - BYTE *op2 = opStart2; - BYTE *op3 = opStart3; - BYTE *op4 = opStart4; - U32 endSignal; - DTableDesc const dtd = HUF_getDTableDesc(DTable); - U32 const dtLog = dtd.tableLog; - - if (length4 > cSrcSize) - return ERROR(corruption_detected); /* overflow */ - { - size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1); - if (HUF_isError(errorCode)) - return errorCode; - } - { - size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2); - if (HUF_isError(errorCode)) - return errorCode; - } - { - size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3); - if (HUF_isError(errorCode)) - return errorCode; - } - { - size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4); - if (HUF_isError(errorCode)) - return errorCode; - } - - /* 16-32 symbols per loop (4-8 symbols per stream) */ - endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); - for (; (endSignal == BIT_DStream_unfinished) && (op4 < (oend - 7));) { - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_1(op1, &bitD1); - HUF_DECODE_SYMBOLX2_1(op2, &bitD2); - HUF_DECODE_SYMBOLX2_1(op3, &bitD3); - HUF_DECODE_SYMBOLX2_1(op4, &bitD4); - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_0(op1, &bitD1); - HUF_DECODE_SYMBOLX2_0(op2, &bitD2); - HUF_DECODE_SYMBOLX2_0(op3, &bitD3); - HUF_DECODE_SYMBOLX2_0(op4, &bitD4); - endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); - } - - /* check corruption */ - if (op1 > opStart2) - return ERROR(corruption_detected); - if (op2 > opStart3) - return 
ERROR(corruption_detected); - if (op3 > opStart4) - return ERROR(corruption_detected); - /* note : op4 supposed already verified within main loop */ - - /* finish bitStreams one by one */ - HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); - HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); - HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); - HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); - - /* check */ - endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); - if (!endSignal) - return ERROR(corruption_detected); - - /* decoded size */ - return dstSize; - } -} - -size_t HUF_decompress4X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 0) - return ERROR(GENERIC); - return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); -} - -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) -{ - const BYTE *ip = (const BYTE *)cSrc; - - size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize); - if (HUF_isError(hSize)) - return hSize; - if (hSize >= cSrcSize) - return ERROR(srcSize_wrong); - ip += hSize; - cSrcSize -= hSize; - - return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx); -} - -/* *************************/ -/* double-symbols decoding */ -/* *************************/ -typedef struct { - U16 sequence; - BYTE nbBits; - BYTE length; -} HUF_DEltX4; /* double-symbols decoding */ - -typedef struct { - BYTE symbol; - BYTE weight; -} sortedSymbol_t; - -/* HUF_fillDTableX4Level2() : - * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ -static void HUF_fillDTableX4Level2(HUF_DEltX4 *DTable, U32 sizeLog, const U32 consumed, const U32 *rankValOrigin, const int minWeight, - const sortedSymbol_t *sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq) -{ - HUF_DEltX4 DElt; - U32 rankVal[HUF_TABLELOG_MAX + 1]; - - /* get pre-calculated rankVal */ - memcpy(rankVal, rankValOrigin, sizeof(rankVal)); - - /* fill skipped values */ - if (minWeight > 1) { - U32 i, skipSize = rankVal[minWeight]; - ZSTD_writeLE16(&(DElt.sequence), baseSeq); - DElt.nbBits = (BYTE)(consumed); - DElt.length = 1; - for (i = 0; i < skipSize; i++) - DTable[i] = DElt; - } - - /* fill DTable */ - { - U32 s; - for (s = 0; s < sortedListSize; s++) { /* note : sortedSymbols already skipped */ - const U32 symbol = sortedSymbols[s].symbol; - const U32 weight = sortedSymbols[s].weight; - const U32 nbBits = nbBitsBaseline - weight; - const U32 length = 1 << (sizeLog - nbBits); - const U32 start = rankVal[weight]; - U32 i = start; - const U32 end = start + length; - - ZSTD_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8))); - DElt.nbBits = (BYTE)(nbBits + consumed); - DElt.length = 2; - do { - DTable[i++] = DElt; - } while (i < end); /* since length >= 1 */ - - rankVal[weight] += length; - } - } -} - -typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1]; -typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1]; - -static void HUF_fillDTableX4(HUF_DEltX4 *DTable, const U32 targetLog, const sortedSymbol_t *sortedList, const U32 sortedListSize, const U32 *rankStart, - rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) -{ - U32 rankVal[HUF_TABLELOG_MAX + 1]; - const int scaleLog = 
nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ - const U32 minBits = nbBitsBaseline - maxWeight; - U32 s; - - memcpy(rankVal, rankValOrigin, sizeof(rankVal)); - - /* fill DTable */ - for (s = 0; s < sortedListSize; s++) { - const U16 symbol = sortedList[s].symbol; - const U32 weight = sortedList[s].weight; - const U32 nbBits = nbBitsBaseline - weight; - const U32 start = rankVal[weight]; - const U32 length = 1 << (targetLog - nbBits); - - if (targetLog - nbBits >= minBits) { /* enough room for a second symbol */ - U32 sortedRank; - int minWeight = nbBits + scaleLog; - if (minWeight < 1) - minWeight = 1; - sortedRank = rankStart[minWeight]; - HUF_fillDTableX4Level2(DTable + start, targetLog - nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList + sortedRank, - sortedListSize - sortedRank, nbBitsBaseline, symbol); - } else { - HUF_DEltX4 DElt; - ZSTD_writeLE16(&(DElt.sequence), symbol); - DElt.nbBits = (BYTE)(nbBits); - DElt.length = 1; - { - U32 const end = start + length; - U32 u; - for (u = start; u < end; u++) - DTable[u] = DElt; - } - } - rankVal[weight] += length; - } -} - -size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize) -{ - U32 tableLog, maxW, sizeOfSort, nbSymbols; - DTableDesc dtd = HUF_getDTableDesc(DTable); - U32 const maxTableLog = dtd.maxTableLog; - size_t iSize; - void *dtPtr = DTable + 1; /* force compiler to avoid strict-aliasing */ - HUF_DEltX4 *const dt = (HUF_DEltX4 *)dtPtr; - U32 *rankStart; - - rankValCol_t *rankVal; - U32 *rankStats; - U32 *rankStart0; - sortedSymbol_t *sortedSymbol; - BYTE *weightList; - size_t spaceUsed32 = 0; - - HUF_STATIC_ASSERT((sizeof(rankValCol_t) & 3) == 0); - - rankVal = (rankValCol_t *)((U32 *)workspace + spaceUsed32); - spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2; - rankStats = (U32 *)workspace + spaceUsed32; - spaceUsed32 += HUF_TABLELOG_MAX + 1; - rankStart0 = (U32 *)workspace + spaceUsed32; - spaceUsed32 += HUF_TABLELOG_MAX + 2; - sortedSymbol = (sortedSymbol_t *)((U32 *)workspace + spaceUsed32); - spaceUsed32 += ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2; - weightList = (BYTE *)((U32 *)workspace + spaceUsed32); - spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; - - if ((spaceUsed32 << 2) > workspaceSize) - return ERROR(tableLog_tooLarge); - workspace = (U32 *)workspace + spaceUsed32; - workspaceSize -= (spaceUsed32 << 2); - - rankStart = rankStart0 + 1; - memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1)); - - HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */ - if (maxTableLog > HUF_TABLELOG_MAX) - return ERROR(tableLog_tooLarge); - /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... 
*/ - - iSize = HUF_readStats_wksp(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize); - if (HUF_isError(iSize)) - return iSize; - - /* check result */ - if (tableLog > maxTableLog) - return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ - - /* find maxWeight */ - for (maxW = tableLog; rankStats[maxW] == 0; maxW--) { - } /* necessarily finds a solution before 0 */ - - /* Get start index of each weight */ - { - U32 w, nextRankStart = 0; - for (w = 1; w < maxW + 1; w++) { - U32 curr = nextRankStart; - nextRankStart += rankStats[w]; - rankStart[w] = curr; - } - rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ - sizeOfSort = nextRankStart; - } - - /* sort symbols by weight */ - { - U32 s; - for (s = 0; s < nbSymbols; s++) { - U32 const w = weightList[s]; - U32 const r = rankStart[w]++; - sortedSymbol[r].symbol = (BYTE)s; - sortedSymbol[r].weight = (BYTE)w; - } - rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */ - } - - /* Build rankVal */ - { - U32 *const rankVal0 = rankVal[0]; - { - int const rescale = (maxTableLog - tableLog) - 1; /* tableLog <= maxTableLog */ - U32 nextRankVal = 0; - U32 w; - for (w = 1; w < maxW + 1; w++) { - U32 curr = nextRankVal; - nextRankVal += rankStats[w] << (w + rescale); - rankVal0[w] = curr; - } - } - { - U32 const minBits = tableLog + 1 - maxW; - U32 consumed; - for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) { - U32 *const rankValPtr = rankVal[consumed]; - U32 w; - for (w = 1; w < maxW + 1; w++) { - rankValPtr[w] = rankVal0[w] >> consumed; - } - } - } - } - - HUF_fillDTableX4(dt, maxTableLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog + 1); - - dtd.tableLog = (BYTE)maxTableLog; - dtd.tableType = 1; - memcpy(DTable, &dtd, sizeof(dtd)); - return iSize; -} - -static U32 HUF_decodeSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog) -{ - size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ - memcpy(op, dt + val, 2); - BIT_skipBits(DStream, dt[val].nbBits); - return dt[val].length; -} - -static U32 HUF_decodeLastSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog) -{ - size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ - memcpy(op, dt + val, 1); - if (dt[val].length == 1) - BIT_skipBits(DStream, dt[val].nbBits); - else { - if (DStream->bitsConsumed < (sizeof(DStream->bitContainer) * 8)) { - BIT_skipBits(DStream, dt[val].nbBits); - if (DStream->bitsConsumed > (sizeof(DStream->bitContainer) * 8)) - /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */ - DStream->bitsConsumed = (sizeof(DStream->bitContainer) * 8); - } - } - return 1; -} - -#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) - -#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ - if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \ - ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) - -#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ - if (ZSTD_64bits()) \ - ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) - -FORCE_INLINE size_t HUF_decodeStreamX4(BYTE *p, BIT_DStream_t *bitDPtr, BYTE *const pEnd, const HUF_DEltX4 *const dt, const U32 dtLog) -{ - BYTE *const pStart = p; - - /* up to 8 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd - (sizeof(bitDPtr->bitContainer) - 1))) { - HUF_DECODE_SYMBOLX4_2(p, bitDPtr); - HUF_DECODE_SYMBOLX4_1(p, bitDPtr); - HUF_DECODE_SYMBOLX4_2(p, bitDPtr); - HUF_DECODE_SYMBOLX4_0(p, bitDPtr); - } - - /* closer to end : up to 2 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd - 2)) - HUF_DECODE_SYMBOLX4_0(p, bitDPtr); - - while (p <= pEnd - 2) - HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ - - if (p < pEnd) - p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); - - return p - pStart; -} - -static size_t HUF_decompress1X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -{ - BIT_DStream_t bitD; - - /* Init */ - { - size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); - if (HUF_isError(errorCode)) - return errorCode; - } - - /* decode */ - { - BYTE *const ostart = (BYTE *)dst; - BYTE *const oend = ostart + dstSize; - const void *const dtPtr = DTable + 1; /* force compiler to not use strict-aliasing */ - const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr; - DTableDesc const dtd = HUF_getDTableDesc(DTable); - HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog); - } - - /* check */ - if (!BIT_endOfDStream(&bitD)) - return ERROR(corruption_detected); - - /* decoded size */ - return dstSize; -} - -size_t HUF_decompress1X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 1) - return ERROR(GENERIC); - return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); -} - -size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) -{ - const BYTE *ip = (const BYTE *)cSrc; - - size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize); - if (HUF_isError(hSize)) - return hSize; - if (hSize >= cSrcSize) - return ERROR(srcSize_wrong); - ip += hSize; - cSrcSize -= hSize; - - return HUF_decompress1X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx); -} - -static size_t HUF_decompress4X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -{ - if (cSrcSize < 10) - return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ - - { - const BYTE *const istart = (const BYTE *)cSrc; - BYTE *const ostart = (BYTE *)dst; - BYTE *const oend = ostart + dstSize; - const void *const dtPtr = DTable + 1; - const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr; - - /* Init */ - BIT_DStream_t 
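For orientation, the initialization just below parses zstd's 4-stream Huffman framing: a 6-byte jump table holding three little-endian 16-bit stream sizes, with the fourth size implied by the total. A standalone sketch of that framing, using hypothetical names rather than the library's:

#include <stddef.h>
#include <stdint.h>

typedef struct {
	const uint8_t *start[4];
	size_t size[4];
} huf4_layout;

/* returns 0 on success, -1 if the declared sizes overflow srcSize */
static int huf4_parse_layout(const uint8_t *src, size_t srcSize, huf4_layout *out)
{
	size_t s1, s2, s3;
	if (srcSize < 10)
		return -1; /* 6-byte jump table + at least 1 byte per stream */
	s1 = (size_t)src[0] | ((size_t)src[1] << 8); /* little-endian 16-bit sizes */
	s2 = (size_t)src[2] | ((size_t)src[3] << 8);
	s3 = (size_t)src[4] | ((size_t)src[5] << 8);
	if (s1 + s2 + s3 + 6 > srcSize)
		return -1;
	out->size[0] = s1;
	out->size[1] = s2;
	out->size[2] = s3;
	out->size[3] = srcSize - 6 - s1 - s2 - s3; /* 4th size is implicit */
	out->start[0] = src + 6;
	out->start[1] = out->start[0] + s1;
	out->start[2] = out->start[1] + s2;
	out->start[3] = out->start[2] + s3;
	return 0;
}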
bitD1; - BIT_DStream_t bitD2; - BIT_DStream_t bitD3; - BIT_DStream_t bitD4; - size_t const length1 = ZSTD_readLE16(istart); - size_t const length2 = ZSTD_readLE16(istart + 2); - size_t const length3 = ZSTD_readLE16(istart + 4); - size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); - const BYTE *const istart1 = istart + 6; /* jumpTable */ - const BYTE *const istart2 = istart1 + length1; - const BYTE *const istart3 = istart2 + length2; - const BYTE *const istart4 = istart3 + length3; - size_t const segmentSize = (dstSize + 3) / 4; - BYTE *const opStart2 = ostart + segmentSize; - BYTE *const opStart3 = opStart2 + segmentSize; - BYTE *const opStart4 = opStart3 + segmentSize; - BYTE *op1 = ostart; - BYTE *op2 = opStart2; - BYTE *op3 = opStart3; - BYTE *op4 = opStart4; - U32 endSignal; - DTableDesc const dtd = HUF_getDTableDesc(DTable); - U32 const dtLog = dtd.tableLog; - - if (length4 > cSrcSize) - return ERROR(corruption_detected); /* overflow */ - { - size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1); - if (HUF_isError(errorCode)) - return errorCode; - } - { - size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2); - if (HUF_isError(errorCode)) - return errorCode; - } - { - size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3); - if (HUF_isError(errorCode)) - return errorCode; - } - { - size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4); - if (HUF_isError(errorCode)) - return errorCode; - } - - /* 16-32 symbols per loop (4-8 symbols per stream) */ - endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); - for (; (endSignal == BIT_DStream_unfinished) & (op4 < (oend - (sizeof(bitD4.bitContainer) - 1)));) { - HUF_DECODE_SYMBOLX4_2(op1, &bitD1); - HUF_DECODE_SYMBOLX4_2(op2, &bitD2); - HUF_DECODE_SYMBOLX4_2(op3, &bitD3); - HUF_DECODE_SYMBOLX4_2(op4, &bitD4); - HUF_DECODE_SYMBOLX4_1(op1, &bitD1); - HUF_DECODE_SYMBOLX4_1(op2, &bitD2); - HUF_DECODE_SYMBOLX4_1(op3, &bitD3); - HUF_DECODE_SYMBOLX4_1(op4, &bitD4); - HUF_DECODE_SYMBOLX4_2(op1, &bitD1); - HUF_DECODE_SYMBOLX4_2(op2, &bitD2); - HUF_DECODE_SYMBOLX4_2(op3, &bitD3); - HUF_DECODE_SYMBOLX4_2(op4, &bitD4); - HUF_DECODE_SYMBOLX4_0(op1, &bitD1); - HUF_DECODE_SYMBOLX4_0(op2, &bitD2); - HUF_DECODE_SYMBOLX4_0(op3, &bitD3); - HUF_DECODE_SYMBOLX4_0(op4, &bitD4); - - endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); - } - - /* check corruption */ - if (op1 > opStart2) - return ERROR(corruption_detected); - if (op2 > opStart3) - return ERROR(corruption_detected); - if (op3 > opStart4) - return ERROR(corruption_detected); - /* note : op4 already verified within main loop */ - - /* finish bitStreams one by one */ - HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); - HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); - HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); - HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); - - /* check */ - { - U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); - if (!endCheck) - return ERROR(corruption_detected); - } - - /* decoded size */ - return dstSize; - } -} - -size_t HUF_decompress4X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 1) - return ERROR(GENERIC); - return 
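As a usage sketch of the workspace-based entry points that follow; the DTable declaration macro and the workspace size are assumptions borrowed from upstream huf.h, not guarantees of this port:

/* hypothetical one-shot decode of a Huffman-compressed block */
static size_t example_huf_decode(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize)
{
	HUF_CREATE_STATIC_DTABLEX4(dtable, HUF_TABLELOG_MAX); /* assumed available from huf.h */
	U32 workspace[1024]; /* illustrative size; the port's headers define the exact requirement */
	return HUF_decompress4X4_DCtx_wksp(dtable, dst, dstSize, cSrc, cSrcSize,
					   workspace, sizeof(workspace));
}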
HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); -} - -size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) -{ - const BYTE *ip = (const BYTE *)cSrc; - - size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize); - if (HUF_isError(hSize)) - return hSize; - if (hSize >= cSrcSize) - return ERROR(srcSize_wrong); - ip += hSize; - cSrcSize -= hSize; - - return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx); -} - -/* ********************************/ -/* Generic decompression selector */ -/* ********************************/ - -size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -{ - DTableDesc const dtd = HUF_getDTableDesc(DTable); - return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) - : HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable); -} - -size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) -{ - DTableDesc const dtd = HUF_getDTableDesc(DTable); - return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) - : HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable); -} - -typedef struct { - U32 tableTime; - U32 decode256Time; -} algo_time_t; -static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = { - /* single, double, quad */ - {{0, 0}, {1, 1}, {2, 2}}, /* Q==0 : impossible */ - {{0, 0}, {1, 1}, {2, 2}}, /* Q==1 : impossible */ - {{38, 130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ - {{448, 128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ - {{556, 128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ - {{714, 128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ - {{883, 128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ - {{897, 128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ - {{926, 128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ - {{947, 128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ - {{1107, 128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ - {{1177, 128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ - {{1242, 128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ - {{1349, 128}, {2644, 106}, {5260, 106}}, /* Q ==13 : 81-87% */ - {{1455, 128}, {2422, 124}, {4174, 124}}, /* Q ==14 : 87-93% */ - {{722, 128}, {1891, 145}, {1936, 146}}, /* Q ==15 : 93-99% */ -}; - -/** HUF_selectDecoder() : -* Tells which decoder is likely to decode faster, -* based on a set of pre-determined metrics. -* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 . 
-* Assumption : 0 < cSrcSize < dstSize <= 128 KB */ -U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize) -{ - /* decoder timing evaluation */ - U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */ - U32 const D256 = (U32)(dstSize >> 8); - U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256); - U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256); - DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */ - - return DTime1 < DTime0; -} - -typedef size_t (*decompressionAlgo)(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize); - -size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) -{ - /* validation checks */ - if (dstSize == 0) - return ERROR(dstSize_tooSmall); - if (cSrcSize > dstSize) - return ERROR(corruption_detected); /* invalid */ - if (cSrcSize == dstSize) { - memcpy(dst, cSrc, dstSize); - return dstSize; - } /* not compressed */ - if (cSrcSize == 1) { - memset(dst, *(const BYTE *)cSrc, dstSize); - return dstSize; - } /* RLE */ - - { - U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); - return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize) - : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize); - } -} - -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) -{ - /* validation checks */ - if (dstSize == 0) - return ERROR(dstSize_tooSmall); - if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) - return ERROR(corruption_detected); /* invalid */ - - { - U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); - return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize) - : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize); - } -} - -size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) -{ - /* validation checks */ - if (dstSize == 0) - return ERROR(dstSize_tooSmall); - if (cSrcSize > dstSize) - return ERROR(corruption_detected); /* invalid */ - if (cSrcSize == dstSize) { - memcpy(dst, cSrc, dstSize); - return dstSize; - } /* not compressed */ - if (cSrcSize == 1) { - memset(dst, *(const BYTE *)cSrc, dstSize); - return dstSize; - } /* RLE */ - - { - U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); - return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize) - : HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize); - } -} diff --git a/contrib/linux-kernel/lib/zstd/mem.h b/contrib/linux-kernel/lib/zstd/mem.h deleted file mode 100644 index 42a697b745f..00000000000 --- a/contrib/linux-kernel/lib/zstd/mem.h +++ /dev/null @@ -1,149 +0,0 @@ -/** - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of https://github.com/facebook/zstd. - * - * This program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License version 2 as published by the - * Free Software Foundation. 
This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-#ifndef MEM_H_MODULE
-#define MEM_H_MODULE
-
-/*-****************************************
-* Dependencies
-******************************************/
-#include <asm/unaligned.h>
-#include <linux/string.h> /* memcpy */
-#include <linux/types.h>  /* size_t, ptrdiff_t */
-
-/*-****************************************
-* Compiler specifics
-******************************************/
-#define ZSTD_STATIC static __inline __attribute__((unused))
-
-/*-**************************************************************
-* Basic Types
-*****************************************************************/
-typedef uint8_t BYTE;
-typedef uint16_t U16;
-typedef int16_t S16;
-typedef uint32_t U32;
-typedef int32_t S32;
-typedef uint64_t U64;
-typedef int64_t S64;
-typedef ptrdiff_t iPtrDiff;
-typedef uintptr_t uPtrDiff;
-
-/*-**************************************************************
-* Memory I/O
-*****************************************************************/
-ZSTD_STATIC unsigned ZSTD_32bits(void) { return sizeof(size_t) == 4; }
-ZSTD_STATIC unsigned ZSTD_64bits(void) { return sizeof(size_t) == 8; }
-
-#if defined(__LITTLE_ENDIAN)
-#define ZSTD_LITTLE_ENDIAN 1
-#else
-#define ZSTD_LITTLE_ENDIAN 0
-#endif
-
-ZSTD_STATIC unsigned ZSTD_isLittleEndian(void) { return ZSTD_LITTLE_ENDIAN; }
-
-ZSTD_STATIC U16 ZSTD_read16(const void *memPtr) { return get_unaligned((const U16 *)memPtr); }
-
-ZSTD_STATIC U32 ZSTD_read32(const void *memPtr) { return get_unaligned((const U32 *)memPtr); }
-
-ZSTD_STATIC U64 ZSTD_read64(const void *memPtr) { return get_unaligned((const U64 *)memPtr); }
-
-ZSTD_STATIC size_t ZSTD_readST(const void *memPtr) { return get_unaligned((const size_t *)memPtr); }
-
-ZSTD_STATIC void ZSTD_write16(void *memPtr, U16 value) { put_unaligned(value, (U16 *)memPtr); }
-
-ZSTD_STATIC void ZSTD_write32(void *memPtr, U32 value) { put_unaligned(value, (U32 *)memPtr); }
-
-ZSTD_STATIC void ZSTD_write64(void *memPtr, U64 value) { put_unaligned(value, (U64 *)memPtr); }
-
-/*=== Little endian r/w ===*/
-
-ZSTD_STATIC U16 ZSTD_readLE16(const void *memPtr) { return get_unaligned_le16(memPtr); }
-
-ZSTD_STATIC void ZSTD_writeLE16(void *memPtr, U16 val) { put_unaligned_le16(val, memPtr); }
-
-ZSTD_STATIC U32 ZSTD_readLE24(const void *memPtr) { return ZSTD_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16); }
-
-ZSTD_STATIC void ZSTD_writeLE24(void *memPtr, U32 val)
-{
-	ZSTD_writeLE16(memPtr, (U16)val);
-	((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
-}
-
-ZSTD_STATIC U32 ZSTD_readLE32(const void *memPtr) { return get_unaligned_le32(memPtr); }
-
-ZSTD_STATIC void ZSTD_writeLE32(void *memPtr, U32 val32) { put_unaligned_le32(val32, memPtr); }
-
-ZSTD_STATIC U64 ZSTD_readLE64(const void *memPtr) { return get_unaligned_le64(memPtr); }
-
-ZSTD_STATIC void ZSTD_writeLE64(void *memPtr, U64 val64) { put_unaligned_le64(val64, memPtr); }
-
-ZSTD_STATIC size_t ZSTD_readLEST(const void *memPtr)
-{
-	if (ZSTD_32bits())
-		return (size_t)ZSTD_readLE32(memPtr);
-	else
-		return (size_t)ZSTD_readLE64(memPtr);
-}
-
-ZSTD_STATIC void ZSTD_writeLEST(void *memPtr, size_t val)
-{
-	if (ZSTD_32bits())
-		ZSTD_writeLE32(memPtr, (U32)val);
-	else
-		ZSTD_writeLE64(memPtr, (U64)val);
-}
-
-/*=== Big endian r/w ===*/
-
-ZSTD_STATIC U32 ZSTD_readBE32(const void *memPtr) { return get_unaligned_be32(memPtr); }
-
-ZSTD_STATIC void ZSTD_writeBE32(void *memPtr, U32 val32) { put_unaligned_be32(val32, memPtr); }
-
-ZSTD_STATIC U64 ZSTD_readBE64(const void *memPtr) { return get_unaligned_be64(memPtr); }
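For illustration, the get_unaligned helpers above are kernel-only; a portable analog of the same little-endian read contract can rely on memcpy, which compilers lower to a single load on x86-64 and arm64:

#include <stdint.h>
#include <string.h>

static uint32_t readLE32_portable(const void *p)
{
	uint32_t v;
	memcpy(&v, p, sizeof(v)); /* defined behavior even for unaligned p */
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
	v = __builtin_bswap32(v); /* normalize: the stored format is little-endian */
#endif
	return v;
}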
-
-ZSTD_STATIC void ZSTD_writeBE64(void *memPtr, U64 val64) { put_unaligned_be64(val64, memPtr); }
-
-ZSTD_STATIC size_t ZSTD_readBEST(const void *memPtr)
-{
-	if (ZSTD_32bits())
-		return (size_t)ZSTD_readBE32(memPtr);
-	else
-		return (size_t)ZSTD_readBE64(memPtr);
-}
-
-ZSTD_STATIC void ZSTD_writeBEST(void *memPtr, size_t val)
-{
-	if (ZSTD_32bits())
-		ZSTD_writeBE32(memPtr, (U32)val);
-	else
-		ZSTD_writeBE64(memPtr, (U64)val);
-}
-
-/* function safe only for comparisons */
-ZSTD_STATIC U32 ZSTD_readMINMATCH(const void *memPtr, U32 length)
-{
-	switch (length) {
-	default:
-	case 4: return ZSTD_read32(memPtr);
-	case 3:
-		if (ZSTD_isLittleEndian())
-			return ZSTD_read32(memPtr) << 8;
-		else
-			return ZSTD_read32(memPtr) >> 8;
-	}
-}
-
-#endif /* MEM_H_MODULE */
diff --git a/contrib/linux-kernel/lib/zstd/zstd_common.c b/contrib/linux-kernel/lib/zstd/zstd_common.c
deleted file mode 100644
index e5f06d77142..00000000000
--- a/contrib/linux-kernel/lib/zstd/zstd_common.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-/*-*************************************
-* Dependencies
-***************************************/
-#include "error_private.h"
-#include "zstd_internal.h" /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */
-#include <linux/kernel.h>
-
-/*=***************************************************************
-* Custom allocator
-****************************************************************/
-
-#define stack_push(stack, size)                                 \
-	({                                                      \
-		void *const ptr = ZSTD_PTR_ALIGN((stack)->ptr); \
-		(stack)->ptr = (char *)ptr + (size);            \
-		(stack)->ptr <= (stack)->end ? ptr : NULL;      \
-	})
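The bump-pointer allocator defined by stack_push services every internal allocation from one caller-provided buffer; a hypothetical caller, with an illustrative workspace size, would wire it up as follows:

/* carve all zstd allocations out of a single flat buffer */
static char workspace[16 * 1024]; /* size is illustrative only */

void example_init(void)
{
	ZSTD_customMem mem = ZSTD_initStack(workspace, sizeof(workspace));
	if (mem.customAlloc == NULL)
		return; /* workspace too small or misaligned */
	/* every ZSTD_malloc() against mem now bumps the same buffer;
	 * ZSTD_stackFree() is a no-op, so the caller releases everything
	 * at once by discarding the workspace */
}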
-
-ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize)
-{
-	ZSTD_customMem stackMem = {ZSTD_stackAlloc, ZSTD_stackFree, workspace};
-	ZSTD_stack *stack = (ZSTD_stack *)workspace;
-	/* Verify preconditions */
-	if (!workspace || workspaceSize < sizeof(ZSTD_stack) || workspace != ZSTD_PTR_ALIGN(workspace)) {
-		ZSTD_customMem error = {NULL, NULL, NULL};
-		return error;
-	}
-	/* Initialize the stack */
-	stack->ptr = workspace;
-	stack->end = (char *)workspace + workspaceSize;
-	stack_push(stack, sizeof(ZSTD_stack));
-	return stackMem;
-}
-
-void *ZSTD_stackAllocAll(void *opaque, size_t *size)
-{
-	ZSTD_stack *stack = (ZSTD_stack *)opaque;
-	*size = (BYTE const *)stack->end - (BYTE *)ZSTD_PTR_ALIGN(stack->ptr);
-	return stack_push(stack, *size);
-}
-
-void *ZSTD_stackAlloc(void *opaque, size_t size)
-{
-	ZSTD_stack *stack = (ZSTD_stack *)opaque;
-	return stack_push(stack, size);
-}
-void ZSTD_stackFree(void *opaque, void *address)
-{
-	(void)opaque;
-	(void)address;
-}
-
-void *ZSTD_malloc(size_t size, ZSTD_customMem customMem) { return customMem.customAlloc(customMem.opaque, size); }
-
-void ZSTD_free(void *ptr, ZSTD_customMem customMem)
-{
-	if (ptr != NULL)
-		customMem.customFree(customMem.opaque, ptr);
-}
diff --git a/contrib/linux-kernel/lib/zstd/zstd_internal.h b/contrib/linux-kernel/lib/zstd/zstd_internal.h
deleted file mode 100644
index a0fb83e34f1..00000000000
--- a/contrib/linux-kernel/lib/zstd/zstd_internal.h
+++ /dev/null
@@ -1,261 +0,0 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-#ifndef ZSTD_CCOMMON_H_MODULE
-#define ZSTD_CCOMMON_H_MODULE
-
-/*-*******************************************************
-* Compiler specifics
-*********************************************************/
-#define FORCE_INLINE static __always_inline
-#define FORCE_NOINLINE static noinline
-
-/*-*************************************
-* Dependencies
-***************************************/
-#include "error_private.h"
-#include "mem.h"
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/xxhash.h>
-#include <linux/zstd.h>
-
-/*-*************************************
-* shared macros
-***************************************/
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#define MAX(a, b) ((a) > (b) ?
(a) : (b)) -#define CHECK_F(f) \ - { \ - size_t const errcod = f; \ - if (ERR_isError(errcod)) \ - return errcod; \ - } /* check and Forward error code */ -#define CHECK_E(f, e) \ - { \ - size_t const errcod = f; \ - if (ERR_isError(errcod)) \ - return ERROR(e); \ - } /* check and send Error code */ -#define ZSTD_STATIC_ASSERT(c) \ - { \ - enum { ZSTD_static_assert = 1 / (int)(!!(c)) }; \ - } - -/*-************************************* -* Common constants -***************************************/ -#define ZSTD_OPT_NUM (1 << 12) -#define ZSTD_DICT_MAGIC 0xEC30A437 /* v0.7+ */ - -#define ZSTD_REP_NUM 3 /* number of repcodes */ -#define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */ -#define ZSTD_REP_MOVE (ZSTD_REP_NUM - 1) -#define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM) -static const U32 repStartValue[ZSTD_REP_NUM] = {1, 4, 8}; - -#define KB *(1 << 10) -#define MB *(1 << 20) -#define GB *(1U << 30) - -#define BIT7 128 -#define BIT6 64 -#define BIT5 32 -#define BIT4 16 -#define BIT1 2 -#define BIT0 1 - -#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10 -static const size_t ZSTD_fcs_fieldSize[4] = {0, 2, 4, 8}; -static const size_t ZSTD_did_fieldSize[4] = {0, 1, 2, 4}; - -#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */ -static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE; -typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; - -#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ -#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ - -#define HufLog 12 -typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; - -#define LONGNBSEQ 0x7F00 - -#define MINMATCH 3 -#define EQUAL_READ32 4 - -#define Litbits 8 -#define MaxLit ((1 << Litbits) - 1) -#define MaxML 52 -#define MaxLL 35 -#define MaxOff 28 -#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */ -#define MLFSELog 9 -#define LLFSELog 9 -#define OffFSELog 8 - -static const U32 LL_bits[MaxLL + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; -static const S16 LL_defaultNorm[MaxLL + 1] = {4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, -1, -1, -1, -1}; -#define LL_DEFAULTNORMLOG 6 /* for static allocation */ -static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG; - -static const U32 ML_bits[MaxML + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; -static const S16 ML_defaultNorm[MaxML + 1] = {1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1}; -#define ML_DEFAULTNORMLOG 6 /* for static allocation */ -static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG; - -static const S16 OF_defaultNorm[MaxOff + 1] = {1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}; -#define OF_DEFAULTNORMLOG 5 /* for static allocation */ -static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG; - -/*-******************************************* -* Shared functions to include for inlining -*********************************************/ -ZSTD_STATIC void ZSTD_copy8(void *dst, const void *src) { - memcpy(dst, src, 8); -} -/*! 
ZSTD_wildcopy() : -* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */ -#define WILDCOPY_OVERLENGTH 8 -ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length) -{ - const BYTE* ip = (const BYTE*)src; - BYTE* op = (BYTE*)dst; - BYTE* const oend = op + length; - /* Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388. - * Avoid the bad case where the loop only runs once by handling the - * special case separately. This doesn't trigger the bug because it - * doesn't involve pointer/integer overflow. - */ - if (length <= 8) - return ZSTD_copy8(dst, src); - do { - ZSTD_copy8(op, ip); - op += 8; - ip += 8; - } while (op < oend); -} - -/*-******************************************* -* Private interfaces -*********************************************/ -typedef struct ZSTD_stats_s ZSTD_stats_t; - -typedef struct { - U32 off; - U32 len; -} ZSTD_match_t; - -typedef struct { - U32 price; - U32 off; - U32 mlen; - U32 litlen; - U32 rep[ZSTD_REP_NUM]; -} ZSTD_optimal_t; - -typedef struct seqDef_s { - U32 offset; - U16 litLength; - U16 matchLength; -} seqDef; - -typedef struct { - seqDef *sequencesStart; - seqDef *sequences; - BYTE *litStart; - BYTE *lit; - BYTE *llCode; - BYTE *mlCode; - BYTE *ofCode; - U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */ - U32 longLengthPos; - /* opt */ - ZSTD_optimal_t *priceTable; - ZSTD_match_t *matchTable; - U32 *matchLengthFreq; - U32 *litLengthFreq; - U32 *litFreq; - U32 *offCodeFreq; - U32 matchLengthSum; - U32 matchSum; - U32 litLengthSum; - U32 litSum; - U32 offCodeSum; - U32 log2matchLengthSum; - U32 log2matchSum; - U32 log2litLengthSum; - U32 log2litSum; - U32 log2offCodeSum; - U32 factor; - U32 staticPrices; - U32 cachedPrice; - U32 cachedLitLength; - const BYTE *cachedLiterals; -} seqStore_t; - -const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx); -void ZSTD_seqToCodes(const seqStore_t *seqStorePtr); -int ZSTD_isSkipFrame(ZSTD_DCtx *dctx); - -/*= Custom memory allocation functions */ -typedef void *(*ZSTD_allocFunction)(void *opaque, size_t size); -typedef void (*ZSTD_freeFunction)(void *opaque, void *address); -typedef struct { - ZSTD_allocFunction customAlloc; - ZSTD_freeFunction customFree; - void *opaque; -} ZSTD_customMem; - -void *ZSTD_malloc(size_t size, ZSTD_customMem customMem); -void ZSTD_free(void *ptr, ZSTD_customMem customMem); - -/*====== stack allocation ======*/ - -typedef struct { - void *ptr; - const void *end; -} ZSTD_stack; - -#define ZSTD_ALIGN(x) ALIGN(x, sizeof(size_t)) -#define ZSTD_PTR_ALIGN(p) PTR_ALIGN(p, sizeof(size_t)) - -ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize); - -void *ZSTD_stackAllocAll(void *opaque, size_t *size); -void *ZSTD_stackAlloc(void *opaque, size_t size); -void ZSTD_stackFree(void *opaque, void *address); - -/*====== common function ======*/ - -ZSTD_STATIC U32 ZSTD_highbit32(U32 val) { return 31 - __builtin_clz(val); } - -/* hidden functions */ - -/* ZSTD_invalidateRepCodes() : - * ensures next compression will not use repcodes from previous block. - * Note : only works with regular variant; - * do not use with extDict variant ! 
*/ -void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx); - -size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx); -size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx); -size_t ZSTD_freeCDict(ZSTD_CDict *cdict); -size_t ZSTD_freeDDict(ZSTD_DDict *cdict); -size_t ZSTD_freeCStream(ZSTD_CStream *zcs); -size_t ZSTD_freeDStream(ZSTD_DStream *zds); - -#endif /* ZSTD_CCOMMON_H_MODULE */ diff --git a/contrib/linux-kernel/lib/zstd/zstd_opt.h b/contrib/linux-kernel/lib/zstd/zstd_opt.h deleted file mode 100644 index ecdd7259e81..00000000000 --- a/contrib/linux-kernel/lib/zstd/zstd_opt.h +++ /dev/null @@ -1,1012 +0,0 @@ -/** - * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of https://github.com/facebook/zstd. - * - * This program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License version 2 as published by the - * Free Software Foundation. This program is dual-licensed; you may select - * either version 2 of the GNU General Public License ("GPL") or BSD license - * ("BSD"). - */ - -/* Note : this file is intended to be included within zstd_compress.c */ - -#ifndef ZSTD_OPT_H_91842398743 -#define ZSTD_OPT_H_91842398743 - -#define ZSTD_LITFREQ_ADD 2 -#define ZSTD_FREQ_DIV 4 -#define ZSTD_MAX_PRICE (1 << 30) - -/*-************************************* -* Price functions for optimal parser -***************************************/ -FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t *ssPtr) -{ - ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum + 1); - ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum + 1); - ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum + 1); - ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum + 1); - ssPtr->factor = 1 + ((ssPtr->litSum >> 5) / ssPtr->litLengthSum) + ((ssPtr->litSum << 1) / (ssPtr->litSum + ssPtr->matchSum)); -} - -ZSTD_STATIC void ZSTD_rescaleFreqs(seqStore_t *ssPtr, const BYTE *src, size_t srcSize) -{ - unsigned u; - - ssPtr->cachedLiterals = NULL; - ssPtr->cachedPrice = ssPtr->cachedLitLength = 0; - ssPtr->staticPrices = 0; - - if (ssPtr->litLengthSum == 0) { - if (srcSize <= 1024) - ssPtr->staticPrices = 1; - - for (u = 0; u <= MaxLit; u++) - ssPtr->litFreq[u] = 0; - for (u = 0; u < srcSize; u++) - ssPtr->litFreq[src[u]]++; - - ssPtr->litSum = 0; - ssPtr->litLengthSum = MaxLL + 1; - ssPtr->matchLengthSum = MaxML + 1; - ssPtr->offCodeSum = (MaxOff + 1); - ssPtr->matchSum = (ZSTD_LITFREQ_ADD << Litbits); - - for (u = 0; u <= MaxLit; u++) { - ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> ZSTD_FREQ_DIV); - ssPtr->litSum += ssPtr->litFreq[u]; - } - for (u = 0; u <= MaxLL; u++) - ssPtr->litLengthFreq[u] = 1; - for (u = 0; u <= MaxML; u++) - ssPtr->matchLengthFreq[u] = 1; - for (u = 0; u <= MaxOff; u++) - ssPtr->offCodeFreq[u] = 1; - } else { - ssPtr->matchLengthSum = 0; - ssPtr->litLengthSum = 0; - ssPtr->offCodeSum = 0; - ssPtr->matchSum = 0; - ssPtr->litSum = 0; - - for (u = 0; u <= MaxLit; u++) { - ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> (ZSTD_FREQ_DIV + 1)); - ssPtr->litSum += ssPtr->litFreq[u]; - } - for (u = 0; u <= MaxLL; u++) { - ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u] >> (ZSTD_FREQ_DIV + 1)); - ssPtr->litLengthSum += ssPtr->litLengthFreq[u]; - } - for (u = 0; u <= MaxML; u++) { - ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u] >> ZSTD_FREQ_DIV); - ssPtr->matchLengthSum += 
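These scaled frequencies feed a Shannon-style cost model, used by ZSTD_getLiteralPrice and ZSTD_getPrice below: the price of a symbol is roughly log2(sum) minus log2(freq), in bits. In isolation, using the same GCC/Clang builtin as ZSTD_highbit32:

/* approximate cost in bits of a symbol with frequency freq out of total;
 * the +1 mirrors the ZSTD_highbit32(x + 1) guard against log2(0) used here */
static unsigned price_bits(unsigned freq, unsigned total)
{
	unsigned const log2total = 31 - __builtin_clz(total + 1);
	unsigned const log2freq = 31 - __builtin_clz(freq + 1);
	return log2total - log2freq;
}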
ssPtr->matchLengthFreq[u]; - ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3); - } - ssPtr->matchSum *= ZSTD_LITFREQ_ADD; - for (u = 0; u <= MaxOff; u++) { - ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u] >> ZSTD_FREQ_DIV); - ssPtr->offCodeSum += ssPtr->offCodeFreq[u]; - } - } - - ZSTD_setLog2Prices(ssPtr); -} - -FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t *ssPtr, U32 litLength, const BYTE *literals) -{ - U32 price, u; - - if (ssPtr->staticPrices) - return ZSTD_highbit32((U32)litLength + 1) + (litLength * 6); - - if (litLength == 0) - return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0] + 1); - - /* literals */ - if (ssPtr->cachedLiterals == literals) { - U32 const additional = litLength - ssPtr->cachedLitLength; - const BYTE *literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength; - price = ssPtr->cachedPrice + additional * ssPtr->log2litSum; - for (u = 0; u < additional; u++) - price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]] + 1); - ssPtr->cachedPrice = price; - ssPtr->cachedLitLength = litLength; - } else { - price = litLength * ssPtr->log2litSum; - for (u = 0; u < litLength; u++) - price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]] + 1); - - if (litLength >= 12) { - ssPtr->cachedLiterals = literals; - ssPtr->cachedPrice = price; - ssPtr->cachedLitLength = litLength; - } - } - - /* literal Length */ - { - const BYTE LL_deltaCode = 19; - const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; - price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode] + 1); - } - - return price; -} - -FORCE_INLINE U32 ZSTD_getPrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength, const int ultra) -{ - /* offset */ - U32 price; - BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1); - - if (seqStorePtr->staticPrices) - return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength + 1) + 16 + offCode; - - price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode] + 1); - if (!ultra && offCode >= 20) - price += (offCode - 19) * 2; - - /* match Length */ - { - const BYTE ML_deltaCode = 36; - const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; - price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode] + 1); - } - - return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor; -} - -ZSTD_STATIC void ZSTD_updatePrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength) -{ - U32 u; - - /* literals */ - seqStorePtr->litSum += litLength * ZSTD_LITFREQ_ADD; - for (u = 0; u < litLength; u++) - seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD; - - /* literal Length */ - { - const BYTE LL_deltaCode = 19; - const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; - seqStorePtr->litLengthFreq[llCode]++; - seqStorePtr->litLengthSum++; - } - - /* match offset */ - { - BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1); - seqStorePtr->offCodeSum++; - seqStorePtr->offCodeFreq[offCode]++; - } - - /* match Length */ - { - const BYTE ML_deltaCode = 36; - const BYTE mlCode = (matchLength > 127) ? 
(BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; - seqStorePtr->matchLengthFreq[mlCode]++; - seqStorePtr->matchLengthSum++; - } - - ZSTD_setLog2Prices(seqStorePtr); -} - -#define SET_PRICE(pos, mlen_, offset_, litlen_, price_) \ - { \ - while (last_pos < pos) { \ - opt[last_pos + 1].price = ZSTD_MAX_PRICE; \ - last_pos++; \ - } \ - opt[pos].mlen = mlen_; \ - opt[pos].off = offset_; \ - opt[pos].litlen = litlen_; \ - opt[pos].price = price_; \ - } - -/* Update hashTable3 up to ip (excluded) - Assumption : always within prefix (i.e. not within extDict) */ -FORCE_INLINE -U32 ZSTD_insertAndFindFirstIndexHash3(ZSTD_CCtx *zc, const BYTE *ip) -{ - U32 *const hashTable3 = zc->hashTable3; - U32 const hashLog3 = zc->hashLog3; - const BYTE *const base = zc->base; - U32 idx = zc->nextToUpdate3; - const U32 target = zc->nextToUpdate3 = (U32)(ip - base); - const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3); - - while (idx < target) { - hashTable3[ZSTD_hash3Ptr(base + idx, hashLog3)] = idx; - idx++; - } - - return hashTable3[hash3]; -} - -/*-************************************* -* Binary Tree search -***************************************/ -static U32 ZSTD_insertBtAndGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, U32 nbCompares, const U32 mls, U32 extDict, - ZSTD_match_t *matches, const U32 minMatchLen) -{ - const BYTE *const base = zc->base; - const U32 curr = (U32)(ip - base); - const U32 hashLog = zc->params.cParams.hashLog; - const size_t h = ZSTD_hashPtr(ip, hashLog, mls); - U32 *const hashTable = zc->hashTable; - U32 matchIndex = hashTable[h]; - U32 *const bt = zc->chainTable; - const U32 btLog = zc->params.cParams.chainLog - 1; - const U32 btMask = (1U << btLog) - 1; - size_t commonLengthSmaller = 0, commonLengthLarger = 0; - const BYTE *const dictBase = zc->dictBase; - const U32 dictLimit = zc->dictLimit; - const BYTE *const dictEnd = dictBase + dictLimit; - const BYTE *const prefixStart = base + dictLimit; - const U32 btLow = btMask >= curr ? 0 : curr - btMask; - const U32 windowLow = zc->lowLimit; - U32 *smallerPtr = bt + 2 * (curr & btMask); - U32 *largerPtr = bt + 2 * (curr & btMask) + 1; - U32 matchEndIdx = curr + 8; - U32 dummy32; /* to be nullified at the end */ - U32 mnum = 0; - - const U32 minMatch = (mls == 3) ? 
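The HC3 index maintained by ZSTD_insertAndFindFirstIndexHash3 above hashes the first 3 bytes of each position; a minimal multiplicative variant, where the constant is illustrative and ZSTD_hash3Ptr's differs:

#include <stdint.h>

/* requires 1 <= hashLog <= 31 */
static uint32_t hash3(const uint8_t *p, unsigned hashLog)
{
	uint32_t const v = (uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16);
	return (v * 2654435761u) >> (32 - hashLog); /* Fibonacci/Knuth hashing */
}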
3 : 4; - size_t bestLength = minMatchLen - 1; - - if (minMatch == 3) { /* HC3 match finder */ - U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(zc, ip); - if (matchIndex3 > windowLow && (curr - matchIndex3 < (1 << 18))) { - const BYTE *match; - size_t currMl = 0; - if ((!extDict) || matchIndex3 >= dictLimit) { - match = base + matchIndex3; - if (match[bestLength] == ip[bestLength]) - currMl = ZSTD_count(ip, match, iLimit); - } else { - match = dictBase + matchIndex3; - if (ZSTD_readMINMATCH(match, MINMATCH) == - ZSTD_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */ - currMl = ZSTD_count_2segments(ip + MINMATCH, match + MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH; - } - - /* save best solution */ - if (currMl > bestLength) { - bestLength = currMl; - matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex3; - matches[mnum].len = (U32)currMl; - mnum++; - if (currMl > ZSTD_OPT_NUM) - goto update; - if (ip + currMl == iLimit) - goto update; /* best possible, and avoid read overflow*/ - } - } - } - - hashTable[h] = curr; /* Update Hash Table */ - - while (nbCompares-- && (matchIndex > windowLow)) { - U32 *nextPtr = bt + 2 * (matchIndex & btMask); - size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ - const BYTE *match; - - if ((!extDict) || (matchIndex + matchLength >= dictLimit)) { - match = base + matchIndex; - if (match[matchLength] == ip[matchLength]) { - matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iLimit) + 1; - } - } else { - match = dictBase + matchIndex; - matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iLimit, dictEnd, prefixStart); - if (matchIndex + matchLength >= dictLimit) - match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ - } - - if (matchLength > bestLength) { - if (matchLength > matchEndIdx - matchIndex) - matchEndIdx = matchIndex + (U32)matchLength; - bestLength = matchLength; - matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex; - matches[mnum].len = (U32)matchLength; - mnum++; - if (matchLength > ZSTD_OPT_NUM) - break; - if (ip + matchLength == iLimit) /* equal : no way to know if inf or sup */ - break; /* drop, to guarantee consistency (miss a little bit of compression) */ - } - - if (match[matchLength] < ip[matchLength]) { - /* match is smaller than curr */ - *smallerPtr = matchIndex; /* update smaller idx */ - commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ - if (matchIndex <= btLow) { - smallerPtr = &dummy32; - break; - } /* beyond tree size, stop the search */ - smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */ - matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */ - } else { - /* match is larger than curr */ - *largerPtr = matchIndex; - commonLengthLarger = matchLength; - if (matchIndex <= btLow) { - largerPtr = &dummy32; - break; - } /* beyond tree size, stop the search */ - largerPtr = nextPtr; - matchIndex = nextPtr[0]; - } - } - - *smallerPtr = *largerPtr = 0; - -update: - zc->nextToUpdate = (matchEndIdx > curr + 8) ? 
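For reference, the contract of ZSTD_count relied on by the tree walk above: the number of leading bytes on which two sequences agree, capped by a limit. The library version compares a word at a time; this byte-wise form is only a specification of the result:

#include <stddef.h>
#include <stdint.h>

static size_t count_common(const uint8_t *ip, const uint8_t *match, const uint8_t *iEnd)
{
	const uint8_t *const start = ip;
	while (ip < iEnd && *ip == *match) {
		ip++;
		match++;
	}
	return (size_t)(ip - start);
}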
matchEndIdx - 8 : curr + 1; - return mnum; -} - -/** Tree updater, providing best match */ -static U32 ZSTD_BtGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls, ZSTD_match_t *matches, - const U32 minMatchLen) -{ - if (ip < zc->base + zc->nextToUpdate) - return 0; /* skipped area */ - ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls); - return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen); -} - -static U32 ZSTD_BtGetAllMatches_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */ - const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch, - ZSTD_match_t *matches, const U32 minMatchLen) -{ - switch (matchLengthSearch) { - case 3: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen); - default: - case 4: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen); - case 5: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen); - case 7: - case 6: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen); - } -} - -/** Tree updater, providing best match */ -static U32 ZSTD_BtGetAllMatches_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls, - ZSTD_match_t *matches, const U32 minMatchLen) -{ - if (ip < zc->base + zc->nextToUpdate) - return 0; /* skipped area */ - ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls); - return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen); -} - -static U32 ZSTD_BtGetAllMatches_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */ - const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch, - ZSTD_match_t *matches, const U32 minMatchLen) -{ - switch (matchLengthSearch) { - case 3: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen); - default: - case 4: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen); - case 5: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen); - case 7: - case 6: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen); - } -} - -/*-******************************* -* Optimal parser -*********************************/ -FORCE_INLINE -void ZSTD_compressBlock_opt_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra) -{ - seqStore_t *seqStorePtr = &(ctx->seqStore); - const BYTE *const istart = (const BYTE *)src; - const BYTE *ip = istart; - const BYTE *anchor = istart; - const BYTE *const iend = istart + srcSize; - const BYTE *const ilimit = iend - 8; - const BYTE *const base = ctx->base; - const BYTE *const prefixStart = base + ctx->dictLimit; - - const U32 maxSearches = 1U << ctx->params.cParams.searchLog; - const U32 sufficient_len = ctx->params.cParams.targetLength; - const U32 mls = ctx->params.cParams.searchLength; - const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 
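The match loop below fills opt[] as a shortest-path table over byte positions: opt[p] is the cheapest known price to produce p bytes, a literal advances by 1, a match by its length. A toy reduction that keeps only that skeleton, with unit literal cost, one candidate match per position, and no repcodes:

#include <stddef.h>
#include <stdint.h>

#define TOY_MAX 64

static uint32_t toy_parse_cost(const uint32_t matchLen[], const uint32_t matchCost[], size_t n)
{
	uint32_t opt[TOY_MAX + 1];
	size_t p;
	if (n > TOY_MAX)
		return UINT32_MAX;
	opt[0] = 0;
	for (p = 1; p <= n; p++)
		opt[p] = UINT32_MAX;
	for (p = 0; p < n; p++) {
		uint32_t const m = matchLen[p];
		if (opt[p] == UINT32_MAX)
			continue; /* position not reachable */
		if (opt[p] + 1 < opt[p + 1])
			opt[p + 1] = opt[p] + 1; /* emit one literal */
		if (m >= 3 && p + m <= n && opt[p] + matchCost[p] < opt[p + m])
			opt[p + m] = opt[p] + matchCost[p]; /* emit the match */
	}
	return opt[n]; /* cheapest cost to cover all n bytes */
}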
3 : 4; - - ZSTD_optimal_t *opt = seqStorePtr->priceTable; - ZSTD_match_t *matches = seqStorePtr->matchTable; - const BYTE *inr; - U32 offset, rep[ZSTD_REP_NUM]; - - /* init */ - ctx->nextToUpdate3 = ctx->nextToUpdate; - ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize); - ip += (ip == prefixStart); - { - U32 i; - for (i = 0; i < ZSTD_REP_NUM; i++) - rep[i] = ctx->rep[i]; - } - - /* Match Loop */ - while (ip < ilimit) { - U32 cur, match_num, last_pos, litlen, price; - U32 u, mlen, best_mlen, best_off, litLength; - memset(opt, 0, sizeof(ZSTD_optimal_t)); - last_pos = 0; - litlen = (U32)(ip - anchor); - - /* check repCode */ - { - U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor); - for (i = (ip == anchor); i < last_i; i++) { - const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i]; - if ((repCur > 0) && (repCur < (S32)(ip - prefixStart)) && - (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repCur, minMatch))) { - mlen = (U32)ZSTD_count(ip + minMatch, ip + minMatch - repCur, iend) + minMatch; - if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) { - best_mlen = mlen; - best_off = i; - cur = 0; - last_pos = 1; - goto _storeSequence; - } - best_off = i - (ip == anchor); - do { - price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); - if (mlen > last_pos || price < opt[mlen].price) - SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */ - mlen--; - } while (mlen >= minMatch); - } - } - } - - match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch); - - if (!last_pos && !match_num) { - ip++; - continue; - } - - if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) { - best_mlen = matches[match_num - 1].len; - best_off = matches[match_num - 1].off; - cur = 0; - last_pos = 1; - goto _storeSequence; - } - - /* set prices using matches at position = 0 */ - best_mlen = (last_pos) ? last_pos : minMatch; - for (u = 0; u < match_num; u++) { - mlen = (u > 0) ? 
matches[u - 1].len + 1 : best_mlen; - best_mlen = matches[u].len; - while (mlen <= best_mlen) { - price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra); - if (mlen > last_pos || price < opt[mlen].price) - SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */ - mlen++; - } - } - - if (last_pos < minMatch) { - ip++; - continue; - } - - /* initialize opt[0] */ - { - U32 i; - for (i = 0; i < ZSTD_REP_NUM; i++) - opt[0].rep[i] = rep[i]; - } - opt[0].mlen = 1; - opt[0].litlen = litlen; - - /* check further positions */ - for (cur = 1; cur <= last_pos; cur++) { - inr = ip + cur; - - if (opt[cur - 1].mlen == 1) { - litlen = opt[cur - 1].litlen + 1; - if (cur > litlen) { - price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen); - } else - price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor); - } else { - litlen = 1; - price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1); - } - - if (cur > last_pos || price <= opt[cur].price) - SET_PRICE(cur, 1, 0, litlen, price); - - if (cur == last_pos) - break; - - if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */ - continue; - - mlen = opt[cur].mlen; - if (opt[cur].off > ZSTD_REP_MOVE_OPT) { - opt[cur].rep[2] = opt[cur - mlen].rep[1]; - opt[cur].rep[1] = opt[cur - mlen].rep[0]; - opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT; - } else { - opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2]; - opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1]; - opt[cur].rep[0] = - ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]); - } - - best_mlen = minMatch; - { - U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); - for (i = (opt[cur].mlen != 1); i < last_i; i++) { /* check rep */ - const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? 
(opt[cur].rep[0] - 1) : opt[cur].rep[i]; - if ((repCur > 0) && (repCur < (S32)(inr - prefixStart)) && - (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(inr - repCur, minMatch))) { - mlen = (U32)ZSTD_count(inr + minMatch, inr + minMatch - repCur, iend) + minMatch; - - if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) { - best_mlen = mlen; - best_off = i; - last_pos = cur + 1; - goto _storeSequence; - } - - best_off = i - (opt[cur].mlen != 1); - if (mlen > best_mlen) - best_mlen = mlen; - - do { - if (opt[cur].mlen == 1) { - litlen = opt[cur].litlen; - if (cur > litlen) { - price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen, - best_off, mlen - MINMATCH, ultra); - } else - price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); - } else { - litlen = 0; - price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra); - } - - if (cur + mlen > last_pos || price <= opt[cur + mlen].price) - SET_PRICE(cur + mlen, mlen, i, litlen, price); - mlen--; - } while (mlen >= minMatch); - } - } - } - - match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen); - - if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) { - best_mlen = matches[match_num - 1].len; - best_off = matches[match_num - 1].off; - last_pos = cur + 1; - goto _storeSequence; - } - - /* set prices using matches at position = cur */ - for (u = 0; u < match_num; u++) { - mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen; - best_mlen = matches[u].len; - - while (mlen <= best_mlen) { - if (opt[cur].mlen == 1) { - litlen = opt[cur].litlen; - if (cur > litlen) - price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen, - matches[u].off - 1, mlen - MINMATCH, ultra); - else - price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra); - } else { - litlen = 0; - price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra); - } - - if (cur + mlen > last_pos || (price < opt[cur + mlen].price)) - SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price); - - mlen++; - } - } - } - - best_mlen = opt[last_pos].mlen; - best_off = opt[last_pos].off; - cur = last_pos - best_mlen; - - /* store sequence */ -_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ - opt[0].mlen = 1; - - while (1) { - mlen = opt[cur].mlen; - offset = opt[cur].off; - opt[cur].mlen = best_mlen; - opt[cur].off = best_off; - best_mlen = mlen; - best_off = offset; - if (mlen > cur) - break; - cur -= mlen; - } - - for (u = 0; u <= last_pos;) { - u += opt[u].mlen; - } - - for (cur = 0; cur < last_pos;) { - mlen = opt[cur].mlen; - if (mlen == 1) { - ip++; - cur++; - continue; - } - offset = opt[cur].off; - cur += mlen; - litLength = (U32)(ip - anchor); - - if (offset > ZSTD_REP_MOVE_OPT) { - rep[2] = rep[1]; - rep[1] = rep[0]; - rep[0] = offset - ZSTD_REP_MOVE_OPT; - offset--; - } else { - if (offset != 0) { - best_off = (offset == ZSTD_REP_MOVE_OPT) ? 
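The offset bookkeeping here maintains zstd's three-slot history of recent offsets: a new real offset pushes, while reusing rep[1] or rep[2] rotates the history. The push in isolation:

#include <stdint.h>

/* push a new real offset into the 3-deep recent-offsets history */
static void rep_push(uint32_t rep[3], uint32_t offset)
{
	rep[2] = rep[1];
	rep[1] = rep[0];
	rep[0] = offset;
}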
(rep[0] - 1) : (rep[offset]); - if (offset != 1) - rep[2] = rep[1]; - rep[1] = rep[0]; - rep[0] = best_off; - } - if (litLength == 0) - offset--; - } - - ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH); - ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH); - anchor = ip = ip + mlen; - } - } /* for (cur=0; cur < last_pos; ) */ - - /* Save reps for next block */ - { - int i; - for (i = 0; i < ZSTD_REP_NUM; i++) - ctx->repToConfirm[i] = rep[i]; - } - - /* Last Literals */ - { - size_t const lastLLSize = iend - anchor; - memcpy(seqStorePtr->lit, anchor, lastLLSize); - seqStorePtr->lit += lastLLSize; - } -} - -FORCE_INLINE -void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra) -{ - seqStore_t *seqStorePtr = &(ctx->seqStore); - const BYTE *const istart = (const BYTE *)src; - const BYTE *ip = istart; - const BYTE *anchor = istart; - const BYTE *const iend = istart + srcSize; - const BYTE *const ilimit = iend - 8; - const BYTE *const base = ctx->base; - const U32 lowestIndex = ctx->lowLimit; - const U32 dictLimit = ctx->dictLimit; - const BYTE *const prefixStart = base + dictLimit; - const BYTE *const dictBase = ctx->dictBase; - const BYTE *const dictEnd = dictBase + dictLimit; - - const U32 maxSearches = 1U << ctx->params.cParams.searchLog; - const U32 sufficient_len = ctx->params.cParams.targetLength; - const U32 mls = ctx->params.cParams.searchLength; - const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4; - - ZSTD_optimal_t *opt = seqStorePtr->priceTable; - ZSTD_match_t *matches = seqStorePtr->matchTable; - const BYTE *inr; - - /* init */ - U32 offset, rep[ZSTD_REP_NUM]; - { - U32 i; - for (i = 0; i < ZSTD_REP_NUM; i++) - rep[i] = ctx->rep[i]; - } - - ctx->nextToUpdate3 = ctx->nextToUpdate; - ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize); - ip += (ip == prefixStart); - - /* Match Loop */ - while (ip < ilimit) { - U32 cur, match_num, last_pos, litlen, price; - U32 u, mlen, best_mlen, best_off, litLength; - U32 curr = (U32)(ip - base); - memset(opt, 0, sizeof(ZSTD_optimal_t)); - last_pos = 0; - opt[0].litlen = (U32)(ip - anchor); - - /* check repCode */ - { - U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor); - for (i = (ip == anchor); i < last_i; i++) { - const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i]; - const U32 repIndex = (U32)(curr - repCur); - const BYTE *const repBase = repIndex < dictLimit ? dictBase : base; - const BYTE *const repMatch = repBase + repIndex; - if ((repCur > 0 && repCur <= (S32)curr) && - (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ - && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) { - /* repcode detected we should take it */ - const BYTE *const repEnd = repIndex < dictLimit ? 
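ZSTD_count_2segments, called just below, extends a match across the dictionary/prefix seam: compare within the old segment first and, if the match reaches dictEnd, continue against the current prefix. A byte-wise sketch mirroring that logic, where the library version is word-optimized:

#include <stddef.h>
#include <stdint.h>

static size_t count_eq(const uint8_t *a, const uint8_t *b, const uint8_t *end)
{
	const uint8_t *const start = a;
	while (a < end && *a == *b) {
		a++;
		b++;
	}
	return (size_t)(a - start);
}

/* ip points into the current segment, match into the old segment [.., mEnd);
 * iStart is the beginning of the current prefix */
static size_t count_2segments(const uint8_t *ip, const uint8_t *match,
			      const uint8_t *iEnd, const uint8_t *mEnd,
			      const uint8_t *iStart)
{
	const uint8_t *const vEnd = (ip + (mEnd - match) < iEnd) ? ip + (mEnd - match) : iEnd;
	size_t const matchLength = count_eq(ip, match, vEnd);
	if (match + matchLength != mEnd)
		return matchLength; /* mismatch before the seam */
	/* the old-segment portion matched entirely; continue in the prefix */
	return matchLength + count_eq(ip + matchLength, iStart, iEnd);
}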
dictEnd : iend; - mlen = (U32)ZSTD_count_2segments(ip + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch; - - if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) { - best_mlen = mlen; - best_off = i; - cur = 0; - last_pos = 1; - goto _storeSequence; - } - - best_off = i - (ip == anchor); - litlen = opt[0].litlen; - do { - price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); - if (mlen > last_pos || price < opt[mlen].price) - SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */ - mlen--; - } while (mlen >= minMatch); - } - } - } - - match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */ - - if (!last_pos && !match_num) { - ip++; - continue; - } - - { - U32 i; - for (i = 0; i < ZSTD_REP_NUM; i++) - opt[0].rep[i] = rep[i]; - } - opt[0].mlen = 1; - - if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) { - best_mlen = matches[match_num - 1].len; - best_off = matches[match_num - 1].off; - cur = 0; - last_pos = 1; - goto _storeSequence; - } - - best_mlen = (last_pos) ? last_pos : minMatch; - - /* set prices using matches at position = 0 */ - for (u = 0; u < match_num; u++) { - mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen; - best_mlen = matches[u].len; - litlen = opt[0].litlen; - while (mlen <= best_mlen) { - price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra); - if (mlen > last_pos || price < opt[mlen].price) - SET_PRICE(mlen, mlen, matches[u].off, litlen, price); - mlen++; - } - } - - if (last_pos < minMatch) { - ip++; - continue; - } - - /* check further positions */ - for (cur = 1; cur <= last_pos; cur++) { - inr = ip + cur; - - if (opt[cur - 1].mlen == 1) { - litlen = opt[cur - 1].litlen + 1; - if (cur > litlen) { - price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen); - } else - price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor); - } else { - litlen = 1; - price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1); - } - - if (cur > last_pos || price <= opt[cur].price) - SET_PRICE(cur, 1, 0, litlen, price); - - if (cur == last_pos) - break; - - if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */ - continue; - - mlen = opt[cur].mlen; - if (opt[cur].off > ZSTD_REP_MOVE_OPT) { - opt[cur].rep[2] = opt[cur - mlen].rep[1]; - opt[cur].rep[1] = opt[cur - mlen].rep[0]; - opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT; - } else { - opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2]; - opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1]; - opt[cur].rep[0] = - ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]); - } - - best_mlen = minMatch; - { - U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); - for (i = (mlen != 1); i < last_i; i++) { - const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i]; - const U32 repIndex = (U32)(curr + cur - repCur); - const BYTE *const repBase = repIndex < dictLimit ? 
dictBase : base; - const BYTE *const repMatch = repBase + repIndex; - if ((repCur > 0 && repCur <= (S32)(curr + cur)) && - (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ - && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) { - /* repcode detected */ - const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend; - mlen = (U32)ZSTD_count_2segments(inr + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch; - - if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) { - best_mlen = mlen; - best_off = i; - last_pos = cur + 1; - goto _storeSequence; - } - - best_off = i - (opt[cur].mlen != 1); - if (mlen > best_mlen) - best_mlen = mlen; - - do { - if (opt[cur].mlen == 1) { - litlen = opt[cur].litlen; - if (cur > litlen) { - price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen, - best_off, mlen - MINMATCH, ultra); - } else - price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); - } else { - litlen = 0; - price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra); - } - - if (cur + mlen > last_pos || price <= opt[cur + mlen].price) - SET_PRICE(cur + mlen, mlen, i, litlen, price); - mlen--; - } while (mlen >= minMatch); - } - } - } - - match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch); - - if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) { - best_mlen = matches[match_num - 1].len; - best_off = matches[match_num - 1].off; - last_pos = cur + 1; - goto _storeSequence; - } - - /* set prices using matches at position = cur */ - for (u = 0; u < match_num; u++) { - mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen; - best_mlen = matches[u].len; - - while (mlen <= best_mlen) { - if (opt[cur].mlen == 1) { - litlen = opt[cur].litlen; - if (cur > litlen) - price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen, - matches[u].off - 1, mlen - MINMATCH, ultra); - else - price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra); - } else { - litlen = 0; - price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra); - } - - if (cur + mlen > last_pos || (price < opt[cur + mlen].price)) - SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price); - - mlen++; - } - } - } /* for (cur = 1; cur <= last_pos; cur++) */ - - best_mlen = opt[last_pos].mlen; - best_off = opt[last_pos].off; - cur = last_pos - best_mlen; - - /* store sequence */ -_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ - opt[0].mlen = 1; - - while (1) { - mlen = opt[cur].mlen; - offset = opt[cur].off; - opt[cur].mlen = best_mlen; - opt[cur].off = best_off; - best_mlen = mlen; - best_off = offset; - if (mlen > cur) - break; - cur -= mlen; - } - - for (u = 0; u <= last_pos;) { - u += opt[u].mlen; - } - - for (cur = 0; cur < last_pos;) { - mlen = opt[cur].mlen; - if (mlen == 1) { - ip++; - cur++; - continue; - } - offset = opt[cur].off; - cur += mlen; - litLength = (U32)(ip - anchor); - - if (offset > ZSTD_REP_MOVE_OPT) { - rep[2] = rep[1]; - rep[1] = rep[0]; - rep[0] = offset - ZSTD_REP_MOVE_OPT; - offset--; - } else { - if (offset != 0) { - best_off = (offset == ZSTD_REP_MOVE_OPT) ? 
(rep[0] - 1) : (rep[offset]); - if (offset != 1) - rep[2] = rep[1]; - rep[1] = rep[0]; - rep[0] = best_off; - } - - if (litLength == 0) - offset--; - } - - ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH); - ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH); - anchor = ip = ip + mlen; - } - } /* for (cur=0; cur < last_pos; ) */ - - /* Save reps for next block */ - { - int i; - for (i = 0; i < ZSTD_REP_NUM; i++) - ctx->repToConfirm[i] = rep[i]; - } - - /* Last Literals */ - { - size_t lastLLSize = iend - anchor; - memcpy(seqStorePtr->lit, anchor, lastLLSize); - seqStorePtr->lit += lastLLSize; - } -} - -#endif /* ZSTD_OPT_H_91842398743 */ diff --git a/contrib/linux-kernel/linux.mk b/contrib/linux-kernel/linux.mk new file mode 100644 index 00000000000..be218b5e0ed --- /dev/null +++ b/contrib/linux-kernel/linux.mk @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause +# ################################################################ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). +# You may select, at your option, one of the above-listed licenses. +# ################################################################ +obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o +obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o +obj-$(CONFIG_ZSTD_COMMON) += zstd_common.o + +zstd_compress-y := \ + zstd_compress_module.o \ + compress/fse_compress.o \ + compress/hist.o \ + compress/huf_compress.o \ + compress/zstd_compress.o \ + compress/zstd_compress_literals.o \ + compress/zstd_compress_sequences.o \ + compress/zstd_compress_superblock.o \ + compress/zstd_double_fast.o \ + compress/zstd_fast.o \ + compress/zstd_lazy.o \ + compress/zstd_ldm.o \ + compress/zstd_opt.o \ + compress/zstd_preSplit.o \ + +zstd_decompress-y := \ + zstd_decompress_module.o \ + decompress/huf_decompress.o \ + decompress/zstd_ddict.o \ + decompress/zstd_decompress.o \ + decompress/zstd_decompress_block.o \ + +zstd_common-y := \ + zstd_common_module.o \ + common/debug.o \ + common/entropy_common.o \ + common/error_private.o \ + common/fse_decompress.o \ + common/zstd_common.o \ diff --git a/contrib/linux-kernel/linux_zstd.h b/contrib/linux-kernel/linux_zstd.h new file mode 100644 index 00000000000..2f2a3c8b8a3 --- /dev/null +++ b/contrib/linux-kernel/linux_zstd.h @@ -0,0 +1,691 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of https://github.com/facebook/zstd) and + * the GPLv2 (found in the COPYING file in the root directory of + * https://github.com/facebook/zstd). You may select, at your option, one of the + * above-listed licenses. + */ + +#ifndef LINUX_ZSTD_H +#define LINUX_ZSTD_H + +/** + * This is a kernel-style API that wraps the upstream zstd API, which cannot be + * used directly because the symbols aren't exported. It exposes the minimal + * functionality which is currently required by users of zstd in the kernel. + * Expose extra functions from lib/zstd/zstd.h as needed. 
+ */
+
+/* ======   Dependency   ====== */
+#include <linux/types.h>
+#include <linux/zstd_errors.h>
+#include <linux/zstd_lib.h>
+
+/* ======   Helper Functions   ====== */
+/**
+ * zstd_compress_bound() - maximum compressed size in worst case scenario
+ * @src_size: The size of the data to compress.
+ *
+ * Return: The maximum compressed size in the worst case scenario.
+ */
+size_t zstd_compress_bound(size_t src_size);
+
+/**
+ * zstd_is_error() - tells if a size_t function result is an error code
+ * @code: The function result to check for error.
+ *
+ * Return: Non-zero iff the code is an error.
+ */
+unsigned int zstd_is_error(size_t code);
+
+/**
+ * enum zstd_error_code - zstd error codes
+ */
+typedef ZSTD_ErrorCode zstd_error_code;
+
+/**
+ * zstd_get_error_code() - translates an error function result to an error code
+ * @code: The function result for which zstd_is_error(code) is true.
+ *
+ * Return: A unique error code for this error.
+ */
+zstd_error_code zstd_get_error_code(size_t code);
+
+/**
+ * zstd_get_error_name() - translates an error function result to a string
+ * @code: The function result for which zstd_is_error(code) is true.
+ *
+ * Return: An error string corresponding to the error code.
+ */
+const char *zstd_get_error_name(size_t code);
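The three declarations above define the error convention used by every size_t-returning function in this wrapper. The following minimal sketch shows the checking idiom; it is an editorial illustration rather than part of the patch, and pr_err() plus the helper name are assumptions about the caller:

#include <linux/printk.h>
#include <linux/zstd.h>

/* Log a readable message when a zstd call fails, then pass the result on. */
static size_t check_zstd_result(size_t ret)
{
	if (zstd_is_error(ret))
		pr_err("zstd error: %s (code %d)\n",
		       zstd_get_error_name(ret), (int)zstd_get_error_code(ret));
	return ret;
}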
+
+/**
+ * zstd_min_clevel() - minimum allowed compression level
+ *
+ * Return: The minimum allowed compression level.
+ */
+int zstd_min_clevel(void);
+
+/**
+ * zstd_max_clevel() - maximum allowed compression level
+ *
+ * Return: The maximum allowed compression level.
+ */
+int zstd_max_clevel(void);
+
+/**
+ * zstd_default_clevel() - default compression level
+ *
+ * Return: Default compression level.
+ */
+int zstd_default_clevel(void);
+
+/**
+ * struct zstd_custom_mem - custom memory allocation
+ */
+typedef ZSTD_customMem zstd_custom_mem;
+
+/**
+ * enum zstd_dict_load_method - Dictionary load method.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_dictLoadMethod_e zstd_dict_load_method;
+
+/**
+ * enum zstd_dict_content_type - Dictionary content type.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_dictContentType_e zstd_dict_content_type;
+
+/* ======   Parameter Selection   ====== */
+
+/**
+ * enum zstd_strategy - zstd compression search strategy
+ *
+ * From faster to stronger. See zstd_lib.h.
+ */
+typedef ZSTD_strategy zstd_strategy;
+
+/**
+ * struct zstd_compression_parameters - zstd compression parameters
+ * @windowLog: Log of the largest match distance. Larger means more
+ *             compression, and more memory needed during decompression.
+ * @chainLog: Fully searched segment. Larger means more compression,
+ *            slower, and more memory (useless for fast).
+ * @hashLog: Dispatch table. Larger means more compression,
+ *           slower, and more memory.
+ * @searchLog: Number of searches. Larger means more compression and slower.
+ * @searchLength: Match length searched. Larger means faster decompression,
+ *                sometimes less compression.
+ * @targetLength: Acceptable match size for optimal parser (only). Larger means
+ *                more compression, and slower.
+ * @strategy: The zstd compression strategy.
+ *
+ * See zstd_lib.h.
+ */
+typedef ZSTD_compressionParameters zstd_compression_parameters;
+
+/**
+ * struct zstd_frame_parameters - zstd frame parameters
+ * @contentSizeFlag: Controls whether content size will be present in the
+ *                   frame header (when known).
+ * @checksumFlag: Controls whether a 32-bit checksum is generated at the
+ *                end of the frame for error detection.
+ * @noDictIDFlag: Controls whether dictID will be saved into the frame
+ *                header when using dictionary compression.
+ *
+ * The default value is all fields set to 0. See zstd_lib.h.
+ */
+typedef ZSTD_frameParameters zstd_frame_parameters;
+
+/**
+ * struct zstd_parameters - zstd parameters
+ * @cParams: The compression parameters.
+ * @fParams: The frame parameters.
+ */
+typedef ZSTD_parameters zstd_parameters;
+
+/**
+ * zstd_get_params() - returns zstd_parameters for selected level
+ * @level: The compression level.
+ * @estimated_src_size: The estimated source size to compress or 0
+ *                      if unknown.
+ *
+ * Return: The selected zstd_parameters.
+ */
+zstd_parameters zstd_get_params(int level,
+	unsigned long long estimated_src_size);
+
+/**
+ * zstd_get_cparams() - returns zstd_compression_parameters for selected level
+ * @level: The compression level.
+ * @estimated_src_size: The estimated source size to compress or 0
+ *                      if unknown.
+ * @dict_size: Dictionary size.
+ *
+ * Return: The selected zstd_compression_parameters.
+ */
+zstd_compression_parameters zstd_get_cparams(int level,
+	unsigned long long estimated_src_size, size_t dict_size);
+
+typedef ZSTD_CCtx zstd_cctx;
+typedef ZSTD_cParameter zstd_cparameter;
+
+/**
+ * zstd_cctx_set_param() - sets a compression parameter
+ * @cctx: The context. Must have been initialized with zstd_init_cctx().
+ * @param: The parameter to set.
+ * @value: The value to set the parameter to.
+ *
+ * Return: Zero or an error, which can be checked using zstd_is_error().
+ */
+size_t zstd_cctx_set_param(zstd_cctx *cctx, zstd_cparameter param, int value);
+
+/* ======   Single-pass Compression   ====== */
+
+/**
+ * zstd_cctx_workspace_bound() - max memory needed to initialize a zstd_cctx
+ * @parameters: The compression parameters to be used.
+ *
+ * If multiple compression parameters might be used, the caller must call
+ * zstd_cctx_workspace_bound() for each set of parameters and use the maximum
+ * size.
+ *
+ * Return: A lower bound on the size of the workspace that is passed to
+ *         zstd_init_cctx().
+ */
+size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *parameters);
+
+/**
+ * zstd_cctx_workspace_bound_with_ext_seq_prod() - max memory needed to
+ * initialize a zstd_cctx when using the block-level external sequence
+ * producer API.
+ * @parameters: The compression parameters to be used.
+ *
+ * If multiple compression parameters might be used, the caller must call
+ * this function for each set of parameters and use the maximum size.
+ *
+ * Return: A lower bound on the size of the workspace that is passed to
+ *         zstd_init_cctx().
+ */
+size_t zstd_cctx_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *parameters);
+
+/**
+ * zstd_init_cctx() - initialize a zstd compression context
+ * @workspace: The workspace to emplace the context into. It must outlive
+ *             the returned context.
+ * @workspace_size: The size of workspace. Use zstd_cctx_workspace_bound() to
+ *                  determine how large the workspace must be.
+ *
+ * Return: A zstd compression context or NULL on error.
+ */
+zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size);
+
+/**
+ * zstd_compress_cctx() - compress src into dst with the initialized parameters
+ * @cctx: The context. Must have been initialized with zstd_init_cctx().
+ * @dst: The buffer to compress src into.
+ * @dst_capacity: The size of the destination buffer. May be any size, but
+ *                ZSTD_compressBound(srcSize) is guaranteed to be large enough.
+ * @src: The data to compress.
+ * @src_size: The size of the data to compress.
+ * @parameters: The compression parameters to be used.
+ *
+ * Return: The compressed size or an error, which can be checked using
+ *         zstd_is_error().
+ */
+size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
+	const void *src, size_t src_size, const zstd_parameters *parameters);
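The declarations above imply a one-shot compression flow: select parameters, bound and allocate the workspace, emplace a cctx in it, compress, and free the workspace. A hedged sketch under those assumptions (kvmalloc()/kvfree(), compression level 3, and the errno mapping are illustrative choices by the editor, not part of this API):

#include <linux/errno.h>
#include <linux/mm.h>	/* kvmalloc, kvfree */
#include <linux/zstd.h>

static int compress_oneshot(void *dst, size_t dst_cap, size_t *out_len,
			    const void *src, size_t src_len)
{
	zstd_parameters params = zstd_get_params(3, src_len);
	size_t ws_size = zstd_cctx_workspace_bound(&params.cParams);
	void *ws = kvmalloc(ws_size, GFP_KERNEL);
	zstd_cctx *cctx;
	size_t ret;

	if (!ws)
		return -ENOMEM;
	cctx = zstd_init_cctx(ws, ws_size);
	if (!cctx) {
		kvfree(ws);
		return -ENOMEM;
	}
	ret = zstd_compress_cctx(cctx, dst, dst_cap, src, src_len, &params);
	kvfree(ws);
	if (zstd_is_error(ret))
		return -EINVAL;	/* illustrative errno mapping */
	*out_len = ret;
	return 0;
}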
+
+/**
+ * zstd_create_cctx_advanced() - Create compression context
+ * @custom_mem: Custom allocator.
+ *
+ * Return: NULL on error, pointer to compression context otherwise.
+ */
+zstd_cctx *zstd_create_cctx_advanced(zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_cctx() - Free compression context
+ * @cctx: Pointer to compression context.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_cctx(zstd_cctx* cctx);
+
+/**
+ * struct zstd_cdict - Compression dictionary.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_CDict zstd_cdict;
+
+/**
+ * zstd_create_cdict_byreference() - Create compression dictionary
+ * @dict: Pointer to dictionary buffer.
+ * @dict_size: Size of the dictionary buffer.
+ * @cparams: The compression parameters to be used.
+ * @custom_mem: Memory allocator.
+ *
+ * Note, this uses @dict by reference (ZSTD_dlm_byRef), so @dict must outlive
+ * the returned dictionary; free it only after the zstd_cdict is destroyed.
+ *
+ * Return: NULL on error, pointer to compression dictionary
+ *         otherwise.
+ */
+zstd_cdict *zstd_create_cdict_byreference(const void *dict, size_t dict_size,
+	zstd_compression_parameters cparams,
+	zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_cdict() - Free compression dictionary
+ * @cdict: Pointer to compression dictionary.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_cdict(zstd_cdict* cdict);
+
+/**
+ * zstd_compress_using_cdict() - compress src into dst using a dictionary
+ * @cctx: The context. Must have been initialized with zstd_init_cctx().
+ * @dst: The buffer to compress src into.
+ * @dst_capacity: The size of the destination buffer. May be any size, but
+ *                ZSTD_compressBound(srcSize) is guaranteed to be large enough.
+ * @src: The data to compress.
+ * @src_size: The size of the data to compress.
+ * @cdict: The dictionary to be used.
+ *
+ * Return: The compressed size or an error, which can be checked using
+ *         zstd_is_error().
+ */
+size_t zstd_compress_using_cdict(zstd_cctx *cctx, void *dst,
+	size_t dst_capacity, const void *src, size_t src_size,
+	const zstd_cdict *cdict);
+
+/* ======   Single-pass Decompression   ====== */
+
+typedef ZSTD_DCtx zstd_dctx;
+
+/**
+ * zstd_dctx_workspace_bound() - max memory needed to initialize a zstd_dctx
+ *
+ * Return: A lower bound on the size of the workspace that is passed to
+ *         zstd_init_dctx().
+ */
+size_t zstd_dctx_workspace_bound(void);
+
+/**
+ * zstd_init_dctx() - initialize a zstd decompression context
+ * @workspace: The workspace to emplace the context into. It must outlive
+ *             the returned context.
+ * @workspace_size: The size of workspace. Use zstd_dctx_workspace_bound() to
+ *                  determine how large the workspace must be.
+ *
+ * Return: A zstd decompression context or NULL on error.
+ */
+zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size);
+
+/**
+ * zstd_decompress_dctx() - decompress zstd compressed src into dst
+ * @dctx: The decompression context.
+ * @dst: The buffer to decompress src into.
+ * @dst_capacity: The size of the destination buffer. Must be at least as large
+ *                as the decompressed size. If the caller cannot upper bound the
+ *                decompressed size, then it's better to use the streaming API.
+ * @src: The zstd compressed data to decompress. Multiple concatenated
+ *       frames and skippable frames are allowed.
+ * @src_size: The exact size of the data to decompress.
+ *
+ * Return: The decompressed size or an error, which can be checked using
+ *         zstd_is_error().
+ */
+size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
+	const void *src, size_t src_size);
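Decompression mirrors the same workspace pattern. Another editorial sketch, again assuming kvmalloc()/kvfree() and an illustrative errno mapping; as documented above, dst_cap must upper-bound the decompressed size:

#include <linux/errno.h>
#include <linux/mm.h>	/* kvmalloc, kvfree */
#include <linux/zstd.h>

static int decompress_oneshot(void *dst, size_t dst_cap, size_t *out_len,
			      const void *src, size_t src_len)
{
	size_t ws_size = zstd_dctx_workspace_bound();
	void *ws = kvmalloc(ws_size, GFP_KERNEL);
	zstd_dctx *dctx;
	size_t ret;

	if (!ws)
		return -ENOMEM;
	dctx = zstd_init_dctx(ws, ws_size);
	if (!dctx) {
		kvfree(ws);
		return -ENOMEM;
	}
	ret = zstd_decompress_dctx(dctx, dst, dst_cap, src, src_len);
	kvfree(ws);
	if (zstd_is_error(ret))
		return -EINVAL;	/* illustrative errno mapping */
	*out_len = ret;
	return 0;
}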
+
+/**
+ * struct zstd_ddict - Decompression dictionary.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_DDict zstd_ddict;
+
+/**
+ * zstd_create_ddict_byreference() - Create decompression dictionary
+ * @dict: Pointer to dictionary buffer.
+ * @dict_size: Size of the dictionary buffer.
+ * @custom_mem: Memory allocator.
+ *
+ * Note, this uses @dict by reference (ZSTD_dlm_byRef), so @dict must outlive
+ * the returned dictionary; free it only after the zstd_ddict is destroyed.
+ *
+ * Return: NULL on error, pointer to decompression dictionary
+ *         otherwise.
+ */
+zstd_ddict *zstd_create_ddict_byreference(const void *dict, size_t dict_size,
+	zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_ddict() - Free decompression dictionary
+ * @ddict: Pointer to the dictionary.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_ddict(zstd_ddict *ddict);
+
+/**
+ * zstd_create_dctx_advanced() - Create decompression context
+ * @custom_mem: Custom allocator.
+ *
+ * Return: NULL on error, pointer to decompression context otherwise.
+ */
+zstd_dctx *zstd_create_dctx_advanced(zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_dctx() - Free decompression context
+ * @dctx: Pointer to decompression context.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_dctx(zstd_dctx *dctx);
+
+/**
+ * zstd_decompress_using_ddict() - decompress src into dst using a dictionary
+ * @dctx: The decompression context.
+ * @dst: The buffer to decompress src into.
+ * @dst_capacity: The size of the destination buffer. Must be at least as large
+ *                as the decompressed size. If the caller cannot upper bound the
+ *                decompressed size, then it's better to use the streaming API.
+ * @src: The zstd compressed data to decompress. Multiple concatenated
+ *       frames and skippable frames are allowed.
+ * @src_size: The exact size of the data to decompress.
+ * @ddict: The dictionary to be used.
+ *
+ * Return: The decompressed size or an error, which can be checked using
+ *         zstd_is_error().
+ */
+size_t zstd_decompress_using_ddict(zstd_dctx *dctx,
+	void *dst, size_t dst_capacity, const void *src, size_t src_size,
+	const zstd_ddict *ddict);
+
+
+/* ======   Streaming Buffers   ====== */
+
+/**
+ * struct zstd_in_buffer - input buffer for streaming
+ * @src: Start of the input buffer.
+ * @size: Size of the input buffer.
+ * @pos: Position where reading stopped. Will be updated.
+ *       Necessarily 0 <= pos <= size.
+ *
+ * See zstd_lib.h.
+ */
+typedef ZSTD_inBuffer zstd_in_buffer;
+
+/**
+ * struct zstd_out_buffer - output buffer for streaming
+ * @dst: Start of the output buffer.
+ * @size: Size of the output buffer.
+ * @pos: Position where writing stopped. Will be updated.
+ *       Necessarily 0 <= pos <= size.
+ *
+ * See zstd_lib.h.
+ */
+typedef ZSTD_outBuffer zstd_out_buffer;
+
+/* ======   Streaming Compression   ====== */
+
+typedef ZSTD_CStream zstd_cstream;
+
+/**
+ * zstd_cstream_workspace_bound() - memory needed to initialize a zstd_cstream
+ * @cparams: The compression parameters to be used for compression.
+ *
+ * Return: A lower bound on the size of the workspace that is passed to
+ *         zstd_init_cstream().
+ */
+size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams);
+
+/**
+ * zstd_cstream_workspace_bound_with_ext_seq_prod() - memory needed to initialize
+ * a zstd_cstream when using the block-level external sequence producer API.
+ * @cparams: The compression parameters to be used for compression.
+ *
+ * Return: A lower bound on the size of the workspace that is passed to
+ *         zstd_init_cstream().
+ */
+size_t zstd_cstream_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *cparams);
+
+/**
+ * zstd_init_cstream() - initialize a zstd streaming compression context
+ * @parameters: The zstd parameters to use for compression.
+ * @pledged_src_size: If params.fParams.contentSizeFlag == 1 then the caller
+ *                    must pass the source size (zero means empty source).
+ *                    Otherwise, the caller may optionally pass the source
+ *                    size, or zero if unknown.
+ * @workspace: The workspace to emplace the context into. It must outlive
+ *             the returned context.
+ * @workspace_size: The size of workspace.
+ *                  Use zstd_cstream_workspace_bound(&params->cParams) to
+ *                  determine how large the workspace must be.
+ *
+ * Return: The zstd streaming compression context or NULL on error.
+ */
+zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
+	unsigned long long pledged_src_size, void *workspace, size_t workspace_size);
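Streaming contexts follow the same bound-allocate-emplace pattern as the single-pass contexts. A speculative helper sketch (editorial; the function name and the ws_out ownership convention are invented for illustration):

#include <linux/mm.h>	/* kvmalloc, kvfree */
#include <linux/zstd.h>

/* Size a workspace for the chosen parameters and carve a cstream out of it.
 * Returns NULL on failure. On success *ws_out holds the workspace, which the
 * caller must kvfree() once the stream is no longer in use. */
static zstd_cstream *open_cstream(const zstd_parameters *params,
				  unsigned long long pledged_src_size,
				  void **ws_out)
{
	size_t ws_size = zstd_cstream_workspace_bound(&params->cParams);
	void *ws = kvmalloc(ws_size, GFP_KERNEL);

	if (!ws)
		return NULL;
	*ws_out = ws;
	return zstd_init_cstream(params, pledged_src_size, ws, ws_size);
}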
+
+/**
+ * zstd_reset_cstream() - reset the context using parameters from creation
+ * @cstream: The zstd streaming compression context to reset.
+ * @pledged_src_size: Optionally the source size, or zero if unknown.
+ *
+ * Resets the context using the parameters from creation. Skips dictionary
+ * loading, since it can be reused. If `pledged_src_size` is non-zero the frame
+ * content size is always written into the frame header.
+ *
+ * Return: Zero or an error, which can be checked using
+ *         zstd_is_error().
+ */
+size_t zstd_reset_cstream(zstd_cstream *cstream,
+	unsigned long long pledged_src_size);
+
+/**
+ * zstd_compress_stream() - streaming compress some of input into output
+ * @cstream: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ *          compressed data was written.
+ * @input: Source buffer. `input->pos` is updated to indicate how much data
+ *         was read. Note that it may not consume the entire input, in which
+ *         case `input->pos < input->size`, and it's up to the caller to
+ *         present remaining data again.
+ *
+ * The `input` and `output` buffers may be any size. Guaranteed to make some
+ * forward progress if `input` and `output` are not empty.
+ *
+ * Return: A hint for the number of bytes to use as the input for the next
+ *         function call or an error, which can be checked using
+ *         zstd_is_error().
+ */
+size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
+	zstd_in_buffer *input);
+
+/**
+ * zstd_flush_stream() - flush internal buffers into output
+ * @cstream: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ *          compressed data was written.
+ *
+ * zstd_flush_stream() must be called until it returns 0, meaning all the data
+ * has been flushed. Since zstd_flush_stream() causes a block to be ended,
+ * calling it too often will degrade the compression ratio.
+ *
+ * Return: The number of bytes still present within internal buffers or an
+ *         error, which can be checked using zstd_is_error().
+ */
+size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output);
+
+/**
+ * zstd_end_stream() - flush internal buffers into output and end the frame
+ * @cstream: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ *          compressed data was written.
+ *
+ * zstd_end_stream() must be called until it returns 0, meaning all the data has
+ * been flushed and the frame epilogue has been written.
+ *
+ * Return: The number of bytes still present within internal buffers or an
+ *         error, which can be checked using zstd_is_error().
+ */
+size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output);
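A typical caller drives zstd_compress_stream() until the input is consumed, then drives zstd_end_stream() until it returns 0. A hedged sketch of that loop (editorial example; it assumes dst was sized with zstd_compress_bound() so the output buffer never fills):

#include <linux/zstd.h>

/* Feed src through an already-initialized cstream into dst, end the frame,
 * and return the compressed size (or a zstd error code). */
static size_t stream_compress(zstd_cstream *zcs,
			      void *dst, size_t dst_cap,
			      const void *src, size_t src_len)
{
	zstd_in_buffer in = { src, src_len, 0 };
	zstd_out_buffer out = { dst, dst_cap, 0 };
	size_t ret;

	while (in.pos < in.size) {
		ret = zstd_compress_stream(zcs, &out, &in);
		if (zstd_is_error(ret))
			return ret;
	}
	do {
		ret = zstd_end_stream(zcs, &out);
		if (zstd_is_error(ret))
			return ret;
	} while (ret != 0);	/* 0 means the frame epilogue was written */
	return out.pos;
}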
+
+/* ======   Streaming Decompression   ====== */
+
+typedef ZSTD_DStream zstd_dstream;
+
+/**
+ * zstd_dstream_workspace_bound() - memory needed to initialize a zstd_dstream
+ * @max_window_size: The maximum window size allowed for compressed frames.
+ *
+ * Return: A lower bound on the size of the workspace that is passed
+ *         to zstd_init_dstream().
+ */
+size_t zstd_dstream_workspace_bound(size_t max_window_size);
+
+/**
+ * zstd_init_dstream() - initialize a zstd streaming decompression context
+ * @max_window_size: The maximum window size allowed for compressed frames.
+ * @workspace: The workspace to emplace the context into. It must outlive
+ *             the returned context.
+ * @workspace_size: The size of workspace.
+ *                  Use zstd_dstream_workspace_bound(max_window_size) to
+ *                  determine how large the workspace must be.
+ *
+ * Return: The zstd streaming decompression context or NULL on error.
+ */
+zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
+	size_t workspace_size);
+
+/**
+ * zstd_reset_dstream() - reset the context using parameters from creation
+ * @dstream: The zstd streaming decompression context to reset.
+ *
+ * Resets the context using the parameters from creation. Skips dictionary
+ * loading, since it can be reused.
+ *
+ * Return: Zero or an error, which can be checked using zstd_is_error().
+ */
+size_t zstd_reset_dstream(zstd_dstream *dstream);
+
+/**
+ * zstd_decompress_stream() - streaming decompress some of input into output
+ * @dstream: The zstd streaming decompression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ *          decompressed data was written.
+ * @input: Source buffer. `input->pos` is updated to indicate how much data was
+ *         read. Note that it may not consume the entire input, in which case
+ *         `input->pos < input->size`, and it's up to the caller to present
+ *         remaining data again.
+ *
+ * The `input` and `output` buffers may be any size. Guaranteed to make some
+ * forward progress if `input` and `output` are not empty.
+ * zstd_decompress_stream() will not consume the last byte of the frame until
+ * the entire frame is flushed.
+ *
+ * Return: Returns 0 iff a frame is completely decoded and fully flushed.
+ *         Otherwise returns a hint for the number of bytes to use as the
+ *         input for the next function call or an error, which can be checked
+ *         using zstd_is_error(). The size hint will never load more than the
+ *         frame.
+ */
+size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
+	zstd_in_buffer *input);
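The decompression loop is symmetric: call zstd_decompress_stream() until it returns 0 for the frame. An editorial sketch, assuming dst is large enough to hold the entire decompressed frame:

#include <linux/zstd.h>

/* Decompress one frame from src into dst using an initialized dstream.
 * Returns the decompressed size, or a zstd error code on failure. */
static size_t stream_decompress(zstd_dstream *zds,
				void *dst, size_t dst_cap,
				const void *src, size_t src_len)
{
	zstd_in_buffer in = { src, src_len, 0 };
	zstd_out_buffer out = { dst, dst_cap, 0 };

	while (in.pos < in.size) {
		size_t ret = zstd_decompress_stream(zds, &out, &in);

		if (zstd_is_error(ret))
			return ret;
		if (ret == 0)
			break;	/* frame completely decoded and flushed */
	}
	return out.pos;
}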
+
+/* ======   Frame Inspection Functions   ====== */
+
+/**
+ * zstd_find_frame_compressed_size() - returns the size of a compressed frame
+ * @src: Source buffer. It should point to the start of a zstd encoded
+ *       frame or a skippable frame.
+ * @src_size: The size of the source buffer. It must be at least as large as
+ *            the size of the frame.
+ *
+ * Return: The compressed size of the frame pointed to by `src` or an error,
+ *         which can be checked with zstd_is_error().
+ *         Suitable to pass to ZSTD_decompress() or similar functions.
+ */
+size_t zstd_find_frame_compressed_size(const void *src, size_t src_size);
+
+/**
+ * zstd_register_sequence_producer() - exposes the zstd library function
+ * ZSTD_registerSequenceProducer(). This is used for the block-level external
+ * sequence producer API. See upstream zstd.h for detailed documentation.
+ */
+typedef ZSTD_sequenceProducer_F zstd_sequence_producer_f;
+void zstd_register_sequence_producer(
+	zstd_cctx *cctx,
+	void *sequence_producer_state,
+	zstd_sequence_producer_f sequence_producer
+);
+
+/**
+ * struct zstd_frame_params - zstd frame parameters stored in the frame header
+ * @frameContentSize: The frame content size, or ZSTD_CONTENTSIZE_UNKNOWN if not
+ *                    present.
+ * @windowSize: The window size, or 0 if the frame is a skippable frame.
+ * @blockSizeMax: The maximum block size.
+ * @frameType: The frame type (zstd or skippable).
+ * @headerSize: The size of the frame header.
+ * @dictID: The dictionary id, or 0 if not present.
+ * @checksumFlag: Whether a checksum was used.
+ *
+ * See zstd_lib.h.
+ */
+typedef ZSTD_FrameHeader zstd_frame_header;
+
+/**
+ * zstd_get_frame_header() - extracts parameters from a zstd or skippable frame
+ * @params: On success the frame parameters are written here.
+ * @src: The source buffer. It must point to a zstd or skippable frame.
+ * @src_size: The size of the source buffer.
+ *
+ * Return: 0 on success. If more data is required it returns how many bytes
+ *         must be provided to make forward progress. Otherwise it returns
+ *         an error, which can be checked using zstd_is_error().
+ */
+size_t zstd_get_frame_header(zstd_frame_header *params, const void *src,
+	size_t src_size);
+
+/**
+ * struct zstd_sequence - a sequence of literals or a match
+ * @offset: The offset of the match
+ * @litLength: The literal length of the sequence
+ * @matchLength: The match length of the sequence
+ * @rep: Represents which repeat offset is used
+ */
+typedef ZSTD_Sequence zstd_sequence;
+
+/**
+ * zstd_compress_sequences_and_literals() - compress an array of zstd_sequence and literals
+ * @cctx: The zstd compression context.
+ * @dst: The buffer to compress the data into.
+ * @dst_capacity: The size of the destination buffer.
+ * @in_seqs: The array of zstd_sequence to compress.
+ * @in_seqs_size: The number of sequences in in_seqs.
+ * @literals: The literals associated with the sequences to be compressed.
+ * @lit_size: The size of the literals in the literals buffer.
+ * @lit_capacity: The size of the literals buffer.
+ * @decompressed_size: The size of the input data.
+ *
+ * Return: The compressed size or an error, which can be checked using
+ *         zstd_is_error().
+ */
+size_t zstd_compress_sequences_and_literals(zstd_cctx *cctx, void *dst, size_t dst_capacity,
+	const zstd_sequence *in_seqs, size_t in_seqs_size,
+	const void *literals, size_t lit_size, size_t lit_capacity,
+	size_t decompressed_size);
+
+#endif /* LINUX_ZSTD_H */
diff --git a/contrib/linux-kernel/mem.h b/contrib/linux-kernel/mem.h
new file mode 100644
index 00000000000..d9bd752fe17
--- /dev/null
+++ b/contrib/linux-kernel/mem.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+/*-****************************************
+*  Dependencies
+******************************************/
+#include <asm/unaligned.h>	/* get_unaligned, put_unaligned* */
+#include <linux/compiler.h>	/* inline */
+#include <linux/swab.h>		/* swab32, swab64 */
+#include <linux/types.h>	/* size_t, ptrdiff_t */
+#include "debug.h"		/* DEBUG_STATIC_ASSERT */
+
+/*-****************************************
+*  Compiler specifics
+******************************************/
+#undef MEM_STATIC /* may be already defined from common/compiler.h */
+#define MEM_STATIC static inline
+
+/*-**************************************************************
+*  Basic Types
+*****************************************************************/
+typedef uint8_t  BYTE;
+typedef uint8_t  U8;
+typedef int8_t   S8;
+typedef uint16_t U16;
+typedef int16_t  S16;
+typedef uint32_t U32;
+typedef int32_t  S32;
+typedef uint64_t U64;
+typedef int64_t  S64;
+
+/*-**************************************************************
+*  Memory I/O API
+*****************************************************************/
+/*=== Static platform detection ===*/
+MEM_STATIC unsigned MEM_32bits(void);
+MEM_STATIC unsigned MEM_64bits(void);
+MEM_STATIC unsigned MEM_isLittleEndian(void);
+
+/*=== Native unaligned read/write ===*/
+MEM_STATIC U16 MEM_read16(const void* memPtr);
+MEM_STATIC U32 MEM_read32(const void* memPtr);
+MEM_STATIC U64 MEM_read64(const void* memPtr);
+MEM_STATIC size_t MEM_readST(const void* memPtr);
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value);
+MEM_STATIC void MEM_write32(void* memPtr, U32 value);
+MEM_STATIC void MEM_write64(void* memPtr, U64 value);
+
+/*=== Little endian unaligned read/write ===*/
+MEM_STATIC U16 MEM_readLE16(const void* memPtr);
+MEM_STATIC U32 MEM_readLE24(const void* memPtr);
+MEM_STATIC U32 MEM_readLE32(const void* memPtr);
+MEM_STATIC U64 MEM_readLE64(const void* memPtr);
+MEM_STATIC size_t MEM_readLEST(const void* memPtr);
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
+MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
+MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
+MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
+MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);
+
+/*=== Big endian unaligned read/write ===*/
+MEM_STATIC U32 MEM_readBE32(const void* memPtr);
+MEM_STATIC U64 MEM_readBE64(const void* memPtr);
+MEM_STATIC size_t MEM_readBEST(const void* memPtr);
+
+MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
+MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
+MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);
+
+/*=== Byteswap ===*/
+MEM_STATIC U32 MEM_swap32(U32 in);
+MEM_STATIC U64 MEM_swap64(U64 in);
+MEM_STATIC size_t MEM_swapST(size_t in);
+
+/*-**************************************************************
+*  Memory I/O Implementation
+*****************************************************************/
+MEM_STATIC unsigned MEM_32bits(void)
+{
+	return sizeof(size_t) == 4;
+}
+
+MEM_STATIC unsigned MEM_64bits(void)
+{
+	return sizeof(size_t) == 8;
+}
+
+#if defined(__LITTLE_ENDIAN)
+#define MEM_LITTLE_ENDIAN 1
+#else
+#define MEM_LITTLE_ENDIAN 0
+#endif
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+	return MEM_LITTLE_ENDIAN;
+}
+
+MEM_STATIC
U16 MEM_read16(const void *memPtr) +{ + return get_unaligned((const U16 *)memPtr); +} + +MEM_STATIC U32 MEM_read32(const void *memPtr) +{ + return get_unaligned((const U32 *)memPtr); +} + +MEM_STATIC U64 MEM_read64(const void *memPtr) +{ + return get_unaligned((const U64 *)memPtr); +} + +MEM_STATIC size_t MEM_readST(const void *memPtr) +{ + return get_unaligned((const size_t *)memPtr); +} + +MEM_STATIC void MEM_write16(void *memPtr, U16 value) +{ + put_unaligned(value, (U16 *)memPtr); +} + +MEM_STATIC void MEM_write32(void *memPtr, U32 value) +{ + put_unaligned(value, (U32 *)memPtr); +} + +MEM_STATIC void MEM_write64(void *memPtr, U64 value) +{ + put_unaligned(value, (U64 *)memPtr); +} + +/*=== Little endian r/w ===*/ + +MEM_STATIC U16 MEM_readLE16(const void *memPtr) +{ + return get_unaligned_le16(memPtr); +} + +MEM_STATIC void MEM_writeLE16(void *memPtr, U16 val) +{ + put_unaligned_le16(val, memPtr); +} + +MEM_STATIC U32 MEM_readLE24(const void *memPtr) +{ + return MEM_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16); +} + +MEM_STATIC void MEM_writeLE24(void *memPtr, U32 val) +{ + MEM_writeLE16(memPtr, (U16)val); + ((BYTE *)memPtr)[2] = (BYTE)(val >> 16); +} + +MEM_STATIC U32 MEM_readLE32(const void *memPtr) +{ + return get_unaligned_le32(memPtr); +} + +MEM_STATIC void MEM_writeLE32(void *memPtr, U32 val32) +{ + put_unaligned_le32(val32, memPtr); +} + +MEM_STATIC U64 MEM_readLE64(const void *memPtr) +{ + return get_unaligned_le64(memPtr); +} + +MEM_STATIC void MEM_writeLE64(void *memPtr, U64 val64) +{ + put_unaligned_le64(val64, memPtr); +} + +MEM_STATIC size_t MEM_readLEST(const void *memPtr) +{ + if (MEM_32bits()) + return (size_t)MEM_readLE32(memPtr); + else + return (size_t)MEM_readLE64(memPtr); +} + +MEM_STATIC void MEM_writeLEST(void *memPtr, size_t val) +{ + if (MEM_32bits()) + MEM_writeLE32(memPtr, (U32)val); + else + MEM_writeLE64(memPtr, (U64)val); +} + +/*=== Big endian r/w ===*/ + +MEM_STATIC U32 MEM_readBE32(const void *memPtr) +{ + return get_unaligned_be32(memPtr); +} + +MEM_STATIC void MEM_writeBE32(void *memPtr, U32 val32) +{ + put_unaligned_be32(val32, memPtr); +} + +MEM_STATIC U64 MEM_readBE64(const void *memPtr) +{ + return get_unaligned_be64(memPtr); +} + +MEM_STATIC void MEM_writeBE64(void *memPtr, U64 val64) +{ + put_unaligned_be64(val64, memPtr); +} + +MEM_STATIC size_t MEM_readBEST(const void *memPtr) +{ + if (MEM_32bits()) + return (size_t)MEM_readBE32(memPtr); + else + return (size_t)MEM_readBE64(memPtr); +} + +MEM_STATIC void MEM_writeBEST(void *memPtr, size_t val) +{ + if (MEM_32bits()) + MEM_writeBE32(memPtr, (U32)val); + else + MEM_writeBE64(memPtr, (U64)val); +} + +MEM_STATIC U32 MEM_swap32(U32 in) +{ + return swab32(in); +} + +MEM_STATIC U64 MEM_swap64(U64 in) +{ + return swab64(in); +} + +MEM_STATIC size_t MEM_swapST(size_t in) +{ + if (MEM_32bits()) + return (size_t)MEM_swap32((U32)in); + else + return (size_t)MEM_swap64((U64)in); +} + +#endif /* MEM_H_MODULE */ diff --git a/contrib/linux-kernel/test/.gitignore b/contrib/linux-kernel/test/.gitignore deleted file mode 100644 index 4fc10228dea..00000000000 --- a/contrib/linux-kernel/test/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*Test diff --git a/contrib/linux-kernel/test/DecompressCrash.c b/contrib/linux-kernel/test/DecompressCrash.c deleted file mode 100644 index 2ab7dfe528a..00000000000 --- a/contrib/linux-kernel/test/DecompressCrash.c +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. - * All rights reserved. 
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- */
-
-/*
-  This program takes a file in input,
-  performs a zstd round-trip test (compression - decompress)
-  compares the result with original
-  and generates a crash (double free) on corruption detection.
-*/
-
-/*===========================================
-*   Dependencies
-*==========================================*/
-#include <stddef.h>     /* size_t */
-#include <stdlib.h>     /* malloc, free, exit */
-#include <stdio.h>      /* fprintf */
-#include <linux/zstd.h>
-
-/*===========================================
-*   Macros
-*==========================================*/
-#define MIN(a,b)  ( (a) < (b) ? (a) : (b) )
-
-static ZSTD_DCtx *dctx = NULL;
-void *dws = NULL;
-static void* rBuff = NULL;
-static size_t buffSize = 0;
-
-static void crash(int errorCode){
-    /* abort if AFL/libfuzzer, exit otherwise */
-    #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION /* could also use __AFL_COMPILER */
-    abort();
-    #else
-    exit(errorCode);
-    #endif
-}
-
-static void decompressCheck(const void* srcBuff, size_t srcBuffSize)
-{
-    size_t const neededBuffSize = 20 * srcBuffSize;
-
-    /* Allocate all buffers and contexts if not already allocated */
-    if (neededBuffSize > buffSize) {
-        free(rBuff);
-        buffSize = 0;
-
-        rBuff = malloc(neededBuffSize);
-        if (!rBuff) {
-            fprintf(stderr, "not enough memory ! \n");
-            crash(1);
-        }
-        buffSize = neededBuffSize;
-    }
-    if (!dctx) {
-        size_t const workspaceSize = ZSTD_DCtxWorkspaceBound();
-        dws = malloc(workspaceSize);
-        if (!dws) {
-            fprintf(stderr, "not enough memory ! \n");
-            crash(1);
-        }
-        dctx = ZSTD_initDCtx(dws, workspaceSize);
-        if (!dctx) {
-            fprintf(stderr, "not enough memory ! \n");
-            crash(1);
-        }
-    }
-    ZSTD_decompressDCtx(dctx, rBuff, buffSize, srcBuff, srcBuffSize);
-
-#ifndef SKIP_FREE
-    free(dws); dws = NULL; dctx = NULL;
-    free(rBuff); rBuff = NULL;
-    buffSize = 0;
-#endif
-}
-
-int LLVMFuzzerTestOneInput(const unsigned char *srcBuff, size_t srcBuffSize) {
-    decompressCheck(srcBuff, srcBuffSize);
-    return 0;
-}
diff --git a/contrib/linux-kernel/test/Makefile b/contrib/linux-kernel/test/Makefile
index 8411462c9d0..67b55e66504 100644
--- a/contrib/linux-kernel/test/Makefile
+++ b/contrib/linux-kernel/test/Makefile
@@ -1,43 +1,49 @@
-
-IFLAGS := -isystem include/ -I ../include/ -I ../lib/zstd/ -isystem googletest/googletest/include -isystem ../../../lib/common/
-
-SOURCES := $(wildcard ../lib/zstd/*.c)
-OBJECTS := $(patsubst %.c,%.o,$(SOURCES))
-
-ARFLAGS := rcs
-CXXFLAGS += -std=c++11 -g -O3 -Wcast-align
-CFLAGS += -g -O3 -Wframe-larger-than=400 -Wcast-align
-CPPFLAGS += $(IFLAGS)
-
-../lib/zstd/libzstd.a: $(OBJECTS)
+# ################################################################
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
+# You may select, at your option, one of the above-listed licenses.
+# ################################################################
+
+LINUX := ../linux
+LINUX_ZSTDLIB := $(LINUX)/lib/zstd
+
+CPPFLAGS += -I$(LINUX)/include -I$(LINUX_ZSTDLIB) -Iinclude -DNDEBUG -Wno-deprecated-declarations
+# Don't poison the workspace, it currently doesn't work with static allocation and workspace reuse
+CPPFLAGS += -DZSTD_ASAN_DONT_POISON_WORKSPACE
+
+LINUX_ZSTD_MODULE := $(wildcard $(LINUX_ZSTDLIB)/*.c)
+LINUX_ZSTD_COMMON := $(wildcard $(LINUX_ZSTDLIB)/common/*.c)
+LINUX_ZSTD_COMPRESS := $(wildcard $(LINUX_ZSTDLIB)/compress/*.c)
+LINUX_ZSTD_DECOMPRESS := $(wildcard $(LINUX_ZSTDLIB)/decompress/*.c $(LINUX_ZSTDLIB)/decompress/*.S)
+LINUX_ZSTD_FILES := $(LINUX_ZSTD_MODULE) $(LINUX_ZSTD_COMMON) $(LINUX_ZSTD_COMPRESS) $(LINUX_ZSTD_DECOMPRESS)
+LINUX_ZSTD_OBJECTS0 := $(LINUX_ZSTD_FILES:.c=.o)
+LINUX_ZSTD_OBJECTS := $(LINUX_ZSTD_OBJECTS0:.S=.o)
+
+%.o: %.S
+	$(COMPILE.S) $(OUTPUT_OPTION) $<
+
+liblinuxzstd.a: $(LINUX_ZSTD_OBJECTS)
 	$(AR) $(ARFLAGS) $@ $^
 
-DecompressCrash: DecompressCrash.o $(OBJECTS) libFuzzer.a
-	$(CXX) $(CPPFLAGS) $(CXXFLAGS) $(LDFLAGS) $^ -o $@
-
-RoundTripCrash: RoundTripCrash.o $(OBJECTS) ../lib/xxhash.o libFuzzer.a
-	$(CXX) $(CPPFLAGS) $(CXXFLAGS) $(LDFLAGS) $^ -o $@
-
-UserlandTest: UserlandTest.cpp ../lib/zstd/libzstd.a ../lib/xxhash.o
-	$(CXX) $(CXXFLAGS) $(CPPFLAGS) $^ googletest/build/googlemock/gtest/libgtest.a googletest/build/googlemock/gtest/libgtest_main.a -o $@
-
-XXHashUserlandTest: XXHashUserlandTest.cpp ../lib/xxhash.o ../../../lib/common/xxhash.o
-	$(CXX) $(CXXFLAGS) $(CFLAGS) $(CPPFLAGS) $^ googletest/build/googlemock/gtest/libgtest.a googletest/build/googlemock/gtest/libgtest_main.a -o $@
+test: test.c liblinuxzstd.a
+	$(CC) $(LDFLAGS) $(CPPFLAGS) $(CFLAGS) $^ -o $@
 
-# Install libfuzzer
-libFuzzer.a:
-	@$(RM) -rf Fuzzer
-	@git clone https://chromium.googlesource.com/chromium/llvm-project/llvm/lib/Fuzzer
-	@./Fuzzer/build.sh
+static_test: static_test.c
+	$(CC) $(LDFLAGS) $(CPPFLAGS) $(CFLAGS) $^ -o $@
 
-# Install googletest
-.PHONY: googletest
-googletest:
-	@$(RM) -rf googletest
-	@git clone https://github.com/google/googletest
-	@mkdir -p googletest/build
-	@cd googletest/build && cmake .. && $(MAKE)
+run-test: test static_test
+	./macro-test.sh
+	./test
+	./static_test
 
+.PHONY: clean
 clean:
-	$(RM) -f *.{o,a} ../lib/zstd/*.{o,a} ../lib/*.o
-	$(RM) -f DecompressCrash RoundTripCrash UserlandTest XXHashUserlandTest
+	$(RM) -f $(LINUX_ZSTDLIB)/*.o
+	$(RM) -f $(LINUX_ZSTDLIB)/**/*.o
+	$(RM) -f *.o *.a
+	$(RM) -f static_test
+	$(RM) -f test
diff --git a/contrib/linux-kernel/test/RoundTripCrash.c b/contrib/linux-kernel/test/RoundTripCrash.c
deleted file mode 100644
index 4f968023d8a..00000000000
--- a/contrib/linux-kernel/test/RoundTripCrash.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
-*/ - -/*=========================================== -* Dependencies -*==========================================*/ -#include /* size_t */ -#include /* malloc, free, exit */ -#include /* fprintf */ -#include -#include - -/*=========================================== -* Macros -*==========================================*/ -#define MIN(a,b) ( (a) < (b) ? (a) : (b) ) - -static const int kMaxClevel = 22; - -static ZSTD_CCtx *cctx = NULL; -void *cws = NULL; -static ZSTD_DCtx *dctx = NULL; -void *dws = NULL; -static void* cBuff = NULL; -static void* rBuff = NULL; -static size_t buffSize = 0; - - -/** roundTripTest() : -* Compresses `srcBuff` into `compressedBuff`, -* then decompresses `compressedBuff` into `resultBuff`. -* Compression level used is derived from first content byte. -* @return : result of decompression, which should be == `srcSize` -* or an error code if either compression or decompression fails. -* Note : `compressedBuffCapacity` should be `>= ZSTD_compressBound(srcSize)` -* for compression to be guaranteed to work */ -static size_t roundTripTest(void* resultBuff, size_t resultBuffCapacity, - void* compressedBuff, size_t compressedBuffCapacity, - const void* srcBuff, size_t srcBuffSize) -{ - size_t const hashLength = MIN(128, srcBuffSize); - unsigned const h32 = xxh32(srcBuff, hashLength, 0); - int const cLevel = h32 % kMaxClevel; - ZSTD_parameters const params = ZSTD_getParams(cLevel, srcBuffSize, 0); - size_t const cSize = ZSTD_compressCCtx(cctx, compressedBuff, compressedBuffCapacity, srcBuff, srcBuffSize, params); - if (ZSTD_isError(cSize)) { - fprintf(stderr, "Compression error : %u \n", ZSTD_getErrorCode(cSize)); - return cSize; - } - return ZSTD_decompressDCtx(dctx, resultBuff, resultBuffCapacity, compressedBuff, cSize); -} - - -static size_t checkBuffers(const void* buff1, const void* buff2, size_t buffSize) -{ - const char* ip1 = (const char*)buff1; - const char* ip2 = (const char*)buff2; - size_t pos; - - for (pos=0; pos buffSize) { - free(cBuff); - free(rBuff); - buffSize = 0; - - cBuff = malloc(neededBuffSize); - rBuff = malloc(neededBuffSize); - if (!cBuff || !rBuff) { - fprintf(stderr, "not enough memory ! \n"); - crash(1); - } - buffSize = neededBuffSize; - } - if (!cctx) { - ZSTD_compressionParameters const params = ZSTD_getCParams(kMaxClevel, 0, 0); - size_t const workspaceSize = ZSTD_CCtxWorkspaceBound(params); - cws = malloc(workspaceSize); - if (!cws) { - fprintf(stderr, "not enough memory ! \n"); - crash(1); - } - cctx = ZSTD_initCCtx(cws, workspaceSize); - if (!cctx) { - fprintf(stderr, "not enough memory ! \n"); - crash(1); - } - } - if (!dctx) { - size_t const workspaceSize = ZSTD_DCtxWorkspaceBound(); - dws = malloc(workspaceSize); - if (!dws) { - fprintf(stderr, "not enough memory ! \n"); - crash(1); - } - dctx = ZSTD_initDCtx(dws, workspaceSize); - if (!dctx) { - fprintf(stderr, "not enough memory ! 
\n"); - crash(1); - } - } - - { size_t const result = roundTripTest(rBuff, buffSize, cBuff, buffSize, srcBuff, srcBuffSize); - if (ZSTD_isError(result)) { - fprintf(stderr, "roundTripTest error : %u \n", ZSTD_getErrorCode(result)); - crash(1); - } - if (result != srcBuffSize) { - fprintf(stderr, "Incorrect regenerated size : %u != %u\n", (unsigned)result, (unsigned)srcBuffSize); - crash(1); - } - if (checkBuffers(srcBuff, rBuff, srcBuffSize) != srcBuffSize) { - fprintf(stderr, "Silent decoding corruption !!!"); - crash(1); - } - } - -#ifndef SKIP_FREE - free(cws); cws = NULL; cctx = NULL; - free(dws); dws = NULL; dctx = NULL; - free(cBuff); cBuff = NULL; - free(rBuff); rBuff = NULL; - buffSize = 0; -#endif -} - -int LLVMFuzzerTestOneInput(const unsigned char *srcBuff, size_t srcBuffSize) { - roundTripCheck(srcBuff, srcBuffSize); - return 0; -} diff --git a/contrib/linux-kernel/test/UserlandTest.cpp b/contrib/linux-kernel/test/UserlandTest.cpp deleted file mode 100644 index 03058382fbf..00000000000 --- a/contrib/linux-kernel/test/UserlandTest.cpp +++ /dev/null @@ -1,565 +0,0 @@ -extern "C" { -#include -} -#include -#include -#include -#include - -using namespace std; - -namespace { -struct WorkspaceDeleter { - void *memory; - - template void operator()(T const *) { free(memory); } -}; - -std::unique_ptr -createCCtx(ZSTD_compressionParameters cParams) { - size_t const workspaceSize = ZSTD_CCtxWorkspaceBound(cParams); - void *workspace = malloc(workspaceSize); - std::unique_ptr cctx{ - ZSTD_initCCtx(workspace, workspaceSize), WorkspaceDeleter{workspace}}; - if (!cctx) { - throw std::runtime_error{"Bad cctx"}; - } - return cctx; -} - -std::unique_ptr -createCCtx(int level, unsigned long long estimatedSrcSize = 0, - size_t dictSize = 0) { - auto const cParams = ZSTD_getCParams(level, estimatedSrcSize, dictSize); - return createCCtx(cParams); -} - -std::unique_ptr -createDCtx() { - size_t const workspaceSize = ZSTD_DCtxWorkspaceBound(); - void *workspace = malloc(workspaceSize); - std::unique_ptr dctx{ - ZSTD_initDCtx(workspace, workspaceSize), WorkspaceDeleter{workspace}}; - if (!dctx) { - throw std::runtime_error{"Bad dctx"}; - } - return dctx; -} - -std::unique_ptr -createCDict(std::string const& dict, ZSTD_parameters params) { - size_t const workspaceSize = ZSTD_CDictWorkspaceBound(params.cParams); - void *workspace = malloc(workspaceSize); - std::unique_ptr cdict{ - ZSTD_initCDict(dict.data(), dict.size(), params, workspace, - workspaceSize), - WorkspaceDeleter{workspace}}; - if (!cdict) { - throw std::runtime_error{"Bad cdict"}; - } - return cdict; -} - -std::unique_ptr -createCDict(std::string const& dict, int level) { - auto const params = ZSTD_getParams(level, 0, dict.size()); - return createCDict(dict, params); -} - -std::unique_ptr -createDDict(std::string const& dict) { - size_t const workspaceSize = ZSTD_DDictWorkspaceBound(); - void *workspace = malloc(workspaceSize); - std::unique_ptr ddict{ - ZSTD_initDDict(dict.data(), dict.size(), workspace, workspaceSize), - WorkspaceDeleter{workspace}}; - if (!ddict) { - throw std::runtime_error{"Bad ddict"}; - } - return ddict; -} - -std::unique_ptr -createCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize = 0) { - size_t const workspaceSize = ZSTD_CStreamWorkspaceBound(params.cParams); - void *workspace = malloc(workspaceSize); - std::unique_ptr zcs{ - ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize)}; - if (!zcs) { - throw std::runtime_error{"bad cstream"}; - } - return zcs; -} - -std::unique_ptr 
-createCStream(ZSTD_compressionParameters cParams, ZSTD_CDict const &cdict, - unsigned long long pledgedSrcSize = 0) { - size_t const workspaceSize = ZSTD_CStreamWorkspaceBound(cParams); - void *workspace = malloc(workspaceSize); - std::unique_ptr zcs{ - ZSTD_initCStream_usingCDict(&cdict, pledgedSrcSize, workspace, - workspaceSize)}; - if (!zcs) { - throw std::runtime_error{"bad cstream"}; - } - return zcs; -} - -std::unique_ptr -createCStream(int level, unsigned long long pledgedSrcSize = 0) { - auto const params = ZSTD_getParams(level, pledgedSrcSize, 0); - return createCStream(params, pledgedSrcSize); -} - -std::unique_ptr -createDStream(size_t maxWindowSize = (1ULL << ZSTD_WINDOWLOG_MAX), - ZSTD_DDict const *ddict = nullptr) { - size_t const workspaceSize = ZSTD_DStreamWorkspaceBound(maxWindowSize); - void *workspace = malloc(workspaceSize); - std::unique_ptr zds{ - ddict == nullptr - ? ZSTD_initDStream(maxWindowSize, workspace, workspaceSize) - : ZSTD_initDStream_usingDDict(maxWindowSize, ddict, workspace, - workspaceSize)}; - if (!zds) { - throw std::runtime_error{"bad dstream"}; - } - return zds; -} - -std::string compress(ZSTD_CCtx &cctx, std::string const &data, - ZSTD_parameters params, std::string const &dict = "") { - std::string compressed; - compressed.resize(ZSTD_compressBound(data.size())); - size_t const rc = - dict.empty() - ? ZSTD_compressCCtx(&cctx, &compressed[0], compressed.size(), - data.data(), data.size(), params) - : ZSTD_compress_usingDict(&cctx, &compressed[0], compressed.size(), - data.data(), data.size(), dict.data(), - dict.size(), params); - if (ZSTD_isError(rc)) { - throw std::runtime_error{"compression error"}; - } - compressed.resize(rc); - return compressed; -} - -std::string compress(ZSTD_CCtx& cctx, std::string const& data, int level, std::string const& dict = "") { - auto const params = ZSTD_getParams(level, 0, dict.size()); - return compress(cctx, data, params, dict); -} - -std::string decompress(ZSTD_DCtx& dctx, std::string const& compressed, size_t decompressedSize, std::string const& dict = "") { - std::string decompressed; - decompressed.resize(decompressedSize); - size_t const rc = - dict.empty() - ? 
ZSTD_decompressDCtx(&dctx, &decompressed[0], decompressed.size(), - compressed.data(), compressed.size()) - : ZSTD_decompress_usingDict( - &dctx, &decompressed[0], decompressed.size(), compressed.data(), - compressed.size(), dict.data(), dict.size()); - if (ZSTD_isError(rc)) { - throw std::runtime_error{"decompression error"}; - } - decompressed.resize(rc); - return decompressed; -} - -std::string compress(ZSTD_CCtx& cctx, std::string const& data, ZSTD_CDict& cdict) { - std::string compressed; - compressed.resize(ZSTD_compressBound(data.size())); - size_t const rc = - ZSTD_compress_usingCDict(&cctx, &compressed[0], compressed.size(), - data.data(), data.size(), &cdict); - if (ZSTD_isError(rc)) { - throw std::runtime_error{"compression error"}; - } - compressed.resize(rc); - return compressed; -} - -std::string decompress(ZSTD_DCtx& dctx, std::string const& compressed, size_t decompressedSize, ZSTD_DDict& ddict) { - std::string decompressed; - decompressed.resize(decompressedSize); - size_t const rc = - ZSTD_decompress_usingDDict(&dctx, &decompressed[0], decompressed.size(), - compressed.data(), compressed.size(), &ddict); - if (ZSTD_isError(rc)) { - throw std::runtime_error{"decompression error"}; - } - decompressed.resize(rc); - return decompressed; -} - -std::string compress(ZSTD_CStream& zcs, std::string const& data) { - std::string compressed; - compressed.resize(ZSTD_compressBound(data.size())); - ZSTD_inBuffer in = {data.data(), data.size(), 0}; - ZSTD_outBuffer out = {&compressed[0], compressed.size(), 0}; - while (in.pos != in.size) { - size_t const rc = ZSTD_compressStream(&zcs, &out, &in); - if (ZSTD_isError(rc)) { - throw std::runtime_error{"compress stream failed"}; - } - } - size_t const rc = ZSTD_endStream(&zcs, &out); - if (rc != 0) { - throw std::runtime_error{"compress end failed"}; - } - compressed.resize(out.pos); - return compressed; -} - -std::string decompress(ZSTD_DStream &zds, std::string const &compressed, - size_t decompressedSize) { - std::string decompressed; - decompressed.resize(decompressedSize); - ZSTD_inBuffer in = {compressed.data(), compressed.size(), 0}; - ZSTD_outBuffer out = {&decompressed[0], decompressed.size(), 0}; - while (in.pos != in.size) { - size_t const rc = ZSTD_decompressStream(&zds, &out, &in); - if (ZSTD_isError(rc)) { - throw std::runtime_error{"decompress stream failed"}; - } - } - decompressed.resize(out.pos); - return decompressed; -} - -std::string makeData(size_t size) { - std::string result; - result.reserve(size + 20); - while (result.size() < size) { - result += "Hello world"; - } - return result; -} - -std::string const kData = "Hello world"; -std::string const kPlainDict = makeData(10000); -std::string const kZstdDict{ - "\x37\xA4\x30\xEC\x99\x69\x58\x1C\x21\x10\xD8\x4A\x84\x01\xCC\xF3" - "\x3C\xCF\x9B\x25\xBB\xC9\x6E\xB2\x9B\xEC\x26\xAD\xCF\xDF\x4E\xCD" - "\xF3\x2C\x3A\x21\x84\x10\x42\x08\x21\x01\x33\xF1\x78\x3C\x1E\x8F" - "\xC7\xE3\xF1\x78\x3C\xCF\xF3\xBC\xF7\xD4\x42\x41\x41\x41\x41\x41" - "\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41" - "\x41\x41\x41\x41\xA1\x50\x28\x14\x0A\x85\x42\xA1\x50\x28\x14\x0A" - "\x85\xA2\x28\x8A\xA2\x28\x4A\x29\x7D\x74\xE1\xE1\xE1\xE1\xE1\xE1" - "\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xF1\x78\x3C" - "\x1E\x8F\xC7\xE3\xF1\x78\x9E\xE7\x79\xEF\x01\x01\x00\x00\x00\x04" - "\x00\x00\x00\x08\x00\x00\x00" - "0123456789", - 161}; -} - -TEST(Block, CCtx) { - auto cctx = createCCtx(1); - auto const compressed = compress(*cctx, kData, 1); - auto dctx = createDCtx(); - auto 
const decompressed = decompress(*dctx, compressed, kData.size()); - EXPECT_EQ(kData, decompressed); -} - -TEST(Block, NoContentSize) { - auto cctx = createCCtx(1); - auto const c = compress(*cctx, kData, 1); - auto const size = ZSTD_findDecompressedSize(c.data(), c.size()); - EXPECT_EQ(ZSTD_CONTENTSIZE_UNKNOWN, size); -} - -TEST(Block, ContentSize) { - auto cctx = createCCtx(1); - auto params = ZSTD_getParams(1, 0, 0); - params.fParams.contentSizeFlag = 1; - auto const c = compress(*cctx, kData, params); - auto const size = ZSTD_findDecompressedSize(c.data(), c.size()); - EXPECT_EQ(kData.size(), size); -} - -TEST(Block, CCtxLevelIncrease) { - std::string c; - auto cctx = createCCtx(22); - auto dctx = createDCtx(); - for (int level = 1; level <= 22; ++level) { - auto compressed = compress(*cctx, kData, level); - auto const decompressed = decompress(*dctx, compressed, kData.size()); - EXPECT_EQ(kData, decompressed); - } -} - -TEST(Block, PlainDict) { - auto cctx = createCCtx(1); - auto const compressed = compress(*cctx, kData, 1, kPlainDict); - auto dctx = createDCtx(); - EXPECT_ANY_THROW(decompress(*dctx, compressed, kData.size())); - auto const decompressed = - decompress(*dctx, compressed, kData.size(), kPlainDict); - EXPECT_EQ(kData, decompressed); -} - -TEST(Block, ZstdDict) { - auto cctx = createCCtx(1); - auto const compressed = compress(*cctx, kData, 1, kZstdDict); - auto dctx = createDCtx(); - EXPECT_ANY_THROW(decompress(*dctx, compressed, kData.size())); - auto const decompressed = - decompress(*dctx, compressed, kData.size(), kZstdDict); - EXPECT_EQ(kData, decompressed); -} - -TEST(Block, PreprocessedPlainDict) { - auto cctx = createCCtx(1); - auto const cdict = createCDict(kPlainDict, 1); - auto const compressed = compress(*cctx, kData, *cdict); - auto dctx = createDCtx(); - auto const ddict = createDDict(kPlainDict); - EXPECT_ANY_THROW(decompress(*dctx, compressed, kData.size())); - auto const decompressed = - decompress(*dctx, compressed, kData.size(), *ddict); - EXPECT_EQ(kData, decompressed); -} - -TEST(Block, PreprocessedZstdDict) { - auto cctx = createCCtx(1); - auto const cdict = createCDict(kZstdDict, 1); - auto const compressed = compress(*cctx, kData, *cdict); - auto dctx = createDCtx(); - auto const ddict = createDDict(kZstdDict); - EXPECT_ANY_THROW(decompress(*dctx, compressed, kData.size())); - auto const decompressed = - decompress(*dctx, compressed, kData.size(), *ddict); - EXPECT_EQ(kData, decompressed); -} - -TEST(Block, ReinitializeCCtx) { - auto cctx = createCCtx(1); - { - auto const compressed = compress(*cctx, kData, 1); - auto dctx = createDCtx(); - auto const decompressed = decompress(*dctx, compressed, kData.size()); - EXPECT_EQ(kData, decompressed); - } - // Create the cctx with the same memory - auto d = cctx.get_deleter(); - auto raw = cctx.release(); - auto params = ZSTD_getParams(1, 0, 0); - cctx.reset( - ZSTD_initCCtx(d.memory, ZSTD_CCtxWorkspaceBound(params.cParams))); - // Repeat - { - auto const compressed = compress(*cctx, kData, 1); - auto dctx = createDCtx(); - auto const decompressed = decompress(*dctx, compressed, kData.size()); - EXPECT_EQ(kData, decompressed); - } -} - -TEST(Block, ReinitializeDCtx) { - auto dctx = createDCtx(); - { - auto cctx = createCCtx(1); - auto const compressed = compress(*cctx, kData, 1); - auto const decompressed = decompress(*dctx, compressed, kData.size()); - EXPECT_EQ(kData, decompressed); - } - // Create the cctx with the same memory - auto d = dctx.get_deleter(); - auto raw = dctx.release(); - 
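/*
 * Sketch of the caller-managed workspace pattern that ReinitializeCCtx
 * above and ReinitializeDCtx here exercise (illustrative; variable
 * names are hypothetical): because the context lives entirely inside a
 * buffer sized by the *WorkspaceBound() functions, a fresh context can
 * be created by re-initializing the same memory instead of
 * reallocating.
 *
 *   size_t const bound = ZSTD_DCtxWorkspaceBound();
 *   void *wksp = malloc(bound);
 *   ZSTD_DCtx *d = ZSTD_initDCtx(wksp, bound);   // first lifetime
 *   ... decompress with d ...
 *   d = ZSTD_initDCtx(wksp, bound);              // reuse the memory
 *   ... the previous context must not be used again ...
 *   free(wksp);
 */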
dctx.reset(ZSTD_initDCtx(d.memory, ZSTD_DCtxWorkspaceBound())); - // Repeat - { - auto cctx = createCCtx(1); - auto const compressed = compress(*cctx, kData, 1); - auto dctx = createDCtx(); - auto const decompressed = decompress(*dctx, compressed, kData.size()); - EXPECT_EQ(kData, decompressed); - } -} - -TEST(Stream, Basic) { - auto zcs = createCStream(1); - auto const compressed = compress(*zcs, kData); - auto zds = createDStream(); - auto const decompressed = decompress(*zds, compressed, kData.size()); - EXPECT_EQ(kData, decompressed); -} - -TEST(Stream, PlainDict) { - auto params = ZSTD_getParams(1, kData.size(), kPlainDict.size()); - params.cParams.windowLog = 17; - auto cdict = createCDict(kPlainDict, params); - auto zcs = createCStream(params.cParams, *cdict, kData.size()); - auto const compressed = compress(*zcs, kData); - auto const contentSize = - ZSTD_findDecompressedSize(compressed.data(), compressed.size()); - EXPECT_ANY_THROW(decompress(*createDStream(), compressed, kData.size())); - auto ddict = createDDict(kPlainDict); - auto zds = createDStream(1 << 17, ddict.get()); - auto const decompressed = decompress(*zds, compressed, kData.size()); - EXPECT_EQ(kData, decompressed); -} - -TEST(Stream, ZstdDict) { - auto params = ZSTD_getParams(1, 0, 0); - params.cParams.windowLog = 17; - auto cdict = createCDict(kZstdDict, 1); - auto zcs = createCStream(params.cParams, *cdict); - auto const compressed = compress(*zcs, kData); - EXPECT_ANY_THROW(decompress(*createDStream(), compressed, kData.size())); - auto ddict = createDDict(kZstdDict); - auto zds = createDStream(1 << 17, ddict.get()); - auto const decompressed = decompress(*zds, compressed, kData.size()); - EXPECT_EQ(kData, decompressed); -} - -TEST(Stream, ResetCStream) { - auto zcs = createCStream(1); - auto zds = createDStream(); - { - auto const compressed = compress(*zcs, kData); - auto const decompressed = decompress(*zds, compressed, kData.size()); - EXPECT_EQ(kData, decompressed); - } - { - ZSTD_resetCStream(zcs.get(), 0); - auto const compressed = compress(*zcs, kData); - auto const decompressed = decompress(*zds, compressed, kData.size()); - EXPECT_EQ(kData, decompressed); - } -} - -TEST(Stream, ResetDStream) { - auto zcs = createCStream(1); - auto zds = createDStream(); - auto const compressed = compress(*zcs, kData); - EXPECT_ANY_THROW(decompress(*zds, kData, kData.size())); - EXPECT_ANY_THROW(decompress(*zds, compressed, kData.size())); - ZSTD_resetDStream(zds.get()); - auto const decompressed = decompress(*zds, compressed, kData.size()); - EXPECT_EQ(kData, decompressed); -} - -TEST(Stream, Flush) { - auto zcs = createCStream(1); - auto zds = createDStream(); - std::string compressed; - { - compressed.resize(ZSTD_compressBound(kData.size())); - ZSTD_inBuffer in = {kData.data(), kData.size(), 0}; - ZSTD_outBuffer out = {&compressed[0], compressed.size(), 0}; - while (in.pos != in.size) { - size_t const rc = ZSTD_compressStream(zcs.get(), &out, &in); - if (ZSTD_isError(rc)) { - throw std::runtime_error{"compress stream failed"}; - } - } - EXPECT_EQ(0, out.pos); - size_t const rc = ZSTD_flushStream(zcs.get(), &out); - if (rc != 0) { - throw std::runtime_error{"compress end failed"}; - } - compressed.resize(out.pos); - EXPECT_LT(0, out.pos); - } - std::string decompressed; - { - decompressed.resize(kData.size()); - ZSTD_inBuffer in = {compressed.data(), compressed.size(), 0}; - ZSTD_outBuffer out = {&decompressed[0], decompressed.size(), 0}; - while (in.pos != in.size) { - size_t const rc = ZSTD_decompressStream(zds.get(), 
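/*
 * What this Flush test demonstrates, stated briefly:
 * ZSTD_compressStream() may keep all input buffered (out.pos can stay
 * 0), ZSTD_flushStream() forces the buffered data out as complete
 * blocks so everything written so far is decodable mid-stream, and
 * ZSTD_endStream() additionally terminates the frame. Hedged sketch:
 *
 *   size_t const remaining = ZSTD_flushStream(zcs, &out);
 *   // 0 means fully flushed; nonzero means data is still buffered
 *   // (or an error occurred; check with ZSTD_isError)
 */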
&out, &in); - if (ZSTD_isError(rc)) { - throw std::runtime_error{"decompress stream failed"}; - } - } - } - EXPECT_EQ(kData, decompressed); -} - -TEST(Stream, DStreamLevelIncrease) { - auto zds = createDStream(); - for (int level = 1; level <= 22; ++level) { - auto zcs = createCStream(level); - auto compressed = compress(*zcs, kData); - ZSTD_resetDStream(zds.get()); - auto const decompressed = decompress(*zds, compressed, kData.size()); - EXPECT_EQ(kData, decompressed); - } -} - -#define TEST_SYMBOL(symbol) \ - do { \ - extern void *__##symbol; \ - EXPECT_NE((void *)0, __##symbol); \ - } while (0) - -TEST(API, Symbols) { - TEST_SYMBOL(ZSTD_CCtxWorkspaceBound); - TEST_SYMBOL(ZSTD_initCCtx); - TEST_SYMBOL(ZSTD_compressCCtx); - TEST_SYMBOL(ZSTD_compress_usingDict); - TEST_SYMBOL(ZSTD_DCtxWorkspaceBound); - TEST_SYMBOL(ZSTD_initDCtx); - TEST_SYMBOL(ZSTD_decompressDCtx); - TEST_SYMBOL(ZSTD_decompress_usingDict); - - TEST_SYMBOL(ZSTD_CDictWorkspaceBound); - TEST_SYMBOL(ZSTD_initCDict); - TEST_SYMBOL(ZSTD_compress_usingCDict); - TEST_SYMBOL(ZSTD_DDictWorkspaceBound); - TEST_SYMBOL(ZSTD_initDDict); - TEST_SYMBOL(ZSTD_decompress_usingDDict); - - TEST_SYMBOL(ZSTD_CStreamWorkspaceBound); - TEST_SYMBOL(ZSTD_initCStream); - TEST_SYMBOL(ZSTD_initCStream_usingCDict); - TEST_SYMBOL(ZSTD_resetCStream); - TEST_SYMBOL(ZSTD_compressStream); - TEST_SYMBOL(ZSTD_flushStream); - TEST_SYMBOL(ZSTD_endStream); - TEST_SYMBOL(ZSTD_CStreamInSize); - TEST_SYMBOL(ZSTD_CStreamOutSize); - TEST_SYMBOL(ZSTD_DStreamWorkspaceBound); - TEST_SYMBOL(ZSTD_initDStream); - TEST_SYMBOL(ZSTD_initDStream_usingDDict); - TEST_SYMBOL(ZSTD_resetDStream); - TEST_SYMBOL(ZSTD_decompressStream); - TEST_SYMBOL(ZSTD_DStreamInSize); - TEST_SYMBOL(ZSTD_DStreamOutSize); - - TEST_SYMBOL(ZSTD_findFrameCompressedSize); - TEST_SYMBOL(ZSTD_getFrameContentSize); - TEST_SYMBOL(ZSTD_findDecompressedSize); - - TEST_SYMBOL(ZSTD_getCParams); - TEST_SYMBOL(ZSTD_getParams); - TEST_SYMBOL(ZSTD_checkCParams); - TEST_SYMBOL(ZSTD_adjustCParams); - - TEST_SYMBOL(ZSTD_isFrame); - TEST_SYMBOL(ZSTD_getDictID_fromDict); - TEST_SYMBOL(ZSTD_getDictID_fromDDict); - TEST_SYMBOL(ZSTD_getDictID_fromFrame); - - TEST_SYMBOL(ZSTD_compressBegin); - TEST_SYMBOL(ZSTD_compressBegin_usingDict); - TEST_SYMBOL(ZSTD_compressBegin_advanced); - TEST_SYMBOL(ZSTD_copyCCtx); - TEST_SYMBOL(ZSTD_compressBegin_usingCDict); - TEST_SYMBOL(ZSTD_compressContinue); - TEST_SYMBOL(ZSTD_compressEnd); - TEST_SYMBOL(ZSTD_getFrameParams); - TEST_SYMBOL(ZSTD_decompressBegin); - TEST_SYMBOL(ZSTD_decompressBegin_usingDict); - TEST_SYMBOL(ZSTD_copyDCtx); - TEST_SYMBOL(ZSTD_nextSrcSizeToDecompress); - TEST_SYMBOL(ZSTD_decompressContinue); - TEST_SYMBOL(ZSTD_nextInputType); - - TEST_SYMBOL(ZSTD_getBlockSizeMax); - TEST_SYMBOL(ZSTD_compressBlock); - TEST_SYMBOL(ZSTD_decompressBlock); - TEST_SYMBOL(ZSTD_insertBlock); -} diff --git a/contrib/linux-kernel/test/XXHashUserlandTest.cpp b/contrib/linux-kernel/test/XXHashUserlandTest.cpp deleted file mode 100644 index f50401a2e19..00000000000 --- a/contrib/linux-kernel/test/XXHashUserlandTest.cpp +++ /dev/null @@ -1,166 +0,0 @@ -extern "C" { -#include -#include -} -#include -#include -#include -#include -#include -#define XXH_STATIC_LINKING_ONLY -#include - -using namespace std; - -namespace { -const std::array kTestInputs = { - "", - "0", - "01234", - "0123456789abcde", - "0123456789abcdef", - "0123456789abcdef0", - "0123456789abcdef0123", - "0123456789abcdef0123456789abcde", - "0123456789abcdef0123456789abcdef", - "0123456789abcdef0123456789abcdef0", - 
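/*
 * How the Symbols test above works (a summary, not new functionality):
 * the userland <linux/module.h> shim expands
 *     EXPORT_SYMBOL(f)   into   void *__f = f;
 * so TEST_SYMBOL(f) can declare extern void *__f and assert that it is
 * non-NULL; any function that lost its EXPORT_SYMBOL() annotation then
 * fails at link time rather than at runtime.
 */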
"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", -}; - -bool testXXH32(const void *input, const size_t length, uint32_t seed) { - return XXH32(input, length, seed) == xxh32(input, length, seed); -} - -bool testXXH64(const void *input, const size_t length, uint32_t seed) { - return XXH64(input, length, seed) == xxh64(input, length, seed); -} - -class XXH32State { - struct xxh32_state kernelState; - XXH32_state_t state; - -public: - explicit XXH32State(const uint32_t seed) { reset(seed); } - XXH32State(XXH32State const& other) noexcept { - xxh32_copy_state(&kernelState, &other.kernelState); - XXH32_copyState(&state, &other.state); - } - XXH32State& operator=(XXH32State const& other) noexcept { - xxh32_copy_state(&kernelState, &other.kernelState); - XXH32_copyState(&state, &other.state); - return *this; - } - - void reset(const uint32_t seed) { - xxh32_reset(&kernelState, seed); - EXPECT_EQ(0, XXH32_reset(&state, seed)); - } - - void update(const void *input, const size_t length) { - EXPECT_EQ(0, xxh32_update(&kernelState, input, length)); - EXPECT_EQ(0, (int)XXH32_update(&state, input, length)); - } - - bool testDigest() const { - return xxh32_digest(&kernelState) == XXH32_digest(&state); - } -}; - -class XXH64State { - struct xxh64_state kernelState; - XXH64_state_t state; - -public: - explicit XXH64State(const uint64_t seed) { reset(seed); } - XXH64State(XXH64State const& other) noexcept { - xxh64_copy_state(&kernelState, &other.kernelState); - XXH64_copyState(&state, &other.state); - } - XXH64State& operator=(XXH64State const& other) noexcept { - xxh64_copy_state(&kernelState, &other.kernelState); - XXH64_copyState(&state, &other.state); - return *this; - } - - void reset(const uint64_t seed) { - xxh64_reset(&kernelState, seed); - EXPECT_EQ(0, XXH64_reset(&state, seed)); - } - - void update(const void *input, const size_t length) { - EXPECT_EQ(0, xxh64_update(&kernelState, input, length)); - EXPECT_EQ(0, (int)XXH64_update(&state, input, length)); - } - - bool testDigest() const { - return xxh64_digest(&kernelState) == XXH64_digest(&state); - } -}; -} - -TEST(Simple, Null) { - EXPECT_TRUE(testXXH32(NULL, 0, 0)); - EXPECT_TRUE(testXXH64(NULL, 0, 0)); -} - -TEST(Stream, Null) { - struct xxh32_state state32; - xxh32_reset(&state32, 0); - EXPECT_EQ(-EINVAL, xxh32_update(&state32, NULL, 0)); - - struct xxh64_state state64; - xxh64_reset(&state64, 0); - EXPECT_EQ(-EINVAL, xxh64_update(&state64, NULL, 0)); -} - -TEST(Simple, TestInputs) { - for (uint32_t seed = 0; seed < 100000; seed = (seed + 1) * 3) { - for (auto const input : kTestInputs) { - EXPECT_TRUE(testXXH32(input.data(), input.size(), seed)); - EXPECT_TRUE(testXXH64(input.data(), input.size(), (uint64_t)seed)); - } - } -} - -TEST(Stream, TestInputs) { - for (uint32_t seed = 0; seed < 100000; seed = (seed + 1) * 3) { - for (auto const input : kTestInputs) { - XXH32State s32(seed); - XXH64State s64(seed); - s32.update(input.data(), input.size()); - s64.update(input.data(), input.size()); - EXPECT_TRUE(s32.testDigest()); - EXPECT_TRUE(s64.testDigest()); - } - } -} - -TEST(Stream, MultipleTestInputs) { - for (uint32_t seed = 0; seed < 100000; seed = (seed + 1) * 3) { - XXH32State s32(seed); - XXH64State s64(seed); - for (auto const input : kTestInputs) { - s32.update(input.data(), input.size()); - s64.update(input.data(), input.size()); - } - EXPECT_TRUE(s32.testDigest()); - EXPECT_TRUE(s64.testDigest()); - } -} - -TEST(Stream, CopyState) { - for (uint32_t seed = 0; seed < 100000; seed = (seed + 1) * 3) { - XXH32State 
s32(seed); - XXH64State s64(seed); - for (auto const input : kTestInputs) { - auto t32(s32); - t32.update(input.data(), input.size()); - s32 = t32; - auto t64(s64); - t64.update(input.data(), input.size()); - s64 = t64; - } - EXPECT_TRUE(s32.testDigest()); - EXPECT_TRUE(s64.testDigest()); - } -} diff --git a/contrib/linux-kernel/test/include/linux/compiler.h b/contrib/linux-kernel/test/include/linux/compiler.h index 4fb4f42e29c..988ce4a20ff 100644 --- a/contrib/linux-kernel/test/include/linux/compiler.h +++ b/contrib/linux-kernel/test/include/linux/compiler.h @@ -1,12 +1,23 @@ -#ifndef LINUX_COMPILER_H_ -#define LINUX_COMPILER_H_ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ +#ifndef LINUX_COMPILER_H +#define LINUX_COMPILER_H -#ifndef __always_inline -# define __always_inline inline +#ifndef inline +#define inline __inline __attribute__((unused)) #endif #ifndef noinline -# define noinline __attribute__((__noinline__)) +#define noinline __attribute__((noinline)) #endif -#endif // LINUX_COMPILER_H_ +#define fallthrough __attribute__((__fallthrough__)) + +#endif diff --git a/contrib/linux-kernel/test/include/linux/errno.h b/contrib/linux-kernel/test/include/linux/errno.h index b9db0852443..b4bdcba0ea6 100644 --- a/contrib/linux-kernel/test/include/linux/errno.h +++ b/contrib/linux-kernel/test/include/linux/errno.h @@ -1,6 +1,15 @@ -#ifndef LINUX_ERRNO_H_ -#define LINUX_ERRNO_H_ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ +#ifndef LINUX_ERRNO_H +#define LINUX_ERRNO_H #define EINVAL 22 -#endif // LINUX_ERRNO_H_ +#endif diff --git a/contrib/linux-kernel/test/include/linux/kernel.h b/contrib/linux-kernel/test/include/linux/kernel.h index 3ef2f7fe81d..a4d791cd1d5 100644 --- a/contrib/linux-kernel/test/include/linux/kernel.h +++ b/contrib/linux-kernel/test/include/linux/kernel.h @@ -1,16 +1,19 @@ -#ifndef LINUX_KERNEL_H_ -#define LINUX_KERNEL_H_ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ +#ifndef LINUX_KERNEL_H +#define LINUX_KERNEL_H -#define ALIGN(x, a) ({ \ - typeof(x) const __xe = (x); \ - typeof(a) const __ae = (a); \ - typeof(a) const __m = __ae - 1; \ - typeof(x) const __r = __xe & __m; \ - __xe + (__r ? 
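/*
 * The replacement ALIGN() introduced just below reduces the old
 * statement-expression to plain arithmetic: ALIGN(x, a) =
 * ALIGN_MASK(x, a - 1) = ((x + (a - 1)) & ~(a - 1)), which rounds x up
 * to the next multiple of a, assuming a is a power of two. Worked
 * example: ALIGN(13, 8) = (13 + 7) & ~7 = 20 & ~7 = 16.
 */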
(__ae - __r) : 0); \ - }) +#define WARN_ON(x) #define PTR_ALIGN(p, a) (typeof(p))ALIGN((unsigned long long)(p), (a)) +#define ALIGN(x, a) ALIGN_MASK((x), (a) - 1) +#define ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) -#define current Something that doesn't compile :) - -#endif // LINUX_KERNEL_H_ +#endif diff --git a/contrib/linux-kernel/test/include/linux/limits.h b/contrib/linux-kernel/test/include/linux/limits.h new file mode 100644 index 00000000000..574aa7b343f --- /dev/null +++ b/contrib/linux-kernel/test/include/linux/limits.h @@ -0,0 +1,15 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ +#ifndef LINUX_LIMITS_H +#define LINUX_LIMITS_H + +#include + +#endif diff --git a/contrib/linux-kernel/test/include/linux/math64.h b/contrib/linux-kernel/test/include/linux/math64.h index 3d0ae72d583..7f6713e73bc 100644 --- a/contrib/linux-kernel/test/include/linux/math64.h +++ b/contrib/linux-kernel/test/include/linux/math64.h @@ -1,11 +1,15 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ #ifndef LINUX_MATH64_H #define LINUX_MATH64_H -#include - -static uint64_t div_u64(uint64_t n, uint32_t d) -{ - return n / d; -} +#define div_u64(dividend, divisor) ((dividend) / (divisor)) #endif diff --git a/contrib/linux-kernel/test/include/linux/module.h b/contrib/linux-kernel/test/include/linux/module.h index ef514c3494a..06ef56f9ebe 100644 --- a/contrib/linux-kernel/test/include/linux/module.h +++ b/contrib/linux-kernel/test/include/linux/module.h @@ -1,10 +1,20 @@ -#ifndef LINUX_MODULE_H_ -#define LINUX_MODULE_H_ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ +#ifndef LINUX_MODULE_H +#define LINUX_MODULE_H #define EXPORT_SYMBOL(symbol) \ void* __##symbol = symbol -#define MODULE_LICENSE(license) static char const *const LICENSE = license -#define MODULE_DESCRIPTION(description) \ - static char const *const DESCRIPTION = description +#define EXPORT_SYMBOL_GPL(symbol) \ + void* __##symbol = symbol +#define MODULE_LICENSE(license) +#define MODULE_DESCRIPTION(description) -#endif // LINUX_MODULE_H_ +#endif diff --git a/contrib/linux-kernel/test/include/linux/printk.h b/contrib/linux-kernel/test/include/linux/printk.h new file mode 100644 index 00000000000..92a25278e71 --- /dev/null +++ b/contrib/linux-kernel/test/include/linux/printk.h @@ -0,0 +1,15 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ +#ifndef LINUX_PRINTK_H +#define LINUX_PRINTK_H + +#define pr_debug(...) + +#endif diff --git a/contrib/linux-kernel/test/include/linux/stddef.h b/contrib/linux-kernel/test/include/linux/stddef.h new file mode 100644 index 00000000000..15c7408fcdf --- /dev/null +++ b/contrib/linux-kernel/test/include/linux/stddef.h @@ -0,0 +1,15 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ +#ifndef LINUX_STDDEF_H +#define LINUX_STDDEF_H + +#include + +#endif diff --git a/contrib/linux-kernel/test/include/linux/string.h b/contrib/linux-kernel/test/include/linux/string.h deleted file mode 100644 index 3b2f5900276..00000000000 --- a/contrib/linux-kernel/test/include/linux/string.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/contrib/linux-kernel/test/include/linux/swab.h b/contrib/linux-kernel/test/include/linux/swab.h new file mode 100644 index 00000000000..2b48b434c97 --- /dev/null +++ b/contrib/linux-kernel/test/include/linux/swab.h @@ -0,0 +1,16 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ +#ifndef LINUX_SWAB_H +#define LINUX_SWAB_H + +#define swab32(x) __builtin_bswap32((x)) +#define swab64(x) __builtin_bswap64((x)) + +#endif diff --git a/contrib/linux-kernel/test/include/linux/types.h b/contrib/linux-kernel/test/include/linux/types.h index c2d4f4b7252..b413db6f983 100644 --- a/contrib/linux-kernel/test/include/linux/types.h +++ b/contrib/linux-kernel/test/include/linux/types.h @@ -1,2 +1,16 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ +#ifndef LINUX_TYPES_H +#define LINUX_TYPES_H + #include #include + +#endif diff --git a/contrib/linux-kernel/test/include/asm/unaligned.h b/contrib/linux-kernel/test/include/linux/unaligned.h similarity index 88% rename from contrib/linux-kernel/test/include/asm/unaligned.h rename to contrib/linux-kernel/test/include/linux/unaligned.h index 4f482812634..86ec4ca3899 100644 --- a/contrib/linux-kernel/test/include/asm/unaligned.h +++ b/contrib/linux-kernel/test/include/linux/unaligned.h @@ -2,16 +2,26 @@ #define ASM_UNALIGNED_H #include -#include #include -#define _LITTLE_ENDIAN 1 +#ifndef __LITTLE_ENDIAN +# if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN__) +# define __LITTLE_ENDIAN 1 +# endif +#endif + +#ifdef __LITTLE_ENDIAN +# define _IS_LITTLE_ENDIAN 1 +#else +# define _IS_LITTLE_ENDIAN 0 +#endif static unsigned _isLittleEndian(void) { const union { uint32_t u; uint8_t c[4]; } one = { 1 }; - assert(_LITTLE_ENDIAN == one.c[0]); - return _LITTLE_ENDIAN; + assert(_IS_LITTLE_ENDIAN == one.c[0]); + (void)one; + return _IS_LITTLE_ENDIAN; } static uint16_t _swap16(uint16_t in) @@ -33,7 +43,7 @@ static uint64_t _swap64(uint64_t in) static uint16_t get_unaligned_le16(const void* memPtr) { uint16_t val; - memcpy(&val, memPtr, sizeof(val)); + __builtin_memcpy(&val, memPtr, sizeof(val)); if (!_isLittleEndian()) _swap16(val); return val; } @@ -41,7 +51,7 @@ static uint16_t get_unaligned_le16(const void* memPtr) static uint32_t get_unaligned_le32(const void* memPtr) { uint32_t val; - memcpy(&val, memPtr, sizeof(val)); + __builtin_memcpy(&val, memPtr, sizeof(val)); if (!_isLittleEndian()) _swap32(val); return val; } @@ -49,7 +59,7 @@ static uint32_t get_unaligned_le32(const void* memPtr) static uint64_t get_unaligned_le64(const void* memPtr) { uint64_t val; - memcpy(&val, memPtr, sizeof(val)); + __builtin_memcpy(&val, memPtr, sizeof(val)); if (!_isLittleEndian()) _swap64(val); return val; } @@ -57,26 +67,26 @@ static uint64_t get_unaligned_le64(const void* memPtr) static void put_unaligned_le16(uint16_t value, void* memPtr) { if (!_isLittleEndian()) value = _swap16(value); - memcpy(memPtr, &value, sizeof(value)); + __builtin_memcpy(memPtr, &value, sizeof(value)); } static void put_unaligned_le32(uint32_t value, void* memPtr) { if (!_isLittleEndian()) value = _swap32(value); - memcpy(memPtr, &value, sizeof(value)); + __builtin_memcpy(memPtr, &value, sizeof(value)); } static void put_unaligned_le64(uint64_t value, void* memPtr) { if (!_isLittleEndian()) value = _swap64(value); - memcpy(memPtr, &value, sizeof(value)); + __builtin_memcpy(memPtr, &value, sizeof(value)); } /* big endian */ static uint32_t get_unaligned_be32(const void* memPtr) { uint32_t val; - memcpy(&val, memPtr, sizeof(val)); + __builtin_memcpy(&val, memPtr, sizeof(val)); if (_isLittleEndian()) _swap32(val); return val; } @@ -84,7 +94,7 @@ static uint32_t get_unaligned_be32(const void* memPtr) static uint64_t get_unaligned_be64(const void* memPtr) { uint64_t val; - memcpy(&val, memPtr, sizeof(val)); + __builtin_memcpy(&val, memPtr, sizeof(val)); if (_isLittleEndian()) _swap64(val); return val; } @@ -92,13 +102,13 @@ static uint64_t get_unaligned_be64(const void* memPtr) static void put_unaligned_be32(uint32_t value, void* memPtr) { if (_isLittleEndian()) value = _swap32(value); - memcpy(memPtr, &value, sizeof(value)); + __builtin_memcpy(memPtr, &value, sizeof(value)); } static void put_unaligned_be64(uint64_t value, void* memPtr) { if (_isLittleEndian()) value = _swap64(value); - memcpy(memPtr, &value, 
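/*
 * Why these accessors copy through __builtin_memcpy instead of casting
 * the pointer: dereferencing a misaligned uint32_t pointer is undefined
 * behavior in C, whereas a fixed-size memcpy compiles down to a single
 * unaligned load or store on targets that allow it. One caveat worth
 * noting: the get_* helpers above discard the return value of
 * _swap16(), _swap32() and _swap64(), so the byte-swapping paths are
 * no-ops as written; the native little-endian paths this userland
 * harness exercises are unaffected. Hedged usage sketch (the buffer
 * name is hypothetical):
 *
 *   uint8_t frame[16];
 *   uint32_t v = get_unaligned_le32(frame + 1);  // misaligned is fine
 *   put_unaligned_be32(v, frame + 5);
 */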
sizeof(value)); + __builtin_memcpy(memPtr, &value, sizeof(value)); } /* generic */ @@ -166,7 +176,7 @@ extern void __bad_unaligned_access_size(void); (void)0; \ }) -#if _LITTLE_ENDIAN +#if _IS_LITTLE_ENDIAN # define get_unaligned __get_unaligned_le # define put_unaligned __put_unaligned_le #else diff --git a/contrib/linux-kernel/lib/xxhash.c b/contrib/linux-kernel/test/include/linux/xxhash.h similarity index 52% rename from contrib/linux-kernel/lib/xxhash.c rename to contrib/linux-kernel/test/include/linux/xxhash.h index aa61e2a3802..f993eb9eed1 100644 --- a/contrib/linux-kernel/lib/xxhash.c +++ b/contrib/linux-kernel/test/include/linux/xxhash.h @@ -2,7 +2,7 @@ * xxHash - Extremely Fast Hash algorithm * Copyright (C) 2012-2016, Yann Collet. * - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are @@ -34,16 +34,272 @@ * ("BSD"). * * You can contact the author at: - * - xxHash homepage: http://cyan4973.github.io/xxHash/ + * - xxHash homepage: https://cyan4973.github.io/xxHash/ * - xxHash source repository: https://github.com/Cyan4973/xxHash */ -#include +/* + * Notice extracted from xxHash homepage: + * + * xxHash is an extremely fast Hash algorithm, running at RAM speed limits. + * It also successfully passes all tests from the SMHasher suite. + * + * Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 + * Duo @3GHz) + * + * Name Speed Q.Score Author + * xxHash 5.4 GB/s 10 + * CrapWow 3.2 GB/s 2 Andrew + * MumurHash 3a 2.7 GB/s 10 Austin Appleby + * SpookyHash 2.0 GB/s 10 Bob Jenkins + * SBox 1.4 GB/s 9 Bret Mulvey + * Lookup3 1.2 GB/s 9 Bob Jenkins + * SuperFastHash 1.2 GB/s 1 Paul Hsieh + * CityHash64 1.05 GB/s 10 Pike & Alakuijala + * FNV 0.55 GB/s 5 Fowler, Noll, Vo + * CRC32 0.43 GB/s 9 + * MD5-32 0.33 GB/s 10 Ronald L. Rivest + * SHA1-32 0.28 GB/s 10 + * + * Q.Score is a measure of quality of the hash function. + * It depends on successfully passing SMHasher test set. + * 10 is a perfect score. + * + * A 64-bits version, named xxh64 offers much better speed, + * but for 64-bits applications only. + * Name Speed on 64 bits Speed on 32 bits + * xxh64 13.8 GB/s 1.9 GB/s + * xxh32 6.8 GB/s 6.0 GB/s + */ + +#ifndef XXHASH_H +#define XXHASH_H + +#include + +#define XXH_API static inline __attribute__((unused)) +/*-**************************** + * Simple Hash Functions + *****************************/ + +/** + * xxh32() - calculate the 32-bit hash of the input with a given seed. + * + * @input: The data to hash. + * @length: The length of the data to hash. + * @seed: The seed can be used to alter the result predictably. + * + * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s + * + * Return: The 32-bit hash of the data. + */ +XXH_API uint32_t xxh32(const void *input, size_t length, uint32_t seed); + +/** + * xxh64() - calculate the 64-bit hash of the input with a given seed. + * + * @input: The data to hash. + * @length: The length of the data to hash. + * @seed: The seed can be used to alter the result predictably. + * + * This function runs 2x faster on 64-bit systems, but slower on 32-bit systems. + * + * Return: The 64-bit hash of the data. 
+ */ +XXH_API uint64_t xxh64(const void *input, size_t length, uint64_t seed); + +/** + * xxhash() - calculate wordsize hash of the input with a given seed + * @input: The data to hash. + * @length: The length of the data to hash. + * @seed: The seed can be used to alter the result predictably. + * + * If the hash does not need to be comparable between machines with + * different word sizes, this function will call whichever of xxh32() + * or xxh64() is faster. + * + * Return: wordsize hash of the data. + */ + +static inline unsigned long xxhash(const void *input, size_t length, + uint64_t seed) +{ + if (sizeof(size_t) == 8) + return xxh64(input, length, seed); + else + return xxh32(input, length, seed); +} + +/*-**************************** + * Streaming Hash Functions + *****************************/ + +/* + * These definitions are only meant to allow allocation of XXH state + * statically, on stack, or in a struct for example. + * Do not use members directly. + */ + +/** + * struct xxh32_state - private xxh32 state, do not use members directly + */ +struct xxh32_state { + uint32_t total_len_32; + uint32_t large_len; + uint32_t v1; + uint32_t v2; + uint32_t v3; + uint32_t v4; + uint32_t mem32[4]; + uint32_t memsize; +}; + +/** + * struct xxh64_state - private xxh64 state, do not use members directly + */ +struct xxh64_state { + uint64_t total_len; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t mem64[4]; + uint32_t memsize; +}; + +/** + * xxh32_reset() - reset the xxh32 state to start a new hashing operation + * + * @state: The xxh32 state to reset. + * @seed: Initialize the hash state with this seed. + * + * Call this function on any xxh32_state to prepare for a new hashing operation. + */ +XXH_API void xxh32_reset(struct xxh32_state *state, uint32_t seed); + +/** + * xxh32_update() - hash the data given and update the xxh32 state + * + * @state: The xxh32 state to update. + * @input: The data to hash. + * @length: The length of the data to hash. + * + * After calling xxh32_reset() call xxh32_update() as many times as necessary. + * + * Return: Zero on success, otherwise an error code. + */ +XXH_API int xxh32_update(struct xxh32_state *state, const void *input, size_t length); + +/** + * xxh32_digest() - produce the current xxh32 hash + * + * @state: Produce the current xxh32 hash of this state. + * + * A hash value can be produced at any time. It is still possible to continue + * inserting input into the hash state after a call to xxh32_digest(), and + * generate new hashes later on, by calling xxh32_digest() again. + * + * Return: The xxh32 hash stored in the state. + */ +XXH_API uint32_t xxh32_digest(const struct xxh32_state *state); + +/** + * xxh64_reset() - reset the xxh64 state to start a new hashing operation + * + * @state: The xxh64 state to reset. + * @seed: Initialize the hash state with this seed. + */ +XXH_API void xxh64_reset(struct xxh64_state *state, uint64_t seed); + +/** + * xxh64_update() - hash the data given and update the xxh64 state + * @state: The xxh64 state to update. + * @input: The data to hash. + * @length: The length of the data to hash. + * + * After calling xxh64_reset() call xxh64_update() as many times as necessary. + * + * Return: Zero on success, otherwise an error code. + */ +XXH_API int xxh64_update(struct xxh64_state *state, const void *input, size_t length); + +/** + * xxh64_digest() - produce the current xxh64 hash + * + * @state: Produce the current xxh64 hash of this state. 
+ * + * A hash value can be produced at any time. It is still possible to continue + * inserting input into the hash state after a call to xxh64_digest(), and + * generate new hashes later on, by calling xxh64_digest() again. + * + * Return: The xxh64 hash stored in the state. + */ +XXH_API uint64_t xxh64_digest(const struct xxh64_state *state); + +/*-************************** + * Utils + ***************************/ + +/** + * xxh32_copy_state() - copy the source state into the destination state + * + * @src: The source xxh32 state. + * @dst: The destination xxh32 state. + */ +XXH_API void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src); + +/** + * xxh64_copy_state() - copy the source state into the destination state + * + * @src: The source xxh64 state. + * @dst: The destination xxh64 state. + */ +XXH_API void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src); + +/* + * xxHash - Extremely Fast Hash algorithm + * Copyright (C) 2012-2016, Yann Collet. + * + * BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. This program is dual-licensed; you may select + * either version 2 of the GNU General Public License ("GPL") or BSD license + * ("BSD"). 
+ * + * You can contact the author at: + * - xxHash homepage: https://cyan4973.github.io/xxHash/ + * - xxHash source repository: https://github.com/Cyan4973/xxHash + */ + +#include #include -#include #include #include -#include #include /*-************************************* @@ -76,17 +332,15 @@ static const uint64_t PRIME64_5 = 2870177450012600261ULL; /*-************************** * Utils ***************************/ -void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src) +XXH_API void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src) { - memcpy(dst, src, sizeof(*dst)); + __builtin_memcpy(dst, src, sizeof(*dst)); } -EXPORT_SYMBOL(xxh32_copy_state); -void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src) +XXH_API void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src) { - memcpy(dst, src, sizeof(*dst)); + __builtin_memcpy(dst, src, sizeof(*dst)); } -EXPORT_SYMBOL(xxh64_copy_state); /*-*************************** * Simple Hash Functions @@ -99,7 +353,7 @@ static uint32_t xxh32_round(uint32_t seed, const uint32_t input) return seed; } -uint32_t xxh32(const void *input, const size_t len, const uint32_t seed) +XXH_API uint32_t xxh32(const void *input, const size_t len, const uint32_t seed) { const uint8_t *p = (const uint8_t *)input; const uint8_t *b_end = p + len; @@ -151,7 +405,6 @@ uint32_t xxh32(const void *input, const size_t len, const uint32_t seed) return h32; } -EXPORT_SYMBOL(xxh32); static uint64_t xxh64_round(uint64_t acc, const uint64_t input) { @@ -169,7 +422,7 @@ static uint64_t xxh64_merge_round(uint64_t acc, uint64_t val) return acc; } -uint64_t xxh64(const void *input, const size_t len, const uint64_t seed) +XXH_API uint64_t xxh64(const void *input, const size_t len, const uint64_t seed) { const uint8_t *p = (const uint8_t *)input; const uint8_t *const b_end = p + len; @@ -234,40 +487,37 @@ uint64_t xxh64(const void *input, const size_t len, const uint64_t seed) return h64; } -EXPORT_SYMBOL(xxh64); /*-************************************************** * Advanced Hash Functions ***************************************************/ -void xxh32_reset(struct xxh32_state *statePtr, const uint32_t seed) +XXH_API void xxh32_reset(struct xxh32_state *statePtr, const uint32_t seed) { /* use a local state for memcpy() to avoid strict-aliasing warnings */ struct xxh32_state state; - memset(&state, 0, sizeof(state)); + __builtin_memset(&state, 0, sizeof(state)); state.v1 = seed + PRIME32_1 + PRIME32_2; state.v2 = seed + PRIME32_2; state.v3 = seed + 0; state.v4 = seed - PRIME32_1; - memcpy(statePtr, &state, sizeof(state)); + __builtin_memcpy(statePtr, &state, sizeof(state)); } -EXPORT_SYMBOL(xxh32_reset); -void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed) +XXH_API void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed) { /* use a local state for memcpy() to avoid strict-aliasing warnings */ struct xxh64_state state; - memset(&state, 0, sizeof(state)); + __builtin_memset(&state, 0, sizeof(state)); state.v1 = seed + PRIME64_1 + PRIME64_2; state.v2 = seed + PRIME64_2; state.v3 = seed + 0; state.v4 = seed - PRIME64_1; - memcpy(statePtr, &state, sizeof(state)); + __builtin_memcpy(statePtr, &state, sizeof(state)); } -EXPORT_SYMBOL(xxh64_reset); -int xxh32_update(struct xxh32_state *state, const void *input, const size_t len) +XXH_API int xxh32_update(struct xxh32_state *state, const void *input, const size_t len) { const uint8_t *p = (const uint8_t *)input; 
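/*
 * Hedged usage sketch for the streaming interface declared above
 * (illustrative only; the buffers and lengths are hypothetical):
 *
 *   struct xxh32_state st;
 *   uint32_t h;
 *   xxh32_reset(&st, 0);
 *   xxh32_update(&st, buf, len1);        // feed data incrementally
 *   xxh32_update(&st, buf + len1, len2);
 *   h = xxh32_digest(&st);               // may continue updating after
 *   // equals xxh32(buf, len1 + len2, 0) computed in one shot
 */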
const uint8_t *const b_end = p + len; @@ -279,7 +529,7 @@ int xxh32_update(struct xxh32_state *state, const void *input, const size_t len) state->large_len |= (len >= 16) | (state->total_len_32 >= 16); if (state->memsize + len < 16) { /* fill in tmp buffer */ - memcpy((uint8_t *)(state->mem32) + state->memsize, input, len); + __builtin_memcpy((uint8_t *)(state->mem32) + state->memsize, input, len); state->memsize += (uint32_t)len; return 0; } @@ -287,7 +537,7 @@ int xxh32_update(struct xxh32_state *state, const void *input, const size_t len) if (state->memsize) { /* some data left from previous update */ const uint32_t *p32 = state->mem32; - memcpy((uint8_t *)(state->mem32) + state->memsize, input, + __builtin_memcpy((uint8_t *)(state->mem32) + state->memsize, input, 16 - state->memsize); state->v1 = xxh32_round(state->v1, get_unaligned_le32(p32)); @@ -328,15 +578,14 @@ int xxh32_update(struct xxh32_state *state, const void *input, const size_t len) } if (p < b_end) { - memcpy(state->mem32, p, (size_t)(b_end-p)); + __builtin_memcpy(state->mem32, p, (size_t)(b_end-p)); state->memsize = (uint32_t)(b_end-p); } return 0; } -EXPORT_SYMBOL(xxh32_update); -uint32_t xxh32_digest(const struct xxh32_state *state) +XXH_API uint32_t xxh32_digest(const struct xxh32_state *state) { const uint8_t *p = (const uint8_t *)state->mem32; const uint8_t *const b_end = (const uint8_t *)(state->mem32) + @@ -372,9 +621,8 @@ uint32_t xxh32_digest(const struct xxh32_state *state) return h32; } -EXPORT_SYMBOL(xxh32_digest); -int xxh64_update(struct xxh64_state *state, const void *input, const size_t len) +XXH_API int xxh64_update(struct xxh64_state *state, const void *input, const size_t len) { const uint8_t *p = (const uint8_t *)input; const uint8_t *const b_end = p + len; @@ -385,7 +633,7 @@ int xxh64_update(struct xxh64_state *state, const void *input, const size_t len) state->total_len += len; if (state->memsize + len < 32) { /* fill in tmp buffer */ - memcpy(((uint8_t *)state->mem64) + state->memsize, input, len); + __builtin_memcpy(((uint8_t *)state->mem64) + state->memsize, input, len); state->memsize += (uint32_t)len; return 0; } @@ -393,7 +641,7 @@ int xxh64_update(struct xxh64_state *state, const void *input, const size_t len) if (state->memsize) { /* tmp buffer is full */ uint64_t *p64 = state->mem64; - memcpy(((uint8_t *)p64) + state->memsize, input, + __builtin_memcpy(((uint8_t *)p64) + state->memsize, input, 32 - state->memsize); state->v1 = xxh64_round(state->v1, get_unaligned_le64(p64)); @@ -433,15 +681,14 @@ int xxh64_update(struct xxh64_state *state, const void *input, const size_t len) } if (p < b_end) { - memcpy(state->mem64, p, (size_t)(b_end-p)); + __builtin_memcpy(state->mem64, p, (size_t)(b_end-p)); state->memsize = (uint32_t)(b_end - p); } return 0; } -EXPORT_SYMBOL(xxh64_update); -uint64_t xxh64_digest(const struct xxh64_state *state) +XXH_API uint64_t xxh64_digest(const struct xxh64_state *state) { const uint8_t *p = (const uint8_t *)state->mem64; const uint8_t *const b_end = (const uint8_t *)state->mem64 + @@ -494,7 +741,5 @@ uint64_t xxh64_digest(const struct xxh64_state *state) return h64; } -EXPORT_SYMBOL(xxh64_digest); -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_DESCRIPTION("xxHash"); +#endif /* XXHASH_H */ diff --git a/contrib/linux-kernel/test/macro-test.sh b/contrib/linux-kernel/test/macro-test.sh new file mode 100755 index 00000000000..9ea84aa66b1 --- /dev/null +++ b/contrib/linux-kernel/test/macro-test.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env sh + +set -e + +SCRIPT_DIR=$(cd "$(dirname 
"$0")" && pwd) +INCLUDE_DIR="$SCRIPT_DIR/../linux/include" +LIB_DIR="$SCRIPT_DIR/../linux/lib" + + +print() { + printf '%b' "${*}" +} + +println() { + printf '%b\n' "${*}" +} + +die() { + println "$@" 1>&2 + exit 1 +} + +test_not_present() { + print "Testing that '$1' is not present... " + grep -r $1 "$INCLUDE_DIR" "$LIB_DIR" && die "Fail!" + println "Okay" +} + +println "This test checks that the macro removal process worked as expected" +println "If this test fails, then freestanding.py wasn't able to remove one of these" +println "macros from the source code completely. You'll either need to rewrite the check" +println "or improve freestanding.py." +println "" + +test_not_present "ZSTD_NO_INTRINSICS" +test_not_present "ZSTD_NO_UNUSED_FUNCTIONS" +test_not_present "ZSTD_LEGACY_SUPPORT" +test_not_present "STATIC_BMI2" +test_not_present "ZSTD_DLL_EXPORT" +test_not_present "ZSTD_DLL_IMPORT" +test_not_present "__ICCARM__" +test_not_present "_MSC_VER" +test_not_present "_WIN32" +test_not_present "__linux__" diff --git a/contrib/linux-kernel/test/static_test.c b/contrib/linux-kernel/test/static_test.c new file mode 100644 index 00000000000..ba4a420d417 --- /dev/null +++ b/contrib/linux-kernel/test/static_test.c @@ -0,0 +1,52 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ +#include +#include +#include +#include + +#include "decompress_sources.h" +#include + +#define CONTROL(x) \ + do { \ + if (!(x)) { \ + fprintf(stderr, "%s:%u: %s failed!\n", __FUNCTION__, __LINE__, #x); \ + abort(); \ + } \ + } while (0) + + +static const char kEmptyZstdFrame[] = { + 0x28, 0xb5, 0x2f, 0xfd, 0x24, 0x00, 0x01, 0x00, 0x00, 0x99, 0xe9, 0xd8, 0x51 +}; + +static void test_decompress_unzstd(void) { + fprintf(stderr, "Testing decompress unzstd... "); + { + size_t const wkspSize = zstd_dctx_workspace_bound(); + void* wksp = malloc(wkspSize); + ZSTD_DCtx* dctx = zstd_init_dctx(wksp, wkspSize); + CONTROL(wksp != NULL); + CONTROL(dctx != NULL); + { + size_t const dSize = zstd_decompress_dctx(dctx, NULL, 0, kEmptyZstdFrame, sizeof(kEmptyZstdFrame)); + CONTROL(!zstd_is_error(dSize)); + CONTROL(dSize == 0); + } + free(wksp); + } + fprintf(stderr, "Ok\n"); +} + +int main(void) { + test_decompress_unzstd(); + return 0; +} diff --git a/contrib/linux-kernel/test/test.c b/contrib/linux-kernel/test/test.c new file mode 100644 index 00000000000..0f4ba3f45ef --- /dev/null +++ b/contrib/linux-kernel/test/test.c @@ -0,0 +1,229 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ +#include +#include +#include +#include + +#include + +#define CONTROL(x) \ + do { \ + if (!(x)) { \ + fprintf(stderr, "%s:%u: %s failed!\n", __FUNCTION__, __LINE__, #x); \ + abort(); \ + } \ + } while (0) + +typedef struct { + char *data; + char *data2; + size_t dataSize; + char *comp; + size_t compSize; +} test_data_t; + +static test_data_t create_test_data(void) { + test_data_t data; + data.dataSize = 128 * 1024; + data.data = (char*)malloc(data.dataSize); + CONTROL(data.data != NULL); + data.data2 = (char*)malloc(data.dataSize); + CONTROL(data.data2 != NULL); + data.compSize = zstd_compress_bound(data.dataSize); + data.comp = (char*)malloc(data.compSize); + CONTROL(data.comp != NULL); + memset(data.data, 0, data.dataSize); + return data; +} + +static void free_test_data(test_data_t const *data) { + free(data->data); + free(data->data2); + free(data->comp); +} + +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define MAX(a, b) ((a) > (b) ? (a) : (b)) + +static void test_btrfs(test_data_t const *data) { + size_t const size = MIN(data->dataSize, 128 * 1024); + fprintf(stderr, "testing btrfs use cases... "); + for (int level = -1; level < 16; ++level) { + zstd_parameters params = zstd_get_params(level, size); + size_t const workspaceSize = + MAX(zstd_cstream_workspace_bound(¶ms.cParams), + zstd_dstream_workspace_bound(size)); + void *workspace = malloc(workspaceSize); + + char const *ip = data->data; + char const *iend = ip + size; + char *op = data->comp; + char *oend = op + data->compSize; + + CONTROL(params.cParams.windowLog <= 17); + CONTROL(workspace != NULL); + { + zstd_cstream *cctx = zstd_init_cstream(¶ms, size, workspace, workspaceSize); + zstd_out_buffer out = {NULL, 0, 0}; + zstd_in_buffer in = {NULL, 0, 0}; + CONTROL(cctx != NULL); + for (;;) { + if (in.pos == in.size) { + in.src = ip; + in.size = MIN(4096, iend - ip); + in.pos = 0; + ip += in.size; + } + + if (out.pos == out.size) { + out.dst = op; + out.size = MIN(4096, oend - op); + out.pos = 0; + op += out.size; + } + + if (ip != iend || in.pos < in.size) { + CONTROL(!zstd_is_error(zstd_compress_stream(cctx, &out, &in))); + } else { + size_t const ret = zstd_end_stream(cctx, &out); + CONTROL(!zstd_is_error(ret)); + if (ret == 0) { + break; + } + } + } + op += out.pos; + } + + ip = data->comp; + iend = op; + op = data->data2; + oend = op + size; + { + zstd_dstream *dctx = zstd_init_dstream(1ULL << params.cParams.windowLog, workspace, workspaceSize); + zstd_out_buffer out = {NULL, 0, 0}; + zstd_in_buffer in = {NULL, 0, 0}; + CONTROL(dctx != NULL); + for (;;) { + if (in.pos == in.size) { + in.src = ip; + in.size = MIN(4096, iend - ip); + in.pos = 0; + ip += in.size; + } + + if (out.pos == out.size) { + out.dst = op; + out.size = MIN(4096, oend - op); + out.pos = 0; + op += out.size; + } + { + size_t const ret = zstd_decompress_stream(dctx, &out, &in); + CONTROL(!zstd_is_error(ret)); + if (ret == 0) { + break; + } + } + } + } + CONTROL((size_t)(op - data->data2) == data->dataSize); + CONTROL(!memcmp(data->data, data->data2, data->dataSize)); + free(workspace); + } + fprintf(stderr, "Ok\n"); +} + +static void test_decompress_unzstd(test_data_t const *data) { + size_t cSize; + fprintf(stderr, "Testing decompress unzstd... 
"); + { + zstd_parameters params = zstd_get_params(19, 0); + size_t const wkspSize = zstd_cctx_workspace_bound(¶ms.cParams); + void* wksp = malloc(wkspSize); + zstd_cctx* cctx = zstd_init_cctx(wksp, wkspSize); + CONTROL(wksp != NULL); + CONTROL(cctx != NULL); + cSize = zstd_compress_cctx(cctx, data->comp, data->compSize, data->data, data->dataSize, ¶ms); + CONTROL(!zstd_is_error(cSize)); + free(wksp); + } + { + size_t const wkspSize = zstd_dctx_workspace_bound(); + void* wksp = malloc(wkspSize); + zstd_dctx* dctx = zstd_init_dctx(wksp, wkspSize); + CONTROL(wksp != NULL); + CONTROL(dctx != NULL); + { + size_t const dSize = zstd_decompress_dctx(dctx, data->data2, data->dataSize, data->comp, cSize); + CONTROL(!zstd_is_error(dSize)); + CONTROL(dSize == data->dataSize); + } + CONTROL(!memcmp(data->data, data->data2, data->dataSize)); + free(wksp); + } + fprintf(stderr, "Ok\n"); +} + +static void test_f2fs(void) { + fprintf(stderr, "testing f2fs uses... "); + CONTROL(zstd_min_clevel() < 0); + CONTROL(zstd_max_clevel() == 22); + fprintf(stderr, "Ok\n"); +} + +static char *g_stack = NULL; + +static void __attribute__((noinline)) use(void *x) { + asm volatile("" : "+r"(x)); +} + +static void __attribute__((noinline)) fill_stack(void) { + memset(g_stack, 0x33, 8192); +} + +static void __attribute__((noinline)) set_stack(void) { + + char stack[8192]; + g_stack = stack; + use(g_stack); +} + +static void __attribute__((noinline)) check_stack(void) { + size_t cleanStack = 0; + while (cleanStack < 8192 && g_stack[cleanStack] == 0x33) { + ++cleanStack; + } + { + size_t const stackSize = 8192 - cleanStack; + fprintf(stderr, "Maximum stack size: %zu\n", stackSize); + CONTROL(stackSize <= 2048 + 512); + } +} + +static void test_stack_usage(test_data_t const *data) { + set_stack(); + fill_stack(); + test_f2fs(); + test_btrfs(data); + test_decompress_unzstd(data); + check_stack(); +} + +int main(void) { + test_data_t data = create_test_data(); + test_f2fs(); + test_btrfs(&data); + test_decompress_unzstd(&data); + test_stack_usage(&data); + free_test_data(&data); + return 0; +} diff --git a/contrib/linux-kernel/xxhash_test.c b/contrib/linux-kernel/xxhash_test.c deleted file mode 100644 index eb0fb1cd79b..00000000000 --- a/contrib/linux-kernel/xxhash_test.c +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Copyright (c) 2016-present, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - */ - -/* DO_XXH should be 32 or 64 for xxh32 and xxh64 respectively */ -#define DO_XXH 0 -/* DO_CRC should be 0 or 1 */ -#define DO_CRC 0 -/* Buffer size */ -#define BUFFER_SIZE 4096 - -#include -#include -#include -#include - -#if DO_XXH -#include -#endif - -#if DO_CRC -#include -#endif - -/* Device name to pass to register_chrdev(). */ -#define DEVICE_NAME "xxhash_test" - -/* Dynamically allocated device major number */ -static int device_major; - -/* - * We reuse the same hash state, and thus can hash only one - * file at a time. 
- */ -static bool device_is_open; - -static uint64_t total_length; - - -#if (DO_XXH == 32) - -#define xxh_state xxh32_state -#define xxh_reset xxh32_reset -#define xxh_update xxh32_update -#define xxh_digest xxh32_digest -#define XXH_FORMAT "XXH32 = 0x%x" - -#elif (DO_XXH == 64) - -#define xxh_state xxh64_state -#define xxh_reset xxh64_reset -#define xxh_update xxh64_update -#define xxh_digest xxh64_digest -#define XXH_FORMAT "XXH64 = 0x%llx" - -#elif DO_XXH - -#error "Invalid value of DO_XXH" - -#endif - -#if DO_XXH - -/* XXH state */ -static struct xxh_state state; - -#endif /* DO_XXH */ - -#if DO_CRC - -static uint32_t crc; - -#endif /* DO_CRC */ - -/* - * Input buffer used to put data coming from userspace. - */ -static uint8_t buffer_in[BUFFER_SIZE]; - -static int xxhash_test_open(struct inode *i, struct file *f) -{ - if (device_is_open) - return -EBUSY; - - device_is_open = true; - - total_length = 0; -#if DO_XXH - xxh_reset(&state, 0); -#endif -#if DO_CRC - crc = 0xFFFFFFFF; -#endif - - printk(KERN_INFO DEVICE_NAME ": opened\n"); - return 0; -} - -static int xxhash_test_release(struct inode *i, struct file *f) -{ - device_is_open = false; - - printk(KERN_INFO DEVICE_NAME ": total_len = %llu\n", total_length); -#if DO_XXH - printk(KERN_INFO DEVICE_NAME ": " XXH_FORMAT "\n", xxh_digest(&state)); -#endif -#if DO_CRC - printk(KERN_INFO DEVICE_NAME ": CRC32 = 0x%08x\n", ~crc); -#endif - printk(KERN_INFO DEVICE_NAME ": closed\n"); - return 0; -} - -/* - * Hash the data given to us from userspace. - */ -static ssize_t xxhash_test_write(struct file *file, const char __user *buf, - size_t size, loff_t *pos) -{ - size_t remaining = size; - - while (remaining > 0) { -#if DO_XXH - int ret; -#endif - size_t const copy_size = min(remaining, sizeof(buffer_in)); - - if (copy_from_user(buffer_in, buf, copy_size)) - return -EFAULT; - buf += copy_size; - remaining -= copy_size; - total_length += copy_size; -#if DO_XXH - if ((ret = xxh_update(&state, buffer_in, copy_size))) { - printk(KERN_INFO DEVICE_NAME ": xxh failure."); - return ret; - } -#endif -#if DO_CRC - crc = crc32(crc, buffer_in, copy_size); -#endif - } - return size; -} -/* register the character device. */ -static int __init xxhash_test_init(void) -{ - static const struct file_operations fileops = { - .owner = THIS_MODULE, - .open = &xxhash_test_open, - .release = &xxhash_test_release, - .write = &xxhash_test_write - }; - - device_major = register_chrdev(0, DEVICE_NAME, &fileops); - if (device_major < 0) { - return device_major; - } - - printk(KERN_INFO DEVICE_NAME ": module loaded\n"); - printk(KERN_INFO DEVICE_NAME ": Create a device node with " - "'mknod " DEVICE_NAME " c %d 0' and write data " - "to it.\n", device_major); - return 0; -} - -static void __exit xxhash_test_exit(void) -{ - unregister_chrdev(device_major, DEVICE_NAME); - printk(KERN_INFO DEVICE_NAME ": module unloaded\n"); -} - -module_init(xxhash_test_init); -module_exit(xxhash_test_exit); - -MODULE_DESCRIPTION("XXHash tester"); -MODULE_VERSION("1.0"); - - -MODULE_LICENSE("Dual BSD/GPL"); diff --git a/contrib/linux-kernel/zstd_common_module.c b/contrib/linux-kernel/zstd_common_module.c new file mode 100644 index 00000000000..466828e3575 --- /dev/null +++ b/contrib/linux-kernel/zstd_common_module.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include + +#include "common/huf.h" +#include "common/fse.h" +#include "common/zstd_internal.h" + +// Export symbols shared by compress and decompress into a common module + +#undef ZSTD_isError /* defined within zstd_internal.h */ +EXPORT_SYMBOL_GPL(FSE_readNCount); +EXPORT_SYMBOL_GPL(HUF_readStats); +EXPORT_SYMBOL_GPL(HUF_readStats_wksp); +EXPORT_SYMBOL_GPL(ZSTD_isError); +EXPORT_SYMBOL_GPL(ZSTD_getErrorName); +EXPORT_SYMBOL_GPL(ZSTD_getErrorCode); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Zstd Common"); diff --git a/contrib/linux-kernel/zstd_compress_module.c b/contrib/linux-kernel/zstd_compress_module.c new file mode 100644 index 00000000000..7651b53551c --- /dev/null +++ b/contrib/linux-kernel/zstd_compress_module.c @@ -0,0 +1,286 @@ +// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include +#include +#include +#include + +#include "common/zstd_deps.h" +#include "common/zstd_internal.h" +#include "compress/zstd_compress_internal.h" + +#define ZSTD_FORWARD_IF_ERR(ret) \ + do { \ + size_t const __ret = (ret); \ + if (ZSTD_isError(__ret)) \ + return __ret; \ + } while (0) + +static size_t zstd_cctx_init(zstd_cctx *cctx, const zstd_parameters *parameters, + unsigned long long pledged_src_size) +{ + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_reset( + cctx, ZSTD_reset_session_and_parameters)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setPledgedSrcSize( + cctx, pledged_src_size)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_windowLog, parameters->cParams.windowLog)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_hashLog, parameters->cParams.hashLog)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_chainLog, parameters->cParams.chainLog)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_searchLog, parameters->cParams.searchLog)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_minMatch, parameters->cParams.minMatch)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_targetLength, parameters->cParams.targetLength)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_strategy, parameters->cParams.strategy)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_contentSizeFlag, parameters->fParams.contentSizeFlag)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_checksumFlag, parameters->fParams.checksumFlag)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_dictIDFlag, !parameters->fParams.noDictIDFlag)); + return 0; +} + +int zstd_min_clevel(void) +{ + return ZSTD_minCLevel(); +} +EXPORT_SYMBOL(zstd_min_clevel); + +int zstd_max_clevel(void) +{ + return ZSTD_maxCLevel(); +} +EXPORT_SYMBOL(zstd_max_clevel); + +int zstd_default_clevel(void) +{ + return ZSTD_defaultCLevel(); +} +EXPORT_SYMBOL(zstd_default_clevel); + +size_t zstd_compress_bound(size_t src_size) +{ + return 
ZSTD_compressBound(src_size); +} +EXPORT_SYMBOL(zstd_compress_bound); + +zstd_parameters zstd_get_params(int level, + unsigned long long estimated_src_size) +{ + return ZSTD_getParams(level, estimated_src_size, 0); +} +EXPORT_SYMBOL(zstd_get_params); + +zstd_compression_parameters zstd_get_cparams(int level, + unsigned long long estimated_src_size, size_t dict_size) +{ + return ZSTD_getCParams(level, estimated_src_size, dict_size); +} +EXPORT_SYMBOL(zstd_get_cparams); + +size_t zstd_cctx_set_param(zstd_cctx *cctx, ZSTD_cParameter param, int value) +{ + return ZSTD_CCtx_setParameter(cctx, param, value); +} +EXPORT_SYMBOL(zstd_cctx_set_param); + +size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams) +{ + return ZSTD_estimateCCtxSize_usingCParams(*cparams); +} +EXPORT_SYMBOL(zstd_cctx_workspace_bound); + +// Used by zstd_cctx_workspace_bound_with_ext_seq_prod() +static size_t dummy_external_sequence_producer( + void *sequenceProducerState, + ZSTD_Sequence *outSeqs, size_t outSeqsCapacity, + const void *src, size_t srcSize, + const void *dict, size_t dictSize, + int compressionLevel, + size_t windowSize) +{ + (void)sequenceProducerState; + (void)outSeqs; (void)outSeqsCapacity; + (void)src; (void)srcSize; + (void)dict; (void)dictSize; + (void)compressionLevel; + (void)windowSize; + return ZSTD_SEQUENCE_PRODUCER_ERROR; +} + +static void init_cctx_params_from_compress_params( + ZSTD_CCtx_params *cctx_params, + const zstd_compression_parameters *compress_params) +{ + ZSTD_parameters zstd_params; + memset(&zstd_params, 0, sizeof(zstd_params)); + zstd_params.cParams = *compress_params; + ZSTD_CCtxParams_init_advanced(cctx_params, zstd_params); +} + +size_t zstd_cctx_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *compress_params) +{ + ZSTD_CCtx_params cctx_params; + init_cctx_params_from_compress_params(&cctx_params, compress_params); + ZSTD_CCtxParams_registerSequenceProducer(&cctx_params, NULL, dummy_external_sequence_producer); + return ZSTD_estimateCCtxSize_usingCCtxParams(&cctx_params); +} +EXPORT_SYMBOL(zstd_cctx_workspace_bound_with_ext_seq_prod); + +size_t zstd_cstream_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *compress_params) +{ + ZSTD_CCtx_params cctx_params; + init_cctx_params_from_compress_params(&cctx_params, compress_params); + ZSTD_CCtxParams_registerSequenceProducer(&cctx_params, NULL, dummy_external_sequence_producer); + return ZSTD_estimateCStreamSize_usingCCtxParams(&cctx_params); +} +EXPORT_SYMBOL(zstd_cstream_workspace_bound_with_ext_seq_prod); + +zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size) +{ + if (workspace == NULL) + return NULL; + return ZSTD_initStaticCCtx(workspace, workspace_size); +} +EXPORT_SYMBOL(zstd_init_cctx); + +zstd_cctx *zstd_create_cctx_advanced(zstd_custom_mem custom_mem) +{ + return ZSTD_createCCtx_advanced(custom_mem); +} +EXPORT_SYMBOL(zstd_create_cctx_advanced); + +size_t zstd_free_cctx(zstd_cctx *cctx) +{ + return ZSTD_freeCCtx(cctx); +} +EXPORT_SYMBOL(zstd_free_cctx); + +zstd_cdict *zstd_create_cdict_byreference(const void *dict, size_t dict_size, + zstd_compression_parameters cparams, + zstd_custom_mem custom_mem) +{ + return ZSTD_createCDict_advanced(dict, dict_size, ZSTD_dlm_byRef, + ZSTD_dct_auto, cparams, custom_mem); +} +EXPORT_SYMBOL(zstd_create_cdict_byreference); + +size_t zstd_free_cdict(zstd_cdict *cdict) +{ + return ZSTD_freeCDict(cdict); +} +EXPORT_SYMBOL(zstd_free_cdict); + +size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity, + 
const void *src, size_t src_size, const zstd_parameters *parameters) +{ + ZSTD_FORWARD_IF_ERR(zstd_cctx_init(cctx, parameters, src_size)); + return ZSTD_compress2(cctx, dst, dst_capacity, src, src_size); +} +EXPORT_SYMBOL(zstd_compress_cctx); + +size_t zstd_compress_using_cdict(zstd_cctx *cctx, void *dst, + size_t dst_capacity, const void *src, size_t src_size, + const ZSTD_CDict *cdict) +{ + return ZSTD_compress_usingCDict(cctx, dst, dst_capacity, + src, src_size, cdict); +} +EXPORT_SYMBOL(zstd_compress_using_cdict); + +size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams) +{ + return ZSTD_estimateCStreamSize_usingCParams(*cparams); +} +EXPORT_SYMBOL(zstd_cstream_workspace_bound); + +zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters, + unsigned long long pledged_src_size, void *workspace, size_t workspace_size) +{ + zstd_cstream *cstream; + + if (workspace == NULL) + return NULL; + + cstream = ZSTD_initStaticCStream(workspace, workspace_size); + if (cstream == NULL) + return NULL; + + /* 0 means unknown in linux zstd API but means 0 in new zstd API */ + if (pledged_src_size == 0) + pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN; + + if (ZSTD_isError(zstd_cctx_init(cstream, parameters, pledged_src_size))) + return NULL; + + return cstream; +} +EXPORT_SYMBOL(zstd_init_cstream); + +size_t zstd_reset_cstream(zstd_cstream *cstream, + unsigned long long pledged_src_size) +{ + if (pledged_src_size == 0) + pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN; + ZSTD_FORWARD_IF_ERR( ZSTD_CCtx_reset(cstream, ZSTD_reset_session_only) ); + ZSTD_FORWARD_IF_ERR( ZSTD_CCtx_setPledgedSrcSize(cstream, pledged_src_size) ); + return 0; +} +EXPORT_SYMBOL(zstd_reset_cstream); + +size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output, + zstd_in_buffer *input) +{ + return ZSTD_compressStream(cstream, output, input); +} +EXPORT_SYMBOL(zstd_compress_stream); + +size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output) +{ + return ZSTD_flushStream(cstream, output); +} +EXPORT_SYMBOL(zstd_flush_stream); + +size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output) +{ + return ZSTD_endStream(cstream, output); +} +EXPORT_SYMBOL(zstd_end_stream); + +void zstd_register_sequence_producer( + zstd_cctx *cctx, + void* sequence_producer_state, + zstd_sequence_producer_f sequence_producer +) { + ZSTD_registerSequenceProducer(cctx, sequence_producer_state, sequence_producer); +} +EXPORT_SYMBOL(zstd_register_sequence_producer); + +size_t zstd_compress_sequences_and_literals(zstd_cctx *cctx, void* dst, size_t dst_capacity, + const zstd_sequence *in_seqs, size_t in_seqs_size, + const void* literals, size_t lit_size, size_t lit_capacity, + size_t decompressed_size) +{ + return ZSTD_compressSequencesAndLiterals(cctx, dst, dst_capacity, in_seqs, + in_seqs_size, literals, lit_size, + lit_capacity, decompressed_size); +} +EXPORT_SYMBOL(zstd_compress_sequences_and_literals); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Zstd Compressor"); diff --git a/contrib/linux-kernel/zstd_compress_test.c b/contrib/linux-kernel/zstd_compress_test.c deleted file mode 100644 index dc17adf8198..00000000000 --- a/contrib/linux-kernel/zstd_compress_test.c +++ /dev/null @@ -1,279 +0,0 @@ -/* - * Copyright (c) 2016-present, Facebook, Inc. - * All rights reserved. 
- * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - */ - -/* Compression level or 0 to disable */ -#define DO_ZLIB 9 -/* Compression level or 0 to disable */ -#define DO_ZSTD 0 -/* Buffer size */ -#define BUFFER_SIZE 4096 - -#include -#include -#include -#include -#include - -#if DO_ZSTD -#include -#endif - -#if DO_ZLIB -#include -#endif - -/* Device name to pass to register_chrdev(). */ -#define DEVICE_NAME "zstd_compress_test" - -/* Dynamically allocated device major number */ -static int device_major; - -/* - * We reuse the same state, and thus can compress only one file at a time. - */ -static bool device_is_open; - - -static void *workspace = NULL; - -/* - * Input buffer used to put data coming from userspace. - */ -static uint8_t buffer_in[BUFFER_SIZE]; -static uint8_t buffer_out[BUFFER_SIZE]; - -static uint64_t uncompressed_len; -static uint64_t compressed_len; - -#if DO_ZSTD - -static ZSTD_CStream *state; - -static ZSTD_inBuffer input = { - .src = buffer_in, - .size = sizeof(buffer_in), - .pos = sizeof(buffer_in), -}; - -static ZSTD_outBuffer output = { - .dst = buffer_out, - .size = sizeof(buffer_out), - .pos = sizeof(buffer_out), -}; - -#endif /* DO_ZSTD */ - -#if DO_ZLIB - -static z_stream state = { - .next_in = buffer_in, - .avail_in = 0, - .total_in = 0, - - .next_out = buffer_out, - .avail_out = sizeof(buffer_out), - .total_out = 0, - - .msg = NULL, - .state = NULL, - .workspace = NULL, -}; - -#endif /* DO_ZLIB */ - -static int zstd_compress_test_open(struct inode *i, struct file *f) -{ - if (device_is_open) - return -EBUSY; - - device_is_open = true; - - uncompressed_len = compressed_len = 0; - -#if DO_ZSTD - if (ZSTD_isError(ZSTD_resetCStream(state, 0))) - return -EIO; -#endif - -#if DO_ZLIB - if (zlib_deflateReset(&state) != Z_OK) - return -EIO; -#endif - - printk(KERN_INFO DEVICE_NAME ": opened\n"); - return 0; -} - -static int zstd_compress_test_release(struct inode *i, struct file *f) -{ - device_is_open = false; - -#if DO_ZSTD - do { - size_t ret; - - output.pos = 0; - ret = ZSTD_endStream(state, &output); - if (ZSTD_isError(ret)) { - printk(KERN_INFO DEVICE_NAME ": zstd end error %u\n", ZSTD_getErrorCode(ret)); - return -EIO; - } - compressed_len += output.pos; - } while (output.pos != output.size); -#endif - -#if DO_ZLIB - for (;;) { - int ret; - - state.next_out = buffer_out; - state.avail_out = sizeof(buffer_out); - ret = zlib_deflate(&state, Z_FINISH); - compressed_len += sizeof(buffer_out) - state.avail_out; - if (ret == Z_STREAM_END) - break; - if (ret != Z_OK) { - printk(KERN_INFO DEVICE_NAME ": zlib end error %d: %s\n", ret, state.msg); - return -EIO; - } - } -#endif - - printk(KERN_INFO DEVICE_NAME ": uncompressed_len = %llu\n", uncompressed_len); - printk(KERN_INFO DEVICE_NAME ": compressed_len = %llu\n", compressed_len); - printk(KERN_INFO DEVICE_NAME ": closed\n"); - return 0; -} - -/* - * Hash the data given to us from userspace. 
- */ -static ssize_t zstd_compress_test_write(struct file *file, - const char __user *buf, size_t size, loff_t *pos) -{ - size_t remaining = size; - - while (remaining > 0) { - size_t const copy_size = min(remaining, sizeof(buffer_in)); - - if (copy_from_user(buffer_in, buf, copy_size)) - return -EFAULT; - buf += copy_size; - remaining -= copy_size; - uncompressed_len += copy_size; - -#if DO_ZSTD - input.pos = 0; - input.size = copy_size; - while (input.pos != input.size) { - size_t ret; - - output.pos = 0; - ret = ZSTD_compressStream(state, &output, &input); - if (ZSTD_isError(ret)) { - printk(KERN_INFO DEVICE_NAME ": zstd compress error %u\n", ZSTD_getErrorCode(ret)); - return -EIO; - } - compressed_len += output.pos; - } -#endif -#if DO_ZLIB - state.next_in = buffer_in; - state.avail_in = copy_size; - while (state.avail_in > 0) { - int ret; - - state.next_out = buffer_out; - state.avail_out = sizeof(buffer_out); - ret = zlib_deflate(&state, Z_NO_FLUSH); - compressed_len += sizeof(buffer_out) - state.avail_out; - if (ret != Z_OK) { - printk(KERN_INFO DEVICE_NAME ": zlib end error %d: %s\n", ret, state.msg); - return -EIO; - } - } -#endif - } - return size; -} -/* register the character device. */ -static int __init zstd_compress_test_init(void) -{ - static const struct file_operations fileops = { - .owner = THIS_MODULE, - .open = &zstd_compress_test_open, - .release = &zstd_compress_test_release, - .write = &zstd_compress_test_write - }; - size_t workspace_size = 0; -#if DO_ZSTD - ZSTD_parameters params; -#endif - - device_major = register_chrdev(0, DEVICE_NAME, &fileops); - if (device_major < 0) { - return device_major; - } - -#if DO_ZSTD - params = ZSTD_getParams(DO_ZSTD, 0, 0); - workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams); - - if (!(workspace = vmalloc(workspace_size))) - goto fail; - if (!(state = ZSTD_initCStream(params, 0, workspace, workspace_size))) - goto fail; -#endif - -#if DO_ZLIB - workspace_size = zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL); - - if (!(workspace = vmalloc(workspace_size))) - goto fail; - state.workspace = workspace; - if (zlib_deflateInit(&state, DO_ZLIB) != Z_OK) - goto fail; -#endif - - printk(KERN_INFO DEVICE_NAME ": module loaded\n"); - printk(KERN_INFO DEVICE_NAME ": compression requires %zu bytes of memory\n", workspace_size); - printk(KERN_INFO DEVICE_NAME ": Create a device node with " - "'mknod " DEVICE_NAME " c %d 0' and write data " - "to it.\n", device_major); - return 0; - -fail: - printk(KERN_INFO DEVICE_NAME ": failed to load module\n"); - if (workspace) { - vfree(workspace); - workspace = NULL; - } - return -ENOMEM; -} - -static void __exit zstd_compress_test_exit(void) -{ - unregister_chrdev(device_major, DEVICE_NAME); -#if DO_ZLIB - zlib_deflateEnd(&state); -#endif - if (workspace) { - vfree(workspace); - workspace = NULL; - } - printk(KERN_INFO DEVICE_NAME ": module unloaded\n"); -} - -module_init(zstd_compress_test_init); -module_exit(zstd_compress_test_exit); - -MODULE_DESCRIPTION("Zstd compression tester"); -MODULE_VERSION("1.0"); - -MODULE_LICENSE("Dual BSD/GPL"); diff --git a/contrib/linux-kernel/zstd_decompress_module.c b/contrib/linux-kernel/zstd_decompress_module.c new file mode 100644 index 00000000000..0ae819f0c92 --- /dev/null +++ b/contrib/linux-kernel/zstd_decompress_module.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
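Ahead of the definitions, a matching sketch (again illustrative, not part of this patch) of a one-shot decompression call through the wrappers this file exports below; the error mapping and the assumption that dst_capacity bounds the decompressed size are mine:

```
/* Illustrative only: drive the one-shot decompression wrappers below. */
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>

static int example_decompress(const void *src, size_t src_size,
			      void *dst, size_t dst_capacity)
{
	size_t const workspace_size = zstd_dctx_workspace_bound();
	void *workspace = vmalloc(workspace_size);
	zstd_dctx *dctx;
	size_t ret;

	if (!workspace)
		return -ENOMEM;
	dctx = zstd_init_dctx(workspace, workspace_size);
	if (!dctx) {
		vfree(workspace);
		return -EINVAL;
	}
	ret = zstd_decompress_dctx(dctx, dst, dst_capacity, src, src_size);
	vfree(workspace);
	return zstd_is_error(ret) ? -EIO : 0; /* on success, dst holds ret bytes */
}
```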
+ * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/zstd.h> + +#include "common/zstd_deps.h" + +/* Common symbols. zstd_compress must depend on zstd_decompress. */ + +unsigned int zstd_is_error(size_t code) +{ + return ZSTD_isError(code); +} +EXPORT_SYMBOL(zstd_is_error); + +zstd_error_code zstd_get_error_code(size_t code) +{ + return ZSTD_getErrorCode(code); +} +EXPORT_SYMBOL(zstd_get_error_code); + +const char *zstd_get_error_name(size_t code) +{ + return ZSTD_getErrorName(code); +} +EXPORT_SYMBOL(zstd_get_error_name); + +/* Decompression symbols. */ + +size_t zstd_dctx_workspace_bound(void) +{ + return ZSTD_estimateDCtxSize(); +} +EXPORT_SYMBOL(zstd_dctx_workspace_bound); + +zstd_dctx *zstd_create_dctx_advanced(zstd_custom_mem custom_mem) +{ + return ZSTD_createDCtx_advanced(custom_mem); +} +EXPORT_SYMBOL(zstd_create_dctx_advanced); + +size_t zstd_free_dctx(zstd_dctx *dctx) +{ + return ZSTD_freeDCtx(dctx); +} +EXPORT_SYMBOL(zstd_free_dctx); + +zstd_ddict *zstd_create_ddict_byreference(const void *dict, size_t dict_size, + zstd_custom_mem custom_mem) +{ + return ZSTD_createDDict_advanced(dict, dict_size, ZSTD_dlm_byRef, + ZSTD_dct_auto, custom_mem); +} +EXPORT_SYMBOL(zstd_create_ddict_byreference); + +size_t zstd_free_ddict(zstd_ddict *ddict) +{ + return ZSTD_freeDDict(ddict); +} +EXPORT_SYMBOL(zstd_free_ddict); + +zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size) +{ + if (workspace == NULL) + return NULL; + return ZSTD_initStaticDCtx(workspace, workspace_size); +} +EXPORT_SYMBOL(zstd_init_dctx); + +size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity, + const void *src, size_t src_size) +{ + return ZSTD_decompressDCtx(dctx, dst, dst_capacity, src, src_size); +} +EXPORT_SYMBOL(zstd_decompress_dctx); + +size_t zstd_decompress_using_ddict(zstd_dctx *dctx, + void *dst, size_t dst_capacity, const void* src, size_t src_size, + const zstd_ddict* ddict) +{ + return ZSTD_decompress_usingDDict(dctx, dst, dst_capacity, src, + src_size, ddict); +} +EXPORT_SYMBOL(zstd_decompress_using_ddict); + +size_t zstd_dstream_workspace_bound(size_t max_window_size) +{ + return ZSTD_estimateDStreamSize(max_window_size); +} +EXPORT_SYMBOL(zstd_dstream_workspace_bound); + +zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace, + size_t workspace_size) +{ + if (workspace == NULL) + return NULL; + (void)max_window_size; + return ZSTD_initStaticDStream(workspace, workspace_size); +} +EXPORT_SYMBOL(zstd_init_dstream); + +size_t zstd_reset_dstream(zstd_dstream *dstream) +{ + return ZSTD_DCtx_reset(dstream, ZSTD_reset_session_only); +} +EXPORT_SYMBOL(zstd_reset_dstream); + +size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output, + zstd_in_buffer *input) +{ + return ZSTD_decompressStream(dstream, output, input); +} +EXPORT_SYMBOL(zstd_decompress_stream); + +size_t zstd_find_frame_compressed_size(const void *src, size_t src_size) +{ + return ZSTD_findFrameCompressedSize(src, src_size); +} +EXPORT_SYMBOL(zstd_find_frame_compressed_size); + +size_t zstd_get_frame_header(zstd_frame_header *header, const void *src, + size_t src_size) +{ + return ZSTD_getFrameHeader(header, src, src_size); +} +EXPORT_SYMBOL(zstd_get_frame_header); + +MODULE_LICENSE("Dual
BSD/GPL"); +MODULE_DESCRIPTION("Zstd Decompressor"); diff --git a/contrib/linux-kernel/zstd_decompress_test.c b/contrib/linux-kernel/zstd_decompress_test.c deleted file mode 100644 index f6efddd3059..00000000000 --- a/contrib/linux-kernel/zstd_decompress_test.c +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Copyright (c) 2016-present, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - */ - -/* Compression level or 0 to disable */ -#define DO_ZLIB 1 -/* Compression level or 0 to disable */ -#define DO_ZSTD 0 -/* Buffer size */ -#define BUFFER_SIZE 4096 - -#include -#include -#include -#include -#include - -#if DO_ZSTD -#include -#endif - -#if DO_ZLIB -#include -#endif - -/* Device name to pass to register_chrdev(). */ -#define DEVICE_NAME "zstd_decompress_test" - -/* Dynamically allocated device major number */ -static int device_major; - -/* - * We reuse the same state, and thus can compress only one file at a time. - */ -static bool device_is_open; - - -static void *workspace = NULL; - -/* - * Input buffer used to put data coming from userspace. - */ -static uint8_t buffer_in[BUFFER_SIZE]; -static uint8_t buffer_out[BUFFER_SIZE]; - -static uint64_t uncompressed_len; -static uint64_t compressed_len; - -#if DO_ZSTD - -static ZSTD_DStream *state; - -static ZSTD_inBuffer input = { - .src = buffer_in, - .size = sizeof(buffer_in), - .pos = sizeof(buffer_in), -}; - -static ZSTD_outBuffer output = { - .dst = buffer_out, - .size = sizeof(buffer_out), - .pos = sizeof(buffer_out), -}; - -#endif /* DO_ZSTD */ - -#if DO_ZLIB - -static z_stream state = { - .next_in = buffer_in, - .avail_in = 0, - .total_in = 0, - - .next_out = buffer_out, - .avail_out = sizeof(buffer_out), - .total_out = 0, - - .msg = NULL, - .state = NULL, - .workspace = NULL, -}; - -#endif /* DO_ZLIB */ - -static int zstd_decompress_test_open(struct inode *i, struct file *f) -{ - if (device_is_open) - return -EBUSY; - - device_is_open = true; - - uncompressed_len = compressed_len = 0; - -#if DO_ZSTD - if (ZSTD_isError(ZSTD_resetDStream(state))) - return -EIO; -#endif - -#if DO_ZLIB - if (zlib_inflateReset(&state) != Z_OK) - return -EIO; -#endif - - printk(KERN_INFO DEVICE_NAME ": opened\n"); - return 0; -} - -static int zstd_decompress_test_release(struct inode *i, struct file *f) -{ - device_is_open = false; - - printk(KERN_INFO DEVICE_NAME ": uncompressed_len = %llu\n", uncompressed_len); - printk(KERN_INFO DEVICE_NAME ": compressed_len = %llu\n", compressed_len); - printk(KERN_INFO DEVICE_NAME ": closed\n"); - return 0; -} - -/* - * Hash the data given to us from userspace. 
- */ -static ssize_t zstd_decompress_test_write(struct file *file, - const char __user *buf, size_t size, loff_t *pos) -{ - size_t remaining = size; - - while (remaining > 0) { - size_t const copy_size = min(remaining, sizeof(buffer_in)); - - if (copy_from_user(buffer_in, buf, copy_size)) - return -EFAULT; - buf += copy_size; - remaining -= copy_size; - compressed_len += copy_size; - -#if DO_ZSTD - input.pos = 0; - input.size = copy_size; - while (input.pos != input.size) { - size_t ret; - - output.pos = 0; - ret = ZSTD_decompressStream(state, &output, &input); - if (ZSTD_isError(ret)) { - printk(KERN_INFO DEVICE_NAME ": zstd decompress error %u\n", ZSTD_getErrorCode(ret)); - return -EIO; - } - uncompressed_len += output.pos; - } -#endif -#if DO_ZLIB - state.next_in = buffer_in; - state.avail_in = copy_size; - while (state.avail_in > 0) { - int ret; - - state.next_out = buffer_out; - state.avail_out = sizeof(buffer_out); - ret = zlib_inflate(&state, Z_NO_FLUSH); - uncompressed_len += sizeof(buffer_out) - state.avail_out; - if (ret != Z_OK && ret != Z_STREAM_END) { - printk(KERN_INFO DEVICE_NAME ": zlib decompress error %d: %s\n", ret, state.msg); - return -EIO; - } - } -#endif - } - return size; -} -/* register the character device. */ -static int __init zstd_decompress_test_init(void) -{ - static const struct file_operations fileops = { - .owner = THIS_MODULE, - .open = &zstd_decompress_test_open, - .release = &zstd_decompress_test_release, - .write = &zstd_decompress_test_write - }; - size_t workspace_size = 0; -#if DO_ZSTD - ZSTD_parameters params; - size_t max_window_size; -#endif - - device_major = register_chrdev(0, DEVICE_NAME, &fileops); - if (device_major < 0) { - return device_major; - } - -#if DO_ZSTD - params = ZSTD_getParams(DO_ZSTD, 0, 0); - max_window_size = (size_t)1 << params.cParams.windowLog; - workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size); - - if (!(workspace = vmalloc(workspace_size))) - goto fail; - if (!(state = ZSTD_initDStream(max_window_size, workspace, workspace_size))) - goto fail; -#endif - -#if DO_ZLIB - workspace_size = zlib_inflate_workspacesize(); - - if (!(workspace = vmalloc(workspace_size))) - goto fail; - state.workspace = workspace; - if (zlib_inflateInit(&state) != Z_OK) - goto fail; -#endif - - printk(KERN_INFO DEVICE_NAME ": module loaded\n"); - printk(KERN_INFO DEVICE_NAME ": decompression requires %zu bytes of memory\n", workspace_size); - printk(KERN_INFO DEVICE_NAME ": Create a device node with " - "'mknod " DEVICE_NAME " c %d 0' and write data " - "to it.\n", device_major); - return 0; - -fail: - printk(KERN_INFO DEVICE_NAME ": failed to load module\n"); - if (workspace) { - vfree(workspace); - workspace = NULL; - } - return -ENOMEM; -} - -static void __exit zstd_decompress_test_exit(void) -{ - unregister_chrdev(device_major, DEVICE_NAME); -#if DO_ZLIB - zlib_deflateEnd(&state); -#endif - if (workspace) { - vfree(workspace); - workspace = NULL; - } - printk(KERN_INFO DEVICE_NAME ": module unloaded\n"); -} - -module_init(zstd_decompress_test_init); -module_exit(zstd_decompress_test_exit); - -MODULE_DESCRIPTION("Zstd decompression tester"); -MODULE_VERSION("1.0"); - -MODULE_LICENSE("Dual BSD/GPL"); diff --git a/contrib/linux-kernel/zstd_deps.h b/contrib/linux-kernel/zstd_deps.h new file mode 100644 index 00000000000..f931f7d0e29 --- /dev/null +++ b/contrib/linux-kernel/zstd_deps.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
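The header that follows is organized as opt-in dependency groups: a zstd source file defines the corresponding ZSTD_DEPS_NEED_* macro for each group it wants before including zstd_deps.h. A hypothetical consumer of the 64-bit math group might look like this (illustrative only, not part of the patch):

```
/* Hypothetical consumer: request the 64-bit division shim. */
#define ZSTD_DEPS_NEED_MATH64
#include "zstd_deps.h"

/* Computes (a * 100) / b without relying on native 64-bit division;
 * in the kernel build, ZSTD_div64() maps to div_u64(). */
static uint64_t example_percent_x100(uint64_t a, uint32_t b)
{
	return ZSTD_div64(a * 100, b);
}
```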
+ * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/* + * This file provides common libc dependencies that zstd requires. + * The purpose is to allow replacing this file with a custom implementation + * to compile zstd without libc support. + */ + +/* Need: + * NULL + * INT_MAX + * UINT_MAX + * ZSTD_memcpy() + * ZSTD_memset() + * ZSTD_memmove() + */ +#ifndef ZSTD_DEPS_COMMON +#define ZSTD_DEPS_COMMON + +#include <linux/limits.h> +#include <linux/stddef.h> + +#define ZSTD_memcpy(d,s,n) __builtin_memcpy((d),(s),(n)) +#define ZSTD_memmove(d,s,n) __builtin_memmove((d),(s),(n)) +#define ZSTD_memset(d,s,n) __builtin_memset((d),(s),(n)) + +#endif /* ZSTD_DEPS_COMMON */ + +/* + * Define malloc as always failing. That means the user must + * either use ZSTD_customMem or statically allocate memory. + * Need: + * ZSTD_malloc() + * ZSTD_free() + * ZSTD_calloc() + */ +#ifdef ZSTD_DEPS_NEED_MALLOC +#ifndef ZSTD_DEPS_MALLOC +#define ZSTD_DEPS_MALLOC + +#define ZSTD_malloc(s) ({ (void)(s); NULL; }) +#define ZSTD_free(p) ((void)(p)) +#define ZSTD_calloc(n,s) ({ (void)(n); (void)(s); NULL; }) + +#endif /* ZSTD_DEPS_MALLOC */ +#endif /* ZSTD_DEPS_NEED_MALLOC */ + +/* + * Provides 64-bit math support. + * Need: + * U64 ZSTD_div64(U64 dividend, U32 divisor) + */ +#ifdef ZSTD_DEPS_NEED_MATH64 +#ifndef ZSTD_DEPS_MATH64 +#define ZSTD_DEPS_MATH64 + +#include <linux/math64.h> + +static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) { + return div_u64(dividend, divisor); +} + +#endif /* ZSTD_DEPS_MATH64 */ +#endif /* ZSTD_DEPS_NEED_MATH64 */ + +/* + * This is only requested when DEBUGLEVEL >= 1, meaning + * it is disabled in production. + * Need: + * assert() + */ +#ifdef ZSTD_DEPS_NEED_ASSERT +#ifndef ZSTD_DEPS_ASSERT +#define ZSTD_DEPS_ASSERT + +#include <linux/kernel.h> + +#define assert(x) WARN_ON(!(x)) + +#endif /* ZSTD_DEPS_ASSERT */ +#endif /* ZSTD_DEPS_NEED_ASSERT */ + +/* + * This is only requested when DEBUGLEVEL >= 2, meaning + * it is disabled in production. + * Need: + * ZSTD_DEBUG_PRINT() + */ +#ifdef ZSTD_DEPS_NEED_IO +#ifndef ZSTD_DEPS_IO +#define ZSTD_DEPS_IO + +#include <linux/printk.h> + +#define ZSTD_DEBUG_PRINT(...) pr_debug(__VA_ARGS__) + +#endif /* ZSTD_DEPS_IO */ +#endif /* ZSTD_DEPS_NEED_IO */ + +/* + * Only requested when MSAN is enabled. + * Need: + * intptr_t + */ +#ifdef ZSTD_DEPS_NEED_STDINT +#ifndef ZSTD_DEPS_STDINT +#define ZSTD_DEPS_STDINT + +/* intptr_t already provided by ZSTD_DEPS_COMMON */ + +#endif /* ZSTD_DEPS_STDINT */ +#endif /* ZSTD_DEPS_NEED_STDINT */ diff --git a/contrib/match_finders/README.md b/contrib/match_finders/README.md new file mode 100644 index 00000000000..54055c37086 --- /dev/null +++ b/contrib/match_finders/README.md @@ -0,0 +1,42 @@ +## Edit Distance Match Finder + +``` +/* This match finder leverages techniques used in file comparison algorithms + * to find matches between a dictionary and a source file. + * + * The original motivation for studying this approach was to try to optimize + * Zstandard for the use case of patching: the most common scenario being + * updating an existing software package with the next version. When patching, + * the difference between the old version of the package and the new version + * is generally tiny (most of the new file will be identical to + * the old one).
In more technical terms, the edit distance (the minimal number + * of changes required to take one sequence of bytes to another) between the + * files would be small relative to the size of the file. + * + * Various 'diffing' algorithms utilize this notion of edit distance and + * the corresponding concept of a minimal edit script between two + * sequences to identify the regions within two files where they differ. + * The core algorithm used in this match finder is described in: + * + * "An O(ND) Difference Algorithm and its Variations", Eugene W. Myers, + * Algorithmica Vol. 1, 1986, pp. 251-266, + * . + * + * Additional algorithmic heuristics for speed improvement have also been included. + * These were inspired by implementations of various regular and binary diffing + * algorithms such as GNU diff, bsdiff, and Xdelta. + * + * Note: after some experimentation, this approach proved not to provide enough + * utility to justify the additional CPU used in finding matches. The one area + * where this approach consistently outperforms Zstandard even on level 19 is + * when compressing small files (<10 KB) using an equally small dictionary that + * is very similar to the source file. For the use case this was intended for + * (large similar files), this approach by itself took 5-10X longer than zstd-19 and + * generally resulted in 2-3X larger files. The core advantage that zstd-19 has + * over this approach for match finding is overlapping matches, which this approach + * cannot find. + * + * I'm leaving this in the contrib section in case this ever becomes interesting + * to explore again. + * */ +``` diff --git a/contrib/match_finders/zstd_edist.c b/contrib/match_finders/zstd_edist.c new file mode 100644 index 00000000000..d685cdd9e2f --- /dev/null +++ b/contrib/match_finders/zstd_edist.c @@ -0,0 +1,558 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/*-************************************* +* Dependencies +***************************************/ + +/* Currently relies on qsort when combining contiguous matches. This can probably + * be avoided but would require changes to the algorithm. The qsort is far from + * the bottleneck in this algorithm even for medium-sized files so it's probably + * not worth trying to address */ +#include <stdlib.h> +#include <string.h> + +#include "zstd_edist.h" +#include "mem.h" + +/*-************************************* +* Constants +***************************************/ + +/* Just a sentinel for the entries of the diagonal matrix */ +#define ZSTD_EDIST_DIAG_MAX (S32)(1 << 30) + +/* How large should a snake be to be considered a 'big' snake.
+ * For an explanation of what a 'snake' is with respect to the + * edit distance matrix, see the linked paper in zstd_edist.h */ +#define ZSTD_EDIST_SNAKE_THRESH 20 + +/* After how many iterations should we start to use the heuristic + * based on 'big' snakes */ +#define ZSTD_EDIST_SNAKE_ITER_THRESH 200 + +/* After how many iterations should we just give up and take + * the best available edit script for this round */ +#define ZSTD_EDIST_EXPENSIVE_THRESH 1024 + +/*-************************************* +* Structures +***************************************/ + +typedef struct { + U32 dictIdx; + U32 srcIdx; + U32 matchLength; +} ZSTD_eDist_match; + +typedef struct { + const BYTE* dict; + const BYTE* src; + size_t dictSize; + size_t srcSize; + S32* forwardDiag; /* Entries of the forward diagonal stored here */ + S32* backwardDiag; /* Entries of the backward diagonal stored here. + * Note: this buffer and the 'forwardDiag' buffer + * are contiguous. See ZSTD_eDist_genSequences() */ + ZSTD_eDist_match* matches; /* Accumulate matches of length 1 in this buffer. + * In a subsequent post-processing step, we combine + * contiguous matches. */ + U32 nbMatches; +} ZSTD_eDist_state; + +typedef struct { + S32 dictMid; /* The mid diagonal for the dictionary */ + S32 srcMid; /* The mid diagonal for the source */ + int lowUseHeuristics; /* Should we use heuristics for the low part */ + int highUseHeuristics; /* Should we use heuristics for the high part */ +} ZSTD_eDist_partition; + +/*-************************************* +* Internal +***************************************/ + +static void ZSTD_eDist_diag(ZSTD_eDist_state* state, + ZSTD_eDist_partition* partition, + S32 dictLow, S32 dictHigh, S32 srcLow, + S32 srcHigh, int useHeuristics) +{ + S32* const forwardDiag = state->forwardDiag; + S32* const backwardDiag = state->backwardDiag; + const BYTE* const dict = state->dict; + const BYTE* const src = state->src; + + S32 const diagMin = dictLow - srcHigh; + S32 const diagMax = dictHigh - srcLow; + S32 const forwardMid = dictLow - srcLow; + S32 const backwardMid = dictHigh - srcHigh; + + S32 forwardMin = forwardMid; + S32 forwardMax = forwardMid; + S32 backwardMin = backwardMid; + S32 backwardMax = backwardMid; + int odd = (forwardMid - backwardMid) & 1; + U32 iterations; + + forwardDiag[forwardMid] = dictLow; + backwardDiag[backwardMid] = dictHigh; + + /* Main loop for updating diag entries. Unless useHeuristics is + * set to false, this loop will run until it finds the minimal + * edit script */ + for (iterations = 1;;iterations++) { + S32 diag; + int bigSnake = 0; + + if (forwardMin > diagMin) { + forwardMin--; + forwardDiag[forwardMin - 1] = -1; + } else { + forwardMin++; + } + + if (forwardMax < diagMax) { + forwardMax++; + forwardDiag[forwardMax + 1] = -1; + } else { + forwardMax--; + } + + for (diag = forwardMax; diag >= forwardMin; diag -= 2) { + S32 dictIdx; + S32 srcIdx; + S32 low = forwardDiag[diag - 1]; + S32 high = forwardDiag[diag + 1]; + S32 dictIdx0 = low < high ?
high : low + 1; + + for (dictIdx = dictIdx0, srcIdx = dictIdx0 - diag; + dictIdx < dictHigh && srcIdx < srcHigh && dict[dictIdx] == src[srcIdx]; + dictIdx++, srcIdx++) continue; + + if (dictIdx - dictIdx0 > ZSTD_EDIST_SNAKE_THRESH) + bigSnake = 1; + + forwardDiag[diag] = dictIdx; + + if (odd && backwardMin <= diag && diag <= backwardMax && backwardDiag[diag] <= dictIdx) { + partition->dictMid = dictIdx; + partition->srcMid = srcIdx; + partition->lowUseHeuristics = 0; + partition->highUseHeuristics = 0; + return; + } + } + + if (backwardMin > diagMin) { + backwardMin--; + backwardDiag[backwardMin - 1] = ZSTD_EDIST_DIAG_MAX; + } else { + backwardMin++; + } + + if (backwardMax < diagMax) { + backwardMax++; + backwardDiag[backwardMax + 1] = ZSTD_EDIST_DIAG_MAX; + } else { + backwardMax--; + } + + + for (diag = backwardMax; diag >= backwardMin; diag -= 2) { + S32 dictIdx; + S32 srcIdx; + S32 low = backwardDiag[diag - 1]; + S32 high = backwardDiag[diag + 1]; + S32 dictIdx0 = low < high ? low : high - 1; + + for (dictIdx = dictIdx0, srcIdx = dictIdx0 - diag; + dictLow < dictIdx && srcLow < srcIdx && dict[dictIdx - 1] == src[srcIdx - 1]; + dictIdx--, srcIdx--) continue; + + if (dictIdx0 - dictIdx > ZSTD_EDIST_SNAKE_THRESH) + bigSnake = 1; + + backwardDiag[diag] = dictIdx; + + if (!odd && forwardMin <= diag && diag <= forwardMax && dictIdx <= forwardDiag[diag]) { + partition->dictMid = dictIdx; + partition->srcMid = srcIdx; + partition->lowUseHeuristics = 0; + partition->highUseHeuristics = 0; + return; + } + } + + if (!useHeuristics) + continue; + + /* Everything under this point is a heuristic. Using these will + * substantially speed up the match finding. In some cases, taking + * the total match finding time from several minutes to seconds. + * Of course, the caveat is that the edit script found may no longer + * be optimal */ + + /* Big snake heuristic */ + if (iterations > ZSTD_EDIST_SNAKE_ITER_THRESH && bigSnake) { + { + S32 best = 0; + + for (diag = forwardMax; diag >= forwardMin; diag -= 2) { + S32 diagDiag = diag - forwardMid; + S32 dictIdx = forwardDiag[diag]; + S32 srcIdx = dictIdx - diag; + S32 v = (dictIdx - dictLow) * 2 - diagDiag; + + if (v > 12 * (iterations + (diagDiag < 0 ? -diagDiag : diagDiag))) { + if (v > best + && dictLow + ZSTD_EDIST_SNAKE_THRESH <= dictIdx && dictIdx <= dictHigh + && srcLow + ZSTD_EDIST_SNAKE_THRESH <= srcIdx && srcIdx <= srcHigh) { + S32 k; + for (k = 1; dict[dictIdx - k] == src[srcIdx - k]; k++) { + if (k == ZSTD_EDIST_SNAKE_THRESH) { + best = v; + partition->dictMid = dictIdx; + partition->srcMid = srcIdx; + break; + } + } + } + } + } + + if (best > 0) { + partition->lowUseHeuristics = 0; + partition->highUseHeuristics = 1; + return; + } + } + + { + S32 best = 0; + + for (diag = backwardMax; diag >= backwardMin; diag -= 2) { + S32 diagDiag = diag - backwardMid; + S32 dictIdx = backwardDiag[diag]; + S32 srcIdx = dictIdx - diag; + S32 v = (dictHigh - dictIdx) * 2 + diagDiag; + + if (v > 12 * (iterations + (diagDiag < 0 ? 
-diagDiag : diagDiag))) { + if (v > best + && dictLow < dictIdx && dictIdx <= dictHigh - ZSTD_EDIST_SNAKE_THRESH + && srcLow < srcIdx && srcIdx <= srcHigh - ZSTD_EDIST_SNAKE_THRESH) { + int k; + for (k = 0; dict[dictIdx + k] == src[srcIdx + k]; k++) { + if (k == ZSTD_EDIST_SNAKE_THRESH - 1) { + best = v; + partition->dictMid = dictIdx; + partition->srcMid = srcIdx; + break; + } + } + } + } + } + + if (best > 0) { + partition->lowUseHeuristics = 1; + partition->highUseHeuristics = 0; + return; + } + } + } + + /* More general 'too expensive' heuristic */ + if (iterations >= ZSTD_EDIST_EXPENSIVE_THRESH) { + S32 forwardDictSrcBest; + S32 forwardDictBest = 0; + S32 backwardDictSrcBest; + S32 backwardDictBest = 0; + + forwardDictSrcBest = -1; + for (diag = forwardMax; diag >= forwardMin; diag -= 2) { + S32 dictIdx = MIN(forwardDiag[diag], dictHigh); + S32 srcIdx = dictIdx - diag; + + if (srcHigh < srcIdx) { + dictIdx = srcHigh + diag; + srcIdx = srcHigh; + } + + if (forwardDictSrcBest < dictIdx + srcIdx) { + forwardDictSrcBest = dictIdx + srcIdx; + forwardDictBest = dictIdx; + } + } + + backwardDictSrcBest = ZSTD_EDIST_DIAG_MAX; + for (diag = backwardMax; diag >= backwardMin; diag -= 2) { + S32 dictIdx = MAX(dictLow, backwardDiag[diag]); + S32 srcIdx = dictIdx - diag; + + if (srcIdx < srcLow) { + dictIdx = srcLow + diag; + srcIdx = srcLow; + } + + if (dictIdx + srcIdx < backwardDictSrcBest) { + backwardDictSrcBest = dictIdx + srcIdx; + backwardDictBest = dictIdx; + } + } + + if ((dictHigh + srcHigh) - backwardDictSrcBest < forwardDictSrcBest - (dictLow + srcLow)) { + partition->dictMid = forwardDictBest; + partition->srcMid = forwardDictSrcBest - forwardDictBest; + partition->lowUseHeuristics = 0; + partition->highUseHeuristics = 1; + } else { + partition->dictMid = backwardDictBest; + partition->srcMid = backwardDictSrcBest - backwardDictBest; + partition->lowUseHeuristics = 1; + partition->highUseHeuristics = 0; + } + return; + } + } +} + +static void ZSTD_eDist_insertMatch(ZSTD_eDist_state* state, + S32 const dictIdx, S32 const srcIdx) +{ + state->matches[state->nbMatches].dictIdx = dictIdx; + state->matches[state->nbMatches].srcIdx = srcIdx; + state->matches[state->nbMatches].matchLength = 1; + state->nbMatches++; +} + +static int ZSTD_eDist_compare(ZSTD_eDist_state* state, + S32 dictLow, S32 dictHigh, S32 srcLow, + S32 srcHigh, int useHeuristics) +{ + const BYTE* const dict = state->dict; + const BYTE* const src = state->src; + + /* Find matches while traversing from the low end */ + while (dictLow < dictHigh && srcLow < srcHigh && dict[dictLow] == src[srcLow]) { + ZSTD_eDist_insertMatch(state, dictLow, srcLow); + dictLow++; + srcLow++; + } + + /* Find matches while traversing from the high end */ + while (dictLow < dictHigh && srcLow < srcHigh && dict[dictHigh - 1] == src[srcHigh - 1]) { + ZSTD_eDist_insertMatch(state, dictHigh - 1, srcHigh - 1); + dictHigh--; + srcHigh--; + } + + /* If the low and high ends end up touching, we're done. If we wanted to + * make note of the differences like most diffing algorithms do, we would + * do so here; in our case, we're only concerned with matches. + * Note: if you wanted to compute the edit distance itself, you could + * just accumulate the cost for an insertion/deletion + * below.
*/ + if (dictLow == dictHigh) { + while (srcLow < srcHigh) { + /* Reaching this point means inserting src[srcLow] into + * the current position of dict */ + srcLow++; + } + } else if (srcLow == srcHigh) { + while (dictLow < dictHigh) { + /* Reaching this point means deleting dict[dictLow] from + * the current position of dict */ + dictLow++; + } + } else { + ZSTD_eDist_partition partition; + partition.dictMid = 0; + partition.srcMid = 0; + ZSTD_eDist_diag(state, &partition, dictLow, dictHigh, + srcLow, srcHigh, useHeuristics); + if (ZSTD_eDist_compare(state, dictLow, partition.dictMid, + srcLow, partition.srcMid, partition.lowUseHeuristics)) + return 1; + if (ZSTD_eDist_compare(state, partition.dictMid, dictHigh, + partition.srcMid, srcHigh, partition.highUseHeuristics)) + return 1; + } + + return 0; +} + +static int ZSTD_eDist_matchComp(const void* p, const void* q) +{ + S32 const l = ((ZSTD_eDist_match*)p)->srcIdx; + S32 const r = ((ZSTD_eDist_match*)q)->srcIdx; + return (l - r); +} + +/* The matches from the approach above will all be of the form + * (dictIdx, srcIdx, 1). This method combines contiguous matches + * of length MINMATCH or greater. Matches less than MINMATCH + * are discarded */ +static void ZSTD_eDist_combineMatches(ZSTD_eDist_state* state) +{ + /* Create a new buffer to put the combined matches into + * and memcpy to state->matches after */ + ZSTD_eDist_match* combinedMatches = + ZSTD_malloc(state->nbMatches * sizeof(ZSTD_eDist_match), + ZSTD_defaultCMem); + + U32 nbCombinedMatches = 1; + size_t i; + + /* Make sure that the srcIdx and dictIdx are in sorted order. + * The combination step won't work otherwise */ + qsort(state->matches, state->nbMatches, sizeof(ZSTD_eDist_match), ZSTD_eDist_matchComp); + + memcpy(combinedMatches, state->matches, sizeof(ZSTD_eDist_match)); + for (i = 1; i < state->nbMatches; i++) { + ZSTD_eDist_match const match = state->matches[i]; + ZSTD_eDist_match const combinedMatch = + combinedMatches[nbCombinedMatches - 1]; + if (combinedMatch.srcIdx + combinedMatch.matchLength == match.srcIdx && + combinedMatch.dictIdx + combinedMatch.matchLength == match.dictIdx) { + combinedMatches[nbCombinedMatches - 1].matchLength++; + } else { + /* Discard matches that are less than MINMATCH */ + if (combinedMatches[nbCombinedMatches - 1].matchLength < MINMATCH) { + nbCombinedMatches--; + } + + memcpy(combinedMatches + nbCombinedMatches, + state->matches + i, sizeof(ZSTD_eDist_match)); + nbCombinedMatches++; + } + } + memcpy(state->matches, combinedMatches, nbCombinedMatches * sizeof(ZSTD_eDist_match)); + state->nbMatches = nbCombinedMatches; + ZSTD_free(combinedMatches, ZSTD_defaultCMem); +} + +static size_t ZSTD_eDist_convertMatchesToSequences(ZSTD_Sequence* sequences, + ZSTD_eDist_state* state) +{ + const ZSTD_eDist_match* matches = state->matches; + size_t const nbMatches = state->nbMatches; + size_t const dictSize = state->dictSize; + size_t nbSequences = 0; + size_t i; + for (i = 0; i < nbMatches; i++) { + ZSTD_eDist_match const match = matches[i]; + U32 const litLength = !i ? 
match.srcIdx : + match.srcIdx - (matches[i - 1].srcIdx + matches[i - 1].matchLength); + U32 const offset = (match.srcIdx + dictSize) - match.dictIdx; + U32 const matchLength = match.matchLength; + sequences[nbSequences].offset = offset; + sequences[nbSequences].litLength = litLength; + sequences[nbSequences].matchLength = matchLength; + nbSequences++; + } + return nbSequences; +} + +/*-************************************* +* Internal utils +***************************************/ + +static size_t ZSTD_eDist_hamingDist(const BYTE* const a, + const BYTE* const b, size_t n) +{ + size_t i; + size_t dist = 0; + for (i = 0; i < n; i++) + dist += a[i] != b[i]; + return dist; +} + +/* This is a pretty naive recursive implementation that should only + * be used for quick tests. Don't try to run it on a GB-sized file; + * there are faster implementations. Use those if you need to run it + * on large files. */ +static size_t ZSTD_eDist_levenshteinDist(const BYTE* const s, + size_t const sn, const BYTE* const t, + size_t const tn) +{ + size_t a, b, c; + + if (!sn) + return tn; + if (!tn) + return sn; + + if (s[sn - 1] == t[tn - 1]) + return ZSTD_eDist_levenshteinDist( + s, sn - 1, t, tn - 1); + + a = ZSTD_eDist_levenshteinDist(s, sn - 1, t, tn - 1); + b = ZSTD_eDist_levenshteinDist(s, sn, t, tn - 1); + c = ZSTD_eDist_levenshteinDist(s, sn - 1, t, tn); + + if (a > b) + a = b; + if (a > c) + a = c; + + return a + 1; +} + +static void ZSTD_eDist_validateMatches(ZSTD_eDist_match* matches, + size_t const nbMatches, const BYTE* const dict, + size_t const dictSize, const BYTE* const src, + size_t const srcSize) +{ + size_t i; + for (i = 0; i < nbMatches; i++) { + ZSTD_eDist_match match = matches[i]; + U32 const dictIdx = match.dictIdx; + U32 const srcIdx = match.srcIdx; + U32 const matchLength = match.matchLength; + + assert(dictIdx + matchLength < dictSize); + assert(srcIdx + matchLength < srcSize); + assert(!memcmp(dict + dictIdx, src + srcIdx, matchLength)); + } +} + +/*-************************************* +* API +***************************************/ + +size_t ZSTD_eDist_genSequences(ZSTD_Sequence* sequences, + const void* dict, size_t dictSize, + const void* src, size_t srcSize, + int useHeuristics) +{ + size_t const nbDiags = dictSize + srcSize + 3; + S32* buffer = ZSTD_malloc(nbDiags * 2 * sizeof(S32), ZSTD_defaultCMem); + ZSTD_eDist_state state; + size_t nbSequences = 0; + + state.dict = (const BYTE*)dict; + state.src = (const BYTE*)src; + state.dictSize = dictSize; + state.srcSize = srcSize; + state.forwardDiag = buffer; + state.backwardDiag = buffer + nbDiags; + state.forwardDiag += srcSize + 1; + state.backwardDiag += srcSize + 1; + state.matches = ZSTD_malloc(srcSize * sizeof(ZSTD_eDist_match), ZSTD_defaultCMem); + state.nbMatches = 0; + + ZSTD_eDist_compare(&state, 0, dictSize, 0, srcSize, useHeuristics); + ZSTD_eDist_combineMatches(&state); + nbSequences = ZSTD_eDist_convertMatchesToSequences(sequences, &state); + + ZSTD_free(buffer, ZSTD_defaultCMem); + ZSTD_free(state.matches, ZSTD_defaultCMem); + + return nbSequences; +} diff --git a/contrib/match_finders/zstd_edist.h b/contrib/match_finders/zstd_edist.h new file mode 100644 index 00000000000..c739e2abc51 --- /dev/null +++ b/contrib/match_finders/zstd_edist.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved.
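As a usage illustration for the API above (hypothetical driver code, not part of this patch; the buffer is sized to the worst case of one sequence per source byte, mirroring the allocation inside ZSTD_eDist_genSequences()):

```
#include <stdio.h>
#include <stdlib.h>
#include "zstd_edist.h"

/* Run the match finder over a dict/src pair and report what it found. */
static size_t example_gen_sequences(const void *dict, size_t dictSize,
                                    const void *src, size_t srcSize)
{
    /* Worst case: one sequence per source byte. */
    ZSTD_Sequence *seqs = malloc(srcSize * sizeof(ZSTD_Sequence));
    size_t nbSeqs;

    if (seqs == NULL)
        return 0;
    nbSeqs = ZSTD_eDist_genSequences(seqs, dict, dictSize,
                                     src, srcSize, 1 /* useHeuristics */);
    printf("found %zu sequences\n", nbSeqs);
    free(seqs);
    return nbSeqs;
}
```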
+ * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/* This match finder leverages techniques used in file comparison algorithms + * to find matches between a dictionary and a source file. + * + * The original motivation for studying this approach was to try to optimize + * Zstandard for the use case of patching: the most common scenario being + * updating an existing software package with the next version. When patching, + * the difference between the old version of the package and the new version + * is generally tiny (most of the new file will be identical to + * the old one). In more technical terms, the edit distance (the minimal number + * of changes required to take one sequence of bytes to another) between the + * files would be small relative to the size of the file. + * + * Various 'diffing' algorithms utilize this notion of edit distance and + * the corresponding concept of a minimal edit script between two + * sequences to identify the regions within two files where they differ. + * The core algorithm used in this match finder is described in: + * + * "An O(ND) Difference Algorithm and its Variations", Eugene W. Myers, + * Algorithmica Vol. 1, 1986, pp. 251-266, + * . + * + * Additional algorithmic heuristics for speed improvement have also been included. + * These were inspired by implementations of various regular and binary diffing + * algorithms such as GNU diff, bsdiff, and Xdelta. + * + * Note: after some experimentation, this approach proved not to provide enough + * utility to justify the additional CPU used in finding matches. The one area + * where this approach consistently outperforms Zstandard even on level 19 is + * when compressing small files (<10 KB) using an equally small dictionary that + * is very similar to the source file. For the use case this was intended for + * (large similar files), this approach by itself took 5-10X longer than zstd-19 and + * generally resulted in 2-3X larger files. The core advantage that zstd-19 has + * over this approach for match finding is overlapping matches, which this approach + * cannot find. + * + * I'm leaving this in the contrib section in case this ever becomes interesting + * to explore again. + * */ + +#ifndef ZSTD_EDIST_H +#define ZSTD_EDIST_H + +/*-************************************* +* Dependencies +***************************************/ + +#include <stddef.h> +#include "zstd_internal.h" /* ZSTD_Sequence */ + +/*! ZSTD_eDist_genSequences() : + * Will populate the provided ZSTD_Sequence buffer with sequences + * based on the optimal or near-optimal (depending on 'useHeuristics') + * edit script between 'dict' and 'src'. + * @return : the number of sequences found */ +size_t ZSTD_eDist_genSequences(ZSTD_Sequence* sequences, + const void* dict, size_t dictSize, + const void* src, size_t srcSize, + int useHeuristics); + +#endif diff --git a/contrib/premake/zstd.lua b/contrib/premake/zstd.lua index df1ace3ee8e..f3fd5b2935a 100644 --- a/contrib/premake/zstd.lua +++ b/contrib/premake/zstd.lua @@ -2,6 +2,7 @@ -- Basic usage: project_zstd(ZSTD_DIR) function project_zstd(dir, compression, decompression, deprecated, dictbuilder, legacy) + if string.sub(dir, -1) ~= '/' then dir = dir ..
'/' end if compression == nil then compression = true end if decompression == nil then decompression = true end if deprecated == nil then deprecated = false end diff --git a/contrib/pzstd/ErrorHolder.h b/contrib/pzstd/ErrorHolder.h index 829651c5961..2c2797edea0 100644 --- a/contrib/pzstd/ErrorHolder.h +++ b/contrib/pzstd/ErrorHolder.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/Logging.h b/contrib/pzstd/Logging.h index 16a63932c0a..aa73a976b80 100644 --- a/contrib/pzstd/Logging.h +++ b/contrib/pzstd/Logging.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -10,13 +10,14 @@ #include #include +#include namespace pzstd { -constexpr int ERROR = 1; -constexpr int INFO = 2; -constexpr int DEBUG = 3; -constexpr int VERBOSE = 4; +constexpr int kLogError = 1; +constexpr int kLogInfo = 2; +constexpr int kLogDebug = 3; +constexpr int kLogVerbose = 4; class Logger { std::mutex mutex_; diff --git a/contrib/pzstd/Makefile b/contrib/pzstd/Makefile index 8d2b1932e91..e4b3e8a21f2 100644 --- a/contrib/pzstd/Makefile +++ b/contrib/pzstd/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2016-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -10,7 +10,7 @@ # Standard variables for installation DESTDIR ?= PREFIX ?= /usr/local -BINDIR := $(DESTDIR)$(PREFIX)/bin +BINDIR := $(PREFIX)/bin ZSTDDIR = ../../lib PROGDIR = ../../programs @@ -30,14 +30,20 @@ CXXFLAGS ?= -O3 -Wall -Wextra -pedantic CPPFLAGS ?= LDFLAGS ?= +# PZstd uses legacy APIs +CFLAGS += -Wno-deprecated-declarations + # Include flags PZSTD_INC = -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(PROGDIR) -I. 
GTEST_INC = -isystem googletest/googletest/include +# Set the minimum required by gtest +PZSTD_CXX_STD := -std=c++14 + PZSTD_CPPFLAGS = $(PZSTD_INC) PZSTD_CCXXFLAGS = PZSTD_CFLAGS = $(PZSTD_CCXXFLAGS) -PZSTD_CXXFLAGS = $(PZSTD_CCXXFLAGS) -std=c++11 +PZSTD_CXXFLAGS = $(PZSTD_CCXXFLAGS) $(PZSTD_CXX_STD) PZSTD_LDFLAGS = EXTRA_FLAGS = ALL_CFLAGS = $(EXTRA_FLAGS) $(CPPFLAGS) $(PZSTD_CPPFLAGS) $(CFLAGS) $(PZSTD_CFLAGS) @@ -54,19 +60,6 @@ LD_COMMAND = $(CXX) $^ $(ALL_LDFLAGS) $(LIBS) -pthread -o $@ CC_COMMAND = $(CC) $(DEPFLAGS) $(ALL_CFLAGS) -c $< -o $@ CXX_COMMAND = $(CXX) $(DEPFLAGS) $(ALL_CXXFLAGS) -c $< -o $@ -# Get a list of all zstd files so we rebuild the static library when we need to -ZSTDCOMMON_FILES := $(wildcard $(ZSTDDIR)/common/*.c) \ - $(wildcard $(ZSTDDIR)/common/*.h) -ZSTDCOMP_FILES := $(wildcard $(ZSTDDIR)/compress/*.c) \ - $(wildcard $(ZSTDDIR)/compress/*.h) -ZSTDDECOMP_FILES := $(wildcard $(ZSTDDIR)/decompress/*.c) \ - $(wildcard $(ZSTDDIR)/decompress/*.h) -ZSTDPROG_FILES := $(wildcard $(PROGDIR)/*.c) \ - $(wildcard $(PROGDIR)/*.h) -ZSTD_FILES := $(wildcard $(ZSTDDIR)/*.h) \ - $(ZSTDDECOMP_FILES) $(ZSTDCOMMON_FILES) $(ZSTDCOMP_FILES) \ - $(ZSTDPROG_FILES) - # List all the pzstd source files so we can determine their dependencies PZSTD_SRCS := $(wildcard *.cpp) PZSTD_TESTS := $(wildcard test/*.cpp) @@ -116,12 +109,12 @@ check: .PHONY: install install: PZSTD_CPPFLAGS += -DNDEBUG install: pzstd$(EXT) - install -d -m 755 $(BINDIR)/ - install -m 755 pzstd$(EXT) $(BINDIR)/pzstd$(EXT) + install -d -m 755 $(DESTDIR)$(BINDIR)/ + install -m 755 pzstd$(EXT) $(DESTDIR)$(BINDIR)/pzstd$(EXT) .PHONY: uninstall uninstall: - $(RM) $(BINDIR)/pzstd$(EXT) + $(RM) $(DESTDIR)$(BINDIR)/pzstd$(EXT) # Targets for many different builds .PHONY: all @@ -186,7 +179,8 @@ roundtrip: test/RoundTripTest$(EXT) # Use the static library that zstd builds for simplicity and # so we get the compiler options correct -$(ZSTDDIR)/libzstd.a: $(ZSTD_FILES) +.PHONY: $(ZSTDDIR)/libzstd.a +$(ZSTDDIR)/libzstd.a: CFLAGS="$(ALL_CFLAGS)" LDFLAGS="$(ALL_LDFLAGS)" $(MAKE) -C $(ZSTDDIR) libzstd.a # Rules to build the tests diff --git a/contrib/pzstd/Options.cpp b/contrib/pzstd/Options.cpp index 2123f8894c3..90841b9bb04 100644 --- a/contrib/pzstd/Options.cpp +++ b/contrib/pzstd/Options.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -87,7 +87,7 @@ void usage() { std::fprintf(stderr, " -V, --version : display version number and exit\n"); std::fprintf(stderr, " -v, --verbose : verbose mode; specify multiple times to increase log level (default:2)\n"); std::fprintf(stderr, " -q, --quiet : suppress warnings; specify twice to suppress errors too\n"); - std::fprintf(stderr, " -c, --stdout : force write to standard output, even if it is the console\n"); + std::fprintf(stderr, " -c, --stdout : write to standard output (even if it is the console)\n"); #ifdef UTIL_HAS_CREATEFILELIST std::fprintf(stderr, " -r : operate recursively on directories\n"); #endif @@ -322,7 +322,7 @@ Options::Status Options::parse(int argc, const char **argv) { g_utilDisplayLevel = verbosity; // Remove local input files that are symbolic links if (!followLinks) { - std::remove_if(localInputFiles.begin(), localInputFiles.end(), + localInputFiles.erase(std::remove_if(localInputFiles.begin(), localInputFiles.end(), [&](const char *path) { bool isLink = UTIL_isLink(path); if (isLink && verbosity >= 2) { @@ -332,28 +332,24 @@ Options::Status Options::parse(int argc, const char **argv) { path); } return isLink; - }); + }), localInputFiles.end()); } // Translate input files/directories into files to (de)compress if (recursive) { - char *scratchBuffer = nullptr; - unsigned numFiles = 0; - const char **files = - UTIL_createFileList(localInputFiles.data(), localInputFiles.size(), - &scratchBuffer, &numFiles, followLinks); + FileNamesTable* const files = UTIL_createExpandedFNT(localInputFiles.data(), localInputFiles.size(), followLinks); if (files == nullptr) { std::fprintf(stderr, "Error traversing directories\n"); return Status::Failure; } auto guard = - makeScopeGuard([&] { UTIL_freeFileList(files, scratchBuffer); }); - if (numFiles == 0) { + makeScopeGuard([&] { UTIL_freeFileNamesTable(files); }); + if (files->tableSize == 0) { std::fprintf(stderr, "No files found\n"); return Status::Failure; } - inputFiles.resize(numFiles); - std::copy(files, files + numFiles, inputFiles.begin()); + inputFiles.resize(files->tableSize); + std::copy(files->fileNames, files->fileNames + files->tableSize, inputFiles.begin()); } else { inputFiles.resize(localInputFiles.size()); std::copy(localInputFiles.begin(), localInputFiles.end(), diff --git a/contrib/pzstd/Options.h b/contrib/pzstd/Options.h index f4f2aaa499c..92c18a350cf 100644 --- a/contrib/pzstd/Options.h +++ b/contrib/pzstd/Options.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -9,6 +9,9 @@ #pragma once #define ZSTD_STATIC_LINKING_ONLY +#define ZSTD_DISABLE_DEPRECATE_WARNINGS /* No deprecation warnings, pzstd itself is deprecated + * and uses deprecated functions + */ #include "zstd.h" #undef ZSTD_STATIC_LINKING_ONLY diff --git a/contrib/pzstd/Pzstd.cpp b/contrib/pzstd/Pzstd.cpp index 652187c3bd0..048a006b3b3 100644 --- a/contrib/pzstd/Pzstd.cpp +++ b/contrib/pzstd/Pzstd.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -10,11 +10,13 @@ #include "Pzstd.h" #include "SkippableFrame.h" #include "utils/FileSystem.h" +#include "utils/Portability.h" #include "utils/Range.h" #include "utils/ScopeGuard.h" #include "utils/ThreadPool.h" #include "utils/WorkQueue.h" +#include #include #include #include @@ -94,12 +96,12 @@ static std::uint64_t handleOneInput(const Options &options, if (!options.decompress) { double ratio = static_cast(bytesWritten) / static_cast(bytesRead + !bytesRead); - state.log(INFO, "%-20s :%6.2f%% (%6" PRIu64 " => %6" PRIu64 + state.log(kLogInfo, "%-20s :%6.2f%% (%6" PRIu64 " => %6" PRIu64 " bytes, %s)\n", inputFileName.c_str(), ratio * 100, bytesRead, bytesWritten, outputFileName.c_str()); } else { - state.log(INFO, "%-20s: %" PRIu64 " bytes \n", + state.log(kLogInfo, "%-20s: %" PRIu64 " bytes \n", inputFileName.c_str(),bytesWritten); } } @@ -139,12 +141,12 @@ static FILE *openOutputFile(const Options &options, auto outputFd = std::fopen(outputFile.c_str(), "rb"); if (outputFd != nullptr) { std::fclose(outputFd); - if (!state.log.logsAt(INFO)) { + if (!state.log.logsAt(kLogInfo)) { state.errorHolder.setError("Output file exists"); return nullptr; } state.log( - INFO, + kLogInfo, "pzstd: %s already exists; do you wish to overwrite (y/n) ? ", outputFile.c_str()); int c = getchar(); @@ -170,7 +172,7 @@ int pzstdMain(const Options &options) { auto printErrorGuard = makeScopeGuard([&] { if (state.errorHolder.hasError()) { returnCode = 1; - state.log(ERROR, "pzstd: %s: %s.\n", input.c_str(), + state.log(kLogError, "pzstd: %s: %s.\n", input.c_str(), state.errorHolder.getError().c_str()); } }); @@ -267,14 +269,17 @@ static void compress( std::shared_ptr out, size_t maxInputSize) { auto& errorHolder = state.errorHolder; - auto guard = makeScopeGuard([&] { out->finish(); }); + auto guard = makeScopeGuard([&] { + in->finish(); + out->finish(); + }); // Initialize the CCtx auto ctx = state.cStreamPool->get(); if (!errorHolder.check(ctx != nullptr, "Failed to allocate ZSTD_CStream")) { return; } { - auto err = ZSTD_resetCStream(ctx.get(), 0); + auto err = ZSTD_CCtx_reset(ctx.get(), ZSTD_reset_session_only); if (!errorHolder.check(!ZSTD_isError(err), ZSTD_getErrorName(err))) { return; } @@ -336,6 +341,10 @@ static size_t calculateStep( const ZSTD_parameters ¶ms) { (void)size; (void)numThreads; + // Not validated to work correctly for window logs > 23. + // It will definitely fail if windowLog + 2 is >= 4GB because + // the skippable frame can only store sizes up to 4GB. + assert(params.cParams.windowLog <= 23); return size_t{1} << (params.cParams.windowLog + 2); } @@ -388,7 +397,7 @@ std::uint64_t asyncCompressChunks( // Break the input up into chunks of size `step` and compress each chunk // independently. size_t step = calculateStep(size, numThreads, params); - state.log(DEBUG, "Chosen frame size: %zu\n", step); + state.log(kLogDebug, "Chosen frame size: %zu\n", step); auto status = FileStatus::Continue; while (status == FileStatus::Continue && !state.errorHolder.hasError()) { // Make a new input queue that we will put the chunk's input data into. @@ -403,7 +412,7 @@ std::uint64_t asyncCompressChunks( }); // Pass the output queue to the writer thread. 
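The assert added to calculateStep() above documents an existing constraint rather than changing behavior: pzstd picks a frame size of four times the window size, and the skippable frame written before each compressed frame can only record sizes below 4 GiB. A minimal sketch of the same computation; the helper name is illustrative, not part of pzstd:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdio>

// Frame step is 4x the window size (windowLog + 2). Window logs above 23
// are untested, and windowLog + 2 >= 32 would overflow the 4-byte size
// field of the skippable frame that precedes each compressed frame.
static size_t frameStep(unsigned windowLog) {
    assert(windowLog <= 23);              // 1 << 25 == 32 MiB per frame
    return size_t{1} << (windowLog + 2);
}

int main() {
    std::printf("step for windowLog=23: %zu bytes\n", frameStep(23));
    return 0;
}
```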
chunks.push(std::move(out)); - state.log(VERBOSE, "%s\n", "Starting a new frame"); + state.log(kLogVerbose, "%s\n", "Starting a new frame"); // Fill the input queue for the compression job we just started status = readData(*in, ZSTD_CStreamInSize(), step, fd, &bytesRead); } @@ -425,14 +434,17 @@ static void decompress( std::shared_ptr in, std::shared_ptr out) { auto& errorHolder = state.errorHolder; - auto guard = makeScopeGuard([&] { out->finish(); }); + auto guard = makeScopeGuard([&] { + in->finish(); + out->finish(); + }); // Initialize the DCtx auto ctx = state.dStreamPool->get(); if (!errorHolder.check(ctx != nullptr, "Failed to allocate ZSTD_DStream")) { return; } { - auto err = ZSTD_resetDStream(ctx.get()); + auto err = ZSTD_DCtx_reset(ctx.get(), ZSTD_reset_session_only); if (!errorHolder.check(!ZSTD_isError(err), ZSTD_getErrorName(err))) { return; } @@ -540,14 +552,14 @@ std::uint64_t asyncDecompressFrames( if (frameSize == 0) { // We hit a non SkippableFrame ==> not compressed by pzstd or corrupted // Pass the rest of the source to this decompression task - state.log(VERBOSE, "%s\n", + state.log(kLogVerbose, "%s\n", "Input not in pzstd format, falling back to serial decompression"); while (status == FileStatus::Continue && !state.errorHolder.hasError()) { status = readData(*in, chunkSize, chunkSize, fd, &totalBytesRead); } break; } - state.log(VERBOSE, "Decompressing a frame of size %zu", frameSize); + state.log(kLogVerbose, "Decompressing a frame of size %zu", frameSize); // Fill the input queue for the decompression job we just started status = readData(*in, chunkSize, frameSize, fd, &totalBytesRead); } @@ -572,13 +584,15 @@ std::uint64_t writeFile( FILE* outputFd, bool decompress) { auto& errorHolder = state.errorHolder; + auto outsFinishGuard = makeScopeGuard([&outs] { outs.finish(); }); auto lineClearGuard = makeScopeGuard([&state] { - state.log.clear(INFO); + state.log.clear(kLogInfo); }); std::uint64_t bytesWritten = 0; std::shared_ptr out; // Grab the output queue for each decompression job (in order). while (outs.pop(out)) { + auto outFinishGuard = makeScopeGuard([&out] { out->finish(); }); if (errorHolder.hasError()) { continue; } @@ -587,7 +601,8 @@ std::uint64_t writeFile( // start writing before compression is done because we need to know the // compressed size. // Wait for the compressed size to be available and write skippable frame - SkippableFrame frame(out->size()); + assert(uint64_t(out->size()) < uint64_t(1) << 32); + SkippableFrame frame(uint32_t(out->size())); if (!writeData(frame.data(), outputFd)) { errorHolder.setError("Failed to write output"); return bytesWritten; @@ -602,7 +617,7 @@ std::uint64_t writeFile( return bytesWritten; } bytesWritten += buffer.size(); - state.log.update(INFO, "Written: %u MB ", + state.log.update(kLogInfo, "Written: %u MB ", static_cast(bytesWritten >> 20)); } } diff --git a/contrib/pzstd/Pzstd.h b/contrib/pzstd/Pzstd.h index 79d1fcca265..3645e594268 100644 --- a/contrib/pzstd/Pzstd.h +++ b/contrib/pzstd/Pzstd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
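Similarly, the new assert in writeFile() above guards the narrowing to `uint32_t`: a zstd skippable frame header stores its content size in a single 4-byte field, so pzstd cannot record a compressed frame of 4 GiB or more. A hedged illustration with a made-up value:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
    // The skippable frame's size field is 4 bytes, so the compressed frame
    // size logged there must fit in a uint32_t; writeFile() asserts this
    // before constructing the SkippableFrame.
    std::uint64_t const compressedFrameSize = 123456;  // example value
    assert(compressedFrameSize < (std::uint64_t{1} << 32));
    std::uint32_t const stored =
        static_cast<std::uint32_t>(compressedFrameSize);
    std::printf("size stored in skippable frame: %u\n", stored);
    return 0;
}
```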
* * This source code is licensed under both the BSD-style license (found in the @@ -17,6 +17,9 @@ #include "utils/ThreadPool.h" #include "utils/WorkQueue.h" #define ZSTD_STATIC_LINKING_ONLY +#define ZSTD_DISABLE_DEPRECATE_WARNINGS /* No deprecation warnings, pzstd itself is deprecated + * and uses deprecated functions + */ #include "zstd.h" #undef ZSTD_STATIC_LINKING_ONLY @@ -41,7 +44,7 @@ class SharedState { auto parameters = options.determineParameters(); cStreamPool.reset(new ResourcePool{ [this, parameters]() -> ZSTD_CStream* { - this->log(VERBOSE, "%s\n", "Creating new ZSTD_CStream"); + this->log(kLogVerbose, "%s\n", "Creating new ZSTD_CStream"); auto zcs = ZSTD_createCStream(); if (zcs) { auto err = ZSTD_initCStream_advanced( @@ -59,7 +62,7 @@ class SharedState { } else { dStreamPool.reset(new ResourcePool{ [this]() -> ZSTD_DStream* { - this->log(VERBOSE, "%s\n", "Creating new ZSTD_DStream"); + this->log(kLogVerbose, "%s\n", "Creating new ZSTD_DStream"); auto zds = ZSTD_createDStream(); if (zds) { auto err = ZSTD_initDStream(zds); diff --git a/contrib/pzstd/README.md b/contrib/pzstd/README.md index 84d94581583..bc8f831f82c 100644 --- a/contrib/pzstd/README.md +++ b/contrib/pzstd/README.md @@ -31,7 +31,7 @@ If this number is not suitable, during compilation you can define `PZSTD_NUM_THR ## Benchmarks -As a reference, PZstandard and Pigz were compared on an Intel Core i7 @ 3.1 GHz, each using 4 threads, with the [Silesia compression corpus](http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia). +As a reference, PZstandard and Pigz were compared on an Intel Core i7 @ 3.1 GHz, each using 4 threads, with the [Silesia compression corpus](https://sun.aei.polsl.pl//~sdeor/index.php?page=silesia). Compression Speed vs Ratio with 4 Threads | Decompression Speed with 4 Threads ------------------------------------------|----------------------------------- diff --git a/contrib/pzstd/SkippableFrame.cpp b/contrib/pzstd/SkippableFrame.cpp index 769866dfc81..3bea4eb65bf 100644 --- a/contrib/pzstd/SkippableFrame.cpp +++ b/contrib/pzstd/SkippableFrame.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/SkippableFrame.h b/contrib/pzstd/SkippableFrame.h index 60deed0405b..817415e9231 100644 --- a/contrib/pzstd/SkippableFrame.h +++ b/contrib/pzstd/SkippableFrame.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/main.cpp b/contrib/pzstd/main.cpp index b93f043b16b..422b4a56a71 100644 --- a/contrib/pzstd/main.cpp +++ b/contrib/pzstd/main.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/test/OptionsTest.cpp b/contrib/pzstd/test/OptionsTest.cpp index e601148255d..91e39750d0c 100644 --- a/contrib/pzstd/test/OptionsTest.cpp +++ b/contrib/pzstd/test/OptionsTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/test/PzstdTest.cpp b/contrib/pzstd/test/PzstdTest.cpp index 5c7d6631080..3249f860af0 100644 --- a/contrib/pzstd/test/PzstdTest.cpp +++ b/contrib/pzstd/test/PzstdTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -7,9 +7,7 @@ * in the COPYING file in the root directory of this source tree). */ #include "Pzstd.h" -extern "C" { #include "datagen.h" -} #include "test/RoundTrip.h" #include "utils/ScopeGuard.h" diff --git a/contrib/pzstd/test/RoundTrip.h b/contrib/pzstd/test/RoundTrip.h index c6364ecb422..f777622a393 100644 --- a/contrib/pzstd/test/RoundTrip.h +++ b/contrib/pzstd/test/RoundTrip.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/test/RoundTripTest.cpp b/contrib/pzstd/test/RoundTripTest.cpp index 36af0673ae6..27c5028e880 100644 --- a/contrib/pzstd/test/RoundTripTest.cpp +++ b/contrib/pzstd/test/RoundTripTest.cpp @@ -1,14 +1,12 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). */ -extern "C" { #include "datagen.h" -} #include "Options.h" #include "test/RoundTrip.h" #include "utils/ScopeGuard.h" diff --git a/contrib/pzstd/utils/Buffer.h b/contrib/pzstd/utils/Buffer.h index f69c3b4d9f7..a85f770ba18 100644 --- a/contrib/pzstd/utils/Buffer.h +++ b/contrib/pzstd/utils/Buffer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -45,7 +45,7 @@ class Buffer { : buffer_(buffer), range_(data) {} Buffer(Buffer&&) = default; - Buffer& operator=(Buffer&&) & = default; + Buffer& operator=(Buffer&&) = default; /** * Splits the data into two pieces: [begin, begin + n), [begin + n, end). diff --git a/contrib/pzstd/utils/FileSystem.h b/contrib/pzstd/utils/FileSystem.h index 3cfbe86e507..8d57d05f0fa 100644 --- a/contrib/pzstd/utils/FileSystem.h +++ b/contrib/pzstd/utils/FileSystem.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -8,16 +8,18 @@ */ #pragma once +#include "utils/Portability.h" #include "utils/Range.h" #include #include #include +#include #include // A small subset of `std::filesystem`. // `std::filesystem` should be a drop in replacement. -// See http://en.cppreference.com/w/cpp/filesystem for documentation. +// See https://en.cppreference.com/w/cpp/filesystem for documentation. 
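Since FileSystem.h advertises itself as a drop-in subset of `std::filesystem`, a short usage sketch may help; the hunk below replaces the old `-1` error return of `file_size()` with `std::numeric_limits<std::uintmax_t>::max()`, matching what `std::filesystem::file_size()` returns on error. The file name is hypothetical, and this assumes `StringPiece` converts from a C string, as folly's `Range` does:

```cpp
#include <cstdint>
#include <cstdio>
#include <limits>
#include <system_error>

#include "utils/FileSystem.h"   // pzstd::file_size

int main() {
    std::error_code ec;
    std::uintmax_t const size = pzstd::file_size("data.bin", ec);
    if (ec || size == std::numeric_limits<std::uintmax_t>::max()) {
        std::fprintf(stderr, "stat failed: %s\n", ec.message().c_str());
        return 1;
    }
    std::printf("%ju bytes\n", size);
    return 0;
}
```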
namespace pzstd { @@ -28,7 +30,7 @@ typedef struct ::_stat64 file_status; typedef struct ::stat file_status; #endif -/// http://en.cppreference.com/w/cpp/filesystem/status +/// https://en.cppreference.com/w/cpp/filesystem/status inline file_status status(StringPiece path, std::error_code& ec) noexcept { file_status status; #if defined(_MSC_VER) @@ -44,7 +46,7 @@ inline file_status status(StringPiece path, std::error_code& ec) noexcept { return status; } -/// http://en.cppreference.com/w/cpp/filesystem/is_regular_file +/// https://en.cppreference.com/w/cpp/filesystem/is_regular_file inline bool is_regular_file(file_status status) noexcept { #if defined(S_ISREG) return S_ISREG(status.st_mode); @@ -55,12 +57,12 @@ inline bool is_regular_file(file_status status) noexcept { #endif } -/// http://en.cppreference.com/w/cpp/filesystem/is_regular_file +/// https://en.cppreference.com/w/cpp/filesystem/is_regular_file inline bool is_regular_file(StringPiece path, std::error_code& ec) noexcept { return is_regular_file(status(path, ec)); } -/// http://en.cppreference.com/w/cpp/filesystem/is_directory +/// https://en.cppreference.com/w/cpp/filesystem/is_directory inline bool is_directory(file_status status) noexcept { #if defined(S_ISDIR) return S_ISDIR(status.st_mode); @@ -71,22 +73,22 @@ inline bool is_directory(file_status status) noexcept { #endif } -/// http://en.cppreference.com/w/cpp/filesystem/is_directory +/// https://en.cppreference.com/w/cpp/filesystem/is_directory inline bool is_directory(StringPiece path, std::error_code& ec) noexcept { return is_directory(status(path, ec)); } -/// http://en.cppreference.com/w/cpp/filesystem/file_size +/// https://en.cppreference.com/w/cpp/filesystem/file_size inline std::uintmax_t file_size( StringPiece path, std::error_code& ec) noexcept { auto stat = status(path, ec); if (ec) { - return -1; + return std::numeric_limits::max(); } if (!is_regular_file(stat)) { ec.assign(ENOTSUP, std::generic_category()); - return -1; + return std::numeric_limits::max(); } ec.clear(); return stat.st_size; diff --git a/contrib/pzstd/utils/Likely.h b/contrib/pzstd/utils/Likely.h index 7cea8da2771..52243a64eab 100644 --- a/contrib/pzstd/utils/Likely.h +++ b/contrib/pzstd/utils/Likely.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/Portability.h b/contrib/pzstd/utils/Portability.h new file mode 100644 index 00000000000..ef1f86e51f6 --- /dev/null +++ b/contrib/pzstd/utils/Portability.h @@ -0,0 +1,16 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + */ + +#pragma once + +#include + +// Required for windows, which defines min/max, but we want the std:: version. +#undef min +#undef max diff --git a/contrib/pzstd/utils/Range.h b/contrib/pzstd/utils/Range.h index fedb5d786c6..0fd8f9f8655 100644 --- a/contrib/pzstd/utils/Range.h +++ b/contrib/pzstd/utils/Range.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
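FileSystem.h above now includes utils/Portability.h first, and Range.h follows suit below; per the new header's comment, the goal is to neutralize the `min`/`max` macros that Windows headers may define. A small sketch of the failure mode it prevents, for illustration only:

```cpp
#include <algorithm>
#include <cstdio>
#include <limits>

// If <windows.h> was included earlier, min and max may be function-like
// macros that mangle the calls below; Portability.h undefs them so the
// std:: versions are used instead.
#undef min
#undef max

int main() {
    int const smaller = std::min(3, 7);
    int const largest = std::numeric_limits<int>::max();
    std::printf("%d %d\n", smaller, largest);
    return 0;
}
```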
* * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). */ - + /** * A subset of `folly/Range.h`. * All code copied verbatim modulo formatting @@ -14,7 +14,9 @@ #pragma once #include "utils/Likely.h" +#include "utils/Portability.h" +#include #include #include #include @@ -83,8 +85,8 @@ class Range { Range(const Range&) = default; Range(Range&&) = default; - Range& operator=(const Range&) & = default; - Range& operator=(Range&&) & = default; + Range& operator=(const Range&) = default; + Range& operator=(Range&&) = default; constexpr size_type size() const { return e_ - b_; diff --git a/contrib/pzstd/utils/ResourcePool.h b/contrib/pzstd/utils/ResourcePool.h index 8dfcdd76590..7c4bb623580 100644 --- a/contrib/pzstd/utils/ResourcePool.h +++ b/contrib/pzstd/utils/ResourcePool.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/ScopeGuard.h b/contrib/pzstd/utils/ScopeGuard.h index 31768f43d22..911fd984214 100644 --- a/contrib/pzstd/utils/ScopeGuard.h +++ b/contrib/pzstd/utils/ScopeGuard.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -15,7 +15,7 @@ namespace pzstd { /** * Dismissable scope guard. * `Function` must be callable and take no parameters. - * Unless `dissmiss()` is called, the callable is executed upon destruction of + * Unless `dismiss()` is called, the callable is executed upon destruction of * `ScopeGuard`. * * Example: diff --git a/contrib/pzstd/utils/ThreadPool.h b/contrib/pzstd/utils/ThreadPool.h index 8ece8e0da4e..a087d7c1cff 100644 --- a/contrib/pzstd/utils/ThreadPool.h +++ b/contrib/pzstd/utils/ThreadPool.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/WorkQueue.h b/contrib/pzstd/utils/WorkQueue.h index 1d14d922c64..07842e59817 100644 --- a/contrib/pzstd/utils/WorkQueue.h +++ b/contrib/pzstd/utils/WorkQueue.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -115,13 +115,14 @@ class WorkQueue { } /** - * Promise that `push()` won't be called again, so once the queue is empty - * there will never any more work. + * Promise that either the reader side or the writer side is done. + * If the writer is done, `push()` won't be called again, so once the queue + * is empty there will never be any more work. If the reader is done, `pop()` + * won't be called again, so further items pushed will just be ignored. 
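A hedged illustration of the relaxed contract described above, using a simplified stand-in rather than pzstd's actual WorkQueue: either side may call finish(), repeated calls are legal (hence the `assert(!done_)` removed just below), and a push() after the reader finishes is silently dropped.

```cpp
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>

class IntQueue {
    std::mutex m_;
    std::condition_variable cv_;
    std::queue<int> q_;
    bool done_ = false;
public:
    void push(int v) {
        std::lock_guard<std::mutex> l(m_);
        if (done_) return;                 // reader already finished: drop
        q_.push(v);
        cv_.notify_one();
    }
    bool pop(int& v) {
        std::unique_lock<std::mutex> l(m_);
        cv_.wait(l, [&] { return done_ || !q_.empty(); });
        if (q_.empty()) return false;      // finished and fully drained
        v = q_.front(); q_.pop();
        return true;
    }
    void finish() {                        // idempotent: no assert(!done_)
        { std::lock_guard<std::mutex> l(m_); done_ = true; }
        cv_.notify_all();
    }
};

int main() {
    IntQueue q;
    q.push(1);
    q.finish();          // writer side is done
    q.finish();          // second call is now legal
    int v;
    while (q.pop(v)) std::printf("%d\n", v);
    return 0;
}
```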
*/ void finish() { { std::lock_guard lock(mutex_); - assert(!done_); done_ = true; } readerCv_.notify_all(); diff --git a/contrib/pzstd/utils/test/BufferTest.cpp b/contrib/pzstd/utils/test/BufferTest.cpp index fbba74e8262..58bf08dcd69 100644 --- a/contrib/pzstd/utils/test/BufferTest.cpp +++ b/contrib/pzstd/utils/test/BufferTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/test/RangeTest.cpp b/contrib/pzstd/utils/test/RangeTest.cpp index 755b50fa6e8..8b7dee27193 100644 --- a/contrib/pzstd/utils/test/RangeTest.cpp +++ b/contrib/pzstd/utils/test/RangeTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/test/ResourcePoolTest.cpp b/contrib/pzstd/utils/test/ResourcePoolTest.cpp index 6fe145180be..750ee084b07 100644 --- a/contrib/pzstd/utils/test/ResourcePoolTest.cpp +++ b/contrib/pzstd/utils/test/ResourcePoolTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/test/ScopeGuardTest.cpp b/contrib/pzstd/utils/test/ScopeGuardTest.cpp index 7bc624da79b..0f77cdf38b7 100644 --- a/contrib/pzstd/utils/test/ScopeGuardTest.cpp +++ b/contrib/pzstd/utils/test/ScopeGuardTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/test/ThreadPoolTest.cpp b/contrib/pzstd/utils/test/ThreadPoolTest.cpp index 703fd4c9ca1..a01052e605f 100644 --- a/contrib/pzstd/utils/test/ThreadPoolTest.cpp +++ b/contrib/pzstd/utils/test/ThreadPoolTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/test/WorkQueueTest.cpp b/contrib/pzstd/utils/test/WorkQueueTest.cpp index 14cf77304f2..16600bb6031 100644 --- a/contrib/pzstd/utils/test/WorkQueueTest.cpp +++ b/contrib/pzstd/utils/test/WorkQueueTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/recovery/Makefile b/contrib/recovery/Makefile new file mode 100644 index 00000000000..be6ea4b0e9c --- /dev/null +++ b/contrib/recovery/Makefile @@ -0,0 +1,35 @@ +# ################################################################ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). 
+# ################################################################ + +.PHONY: all +all: recover_directory + +ZSTDLIBDIR ?= ../../lib +PROGRAMDIR ?= ../../programs + +CFLAGS ?= -O3 +CFLAGS += -I$(ZSTDLIBDIR) -I$(PROGRAMDIR) +CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ + -Wstrict-aliasing=1 -Wswitch-enum \ + -Wstrict-prototypes -Wundef \ + -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ + -Wredundant-decls -Wmissing-prototypes +CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS) +FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) + +.PHONY: $(ZSTDLIBDIR)/libzstd.a +$(ZSTDLIBDIR)/libzstd.a: + $(MAKE) -C $(ZSTDLIBDIR) libzstd.a + +recover_directory: recover_directory.c $(ZSTDLIBDIR)/libzstd.a $(PROGRAMDIR)/util.c + $(CC) $(FLAGS) $^ -o $@$(EXT) + +.PHONY: clean +clean: + rm -f recover_directory diff --git a/contrib/recovery/recover_directory.c b/contrib/recovery/recover_directory.c new file mode 100644 index 00000000000..b9bd7ab4996 --- /dev/null +++ b/contrib/recovery/recover_directory.c @@ -0,0 +1,152 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include +#include +#include + +#define ZSTD_STATIC_LINKING_ONLY +#include "util.h" +#include "zstd.h" + +#define CHECK(cond, ...) \ + do { \ + if (!(cond)) { \ + fprintf(stderr, "%s:%d CHECK(%s) failed: ", __FILE__, __LINE__, #cond); \ + fprintf(stderr, "" __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(1); \ + } \ + } while (0) + +static void usage(char const *program) { + fprintf(stderr, "USAGE: %s FILE.zst PREFIX\n", program); + fprintf(stderr, "FILE.zst: A zstd compressed file with multiple frames\n"); + fprintf(stderr, "PREFIX: The output prefix. Uncompressed files will be " + "created named ${PREFIX}0 ${PREFIX}1...\n\n"); + fprintf(stderr, "This program takes concatenated zstd frames and " + "decompresses them into individual files.\n"); + fprintf(stderr, "E.g. 
files created with a command like: zstd -r directory " + "-o file.zst\n"); +} + +typedef struct { + char *data; + size_t size; + size_t frames; + size_t maxFrameSize; +} ZstdFrames; + +static ZstdFrames readFile(char const *fileName) { + U64 const fileSize = UTIL_getFileSize(fileName); + CHECK(fileSize != UTIL_FILESIZE_UNKNOWN, "Unknown file size!"); + + char *const data = (char *)malloc(fileSize); + CHECK(data != NULL, "Allocation failed"); + + FILE *file = fopen(fileName, "rb"); + CHECK(file != NULL, "fopen failed"); + + size_t const readSize = fread(data, 1, fileSize, file); + CHECK(readSize == fileSize, "fread failed"); + + fclose(file); + ZstdFrames frames; + frames.data = (char *)data; + frames.size = fileSize; + frames.frames = 0; + + size_t index; + size_t maxFrameSize = 0; + for (index = 0; index < fileSize;) { + size_t const frameSize = + ZSTD_findFrameCompressedSize(data + index, fileSize - index); + CHECK(!ZSTD_isError(frameSize), "Bad zstd frame: %s", + ZSTD_getErrorName(frameSize)); + if (frameSize > maxFrameSize) + maxFrameSize = frameSize; + frames.frames += 1; + index += frameSize; + } + CHECK(index == fileSize, "Zstd file corrupt!"); + frames.maxFrameSize = maxFrameSize; + + return frames; +} + +static int computePadding(size_t numFrames) { + return snprintf(NULL, 0, "%u", (unsigned)numFrames); +} + +int main(int argc, char **argv) { + if (argc != 3) { + usage(argv[0]); + exit(1); + } + char const *const zstdFile = argv[1]; + char const *const prefix = argv[2]; + + ZstdFrames frames = readFile(zstdFile); + + if (frames.frames <= 1) { + fprintf( + stderr, + "%s only has %u zstd frame. Simply use `zstd -d` to decompress it.\n", + zstdFile, (unsigned)frames.frames); + exit(1); + } + + int const padding = computePadding(frames.frames - 1); + + size_t const outFileNameSize = strlen(prefix) + padding + 1; + char* outFileName = malloc(outFileNameSize); + CHECK(outFileName != NULL, "Allocation failure"); + + size_t const bufferSize = 128 * 1024; + void *buffer = malloc(bufferSize); + CHECK(buffer != NULL, "Allocation failure"); + + ZSTD_DCtx* dctx = ZSTD_createDCtx(); + CHECK(dctx != NULL, "Allocation failure"); + + fprintf(stderr, "Recovering %u files...\n", (unsigned)frames.frames); + + size_t index; + size_t frame = 0; + for (index = 0; index < frames.size; ++frame) { + size_t const frameSize = + ZSTD_findFrameCompressedSize(frames.data + index, frames.size - index); + + int const ret = snprintf(outFileName, outFileNameSize, "%s%0*u", prefix, padding, (unsigned)frame); + CHECK(ret >= 0 && (size_t)ret <= outFileNameSize, "snprintf failed!"); + + FILE* outFile = fopen(outFileName, "wb"); + CHECK(outFile != NULL, "fopen failed"); + + ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only); + ZSTD_inBuffer in = {frames.data + index, frameSize, 0}; + while (in.pos < in.size) { + ZSTD_outBuffer out = {buffer, bufferSize, 0}; + CHECK(!ZSTD_isError(ZSTD_decompressStream(dctx, &out, &in)), "decompression failed"); + size_t const writeSize = fwrite(out.dst, 1, out.pos, outFile); + CHECK(writeSize == out.pos, "fwrite failed"); + } + fclose(outFile); + fprintf(stderr, "Recovered %s\n", outFileName); + index += frameSize; + } + fprintf(stderr, "Complete\n"); + + free(outFileName); + ZSTD_freeDCtx(dctx); + free(buffer); + free(frames.data); + return 0; +} diff --git a/contrib/seekable_format/README.md b/contrib/seekable_format/README.md new file mode 100644 index 00000000000..fedf96bab4d --- /dev/null +++ b/contrib/seekable_format/README.md @@ -0,0 +1,42 @@ +# Zstandard Seekable Format + +The 
seekable format splits compressed data into a series of independent "frames", +each compressed individually, +so that decompression of a section in the middle of an archive +only requires zstd to decompress at most a frame's worth of extra data, +instead of the entire archive. + +The frames are appended, so that the decompression of the entire payload +still regenerates the original content, using any compliant zstd decoder. + +On top of that, the seekable format generates a jump table, +which makes it possible to jump directly to the position of the relevant frame +when requesting only a segment of the data. +The jump table is simply ignored by zstd decoders unaware of the seekable format. + +The format is delivered with an API to create seekable archives +and to retrieve arbitrary segments inside the archive. + +### Maximum Frame Size parameter + +When creating a seekable archive, the main parameter is the maximum frame size. + +At compression time, the user can manually select the boundaries between segments, +but they don't have to: long segments will be automatically split +when larger than the selected maximum frame size. + +Small frame sizes reduce decompression cost when requesting small segments, +because the decoder will nonetheless have to decompress an entire frame +to recover just a single byte from it. + +A good rule of thumb is to select a maximum frame size roughly equivalent +to the access pattern when it's known. +For example, if the application tends to request 4 KB blocks, +then it's a good idea to set a maximum frame size in the vicinity of 4 KB. + +But small frame sizes also reduce compression ratio, +and increase the cost for the jump table, +so there is a balance to strike. + +In general, try to avoid really tiny frame sizes (<1 KB), +which would have a large negative impact on compression ratio. diff --git a/contrib/seekable_format/examples/.gitignore b/contrib/seekable_format/examples/.gitignore index df2f9ab0749..0b83f5e11c6 100644 --- a/contrib/seekable_format/examples/.gitignore +++ b/contrib/seekable_format/examples/.gitignore @@ -1,4 +1,5 @@ seekable_compression seekable_decompression +seekable_decompression_mem parallel_processing parallel_compression diff --git a/contrib/seekable_format/examples/Makefile b/contrib/seekable_format/examples/Makefile index 6d9562df8b4..f15a4a15a80 100644 --- a/contrib/seekable_format/examples/Makefile +++ b/contrib/seekable_format/examples/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2017-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved.
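To make the README's frame-size guidance above concrete, here is a hedged sketch of opening a seekable compression stream with a 4 KB maximum frame size, using the API exercised by the tests later in this diff; error handling is abbreviated, and it assumes the seekable sources and libzstd are linked in:

```cpp
#include <cstdio>

#include "zstd_seekable.h"

int main() {
    ZSTD_seekable_CStream* const zscs = ZSTD_seekable_createCStream();
    if (zscs == nullptr) return 1;

    // checksumFlag=1 stores a per-frame checksum in the seek table;
    // maxFrameSize=4096 matches a 4 KB expected access pattern.
    size_t const init = ZSTD_seekable_initCStream(
        zscs, /* compressionLevel */ 5, /* checksumFlag */ 1,
        /* maxFrameSize */ 4096);
    if (ZSTD_isError(init)) {
        std::fprintf(stderr, "init error: %s\n", ZSTD_getErrorName(init));
        ZSTD_seekable_freeCStream(zscs);
        return 1;
    }
    /* ... feed input with ZSTD_seekable_compressStream(), then flush the
       last frame and the seek table with ZSTD_seekable_endStream() ... */
    ZSTD_seekable_freeCStream(zscs);
    return 0;
}
```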
# # This source code is licensed under both the BSD-style license (found in the @@ -11,9 +11,11 @@ ZSTDLIB_PATH = ../../../lib ZSTDLIB_NAME = libzstd.a +# Parallel tools only make sense against multi-threaded libzstd +ZSTDLIB_TARGET = $(ZSTDLIB_NAME)-mt ZSTDLIB = $(ZSTDLIB_PATH)/$(ZSTDLIB_NAME) -CPPFLAGS += -I../ -I../../../lib -I../../../lib/common +CPPFLAGS += -DXXH_NAMESPACE=ZSTD_ -I../ -I../../../lib -I../../../lib/common CFLAGS ?= -O3 CFLAGS += -g @@ -24,10 +26,11 @@ SEEKABLE_OBJS = ../zstdseek_compress.c ../zstdseek_decompress.c $(ZSTDLIB) default: all -all: seekable_compression seekable_decompression parallel_processing +all: seekable_compression seekable_decompression seekable_decompression_mem \ + parallel_processing $(ZSTDLIB): - make -C $(ZSTDLIB_PATH) $(ZSTDLIB_NAME) + $(MAKE) -C $(ZSTDLIB_PATH) $(ZSTDLIB_TARGET) seekable_compression : seekable_compression.c $(SEEKABLE_OBJS) $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ @@ -35,6 +38,9 @@ seekable_compression : seekable_compression.c $(SEEKABLE_OBJS) seekable_decompression : seekable_decompression.c $(SEEKABLE_OBJS) $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ +seekable_decompression_mem : seekable_decompression_mem.c $(SEEKABLE_OBJS) + $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ + parallel_processing : parallel_processing.c $(SEEKABLE_OBJS) $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ -pthread @@ -44,5 +50,6 @@ parallel_compression : parallel_compression.c $(SEEKABLE_OBJS) clean: @rm -f core *.o tmp* result* *.zst \ seekable_compression seekable_decompression \ + seekable_decompression_mem \ parallel_processing parallel_compression @echo Cleaning completed diff --git a/contrib/seekable_format/examples/parallel_compression.c b/contrib/seekable_format/examples/parallel_compression.c index 69644d2b3c8..f8a73a36b44 100644 --- a/contrib/seekable_format/examples/parallel_compression.c +++ b/contrib/seekable_format/examples/parallel_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -21,12 +21,13 @@ # define SLEEP(x) usleep(x * 1000) #endif -#define XXH_NAMESPACE ZSTD_ #include "xxhash.h" +#define ZSTD_MULTITHREAD 1 +#include "threading.h" #include "pool.h" // use zstd thread pool for demo -#include "zstd_seekable.h" +#include "../zstd_seekable.h" static void* malloc_orDie(size_t size) { @@ -73,127 +74,166 @@ static size_t fclose_orDie(FILE* file) exit(6); } -static void fseek_orDie(FILE* file, long int offset, int origin) -{ - if (!fseek(file, offset, origin)) { - if (!fflush(file)) return; - } - /* error */ - perror("fseek"); - exit(7); -} - -static long int ftell_orDie(FILE* file) -{ - long int off = ftell(file); - if (off != -1) return off; - /* error */ - perror("ftell"); - exit(8); -} +struct state { + FILE* fout; + ZSTD_pthread_mutex_t mutex; + size_t nextID; + struct job* pending; + ZSTD_frameLog* frameLog; + const int compressionLevel; +}; struct job { - const void* src; + size_t id; + struct job* next; + struct state* state; + + void* src; size_t srcSize; void* dst; size_t dstSize; unsigned checksum; - - int compressionLevel; - int done; }; -static void compressFrame(void* opaque) +static void addPending_inmutex(struct state* state, struct job* job) { - struct job* job = opaque; + struct job** p = &state->pending; + while (*p && (*p)->id < job->id) + p = &(*p)->next; + job->next = *p; + *p = job; +} - job->checksum = XXH64(job->src, job->srcSize, 0); +static void flushFrame(struct state* state, struct job* job) +{ + fwrite_orDie(job->dst, job->dstSize, state->fout); + free(job->dst); - size_t ret = ZSTD_compress(job->dst, job->dstSize, job->src, job->srcSize, job->compressionLevel); + size_t ret = ZSTD_seekable_logFrame(state->frameLog, (unsigned)job->dstSize, (unsigned)job->srcSize, job->checksum); if (ZSTD_isError(ret)) { - fprintf(stderr, "ZSTD_compress() error : %s \n", ZSTD_getErrorName(ret)); - exit(20); + fprintf(stderr, "ZSTD_seekable_logFrame() error : %s \n", ZSTD_getErrorName(ret)); + exit(12); } +} - job->dstSize = ret; - job->done = 1; +static void flushPending_inmutex(struct state* state) +{ + while (state->pending && state->pending->id == state->nextID) { + struct job* p = state->pending; + state->pending = p->next; + flushFrame(state, p); + free(p); + state->nextID++; + } } -static void compressFile_orDie(const char* fname, const char* outName, int cLevel, unsigned frameSize, int nbThreads) +static void finishFrame(struct job* job) { - POOL_ctx* pool = POOL_create(nbThreads, nbThreads); - if (pool == NULL) { fprintf(stderr, "POOL_create() error \n"); exit(9); } + struct state *state = job->state; + ZSTD_pthread_mutex_lock(&state->mutex); + addPending_inmutex(state, job); + flushPending_inmutex(state); + ZSTD_pthread_mutex_unlock(&state->mutex); +} - FILE* const fin = fopen_orDie(fname, "rb"); - FILE* const fout = fopen_orDie(outName, "wb"); +static void compressFrame(void* opaque) +{ + struct job* job = opaque; - if (ZSTD_compressBound(frameSize) > 0xFFFFFFFFU) { fprintf(stderr, "Frame size too large \n"); exit(10); } - unsigned dstSize = ZSTD_compressBound(frameSize); + job->checksum = (unsigned)XXH64(job->src, job->srcSize, 0); + + size_t ret = ZSTD_compress(job->dst, job->dstSize, job->src, job->srcSize, job->state->compressionLevel); + if (ZSTD_isError(ret)) { + fprintf(stderr, "ZSTD_compress() error : %s \n", ZSTD_getErrorName(ret)); + exit(20); + } + job->dstSize = ret; + // No longer need + free(job->src); + job->src = NULL; + + finishFrame(job); +} - 
fseek_orDie(fin, 0, SEEK_END); - long int length = ftell_orDie(fin); - fseek_orDie(fin, 0, SEEK_SET); +static const char* createOutFilename_orDie(const char* filename) +{ + size_t const inL = strlen(filename); + size_t const outL = inL + 5; + void* outSpace = malloc_orDie(outL); + memset(outSpace, 0, outL); + strcat(outSpace, filename); + strcat(outSpace, ".zst"); + return (const char*)outSpace; +} - size_t numFrames = (length + frameSize - 1) / frameSize; +static void openInOut_orDie(const char* fname, FILE** fin, FILE** fout) { + if (strcmp(fname, "-") == 0) { + *fin = stdin; + *fout = stdout; + } else { + *fin = fopen_orDie(fname, "rb"); + const char* outName = createOutFilename_orDie(fname); + *fout = fopen_orDie(outName, "wb"); + } +} - struct job* jobs = malloc_orDie(sizeof(struct job) * numFrames); +static void compressFile_orDie(const char* fname, int cLevel, unsigned frameSize, size_t nbThreads) +{ + struct state state = { + .nextID = 0, + .pending = NULL, + .compressionLevel = cLevel, + }; + ZSTD_pthread_mutex_init(&state.mutex, NULL); + state.frameLog = ZSTD_seekable_createFrameLog(1); + if (state.frameLog == NULL) { fprintf(stderr, "ZSTD_seekable_createFrameLog() failed \n"); exit(11); } - size_t i; - for(i = 0; i < numFrames; i++) { - void* in = malloc_orDie(frameSize); - void* out = malloc_orDie(dstSize); + POOL_ctx* pool = POOL_create(nbThreads, nbThreads); + if (pool == NULL) { fprintf(stderr, "POOL_create() error \n"); exit(9); } - size_t inSize = fread_orDie(in, frameSize, fin); + FILE* fin; + openInOut_orDie(fname, &fin, &state.fout); - jobs[i].src = in; - jobs[i].srcSize = inSize; - jobs[i].dst = out; - jobs[i].dstSize = dstSize; - jobs[i].compressionLevel = cLevel; - jobs[i].done = 0; - POOL_add(pool, compressFrame, &jobs[i]); + if (ZSTD_compressBound(frameSize) > 0xFFFFFFFFU) { fprintf(stderr, "Frame size too large \n"); exit(10); } + size_t dstSize = ZSTD_compressBound(frameSize); + + for (size_t id = 0; 1; id++) { + struct job* job = malloc_orDie(sizeof(struct job)); + job->id = id; + job->next = NULL; + job->state = &state; + job->src = malloc_orDie(frameSize); + job->dst = malloc_orDie(dstSize); + job->srcSize = fread_orDie(job->src, frameSize, fin); + job->dstSize = dstSize; + POOL_add(pool, compressFrame, job); + if (feof(fin)) + break; } - ZSTD_frameLog* fl = ZSTD_seekable_createFrameLog(1); - if (fl == NULL) { fprintf(stderr, "ZSTD_seekable_createFrameLog() failed \n"); exit(11); } - for (i = 0; i < numFrames; i++) { - while (!jobs[i].done) SLEEP(5); /* wake up every 5 milliseconds to check */ - fwrite_orDie(jobs[i].dst, jobs[i].dstSize, fout); - free((void*)jobs[i].src); - free(jobs[i].dst); - - size_t ret = ZSTD_seekable_logFrame(fl, jobs[i].dstSize, jobs[i].srcSize, jobs[i].checksum); - if (ZSTD_isError(ret)) { fprintf(stderr, "ZSTD_seekable_logFrame() error : %s \n", ZSTD_getErrorName(ret)); } + POOL_joinJobs(pool); + POOL_free(pool); + if (state.pending) { + fprintf(stderr, "Unexpected leftover output blocks!\n"); + exit(13); } { unsigned char seekTableBuff[1024]; ZSTD_outBuffer out = {seekTableBuff, 1024, 0}; - while (ZSTD_seekable_writeSeekTable(fl, &out) != 0) { - fwrite_orDie(seekTableBuff, out.pos, fout); + while (ZSTD_seekable_writeSeekTable(state.frameLog, &out) != 0) { + fwrite_orDie(seekTableBuff, out.pos, state.fout); out.pos = 0; } - fwrite_orDie(seekTableBuff, out.pos, fout); + fwrite_orDie(seekTableBuff, out.pos, state.fout); } - ZSTD_seekable_freeFrameLog(fl); - free(jobs); - fclose_orDie(fout); + ZSTD_seekable_freeFrameLog(state.frameLog); 
+ fclose_orDie(state.fout); fclose_orDie(fin); } -static const char* createOutFilename_orDie(const char* filename) -{ - size_t const inL = strlen(filename); - size_t const outL = inL + 5; - void* outSpace = malloc_orDie(outL); - memset(outSpace, 0, outL); - strcat(outSpace, filename); - strcat(outSpace, ".zst"); - return (const char*)outSpace; -} - int main(int argc, const char** argv) { const char* const exeName = argv[0]; if (argc!=4) { @@ -205,10 +245,9 @@ int main(int argc, const char** argv) { { const char* const inFileName = argv[1]; unsigned const frameSize = (unsigned)atoi(argv[2]); - int const nbThreads = atoi(argv[3]); + size_t const nbThreads = (size_t)atoi(argv[3]); - const char* const outFileName = createOutFilename_orDie(inFileName); - compressFile_orDie(inFileName, outFileName, 5, frameSize, nbThreads); + compressFile_orDie(inFileName, 5, frameSize, nbThreads); } return 0; diff --git a/contrib/seekable_format/examples/parallel_processing.c b/contrib/seekable_format/examples/parallel_processing.c index 36226b49fd3..2757a9aba07 100644 --- a/contrib/seekable_format/examples/parallel_processing.c +++ b/contrib/seekable_format/examples/parallel_processing.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -19,7 +19,7 @@ #define ZSTD_STATIC_LINKING_ONLY #include // presumes zstd library is installed #include -#if defined(WIN32) || defined(_WIN32) +#if defined(_WIN32) # include # define SLEEP(x) Sleep(x) #else @@ -29,7 +29,7 @@ #include "pool.h" // use zstd thread pool for demo -#include "zstd_seekable.h" +#include "../zstd_seekable.h" #define MIN(a, b) ((a) < (b) ? (a) : (b)) @@ -100,13 +100,11 @@ struct sum_job { const char* fname; unsigned long long sum; unsigned frameNb; - int done; }; static void sumFrame(void* opaque) { struct sum_job* job = (struct sum_job*)opaque; - job->done = 0; FILE* const fin = fopen_orDie(job->fname, "rb"); @@ -128,7 +126,6 @@ static void sumFrame(void* opaque) sum += data[i]; } job->sum = sum; - job->done = 1; fclose(fin); ZSTD_seekable_free(seekable); @@ -153,14 +150,14 @@ static void sumFile_orDie(const char* fname, int nbThreads) unsigned fnb; for (fnb = 0; fnb < numFrames; fnb++) { - jobs[fnb] = (struct sum_job){ fname, 0, fnb, 0 }; + jobs[fnb] = (struct sum_job){ fname, 0, fnb }; POOL_add(pool, sumFrame, &jobs[fnb]); } + POOL_joinJobs(pool); unsigned long long total = 0; for (fnb = 0; fnb < numFrames; fnb++) { - while (!jobs[fnb].done) SLEEP(5); /* wake up every 5 milliseconds to check */ total += jobs[fnb].sum; } diff --git a/contrib/seekable_format/examples/seekable_compression.c b/contrib/seekable_format/examples/seekable_compression.c index 9a331a89531..c3d227dd05a 100644 --- a/contrib/seekable_format/examples/seekable_compression.c +++ b/contrib/seekable_format/examples/seekable_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -13,7 +13,7 @@ #define ZSTD_STATIC_LINKING_ONLY #include // presumes zstd library is installed -#include "zstd_seekable.h" +#include "../zstd_seekable.h" static void* malloc_orDie(size_t size) { @@ -112,20 +112,23 @@ static char* createOutFilename_orDie(const char* filename) return (char*)outSpace; } -int main(int argc, const char** argv) { +#define CLEVEL_DEFAULT 5 +int main(int argc, const char** argv) +{ const char* const exeName = argv[0]; - if (argc!=3) { - printf("wrong arguments\n"); - printf("usage:\n"); - printf("%s FILE FRAME_SIZE\n", exeName); + if (argc<3 || argc>4) { + printf("wrong arguments \n"); + printf("usage: \n"); + printf("%s FILE FRAME_SIZE [LEVEL] \n", exeName); return 1; } { const char* const inFileName = argv[1]; unsigned const frameSize = (unsigned)atoi(argv[2]); + int const cLevel = (argc==4) ? atoi(argv[3]) : CLEVEL_DEFAULT; char* const outFileName = createOutFilename_orDie(inFileName); - compressFile_orDie(inFileName, outFileName, 5, frameSize); + compressFile_orDie(inFileName, outFileName, cLevel, frameSize); free(outFileName); } diff --git a/contrib/seekable_format/examples/seekable_decompression.c b/contrib/seekable_format/examples/seekable_decompression.c index 7050e0fa5c6..7edbca87d0e 100644 --- a/contrib/seekable_format/examples/seekable_decompression.c +++ b/contrib/seekable_format/examples/seekable_decompression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -16,7 +16,7 @@ #include // presumes zstd library is installed #include -#include "zstd_seekable.h" +#include "../zstd_seekable.h" #define MIN(a, b) ((a) < (b) ? (a) : (b)) @@ -99,6 +99,9 @@ static void decompressFile_orDie(const char* fname, off_t startOffset, off_t end while (startOffset < endOffset) { size_t const result = ZSTD_seekable_decompress(seekable, buffOut, MIN(endOffset - startOffset, buffOutSize), startOffset); + if (!result) { + break; + } if (ZSTD_isError(result)) { fprintf(stderr, "ZSTD_seekable_decompress() error : %s \n", diff --git a/contrib/seekable_format/examples/seekable_decompression_mem.c b/contrib/seekable_format/examples/seekable_decompression_mem.c new file mode 100644 index 00000000000..44a06fbbfb0 --- /dev/null +++ b/contrib/seekable_format/examples/seekable_decompression_mem.c @@ -0,0 +1,147 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + */ + + +#include // malloc, exit +#include // fprintf, perror, feof +#include // strerror +#include // errno +#define ZSTD_STATIC_LINKING_ONLY +#include // presumes zstd library is installed +#include + +#include "zstd_seekable.h" + +#define MIN(a, b) ((a) < (b) ? 
(a) : (b)) + +#define MAX_FILE_SIZE (8 * 1024 * 1024) + +static void* malloc_orDie(size_t size) +{ + void* const buff = malloc(size); + if (buff) return buff; + /* error */ + perror("malloc"); + exit(1); +} + +static void* realloc_orDie(void* ptr, size_t size) +{ + ptr = realloc(ptr, size); + if (ptr) return ptr; + /* error */ + perror("realloc"); + exit(1); +} + +static FILE* fopen_orDie(const char *filename, const char *instruction) +{ + FILE* const inFile = fopen(filename, instruction); + if (inFile) return inFile; + /* error */ + perror(filename); + exit(3); +} + +static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) +{ + size_t const readSize = fread(buffer, 1, sizeToRead, file); + if (readSize == sizeToRead) return readSize; /* good */ + if (feof(file)) return readSize; /* good, reached end of file */ + /* error */ + perror("fread"); + exit(4); +} + +static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) +{ + size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file); + if (writtenSize == sizeToWrite) return sizeToWrite; /* good */ + /* error */ + perror("fwrite"); + exit(5); +} + +static size_t fclose_orDie(FILE* file) +{ + if (!fclose(file)) return 0; + /* error */ + perror("fclose"); + exit(6); +} + +static void fseek_orDie(FILE* file, long int offset, int origin) { + if (!fseek(file, offset, origin)) { + if (!fflush(file)) return; + } + /* error */ + perror("fseek"); + exit(7); +} + + +static void decompressFile_orDie(const char* fname, off_t startOffset, off_t endOffset) +{ + FILE* const fin = fopen_orDie(fname, "rb"); + FILE* const fout = stdout; + // Just for demo purposes, assume file is <= MAX_FILE_SIZE + void* const buffIn = malloc_orDie(MAX_FILE_SIZE); + size_t const inSize = fread_orDie(buffIn, MAX_FILE_SIZE, fin); + size_t const buffOutSize = ZSTD_DStreamOutSize(); /* Guarantee to successfully flush at least one complete compressed block in all circumstances. 
*/ + void* const buffOut = malloc_orDie(buffOutSize); + + ZSTD_seekable* const seekable = ZSTD_seekable_create(); + if (seekable==NULL) { fprintf(stderr, "ZSTD_seekable_create() error \n"); exit(10); } + + size_t const initResult = ZSTD_seekable_initBuff(seekable, buffIn, inSize); + if (ZSTD_isError(initResult)) { fprintf(stderr, "ZSTD_seekable_init() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); } + + while (startOffset < endOffset) { + size_t const result = ZSTD_seekable_decompress(seekable, buffOut, MIN(endOffset - startOffset, buffOutSize), startOffset); + if (!result) { + break; + } + + if (ZSTD_isError(result)) { + fprintf(stderr, "ZSTD_seekable_decompress() error : %s \n", + ZSTD_getErrorName(result)); + exit(12); + } + fwrite_orDie(buffOut, result, fout); + startOffset += result; + } + + ZSTD_seekable_free(seekable); + fclose_orDie(fin); + fclose_orDie(fout); + free(buffIn); + free(buffOut); +} + + +int main(int argc, const char** argv) +{ + const char* const exeName = argv[0]; + + if (argc!=4) { + fprintf(stderr, "wrong arguments\n"); + fprintf(stderr, "usage:\n"); + fprintf(stderr, "%s FILE START END\n", exeName); + return 1; + } + + { + const char* const inFilename = argv[1]; + off_t const startOffset = atoll(argv[2]); + off_t const endOffset = atoll(argv[3]); + decompressFile_orDie(inFilename, startOffset, endOffset); + } + + return 0; +} diff --git a/contrib/seekable_format/tests/.gitignore b/contrib/seekable_format/tests/.gitignore new file mode 100644 index 00000000000..b59d08b342d --- /dev/null +++ b/contrib/seekable_format/tests/.gitignore @@ -0,0 +1,2 @@ +seekable_tests +data.txt diff --git a/contrib/seekable_format/tests/Makefile b/contrib/seekable_format/tests/Makefile new file mode 100644 index 00000000000..a23c0f43c35 --- /dev/null +++ b/contrib/seekable_format/tests/Makefile @@ -0,0 +1,58 @@ +# ################################################################ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). 
+# ################################################################ + +# This Makefile presumes libzstd is built, using `make` in / or /lib/ + +ZSTDLIB_PATH = ../../../lib +ZSTDLIB_NAME = libzstd.a +ZSTDLIB = $(ZSTDLIB_PATH)/$(ZSTDLIB_NAME) + +CPPFLAGS += -DXXH_NAMESPACE=ZSTD_ -I../ -I$(ZSTDLIB_PATH) -I$(ZSTDLIB_PATH)/common + +CFLAGS ?= -O3 +CFLAGS += -g -Wall -Wextra -Wcast-qual -Wcast-align -Wconversion \ + -Wformat=2 -Wstrict-aliasing=1 + +SEEKABLE_OBJS = ../zstdseek_compress.c ../zstdseek_decompress.c $(ZSTDLIB) + +.PHONY: default clean test +default: test + +test: seekable_tests parallel_compression_test + ./seekable_tests + +$(ZSTDLIB): + $(MAKE) -C $(ZSTDLIB_PATH) $(ZSTDLIB_NAME) + +seekable_tests : $(SEEKABLE_OBJS) + +EXAMPLES_PATH = ../examples +PARALLEL_COMPRESSION = $(EXAMPLES_PATH)/parallel_compression + +DATAGEN_PATH = ../../../tests +DATAGEN = $(DATAGEN_PATH)/datagen + +$(PARALLEL_COMPRESSION): + $(MAKE) -C $(EXAMPLES_PATH) parallel_compression + +$(DATAGEN): + $(MAKE) -C $(DATAGEN_PATH) datagen + +data.txt: $(DATAGEN) + $(DATAGEN) -g100M > $@ + +parallel_compression_test: $(PARALLEL_COMPRESSION) data.txt + ulimit -Sv 102400; $(PARALLEL_COMPRESSION) data.txt 1048576 2 + +.PHONY: parallel_compression_test parallel_comp + +clean: + @$(RM) core *.o tmp* result* *.zst \ + seekable_tests data.txt + @echo Cleaning completed diff --git a/contrib/seekable_format/tests/seekable_tests.c b/contrib/seekable_format/tests/seekable_tests.c new file mode 100644 index 00000000000..809ea6425eb --- /dev/null +++ b/contrib/seekable_format/tests/seekable_tests.c @@ -0,0 +1,375 @@ +#include +#include +#include // malloc +#include +#include +#include + +#include "../zstd_seekable.h" + + +/* ZSTD_seekable_customFile implementation that reads/seeks a buffer while keeping track of total bytes read */ +typedef struct { + const void *ptr; + size_t size; + size_t pos; + size_t totalRead; +} buffWrapperWithTotal_t; + +static int readBuffWithTotal(void* opaque, void* buffer, size_t n) +{ + buffWrapperWithTotal_t* const buff = (buffWrapperWithTotal_t*)opaque; + assert(buff != NULL); + if (buff->pos + n > buff->size) return -1; + memcpy(buffer, (const char*)buff->ptr + buff->pos, n); + buff->pos += n; + buff->totalRead += n; + return 0; +} + +static int seekBuffWithTotal(void* opaque, long long offset, int origin) +{ + buffWrapperWithTotal_t* const buff = (buffWrapperWithTotal_t*) opaque; + unsigned long long newOffset; + assert(buff != NULL); + switch (origin) { + case SEEK_SET: + assert(offset >= 0); + newOffset = (unsigned long long)offset; + break; + case SEEK_CUR: + newOffset = (unsigned long long)((long long)buff->pos + offset); + break; + case SEEK_END: + newOffset = (unsigned long long)((long long)buff->size + offset); + break; + default: + assert(0); /* not possible */ + } + if (newOffset > buff->size) { + return -1; + } + buff->pos = newOffset; + return 0; +} + +/* Basic unit tests for zstd seekable format */ +int main(int argc, const char** argv) +{ + unsigned testNb = 1; + (void)argc; (void)argv; + printf("Beginning zstd seekable format tests...\n"); + + printf("Test %u - simple round trip: ", testNb++); + { size_t const inSize = 4000; + void* const inBuffer = malloc(inSize); + assert(inBuffer != NULL); + + size_t const seekCapacity = 5000; + void* const seekBuffer = malloc(seekCapacity); + assert(seekBuffer != NULL); + size_t seekSize; + + size_t const outCapacity = inSize; + void* const outBuffer = malloc(outCapacity); + assert(outBuffer != NULL); + + ZSTD_seekable_CStream* const zscs = 
ZSTD_seekable_createCStream(); + assert(zscs != NULL); + + { size_t const initStatus = ZSTD_seekable_initCStream(zscs, 9, 0 /* checksumFlag */, (unsigned)inSize /* maxFrameSize */); + assert(!ZSTD_isError(initStatus)); + } + + { ZSTD_outBuffer outb = { .dst=seekBuffer, .pos=0, .size=seekCapacity }; + ZSTD_inBuffer inb = { .src=inBuffer, .pos=0, .size=inSize }; + + size_t const cStatus = ZSTD_seekable_compressStream(zscs, &outb, &inb); + assert(!ZSTD_isError(cStatus)); + assert(inb.pos == inb.size); + + size_t const endStatus = ZSTD_seekable_endStream(zscs, &outb); + assert(!ZSTD_isError(endStatus)); + seekSize = outb.pos; + } + + ZSTD_seekable* const stream = ZSTD_seekable_create(); + assert(stream != NULL); + { size_t const initStatus = ZSTD_seekable_initBuff(stream, seekBuffer, seekSize); + assert(!ZSTD_isError(initStatus)); } + + { size_t const decStatus = ZSTD_seekable_decompress(stream, outBuffer, outCapacity, 0); + assert(decStatus == inSize); } + + /* unit test ZSTD_seekTable functions */ + ZSTD_seekTable* const zst = ZSTD_seekTable_create_fromSeekable(stream); + assert(zst != NULL); + + unsigned const nbFrames = ZSTD_seekTable_getNumFrames(zst); + assert(nbFrames > 0); + + unsigned long long const frame0Offset = ZSTD_seekTable_getFrameCompressedOffset(zst, 0); + assert(frame0Offset == 0); + + unsigned long long const content0Offset = ZSTD_seekTable_getFrameDecompressedOffset(zst, 0); + assert(content0Offset == 0); + + size_t const cSize = ZSTD_seekTable_getFrameCompressedSize(zst, 0); + assert(!ZSTD_isError(cSize)); + assert(cSize <= seekCapacity); + + size_t const origSize = ZSTD_seekTable_getFrameDecompressedSize(zst, 0); + assert(origSize == inSize); + + unsigned const fo1idx = ZSTD_seekTable_offsetToFrameIndex(zst, 1); + assert(fo1idx == 0); + + free(inBuffer); + free(seekBuffer); + free(outBuffer); + ZSTD_seekable_freeCStream(zscs); + ZSTD_seekTable_free(zst); + ZSTD_seekable_free(stream); + } + printf("Success!\n"); + + + printf("Test %u - check that seekable decompress does not hang: ", testNb++); + { /* Github issue #2335 */ + const size_t compressed_size = 17; + const uint8_t compressed_data[17] = { + '^', + '*', + 'M', + '\x18', + '\t', + '\x00', + '\x00', + '\x00', + '\x00', + '\x00', + '\x00', + '\x00', + (uint8_t)('\x03'), + (uint8_t)('\xb1'), + (uint8_t)('\xea'), + (uint8_t)('\x92'), + (uint8_t)('\x8f'), + }; + const size_t uncompressed_size = 32; + uint8_t uncompressed_data[32]; + + ZSTD_seekable* const stream = ZSTD_seekable_create(); + assert(stream != NULL); + { size_t const status = ZSTD_seekable_initBuff(stream, compressed_data, compressed_size); + if (ZSTD_isError(status)) { + ZSTD_seekable_free(stream); + goto _test_error; + } } + + /* Should return an error, but not hang */ + { const size_t offset = 2; + size_t const status = ZSTD_seekable_decompress(stream, uncompressed_data, uncompressed_size, offset); + if (!ZSTD_isError(status)) { + ZSTD_seekable_free(stream); + goto _test_error; + } } + + ZSTD_seekable_free(stream); + } + printf("Success!\n"); + + printf("Test %u - check #2 that seekable decompress does not hang: ", testNb++); + { /* Github issue #FIXME */ + const size_t compressed_size = 27; + const uint8_t compressed_data[27] = { + (uint8_t)'\x28', + (uint8_t)'\xb5', + (uint8_t)'\x2f', + (uint8_t)'\xfd', + (uint8_t)'\x00', + (uint8_t)'\x32', + (uint8_t)'\x91', + (uint8_t)'\x00', + (uint8_t)'\x00', + (uint8_t)'\x00', + (uint8_t)'\x5e', + (uint8_t)'\x2a', + (uint8_t)'\x4d', + (uint8_t)'\x18', + (uint8_t)'\x09', + (uint8_t)'\x00', + (uint8_t)'\x00', + 
(uint8_t)'\x00', + (uint8_t)'\x00', + (uint8_t)'\x00', + (uint8_t)'\x00', + (uint8_t)'\x00', + (uint8_t)'\x00', + (uint8_t)'\xb1', + (uint8_t)'\xea', + (uint8_t)'\x92', + (uint8_t)'\x8f', + }; + const size_t uncompressed_size = 400; + uint8_t uncompressed_data[400]; + + ZSTD_seekable* stream = ZSTD_seekable_create(); + size_t status = ZSTD_seekable_initBuff(stream, compressed_data, compressed_size); + if (ZSTD_isError(status)) { + ZSTD_seekable_free(stream); + goto _test_error; + } + + const size_t offset = 2; + /* Should return an error, but not hang */ + status = ZSTD_seekable_decompress(stream, uncompressed_data, uncompressed_size, offset); + if (!ZSTD_isError(status)) { + ZSTD_seekable_free(stream); + goto _test_error; + } + + ZSTD_seekable_free(stream); + } + printf("Success!\n"); + + + printf("Test %u - check ZSTD magic in compressing empty string: ", testNb++); + { // compressing empty string should return a zstd header + size_t const capacity = 255; + char* inBuffer = malloc(capacity); + assert(inBuffer != NULL); + inBuffer[0] = '\0'; + void* const outBuffer = malloc(capacity); + assert(outBuffer != NULL); + + ZSTD_seekable_CStream *s = ZSTD_seekable_createCStream(); + ZSTD_seekable_initCStream(s, 1, 1, 255); + + ZSTD_inBuffer input = { .src=inBuffer, .pos=0, .size=0 }; + ZSTD_outBuffer output = { .dst=outBuffer, .pos=0, .size=capacity }; + + ZSTD_seekable_compressStream(s, &output, &input); + ZSTD_seekable_endStream(s, &output); + + if((((char*)output.dst)[0] != '\x28') | (((char*)output.dst)[1] != '\xb5') | (((char*)output.dst)[2] != '\x2f') | (((char*)output.dst)[3] != '\xfd')) { + printf("%#02x %#02x %#02x %#02x\n", ((char*)output.dst)[0], ((char*)output.dst)[1] , ((char*)output.dst)[2] , ((char*)output.dst)[3] ); + + free(inBuffer); + free(outBuffer); + ZSTD_seekable_freeCStream(s); + goto _test_error; + } + + free(inBuffer); + free(outBuffer); + ZSTD_seekable_freeCStream(s); + } + printf("Success!\n"); + + + printf("Test %u - multiple decompress calls: ", testNb++); + { char const inBuffer[] = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt"; + size_t const inSize = sizeof(inBuffer); + + size_t const seekCapacity = 5000; + void* const seekBuffer = malloc(seekCapacity); + assert(seekBuffer != NULL); + size_t seekSize; + + size_t const outCapacity = inSize; + char* const outBuffer = malloc(outCapacity); + assert(outBuffer != NULL); + + ZSTD_seekable_CStream* const zscs = ZSTD_seekable_createCStream(); + assert(zscs != NULL); + + /* compress test data with a small frame size to ensure multiple frames in the output */ + unsigned const maxFrameSize = 40; + { size_t const initStatus = ZSTD_seekable_initCStream(zscs, 9, 0 /* checksumFlag */, maxFrameSize); + assert(!ZSTD_isError(initStatus)); + } + + { ZSTD_outBuffer outb = { .dst=seekBuffer, .pos=0, .size=seekCapacity }; + ZSTD_inBuffer inb = { .src=inBuffer, .pos=0, .size=inSize }; + + while (inb.pos < inb.size) { + size_t const cStatus = ZSTD_seekable_compressStream(zscs, &outb, &inb); + assert(!ZSTD_isError(cStatus)); + } + + size_t const endStatus = ZSTD_seekable_endStream(zscs, &outb); + assert(!ZSTD_isError(endStatus)); + seekSize = outb.pos; + } + + ZSTD_seekable* const stream = ZSTD_seekable_create(); + assert(stream != NULL); + buffWrapperWithTotal_t buffWrapper = {seekBuffer, seekSize, 0, 0}; + { ZSTD_seekable_customFile srcFile = {&buffWrapper, &readBuffWithTotal, &seekBuffWithTotal}; + size_t const initStatus = ZSTD_seekable_initAdvanced(stream, srcFile); + 
assert(!ZSTD_isError(initStatus)); }
+
+        /* Perform a series of small reads and seeks (repeatedly read 1 byte and skip 1 byte)
+           and check that we didn't reread input data unnecessarily */
+        size_t pos;
+        for (pos = 0; pos < inSize; pos += 2) {
+            size_t const decStatus = ZSTD_seekable_decompress(stream, outBuffer, 1, pos);
+            if (decStatus != 1 || outBuffer[0] != inBuffer[pos]) {
+                free(seekBuffer);
+                free(outBuffer);
+                ZSTD_seekable_freeCStream(zscs);
+                ZSTD_seekable_free(stream);
+                goto _test_error;
+            }
+        }
+        if (buffWrapper.totalRead > seekSize) {
+            /* We read more than the compressed size, meaning there were some rereads.
+               Rereads should not happen here, because we only seeked forward. */
+            printf("Too much data read: %zu read, with compressed size %zu\n", buffWrapper.totalRead, seekSize);
+            free(seekBuffer);
+            free(outBuffer);
+            ZSTD_seekable_freeCStream(zscs);
+            ZSTD_seekable_free(stream);
+            goto _test_error;
+        }
+
+        /* Perform some reads and seeks to ensure correctness */
+        struct {
+            size_t offset;
+            size_t size;
+        } const tests[] = {  /* Assume the frame size is 40 */
+            {20, 40},  /* read partial data from two frames */
+            {60, 10},  /* continue reading from the same offset */
+            {50, 20},  /* seek backward within the same frame */
+            {10, 10},  /* seek backward to a different frame */
+            {25, 10},  /* seek forward within the same frame */
+            {60, 10},  /* seek forward to a different frame */
+        };
+        size_t idx;
+        for (idx = 0; idx < sizeof(tests) / sizeof(tests[0]); idx++) {
+            size_t const decStatus = ZSTD_seekable_decompress(stream, outBuffer, tests[idx].size, tests[idx].offset);
+            if (decStatus != tests[idx].size || memcmp(outBuffer, inBuffer + tests[idx].offset, tests[idx].size) != 0) {
+                free(seekBuffer);
+                free(outBuffer);
+                ZSTD_seekable_freeCStream(zscs);
+                ZSTD_seekable_free(stream);
+                goto _test_error;
+            }
+        }
+
+        free(seekBuffer);
+        free(outBuffer);
+        ZSTD_seekable_freeCStream(zscs);
+        ZSTD_seekable_free(stream);
+    }
+    printf("Success!\n");
+
+    /* TODO: Add more tests */
+    printf("Finished tests\n");
+    return 0;
+
+_test_error:
+    printf("test failed! Exiting...\n");
+    return 1;
+}
diff --git a/contrib/seekable_format/zstd_seekable.h b/contrib/seekable_format/zstd_seekable.h
index 7ffd1ba0a72..b1f83d0e0a1 100644
--- a/contrib/seekable_format/zstd_seekable.h
+++ b/contrib/seekable_format/zstd_seekable.h
@@ -1,13 +1,13 @@
 #ifndef SEEKABLE_H
 #define SEEKABLE_H
 
+#include <stddef.h>  /* size_t */
+#include "zstd.h"    /* ZSTDLIB_API */
+
 #if defined (__cplusplus)
 extern "C" {
 #endif
 
-#include <stddef.h>  /* size_t */
-#include "zstd.h"    /* ZSTDLIB_API */
-
 #define ZSTD_seekTableFooterSize 9
 
@@ -15,8 +15,8 @@ extern "C" {
 #define ZSTD_SEEKABLE_MAXFRAMES 0x8000000U
 
-/* Limit the maximum size to avoid any potential issues storing the compressed size */
-#define ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE 0x80000000U
+/* Limit maximum size to avoid potential issues storing the compressed size */
+#define ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE 0x40000000U
 
 /*-****************************************************************************
 * Seekable Format
@@ -29,6 +29,7 @@ extern "C" {
 typedef struct ZSTD_seekable_CStream_s ZSTD_seekable_CStream;
 typedef struct ZSTD_seekable_s ZSTD_seekable;
+typedef struct ZSTD_seekTable_s ZSTD_seekTable;
 
 /*-****************************************************************************
 * Seekable compression - HowTo
 *
 * Use ZSTD_seekable_initCStream() to initialize a ZSTD_seekable_CStream object
 * for a new compression operation.
-* `maxFrameSize` indicates the size at which to automatically start a new
-* seekable frame. `maxFrameSize == 0` implies the default maximum size.
-* `checksumFlag` indicates whether or not the seek table should include frame
-* checksums on the uncompressed data for verification.
+* - `maxFrameSize` indicates the size at which to automatically start a new
+*   seekable frame.
+*   `maxFrameSize == 0` implies the default maximum size.
+*   Smaller frame sizes allow faster decompression of small segments,
+*   since retrieving a single byte requires decompression of
+*   the full frame where the byte belongs.
+*   In general, size the frames to roughly correspond to
+*   the access granularity (when it's known).
+*   But small sizes also reduce compression ratio.
+*   Avoid really tiny frame sizes (< 1 KB),
+*   which would hurt compression ratio considerably.
+* - `checksumFlag` indicates whether or not the seek table should include frame
+*   checksums on the uncompressed data for verification.
 * @return : a size hint for input to provide for compression, or an error code
 *           checkable with ZSTD_isError()
 *
@@ -107,6 +117,7 @@ ZSTDLIB_API size_t ZSTD_seekable_freeFrameLog(ZSTD_frameLog* fl);
 ZSTDLIB_API size_t ZSTD_seekable_logFrame(ZSTD_frameLog* fl, unsigned compressedSize, unsigned decompressedSize, unsigned checksum);
 ZSTDLIB_API size_t ZSTD_seekable_writeSeekTable(ZSTD_frameLog* fl, ZSTD_outBuffer* output);
 
+
 /*-****************************************************************************
 * Seekable decompression - HowTo
 * A ZSTD_seekable object is required to track the seekTable.
@@ -161,13 +172,42 @@ ZSTDLIB_API size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t
 ZSTDLIB_API size_t ZSTD_seekable_decompressFrame(ZSTD_seekable* zs, void* dst, size_t dstSize, unsigned frameIndex);
 
 #define ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE (0ULL-2)
-/*===== Seek Table access functions =====*/
-ZSTDLIB_API unsigned ZSTD_seekable_getNumFrames(ZSTD_seekable* const zs);
-ZSTDLIB_API unsigned long long ZSTD_seekable_getFrameCompressedOffset(ZSTD_seekable* const zs, unsigned frameIndex);
-ZSTDLIB_API unsigned long long ZSTD_seekable_getFrameDecompressedOffset(ZSTD_seekable* const zs, unsigned frameIndex);
-ZSTDLIB_API size_t ZSTD_seekable_getFrameCompressedSize(ZSTD_seekable* const zs, unsigned frameIndex);
-ZSTDLIB_API size_t ZSTD_seekable_getFrameDecompressedSize(ZSTD_seekable* const zs, unsigned frameIndex);
-ZSTDLIB_API unsigned ZSTD_seekable_offsetToFrameIndex(ZSTD_seekable* const zs, unsigned long long offset);
+/*===== Seekable seek table access functions =====*/
+ZSTDLIB_API unsigned ZSTD_seekable_getNumFrames(const ZSTD_seekable* zs);
+ZSTDLIB_API unsigned long long ZSTD_seekable_getFrameCompressedOffset(const ZSTD_seekable* zs, unsigned frameIndex);
+ZSTDLIB_API unsigned long long ZSTD_seekable_getFrameDecompressedOffset(const ZSTD_seekable* zs, unsigned frameIndex);
+ZSTDLIB_API size_t ZSTD_seekable_getFrameCompressedSize(const ZSTD_seekable* zs, unsigned frameIndex);
+ZSTDLIB_API size_t ZSTD_seekable_getFrameDecompressedSize(const ZSTD_seekable* zs, unsigned frameIndex);
+ZSTDLIB_API unsigned ZSTD_seekable_offsetToFrameIndex(const ZSTD_seekable* zs, unsigned long long offset);
+
+
+/*-****************************************************************************
+* Direct exploitation of the seekTable
+*
+* Memory-constrained use cases that manage multiple archives
+* benefit from retaining multiple archive seek tables
+* without retaining a ZSTD_seekable instance for each.
+*
+* The API below allows the above-mentioned use cases
+* to initialize a ZSTD_seekable, extract its (smaller) ZSTD_seekTable,
+* then throw the ZSTD_seekable away to save memory.
+*
+* Standard ZSTD operations can then be used
+* to decompress frames based on seek table offsets.
+******************************************************************************/
+
+/*===== Independent seek table management =====*/
+ZSTDLIB_API ZSTD_seekTable* ZSTD_seekTable_create_fromSeekable(const ZSTD_seekable* zs);
+ZSTDLIB_API size_t ZSTD_seekTable_free(ZSTD_seekTable* st);
+
+/*===== Direct seek table access functions =====*/
+ZSTDLIB_API unsigned ZSTD_seekTable_getNumFrames(const ZSTD_seekTable* st);
+ZSTDLIB_API unsigned long long ZSTD_seekTable_getFrameCompressedOffset(const ZSTD_seekTable* st, unsigned frameIndex);
+ZSTDLIB_API unsigned long long ZSTD_seekTable_getFrameDecompressedOffset(const ZSTD_seekTable* st, unsigned frameIndex);
+ZSTDLIB_API size_t ZSTD_seekTable_getFrameCompressedSize(const ZSTD_seekTable* st, unsigned frameIndex);
+ZSTDLIB_API size_t ZSTD_seekTable_getFrameDecompressedSize(const ZSTD_seekTable* st, unsigned frameIndex);
+ZSTDLIB_API unsigned ZSTD_seekTable_offsetToFrameIndex(const ZSTD_seekTable* st, unsigned long long offset);
+
 
 /*===== Seekable advanced I/O API =====*/
 typedef int(ZSTD_seekable_read)(void* opaque, void* buffer, size_t n);
diff --git a/contrib/seekable_format/zstd_seekable_compression_format.md b/contrib/seekable_format/zstd_seekable_compression_format.md
index bf3080f7bbe..7bd0790e81b 100644
--- a/contrib/seekable_format/zstd_seekable_compression_format.md
+++ b/contrib/seekable_format/zstd_seekable_compression_format.md
@@ -2,7 +2,7 @@
 
 ### Notices
 
-Copyright (c) 2017-present Facebook, Inc.
+Copyright (c) Meta Platforms, Inc. and affiliates.
 
 Permission is granted to copy and distribute this document
 for any purpose and without charge,
@@ -53,7 +53,7 @@ __`Frame_Size`__
 
 The total size of the skippable frame, not including the `Skippable_Magic_Number` or `Frame_Size`.
 This is for compatibility with [Zstandard skippable frames].
 
-[Zstandard skippable frames]: https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#skippable-frames
+[Zstandard skippable frames]: https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#skippable-frames
 
 #### `Seek_Table_Footer`
 The seek table footer format is as follows:
diff --git a/contrib/seekable_format/zstdseek_compress.c b/contrib/seekable_format/zstdseek_compress.c
index 5a75714fac5..4997807d251 100644
--- a/contrib/seekable_format/zstdseek_compress.c
+++ b/contrib/seekable_format/zstdseek_compress.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-present, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
* * This source code is licensed under both the BSD-style license (found in the @@ -12,13 +12,13 @@ #include #define XXH_STATIC_LINKING_ONLY -#define XXH_NAMESPACE ZSTD_ #include "xxhash.h" #define ZSTD_STATIC_LINKING_ONLY #include "zstd.h" #include "zstd_errors.h" #include "mem.h" + #include "zstd_seekable.h" #define CHECK_Z(f) { size_t const ret = (f); if (ret != 0) return ret; } @@ -63,19 +63,18 @@ struct ZSTD_seekable_CStream_s { int writingSeekTable; }; -size_t ZSTD_seekable_frameLog_allocVec(ZSTD_frameLog* fl) +static size_t ZSTD_seekable_frameLog_allocVec(ZSTD_frameLog* fl) { /* allocate some initial space */ size_t const FRAMELOG_STARTING_CAPACITY = 16; fl->entries = (framelogEntry_t*)malloc( sizeof(framelogEntry_t) * FRAMELOG_STARTING_CAPACITY); if (fl->entries == NULL) return ERROR(memory_allocation); - fl->capacity = FRAMELOG_STARTING_CAPACITY; - + fl->capacity = (U32)FRAMELOG_STARTING_CAPACITY; return 0; } -size_t ZSTD_seekable_frameLog_freeVec(ZSTD_frameLog* fl) +static size_t ZSTD_seekable_frameLog_freeVec(ZSTD_frameLog* fl) { if (fl != NULL) free(fl->entries); return 0; @@ -83,7 +82,7 @@ size_t ZSTD_seekable_frameLog_freeVec(ZSTD_frameLog* fl) ZSTD_frameLog* ZSTD_seekable_createFrameLog(int checksumFlag) { - ZSTD_frameLog* fl = malloc(sizeof(ZSTD_frameLog)); + ZSTD_frameLog* const fl = (ZSTD_frameLog*)malloc(sizeof(ZSTD_frameLog)); if (fl == NULL) return NULL; if (ZSTD_isError(ZSTD_seekable_frameLog_allocVec(fl))) { @@ -106,10 +105,9 @@ size_t ZSTD_seekable_freeFrameLog(ZSTD_frameLog* fl) return 0; } -ZSTD_seekable_CStream* ZSTD_seekable_createCStream() +ZSTD_seekable_CStream* ZSTD_seekable_createCStream(void) { - ZSTD_seekable_CStream* zcs = malloc(sizeof(ZSTD_seekable_CStream)); - + ZSTD_seekable_CStream* const zcs = (ZSTD_seekable_CStream*)malloc(sizeof(ZSTD_seekable_CStream)); if (zcs == NULL) return NULL; memset(zcs, 0, sizeof(*zcs)); @@ -134,7 +132,6 @@ size_t ZSTD_seekable_freeCStream(ZSTD_seekable_CStream* zcs) ZSTD_freeCStream(zcs->cstream); ZSTD_seekable_frameLog_freeVec(&zcs->framelog); free(zcs); - return 0; } @@ -152,9 +149,8 @@ size_t ZSTD_seekable_initCStream(ZSTD_seekable_CStream* zcs, return ERROR(frameParameter_unsupported); } - zcs->maxFrameSize = maxFrameSize - ? maxFrameSize - : ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE; + zcs->maxFrameSize = maxFrameSize ? 
+ maxFrameSize : ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE; zcs->framelog.checksumFlag = checksumFlag; if (zcs->framelog.checksumFlag) { @@ -180,7 +176,7 @@ size_t ZSTD_seekable_logFrame(ZSTD_frameLog* fl, if (fl->size == fl->capacity) { /* exponential size increase for constant amortized runtime */ size_t const newCapacity = fl->capacity * 2; - framelogEntry_t* const newEntries = realloc(fl->entries, + framelogEntry_t* const newEntries = (framelogEntry_t*)realloc(fl->entries, sizeof(framelogEntry_t) * newCapacity); if (newEntries == NULL) return ERROR(memory_allocation); @@ -204,7 +200,7 @@ size_t ZSTD_seekable_endFrame(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output /* end the frame */ size_t ret = ZSTD_endStream(zcs->cstream, output); - zcs->frameCSize += output->pos - prevOutPos; + zcs->frameCSize += (U32)(output->pos - prevOutPos); /* need to flush before doing the rest */ if (ret) return ret; @@ -223,9 +219,8 @@ size_t ZSTD_seekable_endFrame(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output zcs->frameCSize = 0; zcs->frameDSize = 0; - ZSTD_resetCStream(zcs->cstream, 0); - if (zcs->framelog.checksumFlag) - XXH64_reset(&zcs->xxhState, 0); + ZSTD_CCtx_reset(zcs->cstream, ZSTD_reset_session_only); + if (zcs->framelog.checksumFlag) XXH64_reset(&zcs->xxhState, 0); return 0; } @@ -235,6 +230,8 @@ size_t ZSTD_seekable_compressStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* const BYTE* const inBase = (const BYTE*) input->src + input->pos; size_t inLen = input->size - input->pos; + assert(zcs->maxFrameSize < INT_MAX); + ZSTD_CCtx_setParameter(zcs->cstream, ZSTD_c_srcSizeHint, (int)zcs->maxFrameSize); inLen = MIN(inLen, (size_t)(zcs->maxFrameSize - zcs->frameDSize)); /* if we haven't finished flushing the last frame, don't start writing a new one */ @@ -248,8 +245,8 @@ size_t ZSTD_seekable_compressStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* XXH64_update(&zcs->xxhState, inBase, inTmp.pos); } - zcs->frameCSize += output->pos - prevOutPos; - zcs->frameDSize += inTmp.pos; + zcs->frameCSize += (U32)(output->pos - prevOutPos); + zcs->frameDSize += (U32)inTmp.pos; input->pos += inTmp.pos; @@ -290,7 +287,7 @@ static inline size_t ZSTD_stwrite32(ZSTD_frameLog* fl, memcpy((BYTE*)output->dst + output->pos, tmp + (fl->seekTablePos - offset), lenWrite); output->pos += lenWrite; - fl->seekTablePos += lenWrite; + fl->seekTablePos += (U32)lenWrite; if (lenWrite < 4) return ZSTD_seekable_seekTableSize(fl) - fl->seekTablePos; } @@ -339,8 +336,7 @@ size_t ZSTD_seekable_writeSeekTable(ZSTD_frameLog* fl, ZSTD_outBuffer* output) if (output->size - output->pos < 1) return seekTableLen - fl->seekTablePos; if (fl->seekTablePos < seekTableLen - 4) { - BYTE sfd = 0; - sfd |= (fl->checksumFlag) << 7; + BYTE const sfd = (BYTE)((fl->checksumFlag) << 7); ((BYTE*)output->dst)[output->pos] = sfd; output->pos++; @@ -356,7 +352,7 @@ size_t ZSTD_seekable_writeSeekTable(ZSTD_frameLog* fl, ZSTD_outBuffer* output) size_t ZSTD_seekable_endStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output) { - if (!zcs->writingSeekTable && zcs->frameDSize) { + if (!zcs->writingSeekTable) { const size_t endFrame = ZSTD_seekable_endFrame(zcs, output); if (ZSTD_isError(endFrame)) return endFrame; /* return an accurate size hint */ diff --git a/contrib/seekable_format/zstdseek_decompress.c b/contrib/seekable_format/zstdseek_decompress.c index a0206592299..ab9088a1061 100644 --- a/contrib/seekable_format/zstdseek_decompress.c +++ b/contrib/seekable_format/zstdseek_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, 
Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -23,13 +23,67 @@
 # endif
 #endif
 
+/* ************************************************************
+* Detect POSIX version
+* PLATFORM_POSIX_VERSION = 0 for non-Unix e.g. Windows
+* PLATFORM_POSIX_VERSION = 1 for Unix-like but non-POSIX
+* PLATFORM_POSIX_VERSION > 1 is equal to found _POSIX_VERSION
+* Value of PLATFORM_POSIX_VERSION can be forced on command line
+***************************************************************/
+#ifndef PLATFORM_POSIX_VERSION
+
+# if (defined(__APPLE__) && defined(__MACH__)) || defined(__SVR4) || defined(_AIX) || defined(__hpux) /* POSIX.1-2001 (SUSv3) conformant */ \
+    || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) /* BSD distros */
+   /* exception rule : force posix version to 200112L,
+    * note: it's better to use unistd.h's _POSIX_VERSION whenever possible */
+#   define PLATFORM_POSIX_VERSION 200112L
+
+/* try to determine posix version through official unistd.h's _POSIX_VERSION (https://pubs.opengroup.org/onlinepubs/7908799/xsh/unistd.h.html).
+ * note : there is no simple way to know in advance if <unistd.h> is present or not on target system,
+ * Posix specification mandates its presence and its content, but target system must respect this spec.
+ * It's necessary to _not_ #include <unistd.h> whenever target OS is not unix-like,
+ * otherwise it will block preprocessing stage.
+ * The following list of build macros tries to "guess" if target OS is likely unix-like, and therefore can #include <unistd.h>
+ */
+# elif !defined(_WIN32) \
+    && ( defined(__unix__) || defined(__unix) \
+      || defined(__midipix__) || defined(__VMS) || defined(__HAIKU__) )
+
+#   if defined(__linux__) || defined(__linux) || defined(__CYGWIN__)
+#     ifndef _POSIX_C_SOURCE
+#       define _POSIX_C_SOURCE 200809L  /* feature test macro : https://www.gnu.org/software/libc/manual/html_node/Feature-Test-Macros.html */
+#     endif
+#   endif
+#   include <unistd.h>  /* declares _POSIX_VERSION */
+#   if defined(_POSIX_VERSION)  /* POSIX compliant */
+#     define PLATFORM_POSIX_VERSION _POSIX_VERSION
+#   else
+#     define PLATFORM_POSIX_VERSION 1
+#   endif
+
+#   ifdef __UCLIBC__
+#     ifndef __USE_MISC
+#       define __USE_MISC  /* enable st_mtim on uclibc */
+#     endif
+#   endif
+
+# else  /* non-unix target platform (like Windows) */
+#   define PLATFORM_POSIX_VERSION 0
+# endif
+
+#endif  /* PLATFORM_POSIX_VERSION */
+
+
 /* ************************************************************
 * Avoid fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW
 ***************************************************************/
-#if defined(_MSC_VER) && _MSC_VER >= 1400
+#if defined(LIBC_NO_FSEEKO)
+/* Some older libc implementations don't include these functions (e.g. Bionic < 24) */
+# define LONG_SEEK fseek
+#elif defined(_MSC_VER) && _MSC_VER >= 1400
 # define LONG_SEEK _fseeki64
 #elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */
-# define LONG_SEEK fseeko
+# define LONG_SEEK fseeko
 #elif defined(__MINGW32__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) && defined(__MSVCRT__)
 # define LONG_SEEK fseeko64
 #elif defined(_WIN32) && !defined(__DJGPP__)
@@ -60,7 +114,6 @@
 #include <stdlib.h>  /* malloc, free */
 
 #define XXH_STATIC_LINKING_ONLY
-#define XXH_NAMESPACE ZSTD_
 #include "xxhash.h"
 
 #define ZSTD_STATIC_LINKING_ONLY
@@ -79,6 +132,8 @@
 #define MIN(a, b) ((a) < (b) ? (a) : (b))
 #define MAX(a, b) ((a) > (b) ?
(a) : (b)) +#define ZSTD_SEEKABLE_NO_OUTPUT_PROGRESS_MAX 16 + /* Special-case callbacks for FILE* and in-memory modes, so that we can treat * them the same way as the advanced API */ static int ZSTD_seekable_read_FILE(void* opaque, void* buffer, size_t n) @@ -105,8 +160,9 @@ typedef struct { static int ZSTD_seekable_read_buff(void* opaque, void* buffer, size_t n) { - buffWrapper_t* buff = (buffWrapper_t*) opaque; - if (buff->size + n > buff->pos) return -1; + buffWrapper_t* const buff = (buffWrapper_t*)opaque; + assert(buff != NULL); + if (buff->pos + n > buff->size) return -1; memcpy(buffer, (const BYTE*)buff->ptr + buff->pos, n); buff->pos += n; return 0; @@ -116,15 +172,17 @@ static int ZSTD_seekable_seek_buff(void* opaque, long long offset, int origin) { buffWrapper_t* const buff = (buffWrapper_t*) opaque; unsigned long long newOffset; + assert(buff != NULL); switch (origin) { case SEEK_SET: - newOffset = offset; + assert(offset >= 0); + newOffset = (unsigned long long)offset; break; case SEEK_CUR: - newOffset = (unsigned long long)buff->pos + offset; + newOffset = (unsigned long long)((long long)buff->pos + offset); break; case SEEK_END: - newOffset = (unsigned long long)buff->size - offset; + newOffset = (unsigned long long)((long long)buff->size + offset); break; default: assert(0); /* not possible */ @@ -142,18 +200,18 @@ typedef struct { U32 checksum; } seekEntry_t; -typedef struct { +struct ZSTD_seekTable_s { seekEntry_t* entries; size_t tableLen; int checksumFlag; -} seekTable_t; +}; #define SEEKABLE_BUFF_SIZE ZSTD_BLOCKSIZE_MAX struct ZSTD_seekable_s { ZSTD_DStream* dstream; - seekTable_t seekTable; + ZSTD_seekTable seekTable; ZSTD_seekable_customFile src; U64 decompressedOffset; @@ -171,8 +229,7 @@ struct ZSTD_seekable_s { ZSTD_seekable* ZSTD_seekable_create(void) { - ZSTD_seekable* zs = malloc(sizeof(ZSTD_seekable)); - + ZSTD_seekable* const zs = (ZSTD_seekable*)malloc(sizeof(ZSTD_seekable)); if (zs == NULL) return NULL; /* also initializes stage to zsds_init */ @@ -193,7 +250,37 @@ size_t ZSTD_seekable_free(ZSTD_seekable* zs) ZSTD_freeDStream(zs->dstream); free(zs->seekTable.entries); free(zs); + return 0; +} + +ZSTD_seekTable* ZSTD_seekTable_create_fromSeekable(const ZSTD_seekable* zs) +{ + assert(zs != NULL); + if (zs->seekTable.entries == NULL) return NULL; + ZSTD_seekTable* const st = (ZSTD_seekTable*)malloc(sizeof(ZSTD_seekTable)); + if (st==NULL) return NULL; + + st->checksumFlag = zs->seekTable.checksumFlag; + st->tableLen = zs->seekTable.tableLen; + + /* Allocate an extra entry at the end to match logic of initial allocation */ + size_t const entriesSize = sizeof(seekEntry_t) * (zs->seekTable.tableLen + 1); + seekEntry_t* const entries = (seekEntry_t*)malloc(entriesSize); + if (entries==NULL) { + free(st); + return NULL; + } + + memcpy(entries, zs->seekTable.entries, entriesSize); + st->entries = entries; + return st; +} +size_t ZSTD_seekTable_free(ZSTD_seekTable* st) +{ + if (st == NULL) return 0; /* support free on null */ + free(st->entries); + free(st); return 0; } @@ -201,19 +288,24 @@ size_t ZSTD_seekable_free(ZSTD_seekable* zs) * Performs a binary search to find the last frame with a decompressed offset * <= pos * @return : the frame's index */ -unsigned ZSTD_seekable_offsetToFrameIndex(ZSTD_seekable* const zs, unsigned long long pos) +unsigned ZSTD_seekable_offsetToFrameIndex(const ZSTD_seekable* zs, unsigned long long pos) +{ + return ZSTD_seekTable_offsetToFrameIndex(&zs->seekTable, pos); +} + +unsigned ZSTD_seekTable_offsetToFrameIndex(const 
ZSTD_seekTable* st, unsigned long long pos) { U32 lo = 0; - U32 hi = (U32)zs->seekTable.tableLen; - assert(zs->seekTable.tableLen <= UINT_MAX); + U32 hi = (U32)st->tableLen; + assert(st->tableLen <= UINT_MAX); - if (pos >= zs->seekTable.entries[zs->seekTable.tableLen].dOffset) { - return (U32)zs->seekTable.tableLen; + if (pos >= st->entries[st->tableLen].dOffset) { + return (unsigned)st->tableLen; } while (lo + 1 < hi) { U32 const mid = lo + ((hi - lo) >> 1); - if (zs->seekTable.entries[mid].dOffset <= pos) { + if (st->entries[mid].dOffset <= pos) { lo = mid; } else { hi = mid; @@ -222,36 +314,61 @@ unsigned ZSTD_seekable_offsetToFrameIndex(ZSTD_seekable* const zs, unsigned long return lo; } -unsigned ZSTD_seekable_getNumFrames(ZSTD_seekable* const zs) +unsigned ZSTD_seekable_getNumFrames(const ZSTD_seekable* zs) +{ + return ZSTD_seekTable_getNumFrames(&zs->seekTable); +} + +unsigned ZSTD_seekTable_getNumFrames(const ZSTD_seekTable* st) +{ + assert(st->tableLen <= UINT_MAX); + return (unsigned)st->tableLen; +} + +unsigned long long ZSTD_seekable_getFrameCompressedOffset(const ZSTD_seekable* zs, unsigned frameIndex) +{ + return ZSTD_seekTable_getFrameCompressedOffset(&zs->seekTable, frameIndex); +} + +unsigned long long ZSTD_seekTable_getFrameCompressedOffset(const ZSTD_seekTable* st, unsigned frameIndex) +{ + if (frameIndex >= st->tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE; + return st->entries[frameIndex].cOffset; +} + +unsigned long long ZSTD_seekable_getFrameDecompressedOffset(const ZSTD_seekable* zs, unsigned frameIndex) +{ + return ZSTD_seekTable_getFrameDecompressedOffset(&zs->seekTable, frameIndex); +} + +unsigned long long ZSTD_seekTable_getFrameDecompressedOffset(const ZSTD_seekTable* st, unsigned frameIndex) { - assert(zs->seekTable.tableLen <= UINT_MAX); - return (unsigned)zs->seekTable.tableLen; + if (frameIndex >= st->tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE; + return st->entries[frameIndex].dOffset; } -unsigned long long ZSTD_seekable_getFrameCompressedOffset(ZSTD_seekable* const zs, unsigned frameIndex) +size_t ZSTD_seekable_getFrameCompressedSize(const ZSTD_seekable* zs, unsigned frameIndex) { - if (frameIndex >= zs->seekTable.tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE; - return zs->seekTable.entries[frameIndex].cOffset; + return ZSTD_seekTable_getFrameCompressedSize(&zs->seekTable, frameIndex); } -unsigned long long ZSTD_seekable_getFrameDecompressedOffset(ZSTD_seekable* const zs, unsigned frameIndex) +size_t ZSTD_seekTable_getFrameCompressedSize(const ZSTD_seekTable* st, unsigned frameIndex) { - if (frameIndex >= zs->seekTable.tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE; - return zs->seekTable.entries[frameIndex].dOffset; + if (frameIndex >= st->tableLen) return ERROR(frameIndex_tooLarge); + return st->entries[frameIndex + 1].cOffset - + st->entries[frameIndex].cOffset; } -size_t ZSTD_seekable_getFrameCompressedSize(ZSTD_seekable* const zs, unsigned frameIndex) +size_t ZSTD_seekable_getFrameDecompressedSize(const ZSTD_seekable* zs, unsigned frameIndex) { - if (frameIndex >= zs->seekTable.tableLen) return ERROR(frameIndex_tooLarge); - return zs->seekTable.entries[frameIndex + 1].cOffset - - zs->seekTable.entries[frameIndex].cOffset; + return ZSTD_seekTable_getFrameDecompressedSize(&zs->seekTable, frameIndex); } -size_t ZSTD_seekable_getFrameDecompressedSize(ZSTD_seekable* const zs, unsigned frameIndex) +size_t ZSTD_seekTable_getFrameDecompressedSize(const ZSTD_seekTable* st, unsigned frameIndex) { - if (frameIndex > zs->seekTable.tableLen) 
return ERROR(frameIndex_tooLarge); - return zs->seekTable.entries[frameIndex + 1].dOffset - - zs->seekTable.entries[frameIndex].dOffset; + if (frameIndex > st->tableLen) return ERROR(frameIndex_tooLarge); + return st->entries[frameIndex + 1].dOffset - + st->entries[frameIndex].dOffset; } static size_t ZSTD_seekable_loadSeekTable(ZSTD_seekable* zs) @@ -270,10 +387,9 @@ static size_t ZSTD_seekable_loadSeekTable(ZSTD_seekable* zs) checksumFlag = sfd >> 7; /* check reserved bits */ - if ((checksumFlag >> 2) & 0x1f) { + if ((sfd >> 2) & 0x1f) { return ERROR(corruption_detected); - } - } + } } { U32 const numFrames = MEM_readLE32(zs->inBuff); U32 const sizePerEntry = 8 + (checksumFlag?4:0); @@ -281,12 +397,9 @@ static size_t ZSTD_seekable_loadSeekTable(ZSTD_seekable* zs) U32 const frameSize = tableSize + ZSTD_seekTableFooterSize + ZSTD_SKIPPABLEHEADERSIZE; U32 remaining = frameSize - ZSTD_seekTableFooterSize; /* don't need to re-read footer */ - { - U32 const toRead = MIN(remaining, SEEKABLE_BUFF_SIZE); - + { U32 const toRead = MIN(remaining, SEEKABLE_BUFF_SIZE); CHECK_IO(src.seek(src.opaque, -(S64)frameSize, SEEK_END)); CHECK_IO(src.read(src.opaque, zs->inBuff, toRead)); - remaining -= toRead; } @@ -299,19 +412,15 @@ static size_t ZSTD_seekable_loadSeekTable(ZSTD_seekable* zs) { /* Allocate an extra entry at the end so that we can do size * computations on the last element without special case */ - seekEntry_t* entries = (seekEntry_t*)malloc(sizeof(seekEntry_t) * (numFrames + 1)); + seekEntry_t* const entries = (seekEntry_t*)malloc(sizeof(seekEntry_t) * (numFrames + 1)); U32 idx = 0; U32 pos = 8; - U64 cOffset = 0; U64 dOffset = 0; - if (!entries) { - free(entries); - return ERROR(memory_allocation); - } + if (entries == NULL) return ERROR(memory_allocation); /* compute cumulative positions */ for (; idx < numFrames; idx++) { @@ -379,33 +488,47 @@ size_t ZSTD_seekable_initAdvanced(ZSTD_seekable* zs, ZSTD_seekable_customFile sr size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t len, unsigned long long offset) { + unsigned long long const eos = zs->seekTable.entries[zs->seekTable.tableLen].dOffset; + if (offset + len > eos) { + len = eos - offset; + } + U32 targetFrame = ZSTD_seekable_offsetToFrameIndex(zs, offset); + U32 noOutputProgressCount = 0; + size_t srcBytesRead = 0; do { /* check if we can continue from a previous decompress job */ - if (targetFrame != zs->curFrame || offset != zs->decompressedOffset) { + if (targetFrame != zs->curFrame || offset < zs->decompressedOffset) { zs->decompressedOffset = zs->seekTable.entries[targetFrame].dOffset; zs->curFrame = targetFrame; + assert(zs->seekTable.entries[targetFrame].cOffset < LLONG_MAX); CHECK_IO(zs->src.seek(zs->src.opaque, - zs->seekTable.entries[targetFrame].cOffset, + (long long)zs->seekTable.entries[targetFrame].cOffset, SEEK_SET)); zs->in = (ZSTD_inBuffer){zs->inBuff, 0, 0}; XXH64_reset(&zs->xxhState, 0); - ZSTD_resetDStream(zs->dstream); + ZSTD_DCtx_reset(zs->dstream, ZSTD_reset_session_only); + if (zs->buffWrapper.size && srcBytesRead > zs->buffWrapper.size) { + return ERROR(seekableIO); + } } while (zs->decompressedOffset < offset + len) { size_t toRead; ZSTD_outBuffer outTmp; size_t prevOutPos; + size_t prevInPos; + size_t forwardProgress; if (zs->decompressedOffset < offset) { /* dummy decompressions until we get to the target offset */ - outTmp = (ZSTD_outBuffer){zs->outBuff, MIN(SEEKABLE_BUFF_SIZE, offset - zs->decompressedOffset), 0}; + outTmp = (ZSTD_outBuffer){zs->outBuff, (size_t) (MIN(SEEKABLE_BUFF_SIZE, 
offset - zs->decompressedOffset)), 0}; } else { - outTmp = (ZSTD_outBuffer){dst, len, zs->decompressedOffset - offset}; + outTmp = (ZSTD_outBuffer){dst, len, (size_t) (zs->decompressedOffset - offset)}; } prevOutPos = outTmp.pos; + prevInPos = zs->in.pos; toRead = ZSTD_decompressStream(zs->dstream, &outTmp, &zs->in); if (ZSTD_isError(toRead)) { return toRead; @@ -415,7 +538,16 @@ size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t len, unsign XXH64_update(&zs->xxhState, (BYTE*)outTmp.dst + prevOutPos, outTmp.pos - prevOutPos); } - zs->decompressedOffset += outTmp.pos - prevOutPos; + forwardProgress = outTmp.pos - prevOutPos; + if (forwardProgress == 0) { + if (noOutputProgressCount++ > ZSTD_SEEKABLE_NO_OUTPUT_PROGRESS_MAX) { + return ERROR(seekableIO); + } + } else { + noOutputProgressCount = 0; + } + zs->decompressedOffset += forwardProgress; + srcBytesRead += zs->in.pos - prevInPos; if (toRead == 0) { /* frame complete */ @@ -430,6 +562,8 @@ size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t len, unsign if (zs->decompressedOffset < offset + len) { /* go back to the start and force a reset of the stream */ targetFrame = ZSTD_seekable_offsetToFrameIndex(zs, zs->decompressedOffset); + /* in this case it will fail later with corruption_detected, since last block does not have checksum */ + assert(targetFrame != zs->seekTable.tableLen); } break; } @@ -441,7 +575,7 @@ size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t len, unsign zs->in.size = toRead; zs->in.pos = 0; } - } + } /* while (zs->decompressedOffset < offset + len) */ } while (zs->decompressedOffset != offset + len); return len; @@ -453,8 +587,7 @@ size_t ZSTD_seekable_decompressFrame(ZSTD_seekable* zs, void* dst, size_t dstSiz return ERROR(frameIndex_tooLarge); } - { - size_t const decompressedSize = + { size_t const decompressedSize = zs->seekTable.entries[frameIndex + 1].dOffset - zs->seekTable.entries[frameIndex].dOffset; if (dstSize < decompressedSize) { diff --git a/contrib/seqBench/Makefile b/contrib/seqBench/Makefile new file mode 100644 index 00000000000..e7f08a42cc8 --- /dev/null +++ b/contrib/seqBench/Makefile @@ -0,0 +1,58 @@ +# ################################################################ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). 
+# ################################################################
+
+PROGDIR = ../../programs
+LIBDIR = ../../lib
+
+LIBZSTD = $(LIBDIR)/libzstd.a
+
+CPPFLAGS+= -I$(LIBDIR) -I$(LIBDIR)/common -I$(LIBDIR)/dictBuilder -I$(PROGDIR)
+
+CFLAGS ?= -O3 -g
+CFLAGS += -std=gnu99
+DEBUGFLAGS= -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
+            -Wstrict-aliasing=1 -Wswitch-enum \
+            -Wstrict-prototypes -Wundef -Wpointer-arith \
+            -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
+            -Wredundant-decls
+CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS)
+
+
+default: seqBench
+
+all : seqBench
+
+seqBench: util.o timefn.o benchfn.o datagen.o xxhash.o seqBench.c $(LIBZSTD)
+	$(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@
+
+.PHONY: $(LIBZSTD)
+$(LIBZSTD):
+	$(MAKE) -C $(LIBDIR) libzstd.a CFLAGS="$(CFLAGS)"
+
+benchfn.o: $(PROGDIR)/benchfn.c
+	$(CC) $(CPPFLAGS) $(CFLAGS) $^ -c
+
+timefn.o: $(PROGDIR)/timefn.c
+	$(CC) $(CPPFLAGS) $(CFLAGS) $^ -c
+
+datagen.o: $(PROGDIR)/datagen.c
+	$(CC) $(CPPFLAGS) $(CFLAGS) $^ -c
+
+util.o: $(PROGDIR)/util.c
+	$(CC) $(CPPFLAGS) $(CFLAGS) $^ -c
+
+
+xxhash.o : $(LIBDIR)/common/xxhash.c
+	$(CC) $(CPPFLAGS) $(CFLAGS) $^ -c
+
+
+clean:
+	$(RM) *.o
+	$(MAKE) -C $(LIBDIR) clean > /dev/null
+	$(RM) seqBench
diff --git a/contrib/seqBench/seqBench.c b/contrib/seqBench/seqBench.c
new file mode 100644
index 00000000000..7efebec7bea
--- /dev/null
+++ b/contrib/seqBench/seqBench.c
@@ -0,0 +1,53 @@
+#define ZSTD_STATIC_LINKING_ONLY
+#include <zstd.h>
+#include <zstd_errors.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int main(int argc, char *argv[]) {
+    ZSTD_CCtx* zc = ZSTD_createCCtx();
+
+    if (argc != 2) {
+        printf("Usage: seqBench <file>\n"); // TODO provide the block delim option here
+        return 1;
+    }
+
+    FILE *f = fopen(argv[1], "rb");
+    fseek(f, 0, SEEK_END);
+    long inBufSize = ftell(f);
+    fseek(f, 0, SEEK_SET);
+
+    char *inBuf = malloc(inBufSize + 1);
+    if (fread(inBuf, 1, inBufSize, f) != (size_t)inBufSize) { // make sure the whole file was read
+        printf("ERROR: could not read %s\n", argv[1]);
+        return 1;
+    }
+    fclose(f);
+
+    size_t seqsSize = ZSTD_sequenceBound(inBufSize);
+    ZSTD_Sequence *seqs = (ZSTD_Sequence*)malloc(seqsSize * sizeof(ZSTD_Sequence));
+    char *outBuf = malloc(ZSTD_compressBound(inBufSize));
+
+    size_t numSeqs = ZSTD_generateSequences(zc, seqs, seqsSize, inBuf, inBufSize); // keep the actual sequence count
+    if (ZSTD_isError(numSeqs)) {
+        printf("ERROR: %s\n", ZSTD_getErrorName(numSeqs));
+        return 1;
+    }
+    ZSTD_CCtx_setParameter(zc, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
+    size_t outBufSize = ZSTD_compressSequences(zc, outBuf, ZSTD_compressBound(inBufSize), seqs, numSeqs, inBuf, inBufSize);
+    if (ZSTD_isError(outBufSize)) {
+        printf("ERROR: %s\n", ZSTD_getErrorName(outBufSize));
+        return 1;
+    }
+
+    char *validationBuf = malloc(inBufSize);
+    ZSTD_decompress(validationBuf, inBufSize, outBuf, outBufSize);
+
+    if (memcmp(inBuf, validationBuf, inBufSize) == 0) {
+        printf("Compression and decompression were successful!\n");
+    } else {
+        printf("ERROR: input and validation buffers don't match!\n");
+        for (long i = 0; i < inBufSize; i++) {
+            if (inBuf[i] != validationBuf[i]) {
+                printf("First bad index: %ld\n", i);
+                break;
+            }
+        }
+    }
+
+    return 0;
+}
diff --git a/doc/README.md b/doc/README.md
index bb7a3e4902e..8f3babcdbb2 100644
--- a/doc/README.md
+++ b/doc/README.md
@@ -5,8 +5,9 @@ This directory contains material defining the Zstandard format,
 as well as detailed instructions to use `zstd` library.
 
 __`zstd_manual.html`__ : Documentation of `zstd.h` API, in html format.
-Click on this link: [http://zstd.net/zstd_manual.html](http://zstd.net/zstd_manual.html)
-to display documentation of latest release in readable format within a browser.
+Unfortunately, Github doesn't display `html` files in rendered form, only as source code.
+For a readable display of the html API documentation of the latest release,
+use this link: [https://raw.githack.com/facebook/zstd/release/doc/zstd_manual.html](https://raw.githack.com/facebook/zstd/release/doc/zstd_manual.html) .
 
 __`zstd_compression_format.md`__ : This document defines the Zstandard compression format.
 Compliant decoders must adhere to this document,
diff --git a/doc/decompressor_errata.md b/doc/decompressor_errata.md
new file mode 100644
index 00000000000..b570f73145d
--- /dev/null
+++ b/doc/decompressor_errata.md
@@ -0,0 +1,148 @@
+Decompressor Errata
+===================
+
+This document captures known decompressor bugs, where the decompressor rejects a valid zstd frame.
+Each entry will contain:
+1. The last affected decompressor versions.
+2. The decompressor components affected.
+3. Whether the compressed frame could ever be produced by the reference compressor.
+4. An example frame (a hexadecimal string when it can be short enough, a link to a golden file otherwise).
+5. A description of the bug.
+
+The document is in reverse chronological order, with the bugs that affect the most recent zstd decompressor versions listed first.
+
+
+No sequence using the 2-bytes format
+------------------------------------------------
+
+**Last affected version**: v1.5.5
+
+**Affected decompressor component(s)**: Library & CLI
+
+**Produced by the reference compressor**: No
+
+**Example Frame**: see zstd/tests/golden-decompression/zeroSeq_2B.zst
+
+The zstd decoder incorrectly expects FSE tables when there are 0 sequences present in the block
+if the value 0 is encoded using the 2-bytes format.
+Instead, it should immediately end the sequence section, and move on to the next block.
+
+This situation was never generated by the reference compressor,
+because representing 0 sequences with the 2-bytes format is inefficient
+(the 1-byte format is always used in this case).
+
+
+Compressed block with a size of exactly 128 KB
+------------------------------------------------
+
+**Last affected version**: v1.5.2
+
+**Affected decompressor component(s)**: Library & CLI
+
+**Produced by the reference compressor**: No
+
+**Example Frame**: see zstd/tests/golden-decompression/block-128k.zst
+
+The zstd decoder incorrectly rejected blocks of type `Compressed_Block` when their size was exactly 128 KB.
+Note that `128 KB - 1` was accepted, and `128 KB + 1` is forbidden by the spec.
+
+This type of block was never generated by the reference compressor.
+
+These blocks used to be disallowed by the spec up until spec version 0.3.2 when the restriction was lifted by [PR#1689](https://github.com/facebook/zstd/pull/1689).
+
+> A Compressed_Block has the extra restriction that Block_Size is always strictly less than the decompressed size. If this condition cannot be respected, the block must be sent uncompressed instead (Raw_Block).
+
+
+Compressed block with 0 literals and 0 sequences
+------------------------------------------------
+
+**Last affected version**: v1.5.2
+
+**Affected decompressor component(s)**: Library & CLI
+
+**Produced by the reference compressor**: No
+
+**Example Frame**: `28b5 2ffd 2000 1500 0000 00`
+
+The zstd decoder incorrectly rejected blocks of type `Compressed_Block` that encode literals as `Raw_Literals_Block` with no literals, and have no sequences.
+
+This type of block was never generated by the reference compressor.
+ +Additionally, these blocks were disallowed by the spec up until spec version 0.3.2 when the restriction was lifted by [PR#1689](https://github.com/facebook/zstd/pull/1689). + +> A Compressed_Block has the extra restriction that Block_Size is always strictly less than the decompressed size. If this condition cannot be respected, the block must be sent uncompressed instead (Raw_Block). + + +First block is RLE block +------------------------ + +**Last affected version**: v1.4.3 + +**Affected decompressor component(s)**: CLI only + +**Produced by the reference compressor**: No + +**Example Frame**: `28b5 2ffd a001 0002 0002 0010 000b 0000 00` + +The zstd CLI decompressor rejected cases where the first block was an RLE block whose `Block_Size` is 131072, and the frame contains more than one block. +This only affected the zstd CLI, and not the library. + +The example is an RLE block with 131072 bytes, followed by a second RLE block with 1 byte. + +The compressor currently works around this limitation by explicitly avoiding producing RLE blocks as the first +block. + +https://github.com/facebook/zstd/blob/8814aa5bfa74f05a86e55e9d508da177a893ceeb/lib/compress/zstd_compress.c#L3527-L3535 + + +Tiny FSE Table & Block +---------------------- + +**Last affected version**: v1.3.4 + +**Affected decompressor component(s)**: Library & CLI + +**Produced by the reference compressor**: Possibly until version v1.3.4, but probably never + +**Example Frame**: `28b5 2ffd 2027 c500 0080 f3f1 f0ec ebc6 c5c7 f09d 4300 0000 e0e0 0658 0100 603e 52` + +The zstd library rejected blocks of type `Compressed_Block` whose offset of the last table with type `FSE_Compressed_Mode` was less than 4 bytes from the end of the block. + +In more depth, let `Last_Table_Offset` be the offset in the compressed block (excluding the header) that +the last table with type `FSE_Compressed_Mode` started. If `Block_Content - Last_Table_Offset < 4` then +the buggy zstd decompressor would reject the block. This occurs when the last serialized table is 2 bytes +and the bitstream size is 1 byte. + +For example: +* There is 1 sequence in the block +* `Literals_Lengths_Mode` is `FSE_Compressed_Mode` & the serialized table size is 2 bytes +* `Offsets_Mode` is `Predefined_Mode` +* `Match_Lengths_Mode` is `Predefined_Mode` +* The bitstream is 1 byte. E.g. there is only one sequence and it fits in 1 byte. + +The total `Block_Content` is `5` bytes, and `Last_Table_Offset` is `2`. + +See the compressor workaround code: + +https://github.com/facebook/zstd/blob/8814aa5bfa74f05a86e55e9d508da177a893ceeb/lib/compress/zstd_compress.c#L2667-L2682 + +Magicless format +---------------------- + +**Last affected version**: v1.5.5 + +**Affected decompressor component(s)**: Library + +**Produced by the reference compressor**: Yes (example: https://gist.github.com/embg/9940726094f4cf2cef162cffe9319232) + +**Example Frame**: `27 b5 2f fd 00 03 19 00 00 66 6f 6f 3f ba c4 59` + +v1.5.6 fixes several bugs in which the magicless-format decoder rejects valid frames. +These include but are not limited to: +* Valid frames that happen to begin with a legacy magic number (little-endian) +* Valid frames that happen to begin with a skippable magic number (little-endian) + +If you are affected by this issue and cannot update to v1.5.6 or later, there is a +workaround to recover affected data. Simply prepend the ZSTD magic number +`0xFD2FB528` (little-endian) to your data and decompress using the standard-format +decoder. 
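The recovery workaround described in this last entry is mechanical enough to sketch in code. The helper below is illustrative only (our naming, not part of zstd or of the patch above): it copies the standard magic number, little-endian, in front of a magicless frame, then hands the result to the regular decoder; the caller checks the result with `ZSTD_isError()` as usual.

    /* Hedged sketch of the magicless-frame recovery workaround described above.
     * Assumes a standard libzstd installation; names are illustrative. */
    #include <stdlib.h>
    #include <string.h>
    #include <zstd.h>

    size_t decompress_magicless(void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize)
    {
        unsigned char const magic[4] = { 0x28, 0xb5, 0x2f, 0xfd };  /* 0xFD2FB528, little-endian */
        unsigned char* const tmp = (unsigned char*)malloc(srcSize + 4);
        size_t result;
        if (tmp == NULL) return (size_t)-1;  /* reads as a zstd error code */
        memcpy(tmp, magic, 4);               /* prepend the standard magic number */
        memcpy(tmp + 4, src, srcSize);       /* then the magicless frame */
        result = ZSTD_decompress(dst, dstCapacity, tmp, srcSize + 4);
        free(tmp);
        return result;
    }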
diff --git a/doc/decompressor_permissive.md b/doc/decompressor_permissive.md
new file mode 100644
index 00000000000..164d6c86d61
--- /dev/null
+++ b/doc/decompressor_permissive.md
@@ -0,0 +1,80 @@
+Decompressor Permissiveness to Invalid Data
+===========================================
+
+This document describes the behavior of the reference decompressor in cases
+where it accepts formally invalid data instead of reporting an error.
+
+While the reference decompressor *must* decode any compliant frame following
+the specification, its ability to detect erroneous data is on a best-effort
+basis: the decoder may accept input data that would be formally invalid,
+when it causes no risk to the decoder, and when its detection would cost too
+much complexity or too much of a speed regression.
+
+In practice, the vast majority of invalid data is detected, if only because
+many corruption events are dangerous for the decoder process (such as
+requesting an out-of-bounds memory access) and many more are easy to check.
+
+This document lists a few known cases where invalid data was formerly accepted
+by the decoder, and what has changed since.
+
+
+Truncated Huffman states
+------------------------
+
+**Last affected version**: v1.5.6
+
+**Produced by the reference compressor**: No
+
+**Example Frame**: `28b5 2ffd 0000 5500 0072 8001 0420 7e1f 02aa 00`
+
+When using FSE-compressed Huffman weights, the compressed weight bitstream
+could contain fewer bits than necessary to decode the initial states.
+
+The reference decompressor up to v1.5.6 will decode truncated or missing
+initial states as zero, which can result in a valid Huffman tree if only
+the second state is truncated.
+
+In newer versions, truncated initial states are reported as a corruption
+error by the decoder.
+
+
+Offset == 0
+-----------
+
+**Last affected version**: v1.5.5
+
+**Produced by the reference compressor**: No
+
+**Example Frame**: `28b5 2ffd 0000 4500 0008 0002 002f 430b ae`
+
+If a sequence is decoded with `literals_length = 0` and `offset_value = 3`
+while `Repeated_Offset_1 = 1`, the computed offset will be `0`, which is
+invalid.
+
+The reference decompressor up to v1.5.5 processes this case as if the computed
+offset was `1`, including inserting `1` into the repeated offset list.
+This prevents the output buffer from remaining uninitialized, thus denying a
+potential attack vector from an untrusted source.
+However, in the rare case where this scenario would be the outcome of a
+transmission or storage error, the decoder relies on the checksum to detect
+the error.
+
+In newer versions, this case is always detected and reported as a corruption error.
+
+
+Non-zero reserved bits
+----------------------
+
+**Last affected version**: v1.5.5
+
+**Produced by the reference compressor**: No
+
+The Sequences section of each block has a header, and one of its elements is a
+byte, which describes the compression mode of each symbol.
+This byte contains 2 reserved bits which must be set to zero.
+
+The reference decompressor up to v1.5.5 simply ignores these 2 bits.
+This behavior has no consequence for the rest of the frame decoding process.
+
+In newer versions, the 2 reserved bits are actively checked for value zero,
+and the decoder reports a corruption error if they are not.
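To make the `Offset == 0` entry above concrete, here is a small sketch of the repeat-offset resolution rule as given by the Zstandard format specification (our code, not the library's internals): with `literals_length == 0`, `offset_value == 3` maps to `Repeated_Offset_1 - 1`, which produces the invalid offset `0` whenever `Repeated_Offset_1 == 1`.

    /* Sketch of the spec's repeat-offset resolution; rep[0..2] hold
     * Repeated_Offset_1..3. Illustrative, not the library's internal code. */
    static unsigned resolve_offset(unsigned offset_value, unsigned literals_length,
                                   const unsigned rep[3])
    {
        if (offset_value > 3) return offset_value - 3;  /* a regular offset */
        /* offset_value 1-3 are repeat codes; literals_length == 0 shifts them */
        if (literals_length != 0) return rep[offset_value - 1];
        if (offset_value == 1) return rep[1];  /* Repeated_Offset_2 */
        if (offset_value == 2) return rep[2];  /* Repeated_Offset_3 */
        return rep[0] - 1;  /* offset_value == 3 : Repeated_Offset_1 - 1 */
    }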
diff --git a/doc/educational_decoder/.gitignore b/doc/educational_decoder/.gitignore
new file mode 100644
index 00000000000..b801306fada
--- /dev/null
+++ b/doc/educational_decoder/.gitignore
@@ -0,0 +1,2 @@
+# Build artifacts
+harness
diff --git a/doc/educational_decoder/Makefile b/doc/educational_decoder/Makefile
index c1d2c4cc42f..b3154d9afc0 100644
--- a/doc/educational_decoder/Makefile
+++ b/doc/educational_decoder/Makefile
@@ -1,15 +1,34 @@
+# ################################################################
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
+# You may select, at your option, one of the above-listed licenses.
+# ################################################################
+
+ZSTD ?= zstd   # note: requires zstd installation on local system
+
+UNAME ?= $(shell sh -c 'MSYSTEM="MSYS" uname')
+ifeq ($(UNAME), SunOS)
+DIFF ?= gdiff
+else
+DIFF ?= diff
+endif
+
 HARNESS_FILES=*.c
 
 MULTITHREAD_LDFLAGS = -pthread
 DEBUGFLAGS= -g -DZSTD_DEBUG=1
 CPPFLAGS += -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \
             -I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR)
-CFLAGS ?= -O3
+CFLAGS ?= -O2
 CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
-          -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
-          -Wstrict-prototypes -Wundef \
+          -Wstrict-aliasing=1 -Wswitch-enum \
+          -Wredundant-decls -Wstrict-prototypes -Wundef \
           -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
-          -Wredundant-decls
+          -std=c99
 CFLAGS += $(DEBUGFLAGS)
 CFLAGS += $(MOREFLAGS)
 FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) $(MULTITHREAD_LDFLAGS)
@@ -18,17 +37,26 @@ harness: $(HARNESS_FILES)
 	$(CC) $(FLAGS) $^ -o $@
 
 clean:
-	@$(RM) -f harness
-	@$(RM) -rf harness.dSYM
+	@$(RM) harness *.o
+	@$(RM) -rf harness.dSYM  # macOS-specific
 
 test: harness
-	@zstd README.md -o tmp.zst
+	#
+	# Testing single-file decompression with the educational decoder
+	#
+	@$(ZSTD) -f README.md -o tmp.zst
 	@./harness tmp.zst tmp
-	@diff -s tmp README.md
-	@$(RM) -f tmp*
-	@zstd --train harness.c zstd_decompress.c zstd_decompress.h README.md
-	@zstd -D dictionary README.md -o tmp.zst
+	@$(DIFF) -s tmp README.md
+	@$(RM) tmp*
+	#
+	# Testing dictionary decompression with the educational decoder
+	#
+	# note : files are presented multiple times for training, to reach the minimum threshold
+	@$(ZSTD) --train harness.c zstd_decompress.c zstd_decompress.h README.md \
+	         harness.c zstd_decompress.c zstd_decompress.h README.md \
+	         harness.c zstd_decompress.c zstd_decompress.h README.md \
+	         -o dictionary
+	@$(ZSTD) -f README.md -D dictionary -o tmp.zst
 	@./harness tmp.zst tmp dictionary
-	@diff -s tmp README.md
-	@$(RM) -f tmp* dictionary
-	@make clean
+	@$(DIFF) -s tmp README.md
+	@$(RM) tmp* dictionary
diff --git a/doc/educational_decoder/README.md b/doc/educational_decoder/README.md
index e3b9bf58e5a..c89451ca078 100644
--- a/doc/educational_decoder/README.md
+++ b/doc/educational_decoder/README.md
@@ -13,6 +13,13 @@ It also contains implementations of Huffman and FSE table decoding.
[Zstandard format specification]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md
 [format specification]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md
 
+While the library's primary objective is code clarity,
+it also happens to compile into a small object file.
+The object file can be made even smaller by removing error messages,
+using the macro directive `ZDEC_NO_MESSAGE` at compilation time.
+This can be reduced even further by foregoing dictionary support,
+by defining `ZDEC_NO_DICTIONARY`.
+
 `harness.c` provides a simple test harness around the decoder:
 
     harness <input-file> <output-file> [dictionary]
diff --git a/doc/educational_decoder/harness.c b/doc/educational_decoder/harness.c
index 47882b16895..12c5a801beb 100644
--- a/doc/educational_decoder/harness.c
+++ b/doc/educational_decoder/harness.c
@@ -1,10 +1,11 @@
 /*
- * Copyright (c) 2017-present, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
  * LICENSE file in the root directory of this source tree) and the GPLv2 (found
  * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
  */
 
 #include <stdio.h>
@@ -21,105 +22,98 @@ typedef unsigned char u8;
 
 // Protect against allocating too much memory for output
 #define MAX_OUTPUT_SIZE ((size_t)1024 * 1024 * 1024)
 
-u8 *input;
-u8 *output;
-u8 *dict;
+// Error message then exit
+#define ERR_OUT(...) { fprintf(stderr, __VA_ARGS__); exit(1); }
 
-size_t read_file(const char *path, u8 **ptr) {
-    FILE *f = fopen(path, "rb");
-    if (!f) {
-        fprintf(stderr, "failed to open file %s\n", path);
-        exit(1);
-    }
+
+typedef struct {
+    u8* address;
+    size_t size;
+} buffer_s;
+
+static void freeBuffer(buffer_s b) { free(b.address); }
+
+static buffer_s read_file(const char *path)
+{
+    FILE* const f = fopen(path, "rb");
+    if (!f) ERR_OUT("failed to open file %s \n", path);
 
     fseek(f, 0L, SEEK_END);
-    size_t size = ftell(f);
+    size_t const size = (size_t)ftell(f);
     rewind(f);
 
-    *ptr = malloc(size);
-    if (!ptr) {
-        fprintf(stderr, "failed to allocate memory to hold %s\n", path);
-        exit(1);
-    }
+    void* const ptr = malloc(size);
+    if (!ptr) ERR_OUT("failed to allocate memory to hold %s \n", path);
 
-    size_t pos = 0;
-    while (!feof(f)) {
-        size_t read = fread(&(*ptr)[pos], 1, size, f);
-        if (ferror(f)) {
-            fprintf(stderr, "error while reading file %s\n", path);
-            exit(1);
-        }
-        pos += read;
-    }
+    size_t const read = fread(ptr, 1, size, f);
+    if (read != size) ERR_OUT("error while reading file %s \n", path);
 
     fclose(f);
-
-    return pos;
+    buffer_s const b = { ptr, size };
+    return b;
 }
 
-void write_file(const char *path, const u8 *ptr, size_t size) {
-    FILE *f = fopen(path, "wb");
+static void write_file(const char* path, const u8* ptr, size_t size)
+{
+    FILE* const f = fopen(path, "wb");
+    if (!f) ERR_OUT("failed to open file %s \n", path);
 
     size_t written = 0;
     while (written < size) {
-        written += fwrite(&ptr[written], 1, size, f);
-        if (ferror(f)) {
-            fprintf(stderr, "error while writing file %s\n", path);
-            exit(1);
-        }
+        written += fwrite(ptr+written, 1, size, f);
+        if (ferror(f)) ERR_OUT("error while writing file %s\n", path);
     }
 
     fclose(f);
 }
 
-int main(int argc, char **argv) {
-    if (argc < 3) {
-        fprintf(stderr, "usage: %s <input-file> <output-file> [dictionary]\n",
-                argv[0]);
+int main(int argc, char **argv)
+{
+    if (argc < 3)
+        ERR_OUT("usage: %s <input-file> <output-file> [dictionary] \n", argv[0]);
 
-        return 1;
-    }
+    buffer_s const input = read_file(argv[1]);
-    size_t input_size = read_file(argv[1], &input);
-    size_t dict_size = 0;
+    buffer_s dict = { NULL, 0 };
     if (argc >= 4) {
-        dict_size = read_file(argv[3], &dict);
+        dict = read_file(argv[3]);
     }
 
-    size_t decompressed_size = ZSTD_get_decompressed_size(input, input_size);
-    if (decompressed_size == (size_t)-1) {
-        decompressed_size = MAX_COMPRESSION_RATIO * input_size;
+    size_t out_capacity = ZSTD_get_decompressed_size(input.address, input.size);
+    if (out_capacity == (size_t)-1) {
+        out_capacity = MAX_COMPRESSION_RATIO * input.size;
         fprintf(stderr, "WARNING: Compressed data does not contain "
                         "decompressed size, going to assume the compression "
                         "ratio is at most %d (decompressed size of at most "
-                        "%zu)\n",
-                MAX_COMPRESSION_RATIO, decompressed_size);
-    }
-    if (decompressed_size > MAX_OUTPUT_SIZE) {
-        fprintf(stderr,
-                "Required output size too large for this implementation\n");
-        return 1;
-    }
-    output = malloc(decompressed_size);
-    if (!output) {
-        fprintf(stderr, "failed to allocate memory\n");
-        return 1;
+                        "%u) \n",
+                MAX_COMPRESSION_RATIO, (unsigned)out_capacity);
     }
+    if (out_capacity > MAX_OUTPUT_SIZE)
+        ERR_OUT("Required output size too large for this implementation \n");
+
+    u8* const output = malloc(out_capacity);
+    if (!output) ERR_OUT("failed to allocate memory \n");
 
     dictionary_t* const parsed_dict = create_dictionary();
-    if (dict) {
-        parse_dictionary(parsed_dict, dict, dict_size);
+    if (dict.size) {
+#if defined (ZDEC_NO_DICTIONARY)
+        printf("dict.size = %zu \n", dict.size);
+        ERR_OUT("no dictionary support \n");
+#else
+        parse_dictionary(parsed_dict, dict.address, dict.size);
+#endif
     }
 
-    size_t decompressed =
-        ZSTD_decompress_with_dict(output, decompressed_size,
-                                  input, input_size, parsed_dict);
+    size_t const decompressed_size =
+        ZSTD_decompress_with_dict(output, out_capacity,
+                                  input.address, input.size,
+                                  parsed_dict);
 
     free_dictionary(parsed_dict);
 
-    write_file(argv[2], output, decompressed);
+    write_file(argv[2], output, decompressed_size);
 
-    free(input);
+    freeBuffer(input);
+    freeBuffer(dict);
     free(output);
-    free(dict);
-    input = output = dict = NULL;
+
     return 0;
 }
diff --git a/doc/educational_decoder/zstd_decompress.c b/doc/educational_decoder/zstd_decompress.c
index 8e231bbb5a9..839e085b481 100644
--- a/doc/educational_decoder/zstd_decompress.c
+++ b/doc/educational_decoder/zstd_decompress.c
@@ -1,34 +1,52 @@
 /*
- * Copyright (c) 2017-present, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
 */
 
 /// Zstandard educational decoder implementation
 /// See https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md
 
-#include <stdint.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
+#include <stdint.h>   // uint8_t, etc.
+#include <stdlib.h>   // malloc, free, exit
+#include <stdio.h>    // fprintf
+#include <string.h>   // memset, memcpy
 #include "zstd_decompress.h"
 
-/******* UTILITY MACROS AND TYPES *********************************************/
-// Max block size decompressed size is 128 KB and literal blocks can't be
-// larger than their block
-#define MAX_LITERALS_SIZE ((size_t)128 * 1024)
+/******* IMPORTANT CONSTANTS *********************************************/
+
+// Zstandard frame
+// "Magic_Number
+// 4 Bytes, little-endian format.
Value : 0xFD2FB528" +#define ZSTD_MAGIC_NUMBER 0xFD2FB528U + +// The size of `Block_Content` is limited by `Block_Maximum_Size`, +#define ZSTD_BLOCK_SIZE_MAX ((size_t)128 * 1024) + +// literal blocks can't be larger than their block +#define MAX_LITERALS_SIZE ZSTD_BLOCK_SIZE_MAX + + +/******* UTILITY MACROS AND TYPES *********************************************/ #define MAX(a, b) ((a) > (b) ? (a) : (b)) #define MIN(a, b) ((a) < (b) ? (a) : (b)) +#if defined(ZDEC_NO_MESSAGE) +#define MESSAGE(...) +#else +#define MESSAGE(...) fprintf(stderr, "" __VA_ARGS__) +#endif + /// This decoder calls exit(1) when it encounters an error, however a production /// library should propagate error codes #define ERROR(s) \ do { \ - fprintf(stderr, "Error: %s\n", s); \ + MESSAGE("Error: %s\n", s); \ exit(1); \ } while (0) #define INP_SIZE() \ @@ -39,12 +57,12 @@ #define BAD_ALLOC() ERROR("Memory allocation error") #define IMPOSSIBLE() ERROR("An impossibility has occurred") -typedef uint8_t u8; +typedef uint8_t u8; typedef uint16_t u16; typedef uint32_t u32; typedef uint64_t u64; -typedef int8_t i8; +typedef int8_t i8; typedef int16_t i16; typedef int32_t i32; typedef int64_t i64; @@ -176,10 +194,6 @@ static void HUF_init_dtable_usingweights(HUF_dtable *const table, /// Free the malloc'ed parts of a decoding table static void HUF_free_dtable(HUF_dtable *const dtable); - -/// Deep copy a decoding table, so that it can be used and free'd without -/// impacting the source table. -static void HUF_copy_dtable(HUF_dtable *const dst, const HUF_dtable *const src); /*** END HUFFMAN PRIMITIVES ***********/ /*** FSE PRIMITIVES *******************/ @@ -241,10 +255,6 @@ static void FSE_init_dtable_rle(FSE_dtable *const dtable, const u8 symb); /// Free the malloc'ed parts of a decoding table static void FSE_free_dtable(FSE_dtable *const dtable); - -/// Deep copy a decoding table, so that it can be used and free'd without -/// impacting the source table. -static void FSE_copy_dtable(FSE_dtable *const dst, const FSE_dtable *const src); /*** END FSE PRIMITIVES ***************/ /******* END IMPLEMENTATION PRIMITIVE PROTOTYPES ******************************/ @@ -373,7 +383,7 @@ static void execute_match_copy(frame_context_t *const ctx, size_t offset, size_t ZSTD_decompress(void *const dst, const size_t dst_len, const void *const src, const size_t src_len) { - dictionary_t* uninit_dict = create_dictionary(); + dictionary_t* const uninit_dict = create_dictionary(); size_t const decomp_size = ZSTD_decompress_with_dict(dst, dst_len, src, src_len, uninit_dict); free_dictionary(uninit_dict); @@ -395,7 +405,7 @@ size_t ZSTD_decompress_with_dict(void *const dst, const size_t dst_len, /* this decoder assumes decompression of a single frame */ decode_frame(&out, &in, parsed_dict); - return out.ptr - (u8 *)dst; + return (size_t)(out.ptr - (u8 *)dst); } /******* FRAME DECODING ******************************************************/ @@ -416,13 +426,8 @@ static void decompress_data(frame_context_t *const ctx, ostream_t *const out, static void decode_frame(ostream_t *const out, istream_t *const in, const dictionary_t *const dict) { - const u32 magic_number = IO_read_bits(in, 32); - // Zstandard frame - // - // "Magic_Number - // - // 4 Bytes, little-endian format. 
Value : 0xFD2FB528" - if (magic_number == 0xFD2FB528U) { + const u32 magic_number = (u32)IO_read_bits(in, 32); + if (magic_number == ZSTD_MAGIC_NUMBER) { // ZSTD frame decode_data_frame(out, in, dict); @@ -497,7 +502,7 @@ static void parse_frame_header(frame_header_t *const header, // 3 Reserved_bit // 2 Content_Checksum_flag // 1-0 Dictionary_ID_flag" - const u8 descriptor = IO_read_bits(in, 8); + const u8 descriptor = (u8)IO_read_bits(in, 8); // decode frame header descriptor into flags const u8 frame_content_size_flag = descriptor >> 6; @@ -521,7 +526,7 @@ static void parse_frame_header(frame_header_t *const header, // // Bit numbers 7-3 2-0 // Field name Exponent Mantissa" - u8 window_descriptor = IO_read_bits(in, 8); + u8 window_descriptor = (u8)IO_read_bits(in, 8); u8 exponent = window_descriptor >> 3; u8 mantissa = window_descriptor & 7; @@ -541,7 +546,7 @@ static void parse_frame_header(frame_header_t *const header, const int bytes_array[] = {0, 1, 2, 4}; const int bytes = bytes_array[dictionary_id_flag]; - header->dictionary_id = IO_read_bits(in, bytes * 8); + header->dictionary_id = (u32)IO_read_bits(in, bytes * 8); } else { header->dictionary_id = 0; } @@ -576,43 +581,6 @@ static void parse_frame_header(frame_header_t *const header, } } -/// A dictionary acts as initializing values for the frame context before -/// decompression, so we implement it by applying it's predetermined -/// tables and content to the context before beginning decompression -static void frame_context_apply_dict(frame_context_t *const ctx, - const dictionary_t *const dict) { - // If the content pointer is NULL then it must be an empty dict - if (!dict || !dict->content) - return; - - // If the requested dictionary_id is non-zero, the correct dictionary must - // be present - if (ctx->header.dictionary_id != 0 && - ctx->header.dictionary_id != dict->dictionary_id) { - ERROR("Wrong dictionary provided"); - } - - // Copy the dict content to the context for references during sequence - // execution - ctx->dict_content = dict->content; - ctx->dict_content_len = dict->content_size; - - // If it's a formatted dict copy the precomputed tables in so they can - // be used in the table repeat modes - if (dict->dictionary_id != 0) { - // Deep copy the entropy tables so they can be freed independently of - // the dictionary struct - HUF_copy_dtable(&ctx->literals_dtable, &dict->literals_dtable); - FSE_copy_dtable(&ctx->ll_dtable, &dict->ll_dtable); - FSE_copy_dtable(&ctx->of_dtable, &dict->of_dtable); - FSE_copy_dtable(&ctx->ml_dtable, &dict->ml_dtable); - - // Copy the repeated offsets - memcpy(ctx->previous_offsets, dict->previous_offsets, - sizeof(ctx->previous_offsets)); - } -} - /// Decompress the data from a frame block by block static void decompress_data(frame_context_t *const ctx, ostream_t *const out, istream_t *const in) { @@ -633,8 +601,8 @@ static void decompress_data(frame_context_t *const ctx, ostream_t *const out, // // The next 2 bits represent the Block_Type, while the remaining 21 bits // represent the Block_Size. Format is little-endian." 
- last_block = IO_read_bits(in, 1); - const int block_type = IO_read_bits(in, 2); + last_block = (int)IO_read_bits(in, 1); + const int block_type = (int)IO_read_bits(in, 2); const size_t block_len = IO_read_bits(in, 21); switch (block_type) { @@ -748,8 +716,8 @@ static size_t decode_literals(frame_context_t *const ctx, istream_t *const in, // types" // // size_format takes between 1 and 2 bits - int block_type = IO_read_bits(in, 2); - int size_format = IO_read_bits(in, 2); + int block_type = (int)IO_read_bits(in, 2); + int size_format = (int)IO_read_bits(in, 2); if (block_type <= 1) { // Raw or RLE literals block @@ -833,6 +801,7 @@ static size_t decode_literals_compressed(frame_context_t *const ctx, // bits (0-1023)." num_streams = 1; // Fall through as it has the same size format + /* fallthrough */ case 1: // "4 streams. Both Compressed_Size and Regenerated_Size use 10 bits // (0-1023)." @@ -855,8 +824,7 @@ static size_t decode_literals_compressed(frame_context_t *const ctx, // Impossible IMPOSSIBLE(); } - if (regenerated_size > MAX_LITERALS_SIZE || - compressed_size >= regenerated_size) { + if (regenerated_size > MAX_LITERALS_SIZE) { CORRUPTION(); } @@ -1005,7 +973,7 @@ static const i16 SEQ_MATCH_LENGTH_DEFAULT_DIST[53] = { static const u32 SEQ_LITERAL_LENGTH_BASELINES[36] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24, 28, 32, 40, - 48, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65538}; + 48, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536}; static const u8 SEQ_LITERAL_LENGTH_EXTRA_BITS[36] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; @@ -1021,7 +989,7 @@ static const u8 SEQ_MATCH_LENGTH_EXTRA_BITS[53] = { 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; /// Offset decoding is simpler so we just need a maximum code value -static const u8 SEQ_MAX_CODES[3] = {35, -1, 52}; +static const u8 SEQ_MAX_CODES[3] = {35, (u8)-1, 52}; static void decompress_sequences(frame_context_t *const ctx, istream_t *const in, @@ -1029,7 +997,8 @@ static void decompress_sequences(frame_context_t *const ctx, const size_t num_sequences); static sequence_command_t decode_sequence(sequence_states_t *const state, const u8 *const src, - i64 *const offset); + i64 *const offset, + int lastSequence); static void decode_seq_table(FSE_dtable *const table, istream_t *const in, const seq_part_t type, const seq_mode_t mode); @@ -1049,12 +1018,7 @@ static size_t decode_sequences(frame_context_t *const ctx, istream_t *in, // This is a variable size field using between 1 and 3 bytes. Let's call its // first byte byte0." u8 header = IO_read_bits(in, 8); - if (header == 0) { - // "There are no sequences. The sequence section stops there. - // Regenerated content is defined entirely by literals section." - *sequences = NULL; - return 0; - } else if (header < 128) { + if (header < 128) { // "Number_of_Sequences = byte0 . Uses 1 byte." num_sequences = header; } else if (header < 255) { @@ -1065,6 +1029,12 @@ static size_t decode_sequences(frame_context_t *const ctx, istream_t *in, num_sequences = IO_read_bits(in, 16) + 0x7F00; } + if (num_sequences == 0) { + // "There are no sequences. The sequence section stops there." 
+ *sequences = NULL; + return 0; + } + *sequences = malloc(num_sequences * sizeof(sequence_command_t)); if (!*sequences) { BAD_ALLOC(); @@ -1132,7 +1102,7 @@ static void decompress_sequences(frame_context_t *const ctx, istream_t *in, // a single 1-bit and then fills the byte with 0-7 0 bits of padding." const int padding = 8 - highest_set_bit(src[len - 1]); // The offset starts at the end because FSE streams are read backwards - i64 bit_offset = len * 8 - padding; + i64 bit_offset = (i64)(len * 8 - (size_t)padding); // "The bitstream starts with initial state values, each using the required // number of bits in their respective accuracy, decoded previously from @@ -1146,7 +1116,7 @@ static void decompress_sequences(frame_context_t *const ctx, istream_t *in, for (size_t i = 0; i < num_sequences; i++) { // Decode sequences one by one - sequences[i] = decode_sequence(&states, src, &bit_offset); + sequences[i] = decode_sequence(&states, src, &bit_offset, i==num_sequences-1); } if (bit_offset != 0) { @@ -1157,7 +1127,8 @@ static void decompress_sequences(frame_context_t *const ctx, istream_t *in, // Decode a single sequence and update the state static sequence_command_t decode_sequence(sequence_states_t *const states, const u8 *const src, - i64 *const offset) { + i64 *const offset, + int lastSequence) { // "Each symbol is a code in its own context, which specifies Baseline and // Number_of_Bits to add. Codes are FSE compressed, and interleaved with raw // additional bits in the same bitstream." @@ -1192,7 +1163,7 @@ static sequence_command_t decode_sequence(sequence_states_t *const states, // Literals_Length_State is updated, followed by Match_Length_State, and // then Offset_State." // If the stream is complete don't read bits to update state - if (*offset != 0) { + if (!lastSequence) { FSE_update_state(&states->ll_table, &states->ll_state, src, offset); FSE_update_state(&states->ml_table, &states->ml_state, src, offset); FSE_update_state(&states->of_table, &states->of_state, src, offset); @@ -1242,7 +1213,7 @@ static void decode_seq_table(FSE_dtable *const table, istream_t *const in, break; } case seq_repeat: - // "Repeat_Mode : re-use distribution table from previous compressed + // "Repeat_Mode : reuse distribution table from previous compressed // block." 
// Nothing to do here, table will be unchanged if (!table->symbols) { @@ -1409,16 +1380,16 @@ size_t ZSTD_get_decompressed_size(const void *src, const size_t src_len) { // get decompressed size from ZSTD frame header { - const u32 magic_number = IO_read_bits(&in, 32); + const u32 magic_number = (u32)IO_read_bits(&in, 32); - if (magic_number == 0xFD2FB528U) { + if (magic_number == ZSTD_MAGIC_NUMBER) { // ZSTD frame frame_header_t header; parse_frame_header(&header, &in); if (header.frame_content_size == 0 && !header.single_segment_flag) { // Content size not provided, we can't tell - return -1; + return (size_t)-1; } return header.frame_content_size; @@ -1431,17 +1402,33 @@ size_t ZSTD_get_decompressed_size(const void *src, const size_t src_len) { /******* END OUTPUT SIZE COUNTING *********************************************/ /******* DICTIONARY PARSING ***************************************************/ -#define DICT_SIZE_ERROR() ERROR("Dictionary size cannot be less than 8 bytes") -#define NULL_SRC() ERROR("Tried to create dictionary with pointer to null src"); - -dictionary_t* create_dictionary() { - dictionary_t* dict = calloc(1, sizeof(dictionary_t)); +dictionary_t* create_dictionary(void) { + dictionary_t* const dict = calloc(1, sizeof(dictionary_t)); if (!dict) { BAD_ALLOC(); } return dict; } +/// Free an allocated dictionary +void free_dictionary(dictionary_t *const dict) { + HUF_free_dtable(&dict->literals_dtable); + FSE_free_dtable(&dict->ll_dtable); + FSE_free_dtable(&dict->of_dtable); + FSE_free_dtable(&dict->ml_dtable); + + free(dict->content); + + memset(dict, 0, sizeof(dictionary_t)); + + free(dict); +} + + +#if !defined(ZDEC_NO_DICTIONARY) +#define DICT_SIZE_ERROR() ERROR("Dictionary size cannot be less than 8 bytes") +#define NULL_SRC() ERROR("Tried to create dictionary with pointer to null src"); + static void init_dictionary_content(dictionary_t *const dict, istream_t *const in); @@ -1513,23 +1500,97 @@ static void init_dictionary_content(dictionary_t *const dict, memcpy(dict->content, content, dict->content_size); } -/// Free an allocated dictionary -void free_dictionary(dictionary_t *const dict) { - HUF_free_dtable(&dict->literals_dtable); - FSE_free_dtable(&dict->ll_dtable); - FSE_free_dtable(&dict->of_dtable); - FSE_free_dtable(&dict->ml_dtable); +static void HUF_copy_dtable(HUF_dtable *const dst, + const HUF_dtable *const src) { + if (src->max_bits == 0) { + memset(dst, 0, sizeof(HUF_dtable)); + return; + } - free(dict->content); + const size_t size = (size_t)1 << src->max_bits; + dst->max_bits = src->max_bits; - memset(dict, 0, sizeof(dictionary_t)); + dst->symbols = malloc(size); + dst->num_bits = malloc(size); + if (!dst->symbols || !dst->num_bits) { + BAD_ALLOC(); + } - free(dict); + memcpy(dst->symbols, src->symbols, size); + memcpy(dst->num_bits, src->num_bits, size); } + +static void FSE_copy_dtable(FSE_dtable *const dst, const FSE_dtable *const src) { + if (src->accuracy_log == 0) { + memset(dst, 0, sizeof(FSE_dtable)); + return; + } + + size_t size = (size_t)1 << src->accuracy_log; + dst->accuracy_log = src->accuracy_log; + + dst->symbols = malloc(size); + dst->num_bits = malloc(size); + dst->new_state_base = malloc(size * sizeof(u16)); + if (!dst->symbols || !dst->num_bits || !dst->new_state_base) { + BAD_ALLOC(); + } + + memcpy(dst->symbols, src->symbols, size); + memcpy(dst->num_bits, src->num_bits, size); + memcpy(dst->new_state_base, src->new_state_base, size * sizeof(u16)); +} + +/// A dictionary acts as initializing values for the frame context 
before +/// decompression, so we implement it by applying it's predetermined +/// tables and content to the context before beginning decompression +static void frame_context_apply_dict(frame_context_t *const ctx, + const dictionary_t *const dict) { + // If the content pointer is NULL then it must be an empty dict + if (!dict || !dict->content) + return; + + // If the requested dictionary_id is non-zero, the correct dictionary must + // be present + if (ctx->header.dictionary_id != 0 && + ctx->header.dictionary_id != dict->dictionary_id) { + ERROR("Wrong dictionary provided"); + } + + // Copy the dict content to the context for references during sequence + // execution + ctx->dict_content = dict->content; + ctx->dict_content_len = dict->content_size; + + // If it's a formatted dict copy the precomputed tables in so they can + // be used in the table repeat modes + if (dict->dictionary_id != 0) { + // Deep copy the entropy tables so they can be freed independently of + // the dictionary struct + HUF_copy_dtable(&ctx->literals_dtable, &dict->literals_dtable); + FSE_copy_dtable(&ctx->ll_dtable, &dict->ll_dtable); + FSE_copy_dtable(&ctx->of_dtable, &dict->of_dtable); + FSE_copy_dtable(&ctx->ml_dtable, &dict->ml_dtable); + + // Copy the repeated offsets + memcpy(ctx->previous_offsets, dict->previous_offsets, + sizeof(ctx->previous_offsets)); + } +} + +#else // ZDEC_NO_DICTIONARY is defined + +static void frame_context_apply_dict(frame_context_t *const ctx, + const dictionary_t *const dict) { + (void)ctx; + if (dict && dict->content) ERROR("dictionary not supported"); +} + +#endif /******* END DICTIONARY PARSING ***********************************************/ /******* IO STREAM OPERATIONS *************************************************/ -#define UNALIGNED() ERROR("Attempting to operate on a non-byte aligned stream") + /// Reads `num` bits from a bitstream, and updates the internal offset static inline u64 IO_read_bits(istream_t *const in, const int num_bits) { if (num_bits > 64 || num_bits <= 0) { @@ -1608,7 +1669,7 @@ static inline const u8 *IO_get_read_ptr(istream_t *const in, size_t len) { INP_SIZE(); } if (in->bit_offset != 0) { - UNALIGNED(); + ERROR("Attempting to operate on a non-byte aligned stream"); } const u8 *const ptr = in->ptr; in->ptr += len; @@ -1634,7 +1695,7 @@ static inline void IO_advance_input(istream_t *const in, size_t len) { INP_SIZE(); } if (in->bit_offset != 0) { - UNALIGNED(); + ERROR("Attempting to operate on a non-byte aligned stream"); } in->ptr += len; @@ -1832,7 +1893,7 @@ static size_t HUF_decompress_4stream(const HUF_dtable *const dtable, /// Initializes a Huffman table using canonical Huffman codes /// For more explanation on canonical Huffman codes see -/// http://www.cs.uofs.edu/~mccloske/courses/cmps340/huff_canonical_dec2015.html +/// https://www.cs.scranton.edu/~mccloske/courses/cmps340/huff_canonical_dec2015.html /// Codes within a level are allocated in symbol order (i.e. 
smaller symbols get /// earlier codes) static void HUF_init_dtable(HUF_dtable *const table, const u8 *const bits, @@ -1945,26 +2006,6 @@ static void HUF_free_dtable(HUF_dtable *const dtable) { free(dtable->num_bits); memset(dtable, 0, sizeof(HUF_dtable)); } - -static void HUF_copy_dtable(HUF_dtable *const dst, - const HUF_dtable *const src) { - if (src->max_bits == 0) { - memset(dst, 0, sizeof(HUF_dtable)); - return; - } - - const size_t size = (size_t)1 << src->max_bits; - dst->max_bits = src->max_bits; - - dst->symbols = malloc(size); - dst->num_bits = malloc(size); - if (!dst->symbols || !dst->num_bits) { - BAD_ALLOC(); - } - - memcpy(dst->symbols, src->symbols, size); - memcpy(dst->num_bits, src->num_bits, size); -} /******* END HUFFMAN PRIMITIVES ***********************************************/ /******* FSE PRIMITIVES *******************************************************/ @@ -2107,7 +2148,7 @@ static void FSE_init_dtable(FSE_dtable *const dtable, // "All remaining symbols are sorted in their natural order. Starting from // symbol 0 and table position 0, each symbol gets attributed as many cells - // as its probability. Cell allocation is spreaded, not linear." + // as its probability. Cell allocation is spread, not linear." // Place the rest in the table const u16 step = (size >> 1) + (size >> 3) + 3; const u16 mask = size - 1; @@ -2279,25 +2320,4 @@ static void FSE_free_dtable(FSE_dtable *const dtable) { free(dtable->new_state_base); memset(dtable, 0, sizeof(FSE_dtable)); } - -static void FSE_copy_dtable(FSE_dtable *const dst, const FSE_dtable *const src) { - if (src->accuracy_log == 0) { - memset(dst, 0, sizeof(FSE_dtable)); - return; - } - - size_t size = (size_t)1 << src->accuracy_log; - dst->accuracy_log = src->accuracy_log; - - dst->symbols = malloc(size); - dst->num_bits = malloc(size); - dst->new_state_base = malloc(size * sizeof(u16)); - if (!dst->symbols || !dst->num_bits || !dst->new_state_base) { - BAD_ALLOC(); - } - - memcpy(dst->symbols, src->symbols, size); - memcpy(dst->num_bits, src->num_bits, size); - memcpy(dst->new_state_base, src->new_state_base, size * sizeof(u16)); -} /******* END FSE PRIMITIVES ***************************************************/ diff --git a/doc/educational_decoder/zstd_decompress.h b/doc/educational_decoder/zstd_decompress.h index a01fde331fb..c13c8134dee 100644 --- a/doc/educational_decoder/zstd_decompress.h +++ b/doc/educational_decoder/zstd_decompress.h @@ -1,12 +1,15 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
*/ +#include <stddef.h> /* size_t */ + /******* EXPOSED TYPES ********************************************************/ /* * Contains the parsed contents of a dictionary @@ -39,7 +42,7 @@ size_t ZSTD_get_decompressed_size(const void *const src, const size_t src_len); * Return a valid dictionary_t pointer for use with dictionary initialization * or decompression */ -dictionary_t* create_dictionary(); +dictionary_t* create_dictionary(void); /* * Parse a provided dictionary blob for use in decompression diff --git a/doc/images/zstd_logo86.png b/doc/images/zstd_logo86.png index 216f228061c..8abefe21b1e 100644 Binary files a/doc/images/zstd_logo86.png and b/doc/images/zstd_logo86.png differ diff --git a/doc/zstd_compression_format.md b/doc/zstd_compression_format.md index ed758cf598a..7700c472dfa 100644 --- a/doc/zstd_compression_format.md +++ b/doc/zstd_compression_format.md @@ -3,7 +3,7 @@ Zstandard Compression Format ### Notices -Copyright (c) 2016-present Yann Collet, Facebook, Inc. +Copyright (c) Meta Platforms, Inc. and affiliates. Permission is granted to copy and distribute this document for any purpose and without charge, @@ -16,7 +16,7 @@ Distribution of this document is unlimited. ### Version -0.3.1 (25/10/18) +0.4.4 (2025-03-22) Introduction @@ -26,7 +26,7 @@ The purpose of this document is to define a lossless compressed data format, that is independent of CPU type, operating system, file system and character set, suitable for file compression, pipe and streaming compression, -using the [Zstandard algorithm](http://www.zstandard.org). +using the [Zstandard algorithm](https://facebook.github.io/zstd/). The text of the specification assumes a basic background in programming at the level of bits and other primitive data representations. @@ -35,7 +35,7 @@ even for an arbitrarily long sequentially presented input data stream, using only an a priori bounded amount of intermediate storage, and hence can be used in data communications. The format uses the Zstandard compression method, -and optional [xxHash-64 checksum method](http://www.xxhash.org), +and optional [xxHash-64 checksum method](https://cyan4973.github.io/xxHash/), for detection of data corruption. The data format defined by this specification @@ -134,7 +134,7 @@ __`Content_Checksum`__ An optional 32-bit checksum, only present if `Content_Checksum_flag` is set. The content checksum is the result -of [xxh64() hash function](http://www.xxhash.org) +of [xxh64() hash function](https://cyan4973.github.io/xxHash/) digesting the original (decoded) data as input, and a seed of zero. The low 4 bytes of the checksum are stored in __little-endian__ format. @@ -291,21 +291,10 @@ Format is __little-endian__. It's allowed to represent a small ID (for example `13`) with a large 4-bytes dictionary ID, even if it is less efficient. -_Reserved ranges :_ -Within private environments, any `Dictionary_ID` can be used. - -However, for frames and dictionaries distributed in public space, -`Dictionary_ID` must be attributed carefully. -Rules for public environment are not yet decided, -but the following ranges are reserved for some future registrar : -- low range : `<= 32767` -- high range : `>= (1 << 31)` - -Outside of these ranges, any value of `Dictionary_ID` -which is both `>= 32768` and `< (1<<31)` can be used freely, -even in public environment. - - +A value of `0` has the same meaning as no `Dictionary_ID`, +in which case the frame may or may not need a dictionary to be decoded, +and the ID of such a dictionary is not specified.
+The decoder must know this information by other means. #### `Frame_Content_Size` @@ -341,6 +330,8 @@ The structure of a block is as follows: |:--------------:|:---------------:| | 3 bytes | n bytes | +__`Block_Header`__ + `Block_Header` uses 3 bytes, written using __little-endian__ convention. It contains 3 fields : @@ -358,6 +349,7 @@ It may be followed by an optional `Content_Checksum` __`Block_Type`__ The next 2 bits represent the `Block_Type`. +`Block_Type` influences the meaning of `Block_Size`. There are 4 block types : | Value | 0 | 1 | 2 | 3 | @@ -384,16 +376,30 @@ There are 4 block types : __`Block_Size`__ The upper 21 bits of `Block_Header` represent the `Block_Size`. -`Block_Size` is the size of the block excluding the header. -A block can contain any number of bytes (even zero), up to -`Block_Maximum_Decompressed_Size`, which is the smallest of: -- Window_Size -- 128 KB -A `Compressed_Block` has the extra restriction that `Block_Size` is always -strictly less than the decompressed size. -If this condition cannot be respected, -the block must be sent uncompressed instead (`Raw_Block`). +When `Block_Type` is `Compressed_Block` or `Raw_Block`, +`Block_Size` is the size of `Block_Content` (hence excluding `Block_Header`). + +When `Block_Type` is `RLE_Block`, since `Block_Content`’s size is always 1, +`Block_Size` represents the number of times this byte must be repeated. + +`Block_Size` is limited by `Block_Maximum_Size` (see below). + +__`Block_Content`__ and __`Block_Maximum_Size`__ + +The size of `Block_Content` is limited by `Block_Maximum_Size`, +which is the smallest of: +- `Window_Size` +- 128 KiB (131,072 bytes) + +`Block_Maximum_Size` is constant for a given frame. +This maximum is applicable to both the decompressed size +and the compressed size of any block in the frame. + +The reasoning for this limit is that a decoder can read this information +at the beginning of a frame and use it to allocate buffers. +The guarantees on the size of blocks ensure that +the buffers will be large enough for any following block of the valid frame. Compressed Blocks @@ -429,7 +435,7 @@ They can be decoded first, and then copied during [Sequence Execution], or they can be decoded on the fly during [Sequence Execution]. Literals can be stored uncompressed or compressed using Huffman prefix codes. -When compressed, an optional tree description can be present, +When compressed, a tree description may optionally be present, followed by 1 or 4 streams. | `Literals_Section_Header` | [`Huffman_Tree_Description`] | [jumpTable] | Stream1 | [Stream2] | [Stream3] | [Stream4] | @@ -464,6 +470,7 @@ This field uses 2 lowest bits of first byte, describing 4 different block types repeated `Regenerated_Size` times. - `Compressed_Literals_Block` - This is a standard Huffman-compressed block, starting with a Huffman tree description. + In this mode, there are at least 2 different literals represented in the Huffman tree description. See details below. - `Treeless_Literals_Block` - This is a Huffman-compressed block, using Huffman tree _from previous Huffman-compressed literals block_. @@ -504,7 +511,7 @@ Its value is : `Size_Format = (Literals_Section_Header[0]>>2) & 3` `Regenerated_Size = (Literals_Section_Header[0]>>4) + (Literals_Section_Header[1]<<4) + (Literals_Section_Header[2]<<12)` Only Stream1 is present for these cases.
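To make the Raw/RLE header formats above concrete, here is a minimal sketch of a header parser in C. It is illustrative only: the function name and layout are not taken from the zstd sources, and the compressed literals modes are deliberately left out.

```
#include <stddef.h>
#include <stdint.h>

/* Sketch: parse the Literals_Section_Header of a Raw or RLE literals block.
 * `hdr` must point to at least 3 readable bytes.
 * Returns the header size in bytes (1, 2 or 3), or 0 for compressed modes. */
static size_t decode_raw_rle_literals_header(const uint8_t *hdr,
                                             size_t *regenerated_size)
{
    int const block_type  = hdr[0] & 3;         /* 0 = Raw, 1 = RLE */
    int const size_format = (hdr[0] >> 2) & 3;
    if (block_type > 1) return 0;               /* compressed modes not handled */
    switch (size_format) {
    case 0: case 2:   /* lowest bit 0 : Regenerated_Size uses 5 bits */
        *regenerated_size = (size_t)(hdr[0] >> 3);
        return 1;
    case 1:           /* 01 : Regenerated_Size uses 12 bits */
        *regenerated_size = ((size_t)hdr[0] >> 4) + ((size_t)hdr[1] << 4);
        return 2;
    default:          /* 11 : Regenerated_Size uses 20 bits */
        *regenerated_size = ((size_t)hdr[0] >> 4) + ((size_t)hdr[1] << 4)
                          + ((size_t)hdr[2] << 12);
        return 3;
    }
}
```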
-Note : it's allowed to represent a short value (for example `13`) +Note : it's allowed to represent a short value (for example `27`) using a long format, even if it's less efficient. __`Size_Format` for `Compressed_Literals_Block` and `Treeless_Literals_Block`__ : @@ -515,18 +522,37 @@ __`Size_Format` for `Compressed_Literals_Block` and `Treeless_Literals_Block`__ Both `Regenerated_Size` and `Compressed_Size` use 10 bits (0-1023). `Literals_Section_Header` uses 3 bytes. - `Size_Format` == 01 : 4 streams. - Both `Regenerated_Size` and `Compressed_Size` use 10 bits (0-1023). + Both `Regenerated_Size` and `Compressed_Size` use 10 bits (6-1023). `Literals_Section_Header` uses 3 bytes. - `Size_Format` == 10 : 4 streams. - Both `Regenerated_Size` and `Compressed_Size` use 14 bits (0-16383). + Both `Regenerated_Size` and `Compressed_Size` use 14 bits (6-16383). `Literals_Section_Header` uses 4 bytes. - `Size_Format` == 11 : 4 streams. - Both `Regenerated_Size` and `Compressed_Size` use 18 bits (0-262143). + Both `Regenerated_Size` and `Compressed_Size` use 18 bits (6-262143). `Literals_Section_Header` uses 5 bytes. Both `Compressed_Size` and `Regenerated_Size` fields follow __little-endian__ convention. Note: `Compressed_Size` __includes__ the size of the Huffman Tree description _when_ it is present. +Note 2: `Compressed_Size` can never be `==0`. +Even in the single-stream scenario, assuming an empty content, it must be `>=1`, +since it contains at least the final end bit flag. +In the 4-streams scenario, a valid `Compressed_Size` is necessarily `>= 10` +(6 bytes for the jump table, + 4x1 bytes for the 4 streams). + +4 streams is faster than 1 stream in decompression speed, +by exploiting instruction level parallelism. +But it's also more expensive, +costing on average ~7.3 bytes more than the 1 stream mode, mostly from the jump table. + +In general, use the 4 streams mode when there are more literals to decode, +to favor higher decompression speeds. +Note that beyond 1 KB of literals, the 4 streams mode is compulsory. + +Note that a minimum of 6 bytes is required for the 4 streams mode. +That's a technical minimum, but it's not recommended to employ the 4 streams mode +for such a small quantity, as that would be wasteful. +A more practical lower bound would be around ~256 bytes. #### Raw Literals Block The data in Stream1 is `Regenerated_Size` bytes long, @@ -546,6 +572,7 @@ or from a dictionary. ### `Huffman_Tree_Description` This section is only present when `Literals_Block_Type` is `Compressed_Literals_Block` (`2`). +The tree describes the weights of all literal symbols that can be present in the literals block, at least 2 and up to 256. The format of the Huffman tree description can be found at [Huffman Tree description](#huffman-tree-description). The size of `Huffman_Tree_Description` is determined during the decoding process, it must be used to determine where streams begin. @@ -555,10 +582,10 @@ it must be used to determine where streams begin. ### Jump Table The Jump Table is only present when there are 4 Huffman-coded streams. -Reminder : Huffman compressed data consists of either 1 or 4 Huffman-coded streams. +Reminder : Huffman compressed data consists of either 1 or 4 streams. If only one stream is present, it is a single bitstream occupying the entire -remaining portion of the literals block, encoded as described within +remaining portion of the literals block, encoded as described in [Huffman-Coded Streams](#huffman-coded-streams).
If there are four streams, `Literals_Section_Header` only provided @@ -569,17 +596,18 @@ except for the last stream which may be up to 3 bytes smaller, to reach a total decompressed size as specified in `Regenerated_Size`. The compressed size of each stream is provided explicitly in the Jump Table. -Jump Table is 6 bytes long, and consist of three 2-byte __little-endian__ fields, +Jump Table is 6 bytes long, and consists of three 2-byte __little-endian__ fields, describing the compressed sizes of the first three streams. -`Stream4_Size` is computed from total `Total_Streams_Size` minus sizes of other streams. +`Stream4_Size` is computed from `Total_Streams_Size` minus sizes of other streams: `Stream4_Size = Total_Streams_Size - 6 - Stream1_Size - Stream2_Size - Stream3_Size`. -Note: if `Stream1_Size + Stream2_Size + Stream3_Size > Total_Streams_Size`, +`Stream4_Size` is necessarily `>= 1`. Therefore, +if `Total_Streams_Size < Stream1_Size + Stream2_Size + Stream3_Size + 6 + 1`, data is considered corrupted. Each of these 4 bitstreams is then decoded independently as a Huffman-Coded stream, -as described at [Huffman-Coded Streams](#huffman-coded-streams) +as described in [Huffman-Coded Streams](#huffman-coded-streams) Sequences Section @@ -622,13 +650,15 @@ __`Number_of_Sequences`__ This is a variable size field using between 1 and 3 bytes. Let's call its first byte `byte0`. -- `if (byte0 == 0)` : there are no sequences. - The sequence section stops there. - Decompressed content is defined entirely as Literals Section content. - The FSE tables used in `Repeat_Mode` aren't updated. - `if (byte0 < 128)` : `Number_of_Sequences = byte0` . Uses 1 byte. -- `if (byte0 < 255)` : `Number_of_Sequences = ((byte0-128) << 8) + byte1` . Uses 2 bytes. -- `if (byte0 == 255)`: `Number_of_Sequences = byte1 + (byte2<<8) + 0x7F00` . Uses 3 bytes. +- `if (byte0 < 255)` : `Number_of_Sequences = ((byte0 - 0x80) << 8) + byte1`. Uses 2 bytes. + Note that the 2 bytes format fully overlaps the 1 byte format. +- `if (byte0 == 255)`: `Number_of_Sequences = byte1 + (byte2<<8) + 0x7F00`. Uses 3 bytes. + +`if (Number_of_Sequences == 0)` : there are no sequences. + The sequence section stops immediately, + FSE tables used in `Repeat_Mode` aren't updated. + Block's decompressed content is defined solely by the Literals Section content. __Symbol compression modes__ @@ -899,7 +929,10 @@ There is an exception though, when current sequence's `literals_length = 0`. In this case, repeated offsets are shifted by one, so an `offset_value` of 1 means `Repeated_Offset2`, an `offset_value` of 2 means `Repeated_Offset3`, -and an `offset_value` of 3 means `Repeated_Offset1 - 1_byte`. +and an `offset_value` of 3 means `Repeated_Offset1 - 1`. + +In the final case, if `Repeated_Offset1 - 1` evaluates to 0, then the +data is considered corrupted. For the first block, the starting offset history is populated with following values : `Repeated_Offset1`=1, `Repeated_Offset2`=4, `Repeated_Offset3`=8, @@ -912,38 +945,41 @@ Note that blocks which are not `Compressed_Block` are skipped, they do not contr ###### Offset updates rules -The newest offset takes the lead in offset history, -shifting others back by one rank, -up to the previous rank of the new offset _if it was present in history_. 
- -__Examples__ : - -In the common case, when new offset is not part of history : -`Repeated_Offset3` = `Repeated_Offset2` -`Repeated_Offset2` = `Repeated_Offset1` -`Repeated_Offset1` = `NewOffset` - -When the new offset _is_ part of history, there may be specific adjustments. - -When `NewOffset` == `Repeated_Offset1`, offset history remains actually unmodified. - -When `NewOffset` == `Repeated_Offset2`, -`Repeated_Offset1` and `Repeated_Offset2` ranks are swapped. -`Repeated_Offset3` is unmodified. - -When `NewOffset` == `Repeated_Offset3`, -there is actually no difference with the common case : -all offsets are shifted by one rank, -`NewOffset` (== `Repeated_Offset3`) becomes the new `Repeated_Offset1`. - -Also worth mentioning, the specific corner case when `offset_value` == 3, -and the literal length of the current sequence is zero. -In which case , `NewOffset` = `Repeated_Offset1` - 1_byte. -Here also, from an offset history update perspective, it's just a common case : -`Repeated_Offset3` = `Repeated_Offset2` -`Repeated_Offset2` = `Repeated_Offset1` -`Repeated_Offset1` = `NewOffset` ( == `Repeated_Offset1` - 1_byte ) - +During the execution of the sequences of a `Compressed_Block`, the +`Repeated_Offsets`' values are kept up to date, so that they always represent +the three most-recently used offsets. In order to achieve that, they are +updated after executing each sequence in the following way: + +When the sequence's `offset_value` does not refer to one of the +`Repeated_Offsets`--when it has value greater than 3, or when it has value 3 +and the sequence's `literals_length` is zero--the `Repeated_Offsets`' values +are shifted back one, and `Repeated_Offset1` takes on the value of the +just-used offset. + +Otherwise, when the sequence's `offset_value` refers to one of the +`Repeated_Offsets`--when it has value 1 or 2, or when it has value 3 and the +sequence's `literals_length` is non-zero--the `Repeated_Offsets` are re-ordered +so that `Repeated_Offset1` takes on the value of the used Repeated_Offset, and +the existing values are pushed back from the first `Repeated_Offset` through to +the `Repeated_Offset` selected by the `offset_value`. This effectively performs +a single-stepped wrapping rotation of the values of these offsets, so that +their order again reflects the recency of their use. + +The following table shows the values of the `Repeated_Offsets` as a series of +sequences are applied to them: + +| `offset_value` | `literals_length` | `Repeated_Offset1` | `Repeated_Offset2` | `Repeated_Offset3` | Comment | +|:--------------:|:-----------------:|:------------------:|:------------------:|:------------------:|:-----------------------:| +| | | 1 | 4 | 8 | starting values | +| 1114 | 11 | 1111 | 1 | 4 | non-repeat | +| 1 | 22 | 1111 | 1 | 4 | repeat 1: no change | +| 2225 | 22 | 2222 | 1111 | 1 | non-repeat | +| 1114 | 111 | 1111 | 2222 | 1111 | non-repeat | +| 3336 | 33 | 3333 | 1111 | 2222 | non-repeat | +| 2 | 22 | 1111 | 3333 | 2222 | repeat 2: swap 1 & 2 | +| 3 | 33 | 2222 | 1111 | 3333 | repeat 3: rotate 3 to 1 | +| 3 | 0 | 2221 | 2222 | 1111 | special case : insert `repeat1 - 1` | +| 1 | 0 | 2222 | 2221 | 1111 | == repeat 2 | Skippable Frames @@ -958,14 +994,14 @@ into a flow of concatenated frames. Skippable frames defined in this specification are compatible with [LZ4] ones. 
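The update rules traced in the table above fit in a short C sketch. The helper below is illustrative (the name `apply_offset_history` is not from the reference decoder): it takes the raw `offset_value` and the sequence's `literals_length`, returns the offset to apply, and maintains `rep[0..2]` as `Repeated_Offset1..3`.

```
#include <stdint.h>

/* Sketch: update the repeated-offset history for one decoded sequence.
 * rep[0..2] hold Repeated_Offset1..3; returns the offset to apply.
 * A real decoder must treat a resulting offset of 0 as corruption. */
static uint64_t apply_offset_history(uint64_t rep[3], uint64_t offset_value,
                                     uint64_t literals_length)
{
    uint64_t offset;
    if (offset_value > 3) {                    /* not a repeated offset */
        offset = offset_value - 3;
        rep[2] = rep[1]; rep[1] = rep[0]; rep[0] = offset;
        return offset;
    }
    /* literals_length == 0 shifts the meaning of offset_value by one */
    uint64_t idx = offset_value - 1 + (literals_length == 0);
    if (idx == 3) {                            /* special case : repeat1 - 1 */
        offset = rep[0] - 1;
        rep[2] = rep[1]; rep[1] = rep[0]; rep[0] = offset;
        return offset;
    }
    offset = rep[idx];
    /* single-stepped wrapping rotation: move the used offset to the front */
    for (; idx > 0; idx--) rep[idx] = rep[idx - 1];
    rep[0] = offset;
    return offset;
}
```

Running this helper over the sequence of `(offset_value, literals_length)` pairs in the table above reproduces each row of the offset history.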
-[LZ4]:http://www.lz4.org +[LZ4]:https://lz4.github.io/lz4/ From a compliant decoder perspective, skippable frames need just be skipped, and their content ignored, resuming decoding after the skippable frame. It can be noted that a skippable frame can be used to watermark a stream of concatenated frames -embedding any kind of tracking information (even just an UUID). +embedding any kind of tracking information (even just a UUID). Users wary of such a possibility should scan the stream of concatenated frames in an attempt to detect such frames for analysis or removal. @@ -1002,55 +1038,57 @@ and to compress Huffman headers. FSE --- FSE, short for Finite State Entropy, is an entropy codec based on [ANS]. -FSE encoding/decoding involves a state that is carried over between symbols, -so decoding must be done in the opposite direction as encoding. +FSE encoding/decoding involves a state that is carried over between symbols. +Decoding must be done in the opposite direction from encoding. Therefore, all FSE bitstreams are read from end to beginning. Note that the order of the bits in the stream is not reversed, -we just read the elements in the reverse order they are written. +we just read the multi-bit elements in the reverse of the order in which they were encoded. For additional details on FSE, see [Finite State Entropy]. [Finite State Entropy]:https://github.com/Cyan4973/FiniteStateEntropy/ -FSE decoding involves a decoding table which has a power of 2 size, and contain three elements: +FSE decoding is directed by a decoding table with a power of 2 size, each row containing three elements: `Symbol`, `Num_Bits`, and `Baseline`. The `log2` of the table size is its `Accuracy_Log`. An FSE state value represents an index in this table. To obtain the initial state value, consume `Accuracy_Log` bits from the stream as a __little-endian__ value. -The next symbol in the stream is the `Symbol` indicated in the table for that state. +The first symbol in the stream is the `Symbol` indicated in the table for that state. To obtain the next state value, the decoder should consume `Num_Bits` bits from the stream as a __little-endian__ value and add it to `Baseline`. [ANS]: https://en.wikipedia.org/wiki/Asymmetric_Numeral_Systems ### FSE Table Description -To decode FSE streams, it is necessary to construct the decoding table. -The Zstandard format encodes FSE table descriptions as follows: - -An FSE distribution table describes the probabilities of all symbols -from `0` to the last present one (included) -on a normalized scale of `1 << Accuracy_Log` . -Note that there must be two or more symbols with nonzero probability. +To decode an FSE bitstream, it is necessary to build its FSE decoding table. +The decoding table is derived from a distribution of Probabilities. +The Zstandard format encodes distributions of Probabilities as follows: -It's a bitstream which is read forward, in __little-endian__ fashion. -It's not necessary to know bitstream exact size, -it will be discovered and reported by the decoding process. +The distribution of probabilities is described in a bitstream which is read forward, +in __little-endian__ fashion. +The number of bytes consumed from the bitstream to describe the distribution +is discovered at the end of the decoding process. -The bitstream starts by reporting on which scale it operates. +The bitstream starts by reporting on which scale the distribution operates. Let `low4bits` designate the lowest 4 bits of the first byte : `Accuracy_Log = low4bits + 5`.
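The state machine just described fits in a few lines of C. The sketch below assumes a bit reader that consumes bits backwards from the bitstream and returns them as little-endian values; the reader, the row layout, and all names are illustrative, not part of the specification.

```
#include <stddef.h>
#include <stdint.h>

/* One row of the decoding table, as described above. */
typedef struct {
    uint8_t  symbol;     /* Symbol */
    uint8_t  num_bits;   /* Num_Bits */
    uint16_t baseline;   /* Baseline */
} fse_row;

/* Sketch: run the FSE state machine. `read_bits_le(n)` stands for a bit
 * reader that consumes `n` bits backwards from the bitstream and returns
 * them as a little-endian value. */
static void fse_decode_stream(const fse_row *table, int accuracy_log,
                              uint8_t *dst, size_t count,
                              uint64_t (*read_bits_le)(int))
{
    /* initial state : Accuracy_Log bits, read as a little-endian value */
    uint16_t state = (uint16_t)read_bits_le(accuracy_log);
    for (size_t i = 0; i < count; i++) {
        dst[i] = table[state].symbol;          /* symbol comes from the row */
        /* next state : Baseline + Num_Bits fresh bits */
        state = (uint16_t)(table[state].baseline
                           + read_bits_le(table[state].num_bits));
    }
}
```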
-Then follows each symbol value, from `0` to last present one. -The number of bits used by each field is variable. +An FSE distribution table describes the probabilities of all symbols +from `0` to the last present one (included) in natural order. +The sum of probabilities is normalized to reach a power of 2 total of `1 << Accuracy_Log` . +There must be two or more symbols with non-zero probabilities. + +The number of bits used to decode each probability is variable. It depends on : - Remaining probabilities + 1 : __example__ : Presuming an `Accuracy_Log` of 8, - and presuming 100 probabilities points have already been distributed, + and presuming 100 probability points have already been distributed, the decoder may read any value from `0` to `256 - 100 + 1 == 157` (inclusive). - Therefore, it must read `log2sup(157) == 8` bits. + Therefore, it may read up to `log2sup(157) == 8` bits, where `log2sup(N)` + is the smallest integer `T` that satisfies `(1 << T) > N`. - Value decoded : small values use 1 less bit : __example__ : @@ -1061,110 +1099,133 @@ It depends on : values from 98 to 157 use 8 bits. This is achieved through this scheme : - | Value read | Value decoded | Number of bits used | - | ---------- | ------------- | ------------------- | - | 0 - 97 | 0 - 97 | 7 | - | 98 - 127 | 98 - 127 | 8 | - | 128 - 225 | 0 - 97 | 7 | - | 226 - 255 | 128 - 157 | 8 | + | 8-bit field read | Value decoded | Nb of bits consumed | + | ---------------- | ------------- | ------------------- | + | 0 - 97 | 0 - 97 | 7 | + | 98 - 127 | 98 - 127 | 8 | + | 128 - 225 | 0 - 97 | 7 | + | 226 - 255 | 128 - 157 | 8 | -Symbols probabilities are read one by one, in order. +Probability is derived from Value decoded using the following formula: +`Probability = Value - 1` -Probability is obtained from Value decoded by following formula : -`Proba = value - 1` +Consequently, a Probability of `0` is described by a Value `1`. -It means value `0` becomes negative probability `-1`. -`-1` is a special probability, which means "less than 1". -Its effect on distribution table is described in the [next section]. -For the purpose of calculating total allocated probability points, it counts as one. +A Value `0` is used to signal a special case, named "Probability `-1`". +It describes a probability which should have been "less than 1". +Its effect on the decoding table building process is described in the [next section]. +For the purpose of counting total allocated probability points, it counts as one. [next section]:#from-normalized-distribution-to-decoding-tables -When a symbol has a __probability__ of `zero`, +Symbol probabilities are read one by one, in order. +After each probability is decoded, the total number of probability points is updated. +This is used to determine how many bits must be read to decode the probability of the next symbol. + +When a symbol has a __probability__ of `zero` (decoded from reading a Value `1`), it is followed by a 2-bit repeat flag. This repeat flag tells how many probabilities of zeroes follow the current one. It provides a number ranging from 0 to 3. If it is a 3, another 2-bit repeat flag follows, and so on. -When last symbol reaches cumulated total of `1 << Accuracy_Log`, -decoding is complete. -If the last symbol makes cumulated total go above `1 << Accuracy_Log`, -distribution is considered corrupted. +When the Probability for a symbol makes the cumulated total reach `1 << Accuracy_Log`, +then it's the last symbol, and decoding is complete.
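As an illustration of this variable-size read, here is a sketch in C of decoding one Value field, using a simple forward little-endian bit reader. All names are illustrative; a real decoder would also track the cumulated total and handle the zero-repeat flags described above.

```
#include <stddef.h>
#include <stdint.h>

/* Forward little-endian bit reader (illustrative helper). */
typedef struct { const uint8_t *src; size_t bit_pos; } fwd_bitreader;

static unsigned peek_bits(const fwd_bitreader *br, int n)
{
    unsigned v = 0;
    for (int i = 0; i < n; i++) {
        size_t const b = br->bit_pos + (size_t)i;
        v |= (unsigned)((br->src[b >> 3] >> (b & 7)) & 1) << i;
    }
    return v;
}

/* Sketch: read one Value field. `remaining` is the number of probability
 * points not yet distributed, so values 0 to remaining+1 may be read.
 * The probability is then Value - 1 (a Value of 0 meaning "less than 1"). */
static unsigned read_value(fwd_bitreader *br, unsigned remaining)
{
    unsigned const max = remaining + 1;
    int nb_bits = 1;
    while ((1u << nb_bits) <= max) nb_bits++;          /* log2sup(max) */
    unsigned const small = (1u << nb_bits) - 1 - max;  /* nb of short codes */
    unsigned const low = peek_bits(br, nb_bits - 1);
    if (low < small) {                       /* short form : 1 bit less */
        br->bit_pos += (size_t)(nb_bits - 1);
        return low;
    }
    unsigned const v = peek_bits(br, nb_bits);         /* long form */
    br->bit_pos += (size_t)nb_bits;
    return (v < (1u << (nb_bits - 1))) ? v : v - small;
}
```

With `Accuracy_Log` 8 and 100 points already distributed, this reproduces the table above: `remaining` is 156, `nb_bits` is 8, and 98 of the 8-bit field values take the 7-bit short form.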
Then the decoder can tell how many bytes were used in this process, and how many symbols are present. The bitstream consumes a round number of bytes. Any remaining bit within the last byte is just unused. +If this process results in a non-zero probability for a symbol outside of the +valid range of symbols that the FSE table is defined for, even if that symbol is +not used, then the data is considered corrupted. +For the specific case of offset codes, +a decoder implementation may reject a frame containing a non-zero probability +for an offset code larger than the largest offset code supported by the decoder +implementation. + #### From normalized distribution to decoding tables -The distribution of normalized probabilities is enough +The normalized distribution of probabilities is enough to create a unique decoding table. - -It follows the following build rule : +It is generated using the following build rule : The table has a size of `Table_Size = 1 << Accuracy_Log`. -Each cell describes the symbol decoded, -and instructions to get the next state. +Each row specifies the decoded symbol, +and instructions to reach the next state (`Number_of_Bits` and `Baseline`). -Symbols are scanned in their natural order for "less than 1" probabilities. -Symbols with this probability are being attributed a single cell, +Symbols are first scanned in their natural order for "less than 1" probabilities +(previously decoded from a Value of `0`). +Symbols with this special probability are attributed a single row, starting from the end of the table and retreating. These symbols define a full state reset, reading `Accuracy_Log` bits. -All remaining symbols are allocated in their natural order. -Starting from symbol `0` and table position `0`, -each symbol gets allocated as many cells as its probability. -Cell allocation is spreaded, not linear : -each successor position follow this rule : +Then, all remaining symbols, sorted in natural order, are allocated rows. +Starting from the smallest present symbol, and table position `0`, +each symbol gets allocated as many rows as its probability. +Row allocation is not linear; it follows this order, in modular arithmetic: ``` position += (tableSize>>1) + (tableSize>>3) + 3; position &= tableSize-1; ``` -A position is skipped if already occupied by a "less than 1" probability symbol. -`position` does not reset between symbols, it simply iterates through -each position in the table, switching to the next symbol when enough -states have been allocated to the current one. +Using the above ordering rule, each symbol gets allocated as many rows as its probability. +If a position is already occupied by a "less than 1" probability symbol, +it is simply skipped, and the next position is allocated instead. +Once enough rows have been allocated for the current symbol, +the allocation process continues, using the next symbol, in natural order. +This process guarantees that the table is entirely and exactly filled. -The result is a list of state values. -Each state will decode the current symbol. +Each row specifies a decoded symbol, and is accessed by the current state value. +It also specifies `Number_of_Bits` and `Baseline`, which are required to determine the next state value. -To get the `Number_of_Bits` and `Baseline` required for next state, -it's first necessary to sort all states in their natural order. -The lower states will need 1 more bit than higher ones. -The process is repeated for each symbol.
+To correctly set these fields, it's necessary to sort all occurrences of each symbol in state value order, +and then attribute N+1 bits to lower rows, and N bits to higher rows, +following the process described below (using an example): __Example__ : -Presuming a symbol has a probability of 5. -It receives 5 state values. States are sorted in natural order. +Presuming an `Accuracy_Log` of 7, +let's imagine a symbol with a Probability of 5: +it receives 5 rows, corresponding to 5 state values between `0` and `127`. + +In this example, the first state value happens to be `1` (after unspecified previous symbols). +The next 4 states are then determined using above modular arithmetic rule, +which specifies to add `64+16+3 = 83` modulo `128` to jump to next position, +producing the following series: `1`, `84`, `39`, `122`, `77` (modular arithmetic). +(note: the next symbol will then start at `32`). -Next power of 2 is 8. -Space of probabilities is divided into 8 equal parts. -Presuming the `Accuracy_Log` is 7, it defines 128 states. -Divided by 8, each share is 16 large. +These state values are then sorted in natural order, +resulting in the following series: `1`, `39`, `77`, `84`, `122`. -In order to reach 8, 8-5=3 lowest states will count "double", -doubling the number of shares (32 in width), -requiring one more bit in the process. +The next power of 2 after 5 is 8. +Therefore, the probability space will be divided into 8 equal parts. +Since the probability space is `1<<7 = 128` large, each share is `128/8 = 16` large. -Baseline is assigned starting from the higher states using fewer bits, -and proceeding naturally, then resuming at the first state, -each takes its allocated width from Baseline. +In order to reach 8 shares, the `8-5 = 3` lowest states will count "double", +doubling their shares (32 in width), hence requiring one more bit. -| state order | 0 | 1 | 2 | 3 | 4 | -| ---------------- | ----- | ----- | ------ | ---- | ----- | -| width | 32 | 32 | 32 | 16 | 16 | -| `Number_of_Bits` | 5 | 5 | 5 | 4 | 4 | -| range number | 2 | 4 | 6 | 0 | 1 | -| `Baseline` | 32 | 64 | 96 | 0 | 16 | -| range | 32-63 | 64-95 | 96-127 | 0-15 | 16-31 | +Baseline is assigned starting from the lowest state using fewer bits, +continuing in natural state order, looping back at the beginning. +Each state takes its allocated range from Baseline, sized by its `Number_of_Bits`. -The next state is determined from current state -by reading the required `Number_of_Bits`, and adding the specified `Baseline`. +| state order | 0 | 1 | 2 | 3 | 4 | +| ---------------- | ----- | ----- | ------ | ---- | ------ | +| state value | 1 | 39 | 77 | 84 | 122 | +| width | 32 | 32 | 32 | 16 | 16 | +| `Number_of_Bits` | 5 | 5 | 5 | 4 | 4 | +| allocation order | 3 | 4 | 5 | 1 | 2 | +| `Baseline` | 32 | 64 | 96 | 0 | 16 | +| range | 32-63 | 64-95 | 96-127 | 0-15 | 16-31 | -See [Appendix A] for the results of this process applied to the default distributions. +During decoding, the next state value is determined by using current state value as row number, +then reading the required `Number_of_Bits` from the bitstream, and adding the specified `Baseline`. + +Note: +as a trivial example, it follows that, for a symbol with a Probability of `1`, +`Baseline` is necessarily `0`, and `Number_of_Bits` is necessarily `Accuracy_Log`. + +See [Appendix A] to see the outcome of this process applied to the default distributions. 
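For illustration, the whole construction can be sketched in C. The per-symbol counter used below to derive `Number_of_Bits` and `Baseline` is a compact equivalent of the sort-and-attribute process described above (it scans states in ascending order while counting occurrences of each symbol); names and layout are illustrative only.

```
#include <stdint.h>

/* Sketch: build the decoding table from decoded probabilities.
 * probs[s] is the probability of symbol s (-1 for "less than 1").
 * symbols, num_bits and baseline each have 1 << accuracy_log entries. */
static void fse_build_dtable(const int16_t *probs, int num_symbols,
                             int accuracy_log, uint8_t *symbols,
                             uint8_t *num_bits, uint16_t *baseline)
{
    int const size = 1 << accuracy_log;
    int high = size - 1;
    /* "less than 1" symbols : one row at the end, full state reset */
    for (int s = 0; s < num_symbols; s++)
        if (probs[s] == -1) {
            symbols[high]  = (uint8_t)s;
            num_bits[high] = (uint8_t)accuracy_log;
            baseline[high] = 0;
            high--;
        }
    /* spread the remaining symbols, skipping the reserved tail rows */
    int const step = (size >> 1) + (size >> 3) + 3;
    int pos = 0;
    for (int s = 0; s < num_symbols; s++)
        for (int n = 0; n < probs[s]; n++) {
            symbols[pos] = (uint8_t)s;
            do { pos = (pos + step) & (size - 1); } while (pos > high);
        }
    /* scan states in ascending order; one counter per symbol yields the
     * same Number_of_Bits and Baseline as the sorting process above */
    uint16_t counter[256] = {0};
    for (int s = 0; s < num_symbols; s++)
        if (probs[s] > 0) counter[s] = (uint16_t)probs[s];
    for (int u = 0; u <= high; u++) {
        int const s = symbols[u];
        uint16_t const c = counter[s]++;
        int hb = 0;
        while ((1 << (hb + 1)) <= c) hb++;             /* highest set bit */
        num_bits[u] = (uint8_t)(accuracy_log - hb);
        baseline[u] = (uint16_t)((c << num_bits[u]) - size);
    }
}
```

Applied to the example above (a probability of 5 with `Accuracy_Log` 7), this assigns states 1, 39, 77 five bits with baselines 32, 64, 96, and states 84, 122 four bits with baselines 0, 16, matching the table.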
[Appendix A]: #appendix-a---decoding-tables-for-predefined-codes @@ -1173,7 +1234,7 @@ Huffman Coding -------------- Zstandard Huffman-coded streams are read backwards, similar to the FSE bitstreams. -Therefore, to find the start of the bitstream, it is therefore to +Therefore, to find the start of the bitstream, it is required to know the offset of the last byte of the Huffman-coded stream. After writing the last bit containing information, the compressor @@ -1209,48 +1270,61 @@ This specification limits maximum code length to 11 bits. #### Representation -All literal values from zero (included) to last present one (excluded) +All literal symbols from zero (included) to last present one (excluded) are represented by `Weight` with values from `0` to `Max_Number_of_Bits`. Transformation from `Weight` to `Number_of_Bits` follows this formula : ``` Number_of_Bits = Weight ? (Max_Number_of_Bits + 1 - Weight) : 0 ``` -The last symbol's `Weight` is deduced from previously decoded ones, -by completing to the nearest power of 2. -This power of 2 gives `Max_Number_of_Bits`, the depth of the current tree. +When a literal symbol is not present, it receives a `Weight` of 0. +The least frequent symbol receives a `Weight` of 1. +If no literal has a `Weight` of 1, then the data is considered corrupted. +If there are not at least two literals with non-zero `Weight`, then the data +is considered corrupted. +The most frequent symbol receives a `Weight` anywhere between 1 and 11 (max). +The last symbol's `Weight` is deduced from previously retrieved Weights, +by completing to the nearest power of 2. It is necessarily non-zero. +If it's not possible to reach a clean power of 2 with a single `Weight` value, +the Huffman Tree Description is considered invalid. +This final power of 2 gives `Max_Number_of_Bits`, the depth of the current tree. `Max_Number_of_Bits` must be <= 11, otherwise the representation is considered corrupted. __Example__ : Let's presume the following Huffman tree must be described : | literal symbol | A | B | C | D | E | F | | ---------------- | --- | --- | --- | --- | --- | --- | | `Number_of_Bits` | 1 | 2 | 3 | 0 | 4 | 4 | The tree depth is 4, since its longest elements use 4 bits -(longest elements are the one with smallest frequency). -Value `5` will not be listed, as it can be determined from values for 0-4, -nor will values above `5` as they are all 0. -Values from `0` to `4` will be listed using `Weight` instead of `Number_of_Bits`. +(longest elements are the ones with smallest frequency). + +All symbols will now receive a `Weight` instead of `Number_of_Bits`. Weight formula is : ``` Weight = Number_of_Bits ? (Max_Number_of_Bits + 1 - Number_of_Bits) : 0 ``` -It gives the following series of weights : +It gives the following series of Weights : | literal value | 0 | 1 | 2 | 3 | 4 | | ------------- | --- | --- | --- | --- | --- | | `Weight` | 4 | 3 | 2 | 0 | 1 | + +| literal symbol | A | B | C | D | E | F | | -------------- | --- | --- | --- | --- | --- | --- | | `Weight` | 4 | 3 | 2 | 0 | 1 | 1 | + +This list will be sent to the decoder, with the following modifications: + +- `F` will not be listed, because it can be determined from previous symbols +- nor will symbols above `F` as they are all 0 +- on the other hand, all symbols before `A`, starting with `\0`, will be listed, with a Weight of 0.
The decoder will do the inverse operation : -having collected weights of literal symbols from `0` to `4`, -it knows the last literal, `5`, is present with a non-zero `Weight`. -The `Weight` of `5` can be determined by advancing to the next power of 2. +having collected weights of literal symbols from `A` to `E`, +it knows the last literal, `F`, is present with a non-zero `Weight`. +The `Weight` of `F` can be determined by advancing to the next power of 2. The sum of `2^(Weight-1)` (excluding 0's) is : `8 + 4 + 2 + 0 + 1 = 15`. Nearest larger power of 2 value is 16. -Therefore, `Max_Number_of_Bits = 4` and `Weight[5] = 16-15 = 1`. +Therefore, `Max_Number_of_Bits = log2(16) = 4` and `Weight[F] = log2(16 - 15) + 1 = 1`. #### Huffman Tree header @@ -1290,7 +1364,7 @@ sharing a single distribution table. To decode an FSE bitstream, it is necessary to know its compressed size. Compressed size is provided by `headerByte`. It's also necessary to know its _maximum possible_ decompressed size, -which is `255`, since literal values span from `0` to `255`, +which is `255`, since literal symbols span from `0` to `255`, and last symbol's `Weight` is not represented. An FSE bitstream starts with a header describing the probability distribution. @@ -1312,6 +1386,13 @@ If updating state after decoding a symbol would require more bits than remain in the stream, it is assumed that extra bits are 0. Then, symbols for each of the final states are decoded and the process is complete. +If this process would produce more weights than the maximum number of decoded +weights (255), then the data is considered corrupted. + +If either of the 2 initial states is absent or truncated, then the data is +considered corrupted. Consequently, it is not possible to encode fewer than +2 weights using this mode. + #### Conversion from weights to Huffman prefix codes All present symbols shall now have a `Weight` value. It is possible to transform weights into `Number_of_Bits`, using this formula: ``` Number_of_Bits = (Weight>0) ? Max_Number_of_Bits + 1 - Weight : 0 ``` -Symbols are sorted by `Weight`. -Within same `Weight`, symbols keep natural sequential order. +In order to determine which prefix code is assigned to each Symbol, +Symbols are first sorted by `Weight`, then by natural sequential order. Symbols with a `Weight` of zero are removed. -Then, starting from lowest `Weight`, prefix codes are distributed in sequential order. +Then, starting from the lowest `Weight` (hence highest `Number_of_Bits`), +prefix codes are assigned in ascending order.
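A sketch in C of this weight-to-code conversion, including the deduction of the last symbol's `Weight`, is given below; the worked example that follows traces the same steps. Names are illustrative, and error handling is reduced to the corruption checks named above.

```
#include <stdint.h>

/* Sketch: complete the last Weight, then assign prefix codes.
 * weights[0..num_symbols-2] hold the decoded Weights; the last entry is
 * deduced here. Returns Max_Number_of_Bits, or -1 on corruption. */
static int huff_assign_codes(uint8_t *weights, int num_symbols,
                             uint8_t *nbits, uint16_t *codes)
{
    /* complete to the nearest power of 2 to find the last Weight */
    uint32_t sum = 0;
    for (int s = 0; s < num_symbols - 1; s++)
        if (weights[s]) sum += 1u << (weights[s] - 1);
    int max_bits = 0;
    while ((1u << max_bits) <= sum) max_bits++;  /* next power of 2 */
    if (max_bits > 11) return -1;                /* tree too deep */
    uint32_t const left = (1u << max_bits) - sum;
    if (left & (left - 1)) return -1;  /* not reachable with one Weight */
    int w = 1;
    while ((1u << (w - 1)) < left) w++;
    weights[num_symbols - 1] = (uint8_t)w;

    /* Number_of_Bits = Weight ? Max_Number_of_Bits + 1 - Weight : 0 */
    for (int s = 0; s < num_symbols; s++)
        nbits[s] = weights[s] ? (uint8_t)(max_bits + 1 - weights[s]) : 0;

    /* lowest Weight (longest codes) first; natural order within a Weight */
    uint16_t code = 0;
    for (int weight = 1; weight <= max_bits; weight++) {
        for (int s = 0; s < num_symbols; s++)
            if (weights[s] == weight) codes[s] = code++;
        code >>= 1;          /* codes get one bit shorter per level */
    }
    return max_bits;
}
```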
__Example__ :
-Let's presume the following list of weights has been decoded :
+Let's assume the following list of weights has been decoded:

-| Literal  |  0  |  1  |  2  |  3  |  4  |  5  |
+| Literal  |  A  |  B  |  C  |  D  |  E  |  F  |
| -------- | --- | --- | --- | --- | --- | --- |
| `Weight` |  4  |  3  |  2  |  0  |  1  |  1  |

Sorted by weight and then natural sequential order,
-it gives the following distribution :
+it gives the following prefix codes distribution:

-| Literal          |  3  |  4  |  5  |  2  |  1  |  0   |
-| ---------------- | --- | --- | --- | --- | --- | ---- |
-| `Weight`         |  0  |  1  |  1  |  2  |  3  |  4   |
-| `Number_of_Bits` |  0  |  4  |  4  |  3  |  2  |  1   |
-| prefix codes     | N/A | 0000| 0001| 001 | 01  | 1    |
+| Literal          |  D  |  E   |  F   |  C   |  B   |  A   |
+| ---------------- | --- | ---- | ---- | ---- | ---- | ---- |
+| `Weight`         |  0  |  1   |  1   |  2   |  3   |  4   |
+| `Number_of_Bits` |  0  |  4   |  4   |  3   |  2   |  1   |
+| prefix code      | N/A | 0000 | 0001 | 001  | 01   | 1    |
+| ascending order  | N/A | 0000 | 0001 | 001x | 01xx | 1xxx |

### Huffman-coded Streams

@@ -1361,10 +1444,10 @@ it's possible to read the bitstream in a __little-endian__ fashion,
keeping track of already used bits. Since the bitstream is encoded in reverse
order, starting from the end read symbols in forward order.

-For example, if the literal sequence "0145" was encoded using above prefix code,
+For example, if the literal sequence `ABEF` was encoded using above prefix code,
it would be encoded (in reverse order) as:

-|Symbol  |   5  |   4  |  1 | 0 | Padding |
+|Symbol  |   F  |   E  |  B | A | Padding |
|--------|------|------|----|---|---------|
|Encoding|`0000`|`0001`|`01`|`1`| `00001` |

@@ -1411,13 +1494,17 @@ __`Dictionary_ID`__ : 4 bytes, stored in __little-endian__ format.
              It's used by decoders to check if they use the correct dictionary.

_Reserved ranges :_
-              If the frame is going to be distributed in a private environment,
-              any `Dictionary_ID` can be used.
-              However, for public distribution of compressed frames,
-              the following ranges are reserved and shall not be used :
+If the dictionary is going to be distributed in a public environment,
+the following ranges of `Dictionary_ID` are reserved for some future registrar
+and shall not be used :
+
+ - low range  : <= 32767
+ - high range : >= (2^31)
+
+Outside of these ranges, any value of `Dictionary_ID`
+which is both `>= 32768` and `< (1<<31)` can be used freely,
+even in public environment.

-              - low range  : <= 32767
-              - high range : >= (2^31)

__`Entropy_Tables`__ : follow the same format as tables in [compressed blocks].
              See the relevant [FSE](#fse-table-description)

@@ -1429,7 +1516,7 @@ __`Entropy_Tables`__ : follow the same format as tables in [compressed blocks].
              Repeat distribution mode for sequence decoding.
              It's finally followed by 3 offset values, populating recent offsets
              (instead of using `{1,4,8}`), stored in order, 4-bytes __little-endian__ each,
              for a total of 12 bytes.
-             Each recent offset must have a value < dictionary size.
+             Each recent offset must have a value <= dictionary content size, and cannot equal 0.

__`Content`__ : The rest of the dictionary is its content.
              The content acts as a "past" in front of data to compress or decompress,

@@ -1437,7 +1524,7 @@ __`Content`__ : The rest of the dictionary is its content.
              As long as the amount of data decoded from this frame is less than or
              equal to `Window_Size`, sequence commands may specify offsets longer
              than the total length of decoded output so far to reference back to the
-             dictionary, even parts of the dictionary with offsets larger than `Window_Size`.
+             dictionary, even parts of the dictionary with offsets larger than `Window_Size`.
              After the total output has surpassed `Window_Size` however,
              this is no longer allowed and the dictionary is no longer accessible.

@@ -1655,6 +1742,19 @@ or at least provide a meaningful error code explaining for which reason it cannot

Version changes
---------------
+- 0.4.4 : minor clarification for block size
+- 0.4.3 : clarifications for Huffman prefix code assignment example
+- 0.4.2 : refactor FSE table construction process, inspired by Donald Pian
+- 0.4.1 : clarifications on a few error scenarios, by Eric Lasota
+- 0.4.0 : fixed imprecise behavior for nbSeq==0, detected by Igor Pavlov
+- 0.3.9 : clarifications for Huffman-compressed literal sizes.
+- 0.3.8 : clarifications for Huffman Blocks and Huffman Tree descriptions.
+- 0.3.7 : clarifications for Repeat_Offsets, matching RFC8878
+- 0.3.6 : clarifications for Dictionary_ID
+- 0.3.5 : clarifications for Block_Maximum_Size
+- 0.3.4 : clarifications for FSE decoding table
+- 0.3.3 : clarifications for field Block_Size
+- 0.3.2 : remove additional block size restriction on compressed blocks
- 0.3.1 : minor clarification regarding offset history update rules
- 0.3.0 : minor edits to match RFC8478
- 0.2.9 : clarifications for huffman weights direct representation, by Ulrich Kunitz

diff --git a/doc/zstd_manual.html b/doc/zstd_manual.html
index 024c10e630f..89f0761f492 100644
--- a/doc/zstd_manual.html
+++ b/doc/zstd_manual.html
@@ -1,36 +1,37 @@
-zstd 1.4.0 Manual
+zstd 1.5.7 Manual

-zstd 1.4.0 Manual
+zstd 1.5.7 Manual
+Note: the content of this file has been automatically generated by parsing "zstd.h"
    Contents

    1. Introduction
    2. Version
    3. -
    4. Simple API
    5. +
    6. Simple Core API
    7. Explicit context
    8. -
    9. Advanced compression API
    10. -
    11. Advanced decompression API
    12. +
    13. Advanced compression API (Requires v1.4.0+)
    14. +
    15. Advanced decompression API (Requires v1.4.0+)
    16. Streaming
    17. Streaming compression - HowTo
    18. Streaming decompression - HowTo
    19. Simple dictionary API
    20. Bulk processing dictionary API
    21. Dictionary helper functions
    22. -
    23. Advanced dictionary and prefix API
    24. +
    25. Advanced dictionary and prefix API (Requires v1.4.0+)
    26. experimental API (static linking only)
    27. -
    28. Frame size functions
    29. +
    30. Frame header and size functions
    31. Memory management
    32. Advanced compression functions
    33. Advanced decompression functions
    34. Advanced streaming functions
    35. -
    36. Buffer-less and synchronous inner streaming functions
    37. +
    38. Buffer-less and synchronous inner streaming functions (DEPRECATED)
    39. Buffer-less streaming compression (synchronous mode)
    40. Buffer-less streaming decompression (synchronous mode)
    41. -
    42. Block level API
    43. +
    44. Block level API (DEPRECATED)

    Introduction

    @@ -66,57 +67,70 @@ 

    zstd 1.4.0 Manual

    Version

    
     
    -
    unsigned ZSTD_versionNumber(void);   /**< to check runtime library version */
    -

    -

    Simple API

    
    +
    unsigned ZSTD_versionNumber(void);
    +

    Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE). +


    + +
    const char* ZSTD_versionString(void);
    +

    Return runtime library version, like "1.4.5". Requires v1.3.0+. +
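A minimal usage sketch for these two entry points (illustrative):

```
#include <stdio.h>
#include <zstd.h>

int main(void)
{
    /* numeric form is MAJOR*100*100 + MINOR*100 + RELEASE, e.g. 10507 for v1.5.7 */
    printf("zstd runtime version : %u (%s)\n",
           ZSTD_versionNumber(), ZSTD_versionString());
    return 0;
}
```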


    + +

    Simple Core API

    
     
     
    size_t ZSTD_compress( void* dst, size_t dstCapacity,
                     const void* src, size_t srcSize,
                           int compressionLevel);
     

Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
- Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have
+ enough space to successfully compress the data.
 @return : compressed size written into `dst` (<= `dstCapacity`),
           or an error code if it fails (which can be tested using ZSTD_isError()).


    size_t ZSTD_decompress( void* dst, size_t dstCapacity,
                       const void* src, size_t compressedSize);
    -

    `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. - `dstCapacity` is an upper bound of originalSize to regenerate. - If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data. - @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), - or an errorCode if it fails (which can be tested using ZSTD_isError()). +

    `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. + Multiple compressed frames can be decompressed at once with this method. + The result will be the concatenation of all decompressed frames, back to back. + `dstCapacity` is an upper bound of originalSize to regenerate. + First frame's decompressed size can be extracted using ZSTD_getFrameContentSize(). + If maximum upper bound isn't known, prefer using streaming mode to decompress data. + @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), + or an errorCode if it fails (which can be tested using ZSTD_isError()).
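Putting the two entry points together, a minimal single-pass round trip might look as follows (a sketch; error handling is abbreviated):

```
#include <stdlib.h>
#include <string.h>
#include <zstd.h>

/* Sketch of a single-pass round trip. Returns 0 when src survives the trip. */
static int roundtrip(const void* src, size_t srcSize)
{
    size_t const dstCap = ZSTD_compressBound(srcSize);  /* worst-case bound */
    void*  const dst    = malloc(dstCap);
    void*  const back   = malloc(srcSize ? srcSize : 1);
    int result = 1;
    if (dst && back) {
        size_t const cSize = ZSTD_compress(dst, dstCap, src, srcSize, 3);
        if (!ZSTD_isError(cSize)) {
            size_t const dSize = ZSTD_decompress(back, srcSize, dst, cSize);
            result = ZSTD_isError(dSize) || dSize != srcSize
                  || memcmp(src, back, srcSize) != 0;
        }
    }
    free(back); free(dst);
    return result;
}
```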


    +

    Decompression helper functions


    #define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
     #define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
     unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
    -

    `src` should point to the start of a ZSTD encoded frame. - `srcSize` must be at least as large as the frame header. - hint : any size >= `ZSTD_frameHeaderSize_max` is large enough. - @return : - decompressed size of `src` frame content, if known - - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined - - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) - note 1 : a 0 return value means the frame is valid but "empty". - note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode. - When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size. - In which case, it's necessary to use streaming mode to decompress data. - Optionally, application can rely on some implicit limit, - as ZSTD_decompress() only needs an upper bound of decompressed size. - (For example, data could be necessarily cut into blocks <= 16 KB). - note 3 : decompressed size is always present when compression is completed using single-pass functions, - such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict(). - note 4 : decompressed size can be very large (64-bits value), - potentially larger than what local system can handle as a single memory segment. - In which case, it's necessary to use streaming mode to decompress data. - note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified. - Always ensure return value fits within application's authorized limits. - Each application can set its own limits. - note 6 : This function replaces ZSTD_getDecompressedSize() -


    - -
    unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
    -

    NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize(). +

    `src` should point to the start of a ZSTD encoded frame. + `srcSize` must be at least as large as the frame header. + hint : any size >= `ZSTD_frameHeaderSize_max` is large enough. + @return : - decompressed size of `src` frame content, if known + - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined + - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) + note 1 : a 0 return value means the frame is valid but "empty". + When invoking this method on a skippable frame, it will return 0. + note 2 : decompressed size is an optional field, it may not be present (typically in streaming mode). + When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size. + In which case, it's necessary to use streaming mode to decompress data. + Optionally, application can rely on some implicit limit, + as ZSTD_decompress() only needs an upper bound of decompressed size. + (For example, data could be necessarily cut into blocks <= 16 KB). + note 3 : decompressed size is always present when compression is completed using single-pass functions, + such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict(). + note 4 : decompressed size can be very large (64-bits value), + potentially larger than what local system can handle as a single memory segment. + In which case, it's necessary to use streaming mode to decompress data. + note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified. + Always ensure return value fits within application's authorized limits. + Each application can set its own limits. + note 6 : This function replaces ZSTD_getDecompressedSize() +
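The notes above translate into the following pattern for sizing a one-pass destination buffer (a sketch):

```
#include <stdlib.h>
#include <zstd.h>

/* Sketch : size the destination from the frame header before one-pass decompression.
 * Returns NULL (meaning : use streaming mode, or give up) when the size is
 * not stored or the frame is invalid. */
static void* decompressKnownSize(const void* frame, size_t frameSize, size_t* outSize)
{
    unsigned long long const rSize = ZSTD_getFrameContentSize(frame, frameSize);
    if (rSize == ZSTD_CONTENTSIZE_ERROR) return NULL;    /* not a valid frame */
    if (rSize == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;  /* prefer streaming mode */
    /* note 5 above : an untrusted rSize must also be checked against an application limit */
    {   void* const dst = malloc(rSize ? (size_t)rSize : 1);
        if (dst == NULL) return NULL;
        {   size_t const dSize = ZSTD_decompress(dst, (size_t)rSize, frame, frameSize);
            if (ZSTD_isError(dSize) || dSize != rSize) { free(dst); return NULL; }
        }
        *outSize = (size_t)rSize;
        return dst;
    }
}
```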


    + +
    ZSTD_DEPRECATED("Replaced by ZSTD_getFrameContentSize")
    +unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
    +

    This function is now obsolete, in favor of ZSTD_getFrameContentSize(). Both functions work the same way, but ZSTD_getDecompressedSize() blends "empty", "unknown" and "error" results to the same return value (0), while ZSTD_getFrameContentSize() gives them separate return values. @@ -128,54 +142,94 @@

    zstd 1.4.0 Manual

    `srcSize` must be >= first frame size @return : the compressed size of the first frame starting at `src`, suitable to pass as `srcSize` to `ZSTD_decompress` or similar, - or an error code if input is invalid + or an error code if input is invalid + Note 1: this method is called _find*() because it's not enough to read the header, + it may have to scan through the frame's content, to reach its end. + Note 2: this method also works with Skippable Frames. In which case, + it returns the size of the complete skippable frame, + which is always equal to its content size + 8 bytes for headers. +


    + +

    Compression helper functions


    +
    #define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00ULL : 0xFF00FF00U)
    +#define ZSTD_COMPRESSBOUND(srcSize)   (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
    +size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
    +

    maximum compressed size in worst case single-pass scenario. + When invoking `ZSTD_compress()`, or any other one-pass compression function, + it's recommended to provide @dstCapacity >= ZSTD_compressBound(srcSize) + as it eliminates one potential failure scenario, + aka not enough room in dst buffer to write the compressed frame. + Note : ZSTD_compressBound() itself can fail, if @srcSize >= ZSTD_MAX_INPUT_SIZE . + In which case, ZSTD_compressBound() will return an error code + which can be tested using ZSTD_isError(). + + ZSTD_COMPRESSBOUND() : + same as ZSTD_compressBound(), but as a macro. + It can be used to produce constants, which can be useful for static allocation, + for example to size a static array on stack. + Will produce constant value 0 if srcSize is too large. +
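For instance, the macro form can size static storage while the function form handles the error case at run time (a sketch; `MSG_MAX` is a hypothetical application constant):

```
#include <zstd.h>

#define MSG_MAX 1024   /* hypothetical application message limit */

/* ZSTD_COMPRESSBOUND() is a constant expression, usable for static allocation */
static char scratch[ZSTD_COMPRESSBOUND(MSG_MAX)];

static size_t compressMsg(const void* msg, size_t msgSize)   /* msgSize <= MSG_MAX */
{
    size_t const bound = ZSTD_compressBound(msgSize);  /* runtime form of the same bound */
    if (ZSTD_isError(bound)) return bound;             /* msgSize too large */
    return ZSTD_compress(scratch, sizeof(scratch), msg, msgSize, 3);
}
```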


    -

    Helper functions

    #define ZSTD_COMPRESSBOUND(srcSize)   ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
    -size_t      ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
    -unsigned    ZSTD_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
    -const char* ZSTD_getErrorName(size_t code);     /*!< provides readable string from an error code */
    -int         ZSTD_minCLevel(void);               /*!< minimum negative compression level allowed */
    -int         ZSTD_maxCLevel(void);               /*!< maximum compression level available */
    +

    Error helper functions

    /* ZSTD_isError() :
    + * Most ZSTD_* functions returning a size_t value can be tested for error,
    + * using ZSTD_isError().
    + * @return 1 if error, 0 otherwise
    + */
    +unsigned     ZSTD_isError(size_t result);      /*!< tells if a `size_t` function result is an error code */
    +ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); /* convert a result into an error code, which can be compared to error enum list */
    +const char*  ZSTD_getErrorName(size_t result); /*!< provides readable string from a function result */
    +int          ZSTD_minCLevel(void);             /*!< minimum negative compression level allowed, requires v1.4.0+ */
    +int          ZSTD_maxCLevel(void);             /*!< maximum compression level available */
    +int          ZSTD_defaultCLevel(void);         /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */
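A small wrapper shows the intended pattern (a sketch; `checked` is a hypothetical helper):

```
#include <stdio.h>
#include <zstd.h>

/* hypothetical helper : report and pass through any zstd error */
static size_t checked(size_t result, const char* what)
{
    if (ZSTD_isError(result))
        fprintf(stderr, "%s failed : %s\n", what, ZSTD_getErrorName(result));
    return result;
}

/* usage : size_t const cSize = checked(ZSTD_compress(dst, cap, src, size, 3), "ZSTD_compress"); */
```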
     

    Explicit context

    
     
     

    Compression context

      When compressing many times,
    -  it is recommended to allocate a context just once, and re-use it for each successive compression operation.
    -  This will make workload friendlier for system's memory.
    -  Use one context per thread for parallel execution in multi-threaded environments. 
    +  it is recommended to allocate a compression context just once,
    +  and reuse it for each successive compression operation.
    +  This will make the workload easier for system's memory.
    +  Note : re-using context is just a speed / resource optimization.
    +         It doesn't change the compression ratio, which remains identical.
    +  Note 2: For parallel execution in multi-threaded environments,
+         use a different context per thread.
    + 
     
    typedef struct ZSTD_CCtx_s ZSTD_CCtx;
     ZSTD_CCtx* ZSTD_createCCtx(void);
    -size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);
    +size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);  /* compatible with NULL pointer */
     

    size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
                              void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize,
                              int compressionLevel);
    -

    Same as ZSTD_compress(), using an explicit ZSTD_CCtx - The function will compress at requested compression level, - ignoring any other parameter +

    Same as ZSTD_compress(), using an explicit ZSTD_CCtx. + Important : in order to mirror `ZSTD_compress()` behavior, + this function compresses at the requested compression level, + __ignoring any other advanced parameter__ . + If any advanced parameter was set using the advanced API, + they will all be reset. Only @compressionLevel remains. +
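The reuse recommendation above translates into the following pattern (a sketch; the buffer arrays are hypothetical):

```
#include <zstd.h>

/* Sketch : one context, many compressions. bufs/sizes/outs/outCaps are
 * hypothetical application arrays; outCaps[i] receives the compressed size. */
static int compressMany(const void* const* bufs, const size_t* sizes,
                        void* const* outs, size_t* outCaps, size_t n)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t i;
    if (cctx == NULL) return 1;
    for (i = 0; i < n; i++) {
        size_t const r = ZSTD_compressCCtx(cctx, outs[i], outCaps[i],
                                           bufs[i], sizes[i], 3);
        if (ZSTD_isError(r)) { ZSTD_freeCCtx(cctx); return 1; }
        outCaps[i] = r;
    }
    ZSTD_freeCCtx(cctx);   /* compatible with NULL pointer */
    return 0;
}
```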


    Decompression context

      When decompressing many times,
       it is recommended to allocate a context only once,
    -  and re-use it for each successive compression operation.
+  and reuse it for each successive decompression operation.
       This will make workload friendlier for system's memory.
       Use one context per thread for parallel execution. 
     
    typedef struct ZSTD_DCtx_s ZSTD_DCtx;
     ZSTD_DCtx* ZSTD_createDCtx(void);
    -size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);
    +size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);  /* accept NULL pointer */
     

    size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
                                void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize);
     

    Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx. - Compatible with sticky parameters. + Compatible with sticky parameters (see below).


    -

    Advanced compression API

    
    +

    Advanced compression API (Requires v1.4.0+)

    
     
     
    typedef enum { ZSTD_fast=1,
                    ZSTD_dfast=2,
    @@ -194,18 +248,29 @@ 

    Decompression context

      When decompressing many times,
     
         /* compression parameters
          * Note: When compressing with a ZSTD_CDict these parameters are superseded
    -     * by the parameters used to construct the ZSTD_CDict. See ZSTD_CCtx_refCDict()
    -     * for more info (superseded-by-cdict). */
    -    ZSTD_c_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
    +     * by the parameters used to construct the ZSTD_CDict.
    +     * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */
    +    ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table.
    +                              * Note that exact compression parameters are dynamically determined,
    +                              * depending on both compression level and srcSize (when known).
                                   * Default level is ZSTD_CLEVEL_DEFAULT==3.
                                   * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
                                   * Note 1 : it's possible to pass a negative compression level.
    -                              * Note 2 : setting a level sets all default values of other compression parameters */
    +                              * Note 2 : setting a level does not automatically set all other compression parameters
    +                              *   to default. Setting this will however eventually dynamically impact the compression
    +                              *   parameters which have not been manually set. The manually set
    +                              *   ones will 'stick'. */
    +    /* Advanced compression parameters :
    +     * It's possible to pin down compression parameters to some specific values.
    +     * In which case, these values are no longer dynamically selected by the compressor */
         ZSTD_c_windowLog=101,    /* Maximum allowed back-reference distance, expressed as power of 2.
    +                              * This will set a memory budget for streaming decompression,
    +                              * with larger values requiring more memory
    +                              * and typically compressing more.
                                   * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
                                   * Special: value 0 means "use default windowLog".
                                   * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
    -                              *       requires explicitly allowing such window size at decompression stage if using streaming. */
    +                              *       requires explicitly allowing such size at streaming decompression stage. */
         ZSTD_c_hashLog=102,      /* Size of the initial probe table, as a power of 2.
                                   * Resulting memory usage is (1 << (hashLog+2)).
                                   * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
    @@ -216,13 +281,13 @@ 

    Decompression context

      When decompressing many times,
                                   * Resulting memory usage is (1 << (chainLog+2)).
                                   * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
                                   * Larger tables result in better and slower compression.
    -                              * This parameter is useless when using "fast" strategy.
    +                              * This parameter is useless for "fast" strategy.
                                   * It's still useful when using "dfast" strategy,
                                   * in which case it defines a secondary probe table.
                                   * Special: value 0 means "use default chainLog". */
         ZSTD_c_searchLog=104,    /* Number of search attempts, as a power of 2.
                                   * More attempts result in better and slower compression.
    -                              * This parameter is useless when using "fast" and "dFast" strategies.
    +                              * This parameter is useless for "fast" and "dFast" strategies.
                                   * Special: value 0 means "use default searchLog". */
         ZSTD_c_minMatch=105,     /* Minimum size of searched matches.
                                   * Note that Zstandard can still find matches of smaller size,
    @@ -245,13 +310,27 @@ 

    Decompression context

      When decompressing many times,
                                   * resulting in stronger and slower compression.
                                   * Special: value 0 means "use default strategy". */
     
    +    ZSTD_c_targetCBlockSize=130, /* v1.5.6+
    +                                  * Attempts to fit compressed block size into approximately targetCBlockSize.
    +                                  * Bound by ZSTD_TARGETCBLOCKSIZE_MIN and ZSTD_TARGETCBLOCKSIZE_MAX.
    +                                  * Note that it's not a guarantee, just a convergence target (default:0).
    +                                  * No target when targetCBlockSize == 0.
    +                                  * This is helpful in low bandwidth streaming environments to improve end-to-end latency,
    +                                  * when a client can make use of partial documents (a prominent example being Chrome).
    +                                  * Note: this parameter is stable since v1.5.6.
    +                                  * It was present as an experimental parameter in earlier versions,
+                                  * but it's not recommended to use it with earlier library versions
    +                                  * due to massive performance regressions.
    +                                  */
         /* LDM mode parameters */
         ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
                                          * This parameter is designed to improve compression ratio
                                          * for large inputs, by finding large matches at long distance.
                                          * It increases memory usage and window size.
                                          * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB
    -                                     * except when expressly set to a different value. */
    +                                     * except when expressly set to a different value.
    +                                     * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and
    +                                     * compression strategy >= ZSTD_btopt (== compression level 16+) */
         ZSTD_c_ldmHashLog=161,   /* Size of the table for long distance matching, as a power of 2.
                                   * Larger values increase memory usage and compression ratio,
                                   * but decrease compression speed.
    @@ -277,26 +356,30 @@ 

    Decompression context

      When decompressing many times,
         ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
                                   * Content size must be known at the beginning of compression.
                                   * This is automatically the case when using ZSTD_compress2(),
    -                              * For streaming variants, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
    +                              * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
         ZSTD_c_checksumFlag=201, /* A 32-bits checksum of content is written at end of frame (default:0) */
         ZSTD_c_dictIDFlag=202,   /* When applicable, dictionary's ID is written into frame header (default:1) */
     
         /* multi-threading parameters */
    -    /* These parameters are only useful if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
    -     * They return an error otherwise. */
    +    /* These parameters are only active if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
+     * Otherwise, trying to set any value other than default (0) will be a no-op and return an error.
    +     * In a situation where it's unknown if the linked library supports multi-threading or not,
    +     * setting ZSTD_c_nbWorkers to any value >= 1 and consulting the return value provides a quick way to check this property.
    +     */
         ZSTD_c_nbWorkers=400,    /* Select how many threads will be spawned to compress in parallel.
    -                              * When nbWorkers >= 1, triggers asynchronous mode when used with ZSTD_compressStream*() :
    +                              * When nbWorkers >= 1, triggers asynchronous mode when invoking ZSTD_compressStream*() :
                                   * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller,
    -                              * while compression work is performed in parallel, within worker threads.
    +                              * while compression is performed in parallel, within worker thread(s).
                                   * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end :
                                   *  in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call).
                                   * More workers improve speed, but also increase memory usage.
    -                              * Default value is `0`, aka "single-threaded mode" : no worker is spawned, compression is performed inside Caller's thread, all invocations are blocking */
    +                              * Default value is `0`, aka "single-threaded mode" : no worker is spawned,
    +                              * compression is performed inside Caller's thread, and all invocations are blocking */
         ZSTD_c_jobSize=401,      /* Size of a compression job. This value is enforced only when nbWorkers >= 1.
                                   * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
                                   * 0 means default, which is dynamically determined based on compression parameters.
    -                              * Job size must be a minimum of overlap size, or 1 MB, whichever is largest.
    -                              * The minimum size is automatically and transparently enforced */
    +                              * Job size must be a minimum of overlap size, or ZSTDMT_JOBSIZE_MIN (= 512 KB), whichever is largest.
    +                              * The minimum size is automatically and transparently enforced. */
         ZSTD_c_overlapLog=402,   /* Control the overlap size, as a fraction of window size.
                                   * The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
                                   * It helps preserve compression ratio, while each job is compressed in parallel.
    @@ -318,6 +401,18 @@ 

    Decompression context

      When decompressing many times,
          * ZSTD_c_forceMaxWindow
          * ZSTD_c_forceAttachDict
          * ZSTD_c_literalCompressionMode
    +     * ZSTD_c_srcSizeHint
    +     * ZSTD_c_enableDedicatedDictSearch
    +     * ZSTD_c_stableInBuffer
    +     * ZSTD_c_stableOutBuffer
    +     * ZSTD_c_blockDelimiters
    +     * ZSTD_c_validateSequences
    +     * ZSTD_c_blockSplitterLevel
    +     * ZSTD_c_splitAfterSequences
    +     * ZSTD_c_useRowMatchFinder
    +     * ZSTD_c_prefetchCDictTables
    +     * ZSTD_c_enableSeqProducerFallback
    +     * ZSTD_c_maxBlockSize
          * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
          * note : never ever use experimentalParam? names directly;
          *        also, the enums values themselves are unstable and can still change.
    @@ -327,6 +422,21 @@ 

    Decompression context

      When decompressing many times,
          ZSTD_c_experimentalParam3=1000,
          ZSTD_c_experimentalParam4=1001,
          ZSTD_c_experimentalParam5=1002,
    +     /* was ZSTD_c_experimentalParam6=1003; is now ZSTD_c_targetCBlockSize */
    +     ZSTD_c_experimentalParam7=1004,
    +     ZSTD_c_experimentalParam8=1005,
    +     ZSTD_c_experimentalParam9=1006,
    +     ZSTD_c_experimentalParam10=1007,
    +     ZSTD_c_experimentalParam11=1008,
    +     ZSTD_c_experimentalParam12=1009,
    +     ZSTD_c_experimentalParam13=1010,
    +     ZSTD_c_experimentalParam14=1011,
    +     ZSTD_c_experimentalParam15=1012,
    +     ZSTD_c_experimentalParam16=1013,
    +     ZSTD_c_experimentalParam17=1014,
    +     ZSTD_c_experimentalParam18=1015,
    +     ZSTD_c_experimentalParam19=1016,
    +     ZSTD_c_experimentalParam20=1017
     } ZSTD_cParameter;
     

    typedef struct {
    @@ -389,7 +499,7 @@ 

    Decompression context

      When decompressing many times,
                       They will be used to compress next frame.
                       Resetting session never fails.
       - The parameters : changes all parameters back to "default".
    -                  This removes any reference to any dictionary too.
    +                  This also removes any reference to any dictionary or external sequence producer.
                       Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)
                       otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
       - Both : similar to resetting the session, followed by resetting parameters.
    @@ -400,17 +510,19 @@ 

    Decompression context

      When decompressing many times,
                            void* dst, size_t dstCapacity,
                      const void* src, size_t srcSize);
     

Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
+ (note that this entry point doesn't even expose a compression level parameter).
 ZSTD_compress2() always starts a new frame.
 Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
- Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
- The function is always blocking, returns when compression is completed.
- Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have
+ enough space to successfully compress the data, though it is possible it fails for other reasons.
 @return : compressed size written into `dst` (<= `dstCapacity`),
           or an error code if it fails (which can be tested using ZSTD_isError()).
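A typical call sequence (a sketch):

```
#include <zstd.h>

/* Sketch : advanced one-pass compression. Parameters are pushed into the
 * context with ZSTD_CCtx_set*(), then ZSTD_compress2() applies them. */
static size_t compressAdvanced(ZSTD_CCtx* cctx,
                               void* dst, size_t dstCap,
                               const void* src, size_t srcSize)
{
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
    /* fails cleanly when the library is built without ZSTD_MULTITHREAD */
    (void)ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);
    return ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
}
```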


    -

    Advanced decompression API

    
    +

    Advanced decompression API (Requires v1.4.0+)

    
     
     
    typedef enum {
     
    @@ -424,11 +536,21 @@ 

    Decompression context

      When decompressing many times,
         /* note : additional experimental parameters are also available
          * within the experimental section of the API.
          * At the time of this writing, they include :
    -     * ZSTD_c_format
    +     * ZSTD_d_format
    +     * ZSTD_d_stableOutBuffer
    +     * ZSTD_d_forceIgnoreChecksum
    +     * ZSTD_d_refMultipleDDicts
    +     * ZSTD_d_disableHuffmanAssembly
    +     * ZSTD_d_maxBlockSize
          * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
          * note : never ever use experimentalParam? names directly
          */
    -     ZSTD_d_experimentalParam1=1000
    +     ZSTD_d_experimentalParam1=1000,
    +     ZSTD_d_experimentalParam2=1001,
    +     ZSTD_d_experimentalParam3=1002,
    +     ZSTD_d_experimentalParam4=1003,
    +     ZSTD_d_experimentalParam5=1004,
    +     ZSTD_d_experimentalParam6=1005
     
     } ZSTD_dParameter;
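As an example of the decompression side (a sketch; `ZSTD_d_windowLogMax` is the stable parameter of this enum, available since v1.4.0):

```
#include <zstd.h>

/* Sketch : cap the window size a frame may request before decompressing;
 * frames requiring more than 2^27 bytes (128 MB) of window are refused. */
static size_t decompressCapped(ZSTD_DCtx* dctx,
                               void* dst, size_t dstCap,
                               const void* src, size_t srcSize)
{
    ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters);
    ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
    return ZSTD_decompressDCtx(dctx, dst, dstCap, src, srcSize);
}
```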
     

    @@ -476,14 +598,14 @@

    Decompression context

      When decompressing many times,
       A ZSTD_CStream object is required to track streaming operation.
       Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
       ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
    -  It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
    +  It is recommended to reuse ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
     
       For parallel execution, use one separate ZSTD_CStream per thread.
     
       note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
     
       Parameters are sticky : when starting a new compression on the same context,
    -  it will re-use the same sticky parameters as previous compression session.
    +  it will reuse the same sticky parameters as previous compression session.
       When in doubt, it's recommended to fully initialize the context before usage.
       Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
       ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
    @@ -535,7 +657,7 @@ 

    Decompression context

      When decompressing many times,
     
    typedef ZSTD_CCtx ZSTD_CStream;  /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
     

    ZSTD_CStream management functions

    ZSTD_CStream* ZSTD_createCStream(void);
    -size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
    +size_t ZSTD_freeCStream(ZSTD_CStream* zcs);  /* accept NULL pointer */
     

    Streaming compression functions

    typedef enum {
         ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
    @@ -559,8 +681,9 @@ 

    Streaming compression functions

    typedef enum {
       - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)
       - output->pos must be <= dstCapacity, input->pos must be <= srcSize
       - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
    +  - endOp must be a valid directive
       - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
    -  - When nbWorkers>=1, function is non-blocking : it just acquires a copy of input, and distributes jobs to internal worker threads, flush whatever is available,
    +  - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flush to output whatever is available,
                                                       and then immediately returns, just indicating that there is some data remaining to be flushed.
                                                      The function nonetheless guarantees forward progress : it will return only after it reads or writes at least 1 byte.
       - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
    @@ -573,6 +696,11 @@ 

    Streaming compression functions

    typedef enum {
                 only ZSTD_e_end or ZSTD_e_flush operations are allowed.
                 Before starting a new compression job, or changing compression parameters,
                 it is required to fully flush internal buffers.
    +  - note: if an operation ends with an error, it may leave @cctx in an undefined state.
+          Therefore, it's UB to invoke ZSTD_compressStream2() or ZSTD_compressStream() on such a state.
    +          In order to be re-employed after an error, a state must be reset,
    +          which can be done explicitly (ZSTD_CCtx_reset()),
    +          or is sometimes implied by methods starting a new compression job (ZSTD_initCStream(), ZSTD_compressCCtx())
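In the simplest (single-shot, blocking) setting, the calling convention reduces to the loop below (a sketch assuming `dstCap >= ZSTD_compressBound(srcSize)`):

```
#include <zstd.h>

/* Sketch : compress a whole in-memory buffer as one frame with ZSTD_e_end. */
static size_t streamCompressBuffer(ZSTD_CCtx* cctx,
                                   void* dst, size_t dstCap,  /* >= ZSTD_compressBound(srcSize) */
                                   const void* src, size_t srcSize)
{
    ZSTD_inBuffer  input  = { src, srcSize, 0 };
    ZSTD_outBuffer output = { dst, dstCap, 0 };
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
    for (;;) {
        size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;  /* cctx must be reset before reuse */
        if (remaining == 0) return output.pos;          /* frame complete and fully flushed */
        /* remaining > 0 : forward progress is guaranteed, so simply call again */
    }
}
```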
      
     


    @@ -596,13 +724,16 @@

    Streaming compression functions

    typedef enum {
          ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
          ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
          ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
    +
    + Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API
    + to compress with a dictionary.
      
     


    Streaming decompression - HowTo

       A ZSTD_DStream object is required to track streaming operations.
       Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
    -  ZSTD_DStream objects can be re-used multiple times.
    +  ZSTD_DStream objects can be re-employed multiple times.
     
       Use ZSTD_initDStream() to start a new decompression operation.
      @return : recommended first input size
    @@ -612,25 +743,64 @@ 

    Streaming compression functions

    typedef enum {
       The function will update both `pos` fields.
       If `input.pos < input.size`, some input has not been consumed.
       It's up to the caller to present again remaining data.
    +
       The function tries to flush all data decoded immediately, respecting output buffer size.
       If `output.pos < output.size`, decoder has flushed everything it could.
    -  But if `output.pos == output.size`, there might be some data left within internal buffers.,
    +
    +  However, when `output.pos == output.size`, it's more difficult to know.
    +  If @return > 0, the frame is not complete, meaning
    +  either there is still some data left to flush within internal buffers,
    +  or there is more input to read to complete the frame (or both).
       In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
       Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
      @return : 0 when a frame is completely decoded and fully flushed,
             or an error code, which can be tested using ZSTD_isError(),
             or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
                                     the return value is a suggested next input size (just a hint for better latency)
    -                                that will never request more than the remaining frame size.
    +                                that will never request more than the remaining content of the compressed frame.
      
     
    typedef ZSTD_DCtx ZSTD_DStream;  /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
     

    ZSTD_DStream management functions

    ZSTD_DStream* ZSTD_createDStream(void);
    -size_t ZSTD_freeDStream(ZSTD_DStream* zds);
    +size_t ZSTD_freeDStream(ZSTD_DStream* zds);  /* accept NULL pointer */
     

    Streaming decompression functions


    +
    size_t ZSTD_initDStream(ZSTD_DStream* zds);
    +

    Initialize/reset DStream state for new decompression operation. + Call before new decompression operation using same DStream. + + Note : This function is redundant with the advanced API and equivalent to: + ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); + ZSTD_DCtx_refDDict(zds, NULL); + +


    + +
    size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
    +

Streaming decompression function.
+ Call repeatedly to consume full input, updating it as necessary.
+ Function will update both input and output `pos` fields exposing current state via these fields:
+ - `input.pos < input.size`, some input remaining and caller should provide remaining input
+   on the next call.
+ - `output.pos < output.size`, decoder flushed internal output buffer.
+ - `output.pos == output.size`, unflushed data potentially present in the internal buffers,
+   check ZSTD_decompressStream() @return value,
+   if > 0, invoke it again to flush remaining data to output.
+ Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.
+
+ @return : 0 when a frame is completely decoded and fully flushed,
+           or an error code, which can be tested using ZSTD_isError(),
+           or any other value > 0, which means there is some decoding or flushing to do to complete current frame.
+
+ Note: when an operation returns with an error code, the @zds state may be left in undefined state.
+       It's UB to invoke `ZSTD_decompressStream()` on such a state.
+       In order to re-use such a state, it must be first reset,
+       which can be done explicitly (`ZSTD_DCtx_reset()`),
+       or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`)
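The loop structure this implies (a sketch; `sink` is a hypothetical application callback receiving decompressed bytes):

```
#include <zstd.h>

typedef void (*sink_fn)(const void* data, size_t size, void* opaque);

/* Sketch : decompress one frame held in memory, flushing through a callback.
 * outBuf must hold at least ZSTD_DStreamOutSize() bytes. */
static size_t streamDecompress(ZSTD_DCtx* dctx,
                               const void* src, size_t srcSize,
                               void* outBuf, size_t outCap,
                               sink_fn sink, void* opaque)
{
    ZSTD_inBuffer input = { src, srcSize, 0 };
    size_t ret = ZSTD_initDStream(dctx);   /* reset state before a new operation */
    if (ZSTD_isError(ret)) return ret;
    do {
        ZSTD_outBuffer output = { outBuf, outCap, 0 };
        ret = ZSTD_decompressStream(dctx, &output, &input);
        if (ZSTD_isError(ret)) return ret;  /* dctx must be reset before reuse */
        sink(outBuf, output.pos, opaque);
        if (input.pos == input.size && output.pos == 0)
            return ret;   /* truncated input : more compressed data is needed */
    } while (ret != 0);   /* 0 : frame completely decoded and fully flushed */
    return 0;
}
```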


    +
    size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
     

    size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */
    @@ -644,7 +814,7 @@ 

    Streaming decompression functions


    int compressionLevel);

    Compression at an explicit compression level using a Dictionary. A dictionary can be any arbitrary data segment (also called a prefix), - or a buffer with specified information (see dictBuilder/zdict.h). + or a buffer with specified information (see zdict.h). Note : This function loads the dictionary, resulting in significant startup delay. It's intended for a dictionary used only once. Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. @@ -665,16 +835,22 @@

    Streaming decompression functions


    ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
                                  int compressionLevel);
    -

    When compressing multiple messages / blocks using the same dictionary, it's recommended to load it only once. - ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup cost. +

    When compressing multiple messages or blocks using the same dictionary, + it's recommended to digest the dictionary only once, since it's a costly operation. + ZSTD_createCDict() will create a state from digesting a dictionary. + The resulting state can be used for future compression operations with very limited startup cost. ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. - `dictBuffer` can be released after ZSTD_CDict creation, because its content is copied within CDict. - Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate `dictBuffer` content. - Note : A ZSTD_CDict can be created from an empty dictBuffer, but it is inefficient when used to compress small data. + @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict. + Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content. + Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer, + in which case the only thing that it transports is the @compressionLevel. + This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively, + expecting a ZSTD_CDict parameter with any data, including those without a known dictionary.


    size_t      ZSTD_freeCDict(ZSTD_CDict* CDict);
    -

    Function frees memory allocated by ZSTD_createCDict(). +

    Function frees memory allocated by ZSTD_createCDict(). + If a NULL pointer is passed, no operation is performed.
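A minimal lifecycle sketch tying these together:

```
#include <zstd.h>

/* Sketch : digest a dictionary once, use it, then release everything.
 * A real application would keep the CDict and share it across calls
 * (and across threads : CDict usage is read-only). */
static size_t compressWithDict(const void* dictBuf, size_t dictSize,
                               void* dst, size_t dstCap,
                               const void* src, size_t srcSize)
{
    ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);
    ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
    size_t cSize = (size_t)-1;   /* illustrative failure sentinel */
    if (cdict != NULL && cctx != NULL)
        cSize = ZSTD_compress_usingCDict(cctx, dst, dstCap, src, srcSize, cdict);
    ZSTD_freeCCtx(cctx);         /* both free functions accept NULL */
    ZSTD_freeCDict(cdict);
    return cSize;
}
```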


    size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
    @@ -693,7 +869,8 @@ 

    Streaming decompression functions



    size_t      ZSTD_freeDDict(ZSTD_DDict* ddict);
    -

    Function frees memory allocated with ZSTD_createDDict() +

    Function frees memory allocated with ZSTD_createDDict() + If a NULL pointer is passed, no operation is performed.


    size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
    @@ -712,6 +889,12 @@ 

    Streaming decompression functions


    It can still be loaded, but as a content-only dictionary.


    +
    unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict);
    +

    Provides the dictID of the dictionary loaded into `cdict`. + If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. + Non-conformant dictionaries can still be loaded, but as content-only dictionaries. +


    +
    unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
     

    Provides the dictID of the dictionary loaded into `ddict`. If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. @@ -723,18 +906,20 @@

    Streaming decompression functions


    If @return == 0, the dictID could not be decoded. This could for one of the following reasons : - The frame does not require a dictionary to be decoded (most common case). - - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information. + - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden piece of information. Note : this use case also happens when using a non-conformant dictionary. - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`). - This is not a Zstandard frame. When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code.
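These helpers combine naturally into a consistency check (a sketch):

```
#include <zstd.h>

/* Sketch : does this frame declare the dictID carried by @ddict ?
 * Returns 1 on match, 0 on mismatch, -1 when it cannot be determined
 * (either side reported a dictID of 0). */
static int frameMatchesDDict(const void* frame, size_t frameSize,
                             const ZSTD_DDict* ddict)
{
    unsigned const frameID = ZSTD_getDictID_fromFrame(frame, frameSize);
    unsigned const dictID  = ZSTD_getDictID_fromDDict(ddict);
    if (frameID == 0 || dictID == 0) return -1;
    return frameID == dictID;
}
```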


    -

    Advanced dictionary and prefix API

    +

    Advanced dictionary and prefix API (Requires v1.4.0+)

      This API allows dictionaries to be used with ZSTD_compress2(),
    - ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
    - only reset with the context is reset with ZSTD_reset_parameters or
    - ZSTD_reset_session_and_parameters. Prefixes are single-use.
    + ZSTD_compressStream2(), and ZSTD_decompressDCtx().
+ Dictionaries are sticky : they remain valid when the same context is reused,
+ and are only reset when the context is reset
    + with ZSTD_reset_parameters or ZSTD_reset_session_and_parameters.
    + In contrast, Prefixes are single-use.
     
    size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
    @@ -743,8 +928,9 @@ 

    Streaming decompression functions


    @result : 0, or an error code (which can be tested with ZSTD_isError()). Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary, meaning "return to no-dictionary mode". - Note 1 : Dictionary is sticky, it will be used for all future compressed frames. - To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters). + Note 1 : Dictionary is sticky, it will be used for all future compressed frames, + until parameters are reset, a new dictionary is loaded, or the dictionary + is explicitly invalidated by loading a NULL dictionary. Note 2 : Loading a dictionary involves building tables. It's also a CPU consuming operation, with non-negligible impact on latency. Tables are dependent on compression parameters, and for this reason, @@ -753,14 +939,18 @@

    Streaming decompression functions


    Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead. In such a case, dictionary buffer must outlive its users. Note 4 : Use ZSTD_CCtx_loadDictionary_advanced() - to precisely select how dictionary content must be interpreted. + to precisely select how dictionary content must be interpreted. + Note 5 : This method does not benefit from LDM (long distance mode). + If you want to employ LDM on some large dictionary content, + prefer employing ZSTD_CCtx_refPrefix() described below. +


    size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
    -

    Reference a prepared dictionary, to be used for all next compressed frames. +

    Reference a prepared dictionary, to be used for all future compressed frames. Note that compression parameters are enforced from within CDict, and supersede any compression parameter previously set within CCtx. - The parameters ignored are labled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. + The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. The ignored parameters will be used again if the CCtx is returned to no-dictionary mode. The dictionary will remain valid for future compressed frames using same CCtx. @result : 0, or an error code (which can be tested with ZSTD_isError()). @@ -777,6 +967,7 @@

    Streaming decompression functions


    Decompression will need same prefix to properly regenerate data. Compressing with a prefix is similar in outcome as performing a diff and compressing it, but performs much faster, especially during decompression (compression speed is tunable with compression level). + This method is compatible with LDM (long distance mode). @result : 0, or an error code (which can be tested with ZSTD_isError()). Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary Note 1 : Prefix buffer is referenced. It **must** outlive compression. @@ -787,14 +978,14 @@

    Streaming decompression functions


    Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters. It's a CPU consuming operation, with non-negligible impact on latency. If there is a need to use the same prefix multiple times, consider loadDictionary instead. - Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dm_rawContent). + Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent). Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation.


    size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
    -

    Create an internal DDict from dict buffer, - to be used to decompress next frames. - The dictionary remains valid for all future frames, until explicitly invalidated. +

    Create an internal DDict from dict buffer, to be used to decompress all future frames. + The dictionary remains valid for all future frames, until explicitly invalidated, or + a new dictionary is loaded. @result : 0, or an error code (which can be tested with ZSTD_isError()). Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, meaning "return to no-dictionary mode". @@ -811,9 +1002,17 @@

    Streaming decompression functions


    size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
     

    Reference a prepared dictionary, to be used to decompress next frames. The dictionary remains active for decompression of future frames using same DCtx. + + If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function + will store the DDict references in a table, and the DDict used for decompression + will be determined at decompression time, as per the dict ID in the frame. + The memory for the table is allocated on the first call to refDDict, and can be + freed with ZSTD_freeDCtx(). + + If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary + will be managed, and referencing a dictionary effectively "discards" any previous one. + @result : 0, or an error code (which can be tested with ZSTD_isError()). - Note 1 : Currently, only one dictionary can be managed. - Referencing a new dictionary effectively "discards" any previous one. Special: referencing a NULL DDict means "return to no-dictionary mode". Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. @@ -831,7 +1030,7 @@

    Streaming decompression functions


    Note 2 : Prefix buffer is referenced. It **must** outlive decompression. Prefix buffer must remain unmodified up to the end of frame, reached when ZSTD_decompressStream() returns 0. - Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent). + Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent). Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section) Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost. A full dictionary is more costly, as it requires building tables. @@ -857,6 +1056,43 @@

    Streaming decompression functions



    +
    typedef struct {
    +    unsigned int offset;      /* The offset of the match. (NOT the same as the offset code)
    +                               * If offset == 0 and matchLength == 0, this sequence represents the last
    +                               * literals in the block of litLength size.
    +                               */
    +
    +    unsigned int litLength;   /* Literal length of the sequence. */
    +    unsigned int matchLength; /* Match length of the sequence. */
    +
    +                              /* Note: Users of this API may provide a sequence with matchLength == litLength == offset == 0.
    +                               * In this case, we will treat the sequence as a marker for a block boundary.
    +                               */
    +
    +    unsigned int rep;         /* Represents which repeat offset is represented by the field 'offset'.
    +                               * Ranges from [0, 3].
    +                               *
    +                               * Repeat offsets are essentially previous offsets from previous sequences sorted in
    +                               * recency order. For more detail, see doc/zstd_compression_format.md
    +                               *
    +                               * If rep == 0, then 'offset' does not contain a repeat offset.
    +                               * If rep > 0:
    +                               *  If litLength != 0:
    +                               *      rep == 1 --> offset == repeat_offset_1
    +                               *      rep == 2 --> offset == repeat_offset_2
    +                               *      rep == 3 --> offset == repeat_offset_3
    +                               *  If litLength == 0:
    +                               *      rep == 1 --> offset == repeat_offset_2
    +                               *      rep == 2 --> offset == repeat_offset_3
    +                               *      rep == 3 --> offset == repeat_offset_1 - 1
    +                               *
    +                               * Note: This field is optional. ZSTD_generateSequences() will calculate the value of
    +                               * 'rep', but repeat offsets do not necessarily need to be calculated from an external
    +                               * sequence provider perspective. For example, ZSTD_compressSequences() does not
    +                               * use this 'rep' field at all (as of now).
    +                               */
    +} ZSTD_Sequence;
    +

    typedef struct {
         unsigned windowLog;       /**< largest match distance : larger == more compression, more memory needed during decompression */
         unsigned chainLog;        /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
@@ -886,32 +1122,35 @@ Streaming decompression functions



    typedef enum {
         ZSTD_dlm_byCopy = 0,  /**< Copy dictionary content internally */
    -    ZSTD_dlm_byRef = 1,   /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
    +    ZSTD_dlm_byRef = 1    /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
     } ZSTD_dictLoadMethod_e;
     

    typedef enum {
    -    /* Opened question : should we have a format ZSTD_f_auto ?
    -     * Today, it would mean exactly the same as ZSTD_f_zstd1.
    -     * But, in the future, should several formats become supported,
    -     * on the compression side, it would mean "default format".
    -     * On the decompression side, it would mean "automatic format detection",
    -     * so that ZSTD_f_zstd1 would mean "accept *only* zstd frames".
    -     * Since meaning is a little different, another option could be to define different enums for compression and decompression.
    -     * This question could be kept for later, when there are actually multiple formats to support,
    -     * but there is also the question of pinning enum values, and pinning value `0` is especially important */
         ZSTD_f_zstd1 = 0,           /* zstd frame format, specified in zstd_compression_format.md (default) */
    -    ZSTD_f_zstd1_magicless = 1, /* Variant of zstd frame format, without initial 4-bytes magic number.
    +    ZSTD_f_zstd1_magicless = 1  /* Variant of zstd frame format, without initial 4-bytes magic number.
                                      * Useful to save 4 bytes per generated frame.
                                  * Decoder cannot automatically recognise this format, requiring this instruction. */
     } ZSTD_format_e;
     

    +
    typedef enum {
    +    /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */
    +    ZSTD_d_validateChecksum = 0,
    +    ZSTD_d_ignoreChecksum = 1
    +} ZSTD_forceIgnoreChecksum_e;
    +

    +
    typedef enum {
    +    /* Note: this enum controls ZSTD_d_refMultipleDDicts */
    +    ZSTD_rmd_refSingleDDict = 0,
    +    ZSTD_rmd_refMultipleDDicts = 1
    +} ZSTD_refMultipleDDicts_e;
    +

    typedef enum {
         /* Note: this enum and the behavior it controls are effectively internal
          * implementation details of the compressor. They are expected to continue
          * to evolve and should be considered only in the context of extremely
          * advanced performance tuning.
          *
    -     * Zstd currently supports the use of a CDict in two ways:
    +     * Zstd currently supports the use of a CDict in three ways:
          *
          * - The contents of the CDict can be copied into the working context. This
          *   means that the compression can search both the dictionary and input
@@ -927,6 +1166,12 @@ Streaming decompression functions


     * working context's tables can be reused). For small inputs, this can be
     * faster than copying the CDict's tables.
     *
+    * - The CDict's tables are not used at all, and instead we use the working
+    *   context alone to reload the dictionary and use params based on the source
+    *   size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().
+    *   This method is effective when the dictionary sizes are very small relative
+    *   to the input size, and the input size is fairly large to begin with.
+    *
     * Zstd has a simple internal heuristic that selects which strategy to use
     * at the beginning of a compression. However, if experimentation shows that
     * Zstd is making poor choices, it is possible to override that choice with
@@ -935,6 +1180,7 @@ Streaming decompression functions


     ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */
     ZSTD_dictForceAttach   = 1, /* Never copy the dictionary. */
     ZSTD_dictForceCopy     = 2, /* Always copy the dictionary. */
+    ZSTD_dictForceLoad     = 3  /* Always reload the dictionary */
 } ZSTD_dictAttachPref_e;

    typedef enum {
@@ -943,12 +1189,22 @@ Streaming decompression functions

     * levels will be compressed. */
     ZSTD_lcm_huffman = 1,      /**< Always attempt Huffman compression. Uncompressed literals will still be
                                 *   emitted if Huffman compression is not profitable. */
-    ZSTD_lcm_uncompressed = 2, /**< Always emit uncompressed literals. */
+    ZSTD_lcm_uncompressed = 2  /**< Always emit uncompressed literals. */
 } ZSTD_literalCompressionMode_e;

    -

    Frame size functions

    
    +
    typedef enum {
    +  /* Note: This enum controls features which are conditionally beneficial.
    +   * Zstd can take a decision on whether or not to enable the feature (ZSTD_ps_auto),
+   * but setting the switch to ZSTD_ps_enable or ZSTD_ps_disable forces the feature on or off.
    +   */
    +  ZSTD_ps_auto = 0,         /* Let the library automatically determine whether the feature shall be enabled */
    +  ZSTD_ps_enable = 1,       /* Force-enable the feature */
    +  ZSTD_ps_disable = 2       /* Do not use the feature */
    +} ZSTD_ParamSwitch_e;
    +

    +

    Frame header and size functions

    
     
    -
    unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
    +
    ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
     

    `src` should point to the start of a series of ZSTD encoded and/or skippable frames
    `srcSize` must be the _exact_ size of this series
        (i.e. there should be a frame boundary at `src + srcSize`)
@@ -971,12 +1227,12 @@ Streaming decompression functions


    however it does mean that all frame data must be present and valid.


    -
    unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
    +
    ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
     

    `src` should point to the start of a series of ZSTD encoded and/or skippable frames
    `srcSize` must be the _exact_ size of this series
        (i.e. there should be a frame boundary at `src + srcSize`)
    @return : - upper-bound for the decompressed size of all data in all successive frames
-             - if an error occured: ZSTD_CONTENTSIZE_ERROR
+             - if an error occurred: ZSTD_CONTENTSIZE_ERROR
    note 1 : an error can occur if `src` contains an invalid or incorrectly formatted frame.
    note 2 : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.
@@ -986,58 +1242,318 @@ Streaming decompression functions



    -
    size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
    -

-   srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.

    ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
    +

+   srcSize must be large enough, aka >= ZSTD_FRAMEHEADERSIZE_PREFIX.
    @return : size of the Frame Header,
              or an error code (if srcSize is too small)


    +
    typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_FrameType_e;
    +

    +
    typedef struct {
    +    unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
    +    unsigned long long windowSize;       /* can be very large, up to <= frameContentSize */
    +    unsigned blockSizeMax;
    +    ZSTD_FrameType_e frameType;          /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
    +    unsigned headerSize;
    +    unsigned dictID;                     /* for ZSTD_skippableFrame, contains the skippable magic variant [0-15] */
    +    unsigned checksumFlag;
    +    unsigned _reserved1;
    +    unsigned _reserved2;
    +} ZSTD_FrameHeader;
    +

    +
    ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize);
    +/*! ZSTD_getFrameHeader_advanced() :
    + *  same as ZSTD_getFrameHeader(),
    + *  with added capability to select a format (like ZSTD_f_zstd1_magicless) */
    +ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
    +

+   decode Frame Header into `zfhPtr`, or requires larger `srcSize`.
+   @return : 0 => header is complete, `zfhPtr` is correctly filled,
+            >0 => `srcSize` is too small, @return value is the wanted `srcSize` amount,
+                  `zfhPtr` is not filled,
+            or an error code, which can be tested using ZSTD_isError()
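As a small illustration of the return convention just described, a header probe could look like the sketch below (the ZSTD_FrameHeader spelling follows the declaration above; older releases spell the type ZSTD_frameHeader):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdio.h>

/* Inspect a possibly-truncated buffer holding the start of a frame.
 * Returns 0 once the header was fully parsed. */
int print_frame_info(const void* src, size_t srcSize)
{
    ZSTD_FrameHeader zfh;
    size_t const r = ZSTD_getFrameHeader(&zfh, src, srcSize);
    if (ZSTD_isError(r)) { printf("not a valid zstd frame\n"); return 1; }
    if (r > 0) { printf("need at least %zu header bytes\n", r); return 1; }
    printf("windowSize=%llu dictID=%u checksum=%u\n",
           zfh.windowSize, zfh.dictID, zfh.checksumFlag);
    return 0;
}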


    + +
    ZSTDLIB_STATIC_API size_t ZSTD_decompressionMargin(const void* src, size_t srcSize);
    +

+   Zstd supports in-place decompression, where the input and output buffers overlap.
+   In this case, the output buffer must be at least (Margin + Output_Size) bytes large,
+   and the input buffer must be at the end of the output buffer.
+
+    _______________________ Output Buffer ________________________
+   |                                                              |
+   |                                        ____ Input Buffer ____|
+   |                                       |                      |
+   v                                       v                      v
+   |---------------------------------------|-----------|----------|
+   ^                                       ^           ^
+   |___________________ Output_Size ___________________|_ Margin _|
+
+   NOTE: See also ZSTD_DECOMPRESSION_MARGIN().
+   NOTE: This applies only to single-pass decompression through ZSTD_decompress() or
+         ZSTD_decompressDCtx().
+   NOTE: This function supports multi-frame input.
+
+   @param src The compressed frame(s)
+   @param srcSize The size of the compressed frame(s)
+   @returns The decompression margin or an error that can be checked with ZSTD_isError().
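A sketch of the layout above (single frame; `originalSize` is assumed known, e.g. from ZSTD_getFrameContentSize(); in a real in-place scenario the compressed data would already sit at the tail of the buffer, so the copies here only keep the example self-contained):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdlib.h>
#include <string.h>

size_t decompress_in_place(void* dst, size_t originalSize,
                           const void* compressed, size_t compressedSize)
{
    size_t const margin = ZSTD_decompressionMargin(compressed, compressedSize);
    size_t bufSize, dSize;
    char* buf;
    if (ZSTD_isError(margin)) return margin;

    bufSize = originalSize + margin;          /* Output_Size + Margin */
    buf = (char*)malloc(bufSize);
    if (buf == NULL) return (size_t)-1;       /* reads as an error via ZSTD_isError() */

    /* Input must sit at the very end of the oversized output buffer. */
    memcpy(buf + bufSize - compressedSize, compressed, compressedSize);
    dSize = ZSTD_decompress(buf, bufSize,
                            buf + bufSize - compressedSize, compressedSize);
    if (!ZSTD_isError(dSize)) memcpy(dst, buf, dSize);
    free(buf);
    return dSize;
}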


    + +
    #define ZSTD_DECOMPRESSION_MARGIN(originalSize, blockSize) ((size_t)(                                              \
    +        ZSTD_FRAMEHEADERSIZE_MAX                                                              /* Frame header */ + \
    +        4                                                                                         /* checksum */ + \
    +        ((originalSize) == 0 ? 0 : 3 * (((originalSize) + (blockSize) - 1) / blockSize)) /* 3 bytes per block */ + \
    +        (blockSize)                                                                    /* One block of margin */   \
    +    ))
    +

+   Similar to ZSTD_decompressionMargin(), but instead of computing the margin from
+   the compressed frame, compute it from the original size and the blockSizeLog.
+   See ZSTD_decompressionMargin() for details.
+
+   WARNING: This macro does not support multi-frame input, the input must be a single
+   zstd frame. If you need that support use the function, or implement it yourself.
+
+   @param originalSize The original uncompressed size of the data.
+   @param blockSize    The block size == MIN(windowSize, ZSTD_BLOCKSIZE_MAX).
+                       Unless you explicitly set the windowLog smaller than
+                       ZSTD_BLOCKSIZELOG_MAX you can just use ZSTD_BLOCKSIZE_MAX.


    + +
    typedef enum {
    +  ZSTD_sf_noBlockDelimiters = 0,         /* ZSTD_Sequence[] has no block delimiters, just sequences */
    +  ZSTD_sf_explicitBlockDelimiters = 1    /* ZSTD_Sequence[] contains explicit block delimiters */
    +} ZSTD_SequenceFormat_e;
    +

    +
    ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound(size_t srcSize);
    +

+   `srcSize` : size of the input buffer
+   @return : upper-bound for the number of sequences that can be generated
+             from a buffer of srcSize bytes
+
+   note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence).


    + +
    ZSTD_DEPRECATED("For debugging only, will be replaced by ZSTD_extractSequences()")
    +ZSTDLIB_STATIC_API size_t
    +ZSTD_generateSequences(ZSTD_CCtx* zc,
    +           ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
    +           const void* src, size_t srcSize);
    +

+   WARNING: This function is meant for debugging and informational purposes ONLY!
+   Its implementation is flawed, and it will be deleted in a future version.
+   It is not guaranteed to succeed, as there are several cases where it will give
+   up and fail. You should NOT use this function in production code.
+
+   This function is deprecated, and will be removed in a future version.
+
+   Generate sequences using ZSTD_compress2(), given a source buffer.
+
+   @param zc The compression context to be used for ZSTD_compress2(). Set any
+             compression parameters you need on this context.
+   @param outSeqs The output sequences buffer of size @p outSeqsSize
+   @param outSeqsCapacity The size of the output sequences buffer.
+             ZSTD_sequenceBound(srcSize) is an upper bound on the number
+             of sequences that can be generated.
+   @param src The source buffer to generate sequences from of size @p srcSize.
+   @param srcSize The size of the source buffer.
+
+   Each block will end with a dummy sequence
+   with offset == 0, matchLength == 0, and litLength == length of last literals.
+   litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)
+   simply acts as a block delimiter.
+
+   @returns The number of sequences generated, necessarily less than
+            ZSTD_sequenceBound(srcSize), or an error code that can be checked
+            with ZSTD_isError().
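Under those caveats, a debugging dump of the sequences zstd finds might look like this sketch (C99; calling the deprecated symbol will itself trigger the advertised compilation warning):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdio.h>
#include <stdlib.h>

void dump_sequences(const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t const maxSeqs = ZSTD_sequenceBound(srcSize);
    ZSTD_Sequence* const seqs = (ZSTD_Sequence*)malloc(maxSeqs * sizeof(ZSTD_Sequence));
    if (seqs != NULL) {
        size_t const nbSeqs = ZSTD_generateSequences(cctx, seqs, maxSeqs, src, srcSize);
        if (!ZSTD_isError(nbSeqs)) {
            for (size_t i = 0; i < nbSeqs; i++)  /* (of:0 ml:0) entries are block delimiters */
                printf("ll=%u ml=%u of=%u rep=%u\n",
                       seqs[i].litLength, seqs[i].matchLength, seqs[i].offset, seqs[i].rep);
        }
    }
    free(seqs);
    ZSTD_freeCCtx(cctx);
}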


    + +
    ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
    +

+   Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals
+   by merging them into the literals of the next sequence.
+
+   As such, the final generated result has no explicit representation of block boundaries,
+   and the final last literals segment is not represented in the sequences.
+
+   The output of this function can be fed into ZSTD_compressSequences() with CCtx
+   setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters
+   @return : number of sequences left after merging


    + +
    ZSTDLIB_STATIC_API size_t
    +ZSTD_compressSequences(ZSTD_CCtx* cctx,
    +           void* dst, size_t dstCapacity,
    +     const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
    +     const void* src, size_t srcSize);
    +

+   Compress an array of ZSTD_Sequence, associated with @src buffer, into dst.
+   @src contains the entire input (not just the literals).
+   If @srcSize > sum(sequence.length), the remaining bytes are considered all literals
+   If a dictionary is included, then the cctx should reference the dict
+   (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.).
+   The entire source is compressed into a single frame.
+
+   The compression behavior changes based on cctx params. In particular:
+   If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain
+   no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on
+   the block size derived from the cctx, and sequences may be split. This is the default setting.
+
+   If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
+   valid block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
+
+   When ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, it's possible to decide generating repcodes
+   using the advanced parameter ZSTD_c_repcodeResolution. Repcodes will improve compression ratio, though the benefit
+   can vary greatly depending on Sequences. On the other hand, repcode resolution is an expensive operation.
+   By default, it's disabled at low (<10) compression levels, and enabled above the threshold (>=10).
+   ZSTD_c_repcodeResolution makes it possible to directly manage this processing in either direction.
+
+   If ZSTD_c_validateSequences == 0, this function blindly accepts the Sequences provided. Invalid Sequences cause undefined
+   behavior. If ZSTD_c_validateSequences == 1, then the function will detect invalid Sequences (see doc/zstd_compression_format.md for
+   specifics regarding offset/matchlength requirements) and then bail out and return an error.
+
+   In addition to the two adjustable experimental params, there are other important cctx params.
+   - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder.
+     It has a minimum value of ZSTD_MINMATCH_MIN.
+   - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.
+   - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
+     is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
+
+   Note: Repcodes are, as of now, always re-calculated within this function, ZSTD_Sequence.rep is effectively unused.
+   Dev Note: Once ability to ingest repcodes become available, the explicit block delims mode must respect those repcodes exactly,
+             and cannot emit an RLE block that disagrees with the repcode history.
+   @return : final compressed size, or a ZSTD error code.
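Putting the last three declarations together, a no-block-delimiters round trip could be sketched as follows (still relying on the deprecated ZSTD_generateSequences() as the sequence source, purely for illustration; a production sequence provider would replace it):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdlib.h>

size_t compress_via_sequences(void* dst, size_t dstCap,
                              const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t const maxSeqs = ZSTD_sequenceBound(srcSize);
    ZSTD_Sequence* const seqs = (ZSTD_Sequence*)malloc(maxSeqs * sizeof(ZSTD_Sequence));
    size_t cSize = 0;

    if (seqs != NULL) {
        size_t nbSeqs = ZSTD_generateSequences(cctx, seqs, maxSeqs, src, srcSize);
        if (!ZSTD_isError(nbSeqs)) {
            /* Strip delimiters, then declare ZSTD_sf_noBlockDelimiters accordingly. */
            nbSeqs = ZSTD_mergeBlockDelimiters(seqs, nbSeqs);
            ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_noBlockDelimiters);
            ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1);  /* defensive */
            cSize = ZSTD_compressSequences(cctx, dst, dstCap, seqs, nbSeqs, src, srcSize);
        }
    }
    free(seqs);
    ZSTD_freeCCtx(cctx);
    return cSize;    /* 0 on setup failure, else check with ZSTD_isError() */
}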


    + +
    ZSTDLIB_STATIC_API size_t
    +ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx,
    +                      void* dst, size_t dstCapacity,
    +                const ZSTD_Sequence* inSeqs, size_t nbSequences,
    +                const void* literals, size_t litSize, size_t litBufCapacity,
    +                size_t decompressedSize);
    +

+   This is a variant of ZSTD_compressSequences() which,
+   instead of receiving (src,srcSize) as input parameter, receives (literals,litSize),
+   aka all the literals, already extracted and laid out into a single continuous buffer.
+   This can be useful if the process generating the sequences also happens to generate the buffer of literals,
+   thus skipping an extraction + caching stage.
+   It's a speed optimization, useful when the right conditions are met,
+   but it also features the following limitations:
+   - Only supports explicit delimiter mode
+   - Currently does not support Sequences validation (so input Sequences are trusted)
+   - Not compatible with frame checksum, which must be disabled
+   - If any block is incompressible, will fail and return an error
+   - @litSize must be == sum of all @.litLength fields in @inSeqs. Any discrepancy will generate an error.
+   - @litBufCapacity is the size of the underlying buffer into which literals are written, starting at address @literals.
+     @litBufCapacity must be at least 8 bytes larger than @litSize.
+   - @decompressedSize must be correct, and correspond to the sum of all Sequences. Any discrepancy will generate an error.
+   @return : final compressed size, or a ZSTD error code.


    + +
    ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
    +                                 const void* src, size_t srcSize,
    +                                       unsigned magicVariant);
    +

+   Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.
+
+   Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
+   ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
+   As such, the parameter magicVariant controls the exact skippable frame magic number variant used,
+   so the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
+
+   Returns an error if destination buffer is not large enough, if the source size is not representable
+   with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
+
+   @return : number of bytes written or a ZSTD error.


    + +
    ZSTDLIB_STATIC_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity,
    +                                      unsigned* magicVariant,
    +                                      const void* src, size_t srcSize);
    +

+   Retrieves the content of a zstd skippable frame starting at @src, and writes it to @dst buffer.
+
+   The parameter @magicVariant will receive the magicVariant that was supplied when the frame was written,
+   i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START.
+   This can be NULL if the caller is not interested in the magicVariant.
+
+   Returns an error if destination buffer is not large enough, or if the frame is not skippable.
+
+   @return : number of bytes written or a ZSTD error.


    + +
    ZSTDLIB_STATIC_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
    +

+   Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.
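The three skippable-frame entry points compose naturally; a self-contained round trip, using magic variant 5 (an arbitrary choice for the example):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdio.h>

int main(void)
{
    const char payload[] = "application metadata";
    char frame[128];
    char out[128];
    unsigned variant = 0;

    size_t const fSize = ZSTD_writeSkippableFrame(frame, sizeof(frame),
                                                  payload, sizeof(payload), 5);
    if (ZSTD_isError(fSize)) return 1;

    if (ZSTD_isSkippableFrame(frame, fSize)) {
        size_t const rSize = ZSTD_readSkippableFrame(out, sizeof(out),
                                                     &variant, frame, fSize);
        if (!ZSTD_isError(rSize))
            printf("recovered %zu bytes, variant %u\n", rSize, variant); /* variant == 5 */
    }
    return 0;
}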


    +

    Memory management

    
     
    -
    size_t ZSTD_estimateCCtxSize(int compressionLevel);
    -size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
    -size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
    -size_t ZSTD_estimateDCtxSize(void);
    +
    ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int maxCompressionLevel);
    +ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
    +ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
    +ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void);
     

    These functions make it possible to estimate memory usage of a future {D,C}Ctx, before its creation.
-   ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one.
-   It will also consider src size to be arbitrarily "large", which is worst case.
-   If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
-   ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
-   ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter().
-   Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
-   Note : CCtx size estimation is only correct for single-threaded compression.
+   This is useful in combination with ZSTD_initStatic(),
+   which makes it possible to employ a static buffer for ZSTD_CCtx* state.
+
+   ZSTD_estimateCCtxSize() will provide a memory budget large enough
+   to compress data of any size using one-shot compression ZSTD_compressCCtx() or ZSTD_compress2()
+   associated with any compression level up to max specified one.
+   The estimate will assume the input may be arbitrarily large,
+   which is the worst case.
+
+   Note that the size estimation is specific for one-shot compression,
+   it is not valid for streaming (see ZSTD_estimateCStreamSize*())
+   nor other potential ways of using a ZSTD_CCtx* state.
+
+   When srcSize can be bound by a known and rather "small" value,
+   this knowledge can be used to provide a tighter budget estimation
+   because the ZSTD_CCtx* state will need less memory for small inputs.
+   This tighter estimation can be provided by employing more advanced functions
+   ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),
+   and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().
+   Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.
+
+   Note : only single-threaded compression is supported.
+   ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.

-size_t ZSTD_estimateCStreamSize(int compressionLevel);
-size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
-size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
-size_t ZSTD_estimateDStreamSize(size_t windowSize);
-size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);

-   ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
-   It will also consider src size to be arbitrarily "large", which is worst case.
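A sketch of the pairing with ZSTD_initStaticCCtx() described above (heap-allocated here only to keep the example self-contained; any 8-byte-aligned region of the estimated size works):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdlib.h>

size_t compress_with_static_cctx(void* dst, size_t dstCap,
                                 const void* src, size_t srcSize, int level)
{
    size_t const wkspSize = ZSTD_estimateCCtxSize(level);   /* one-shot budget */
    void* const wksp = malloc(wkspSize);
    ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
    size_t cSize = 0;
    if (cctx != NULL)   /* NULL if workspace is too small or misaligned */
        cSize = ZSTD_compressCCtx(cctx, dst, dstCap, src, srcSize, level);
    /* no ZSTD_freeCCtx(): the context lives inside wksp */
    free(wksp);
    return cSize;       /* 0 on failure, else check with ZSTD_isError() */
}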


    + +
    ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int maxCompressionLevel);
    +ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
    +ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
    +ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t maxWindowSize);
    +ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
    +

+   ZSTD_estimateCStreamSize() will provide a memory budget large enough for streaming compression
+   using any compression level up to the max specified one.
+   It will also consider src size to be arbitrarily "large", which is a worst case scenario.
    If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
    ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
    ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter().
-   Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
    Note : CStream size estimation is only correct for single-threaded compression.
-   ZSTD_DStream memory budget depends on window Size.
+   ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
+   Note 2 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time.
+            Size estimates assume that no external sequence producer is registered.
+
+   ZSTD_DStream memory budget depends on frame's window Size.
    This information can be passed manually, using ZSTD_estimateDStreamSize,
    or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();
+   Any frame requesting a window size larger than max specified one will be rejected.
    Note : if streaming is init with function ZSTD_init?Stream_usingDict(),
           an internal ?Dict will be created, whose additional size is not estimated here.
           In this case, get total size by adding ZSTD_estimate?DictSize


    -
    size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
    -size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
    -size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
    +
    ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
    +ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
    +ZSTDLIB_STATIC_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
     

    ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
    ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
    Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.


    -
    ZSTD_CCtx*    ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
    -ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticCCtx() */
    +
    ZSTDLIB_STATIC_API ZSTD_CCtx*    ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
    +ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticCCtx() */
     

    Initialize an object using a pre-allocated fixed-size buffer.
    workspace: The memory area to emplace the object into.
               Provided pointer *must be 8-bytes aligned*.
@@ -1060,43 +1576,64 @@ Streaming decompression functions



    -
    ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticDCtx() */
    +
    ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticDCtx() */
     

    typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
     typedef void  (*ZSTD_freeFunction) (void* opaque, void* address);
     typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
    -static ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */
    +static
    +#ifdef __GNUC__
    +__attribute__((__unused__))
    +#endif
     

    These prototypes make it possible to pass your own allocation/free functions.
    ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
    All allocation/free operations will be completed using these custom variants instead of regular ones.


    +
    ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */
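A counting allocator is a typical use of these prototypes; a minimal sketch (the statistics structure is an assumption of the example):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { size_t requested; } AllocStats;

static void* countingAlloc(void* opaque, size_t size)
{
    ((AllocStats*)opaque)->requested += size;   /* bytes zstd asked for */
    return malloc(size);
}

static void countingFree(void* opaque, void* address)
{
    (void)opaque;
    free(address);
}

int main(void)
{
    AllocStats stats = { 0 };
    ZSTD_customMem const cmem = { countingAlloc, countingFree, &stats };
    ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cmem);
    /* ... use cctx as usual ... */
    ZSTD_freeCCtx(cctx);
    printf("zstd requested %zu bytes\n", stats.requested);
    return 0;
}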
    +

    +
    typedef struct POOL_ctx_s ZSTD_threadPool;
    +ZSTDLIB_STATIC_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
    +ZSTDLIB_STATIC_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool);  /* accept NULL pointer */
    +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);
    +

+   These prototypes make it possible to share a thread pool among multiple compression contexts.
+   This can limit resources for applications with multiple threads where each one uses
+   a threaded compression mode (via ZSTD_c_nbWorkers parameter).
+   ZSTD_createThreadPool creates a new thread pool with a given number of threads.
+   Note that the lifetime of such pool must exist while being used.
+   ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value
+   to use an internal thread pool).
+   ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.
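A sketch of two contexts drawing from one pool (only meaningful when the library is built with ZSTD_MULTITHREAD):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

int main(void)
{
    ZSTD_threadPool* const pool = ZSTD_createThreadPool(4);   /* 4 shared workers */
    ZSTD_CCtx* const cctxA = ZSTD_createCCtx();
    ZSTD_CCtx* const cctxB = ZSTD_createCCtx();

    /* Both contexts request MT mode, but share the same worker threads. */
    ZSTD_CCtx_setParameter(cctxA, ZSTD_c_nbWorkers, 4);
    ZSTD_CCtx_setParameter(cctxB, ZSTD_c_nbWorkers, 4);
    ZSTD_CCtx_refThreadPool(cctxA, pool);
    ZSTD_CCtx_refThreadPool(cctxB, pool);

    /* ... ZSTD_compress2() / ZSTD_compressStream2() on either context ... */

    ZSTD_freeCCtx(cctxA);
    ZSTD_freeCCtx(cctxB);
    ZSTD_freeThreadPool(pool);   /* pool must outlive every context using it */
    return 0;
}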


    +

    Advanced compression functions

    
     
    -
    ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
    +
    ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
     

    Create a digested dictionary for compression
    Dictionary content is just referenced, not duplicated.
    As a consequence, `dictBuffer` **must** outlive CDict,
-   and its content must remain unmodified throughout the lifetime of CDict.
+   and its content must remain unmodified throughout the lifetime of CDict.
+   note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef


    -
    ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
    +
    ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
     

    @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize. `estimatedSrcSize` value is optional, select 0 if not known


    -
    ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
    +
    ZSTDLIB_STATIC_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
     

    same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`. All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0


    -
    size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
    +
    ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
     

    Ensure param values remain within authorized range. @return 0 on success, or an error code (can be checked with ZSTD_isError())


    -
    ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
    +
    ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
     

    optimize params for a given `srcSize` and `dictSize`.
    `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.
    `dictSize` must be `0` when there is no dictionary.
@@ -1104,47 +1641,76 @@ Streaming decompression functions


    This function never fails (wide contract)


    -
    size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
    -                              void* dst, size_t dstCapacity,
    -                        const void* src, size_t srcSize,
    -                        const void* dict,size_t dictSize,
    -                              ZSTD_parameters params);
    -

    Same as ZSTD_compress_usingDict(), with fine-tune control over compression parameters (by structure) +

    ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams);
    +

+   Set all parameters provided within @p cparams into the working @p cctx.
+   Note : if modifying parameters during compression (MT mode only),
+          note that changes to the .windowLog parameter will be ignored.
+   @return 0 on success, or an error code (can be checked with ZSTD_isError()).
+   On failure, no parameters are updated.


    -
    size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
    +
    ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams);
    +

+   Set all parameters provided within @p fparams into the working @p cctx.
+   @return 0 on success, or an error code (can be checked with ZSTD_isError()).


    + +
    ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params);
    +

+   Set all parameters provided within @p params into the working @p cctx.
+   @return 0 on success, or an error code (can be checked with ZSTD_isError()).


    + +
    ZSTD_DEPRECATED("use ZSTD_compress2")
    +ZSTDLIB_STATIC_API
    +size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
    +                  void* dst, size_t dstCapacity,
    +            const void* src, size_t srcSize,
    +            const void* dict,size_t dictSize,
    +                  ZSTD_parameters params);
    +

+   Note : this function is now DEPRECATED.
+          It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
+          This prototype will generate compilation warnings.


    + +
    ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary")
    +ZSTDLIB_STATIC_API
    +size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
                                 const ZSTD_CDict* cdict,
                                       ZSTD_frameParameters fParams);
    -

-   Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters
+   Note : this function is now DEPRECATED.
+          It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
+          This prototype will generate compilation warnings.


    -
    size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
    +
    ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
     

    Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.
    It saves some memory, but also requires that `dict` outlives its usage within `cctx`


    -
    size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
    +
    ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
     

    Same as ZSTD_CCtx_loadDictionary(), but gives finer control over how to load the dictionary (by copy ? by reference ?) and how to interpret it (automatic ? force raw mode ? full mode only ?)


    -
    size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
    +
    ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
     

    Same as ZSTD_CCtx_refPrefix(), but gives finer control over how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?)


    -
    size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
    +
    ZSTDLIB_STATIC_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
     

    Get the requested compression parameter value, selected by enum ZSTD_cParameter, and store it into int* value. @return : 0, or an error code (which can be tested with ZSTD_isError()).


    -
    ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
    -size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);
    +
    ZSTDLIB_STATIC_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
    +ZSTDLIB_STATIC_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);  /* accept NULL pointer */
     

    Quick howto :
    - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
    - ZSTD_CCtxParams_setParameter() : Push parameters one by one into
@@ -1156,46 +1722,48 @@ Streaming decompression functions
      These parameters will be applied to all subsequent frames.
    - ZSTD_compressStream2() : Do compression using the CCtx.
-   - ZSTD_freeCCtxParams() : Free the memory.
+   - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer.

    This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams()
    for static allocation of CCtx for single-threaded compression.
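The howto, condensed into a sketch that builds a reusable preset and applies it to a context:

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

size_t apply_param_preset(ZSTD_CCtx* cctx)
{
    ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
    size_t r;
    ZSTD_CCtxParams_setParameter(params, ZSTD_c_compressionLevel, 19);
    ZSTD_CCtxParams_setParameter(params, ZSTD_c_checksumFlag, 1);
    r = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);
    ZSTD_freeCCtxParams(params);   /* accepts NULL as well */
    return r;                      /* 0, or an error code (ZSTD_isError()) */
}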


    -
    size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
    +
    ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
     

    Reset params to default values.


    -
    size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
    +
    ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
     

    Initializes the compression parameters of cctxParams according to compression level. All other parameters are reset to their default values.


    -
    size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
    +
    ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
     

    Initializes the compression and frame parameters of cctxParams according to params. All other parameters are reset to their default values.


    -
    size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
    +
    ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
     

    Similar to ZSTD_CCtx_setParameter.
    Set one compression parameter, selected by enum ZSTD_cParameter.
-   Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams().
-   @result : 0, or an error code (which can be tested with ZSTD_isError()).
+   Parameters must be applied to a ZSTD_CCtx using
+   ZSTD_CCtx_setParametersUsingCCtxParams().
+   @result : a code representing success or failure (which can be tested with
+   ZSTD_isError()).


    -
    size_t ZSTD_CCtxParams_getParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
    +
    ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
     

    Similar to ZSTD_CCtx_getParameter. Get the requested value of one compression parameter, selected by enum ZSTD_cParameter. @result : 0, or an error code (which can be tested with ZSTD_isError()).


    -
    size_t ZSTD_CCtx_setParametersUsingCCtxParams(
    +
    ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
             ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
     

    Apply a set of ZSTD_CCtx_params to the compression context.
    This can be done even after compression is started,
@@ -1205,7 +1773,7 @@ Streaming decompression functions



    -
    size_t ZSTD_compressStream2_simpleArgs (
    +
    ZSTDLIB_STATIC_API size_t ZSTD_compressStream2_simpleArgs (
                     ZSTD_CCtx* cctx,
                     void* dst, size_t dstCapacity, size_t* dstPos,
               const void* src, size_t srcSize, size_t* srcPos,
@@ -1219,40 +1787,40 @@ Streaming decompression functions


    Advanced decompression functions

    
     
    -
    unsigned ZSTD_isFrame(const void* buffer, size_t size);
    +
    ZSTDLIB_STATIC_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
     

    Tells if the content of `buffer` starts with a valid Frame Identifier.
    Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
    Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
    Note 3 : Skippable Frame Identifiers are considered valid.


    -
    ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
    +
    ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
     

    Create a digested dictionary, ready to start decompression operation without startup delay.
    Dictionary content is referenced, and therefore stays in dictBuffer.
    It is important that dictBuffer outlives DDict,
    it must remain read accessible throughout the lifetime of DDict


    -
    size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
    +
    ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
     

    Same as ZSTD_DCtx_loadDictionary(),
    but references `dict` content instead of copying it into `dctx`.
    This saves memory if `dict` remains around.
    However, it's imperative that `dict` remains accessible (and unmodified) while being used,
    so it must outlive decompression.


    -
    size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
    +
    ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
     

    Same as ZSTD_DCtx_loadDictionary(), but gives direct control over how to load the dictionary (by copy ? by reference ?) and how to interpret it (automatic ? force raw mode ? full mode only ?).


    -
    size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
    +
    ZSTDLIB_STATIC_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
     

    Same as ZSTD_DCtx_refPrefix(), but gives finer control over how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?)


    -
    size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
    +
    ZSTDLIB_STATIC_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
     

    Refuses allocating internal buffers for frames requiring a window size larger than provided limit.
    This protects a decoder context from reserving too much memory for itself (potential attack scenario).
    This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
@@ -1261,14 +1829,24 @@ Streaming decompression functions



    -
    size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
    -

    Instruct the decoder context about what kind of data to decode next. +

    ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value);
    +

+   Get the requested decompression parameter value, selected by enum ZSTD_dParameter,
+   and store it into int* value.
+   @return : 0, or an error code (which can be tested with ZSTD_isError()).


    + +
    ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead")
    +ZSTDLIB_STATIC_API
    +size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
    +

+   This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter().
+   Instruct the decoder context about what kind of data to decode next.
+   This instruction is mandatory to decode data without a fully-formed header,
+   such as ZSTD_f_zstd1_magicless for example.
+   @return : 0, or an error code (which can be tested using ZSTD_isError()).
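The modern equivalent on both sides of a magicless round trip, as a sketch (ZSTD_c_format is the compression-side counterpart in the experimental section; both peers must agree on the format, since the decoder cannot detect it):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

size_t magicless_roundtrip(void* dst, size_t dstCap,
                           void* tmp, size_t tmpCap,
                           const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    size_t cSize, dSize = 0;

    ZSTD_CCtx_setParameter(cctx, ZSTD_c_format, ZSTD_f_zstd1_magicless);
    cSize = ZSTD_compress2(cctx, tmp, tmpCap, src, srcSize);
    if (!ZSTD_isError(cSize)) {
        /* Preferred replacement for the deprecated ZSTD_DCtx_setFormat(). */
        ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless);
        dSize = ZSTD_decompressDCtx(dctx, dst, dstCap, tmp, cSize);
    }
    ZSTD_freeCCtx(cctx);
    ZSTD_freeDCtx(dctx);
    return dSize;   /* check with ZSTD_isError() */
}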


    -
    size_t ZSTD_decompressStream_simpleArgs (
    +
    ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs (
                     ZSTD_DCtx* dctx,
                     void* dst, size_t dstCapacity, size_t* dstPos,
               const void* src, size_t srcSize, size_t* srcPos);
    @@ -1284,76 +1862,112 @@ 

    Streaming decompression functions


    redundant functions will be deprecated, and then at some point removed.
    -

    Advanced Streaming compression functions

    /**! ZSTD_initCStream_srcSize() :
    - * This function is deprecated, and equivalent to:
    - *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
    - *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
    - *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
    - *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
    - *
    - * pledgedSrcSize must be correct. If it is not known at init time, use
    - * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
    - * "0" also disables frame content size field. It may be enabled in the future.
    - */
    -size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);
    -/**! ZSTD_initCStream_usingDict() :
    - * This function is deprecated, and is equivalent to:
    - *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
    - *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
    - *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
    - *
    - * Creates of an internal CDict (incompatible with static CCtx), except if
    - * dict == NULL or dictSize < 8, in which case no dict is used.
    - * Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if
    - * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
    - */
    -size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel);
    -/**! ZSTD_initCStream_advanced() :
    - * This function is deprecated, and is approximately equivalent to:
    - *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
    - *     ZSTD_CCtx_setZstdParams(zcs, params); // Set the zstd params and leave the rest as-is
    - *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
    - *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
    - *
    - * pledgedSrcSize must be correct. If srcSize is not known at init time, use
    - * value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy.
    - */
    -size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
    -                                             ZSTD_parameters params, unsigned long long pledgedSrcSize);
    -/**! ZSTD_initCStream_usingCDict() :
    - * This function is deprecated, and equivalent to:
    - *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
    - *     ZSTD_CCtx_refCDict(zcs, cdict);
    - *
    - * note : cdict will just be referenced, and must outlive compression session
    - */
    +

    Advanced Streaming compression functions


    +
    ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API
    +size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
    +             int compressionLevel,
    +             unsigned long long pledgedSrcSize);
    +

+   This function is DEPRECATED, and equivalent to:
+       ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+       ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+       ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+       ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+
+   pledgedSrcSize must be correct. If it is not known at init time, use
+   ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
+   "0" also disables frame content size field. It may be enabled in the future.
+   This prototype will generate compilation warnings.


    + +
    ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API
    +size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
    +         const void* dict, size_t dictSize,
    +               int compressionLevel);
    +

+   This function is DEPRECATED, and is equivalent to:
+       ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+       ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+       ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+
+   Creates an internal CDict (incompatible with static CCtx), except if
+   dict == NULL or dictSize < 8, in which case no dict is used.
+   Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
+   it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
+   This prototype will generate compilation warnings.


    + +
    ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API
    +size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
    +        const void* dict, size_t dictSize,
    +              ZSTD_parameters params,
    +              unsigned long long pledgedSrcSize);
    +

+   This function is DEPRECATED, and is equivalent to:
+       ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+       ZSTD_CCtx_setParams(zcs, params);
+       ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+       ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+
+   dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
+   pledgedSrcSize must be correct.
+   If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
+   This prototype will generate compilation warnings.


    + +
    ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API
     size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
    -/**! ZSTD_initCStream_usingCDict_advanced() :
    - * This function is deprecated, and is approximately equivalent to:
    - *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
    - *     ZSTD_CCtx_setZstdFrameParams(zcs, fParams); // Set the zstd frame params and leave the rest as-is
    - *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
    - *     ZSTD_CCtx_refCDict(zcs, cdict);
    - *
    - * same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
    - * pledgedSrcSize must be correct. If srcSize is not known at init time, use
    - * value ZSTD_CONTENTSIZE_UNKNOWN.
    - */
    -size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize);
    -

    -
    size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
    -

-   This function is deprecated, and is equivalent to:
+   This function is DEPRECATED, and equivalent to:
+       ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+       ZSTD_CCtx_refCDict(zcs, cdict);
+
+   note : cdict will just be referenced, and must outlive compression session
+   This prototype will generate compilation warnings.


    + +
    ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API
    +size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
    +                   const ZSTD_CDict* cdict,
    +                         ZSTD_frameParameters fParams,
    +                         unsigned long long pledgedSrcSize);
    +

+   This function is DEPRECATED, and is equivalent to:
+       ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+       ZSTD_CCtx_setFParams(zcs, fParams);
+       ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+       ZSTD_CCtx_refCDict(zcs, cdict);
+
+   same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
+   pledgedSrcSize must be correct. If srcSize is not known at init time, use
+   value ZSTD_CONTENTSIZE_UNKNOWN.
+   This prototype will generate compilation warnings.


    + +
    ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API
    +size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
    +

    This function is DEPRECATED, and is equivalent to:
        ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
        ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+   Note: ZSTD_resetCStream() interprets pledgedSrcSize == 0 as ZSTD_CONTENTSIZE_UNKNOWN, but
+         ZSTD_CCtx_setPledgedSrcSize() does not do the same, so ZSTD_CONTENTSIZE_UNKNOWN must be
+         explicitly specified.
    start a new frame, using same parameters from previous frame.
-   This is typically useful to skip dictionary loading stage, since it will re-use it in-place.
+   This is typically useful to skip dictionary loading stage, since it will reuse it in-place.
    Note that zcs must be init at least once before using ZSTD_resetCStream().
    If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
    If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
    For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
    but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
    @return : 0, or an error code (which can be tested using ZSTD_isError())
+   This prototype will generate compilation warnings.


@@ -1366,7 +1980,7 @@ Advanced Streaming compression functions
         unsigned nbActiveWorkers;      /* MT only : nb of workers actively compressing at probe time */
     } ZSTD_frameProgression;
     

-size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
+ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
     

Tell how many bytes are ready to be flushed immediately. Useful for multithreading scenarios (nbWorkers >= 1). Probe the oldest active job, defined as oldest job not yet entirely flushed,

@@ -1381,49 +1995,98 @@

    Advanced Streaming compression functions

    /**! ZST
      
     


    -

    Advanced Streaming decompression functions

    /**
    - * This function is deprecated, and is equivalent to:
    - *
    - *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
    - *     ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
    - *
    - * note: no dictionary will be used if dict == NULL or dictSize < 8
    - */
    -size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
    -/**
    - * This function is deprecated, and is equivalent to:
    - *
    - *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
    - *     ZSTD_DCtx_refDDict(zds, ddict);
    - *
    - * note : ddict is referenced, it must outlive decompression session
    - */
    -size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
    -/**
    - * This function is deprecated, and is equivalent to:
    - *
    - *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
    - *
    - * re-use decompression parameters from previous init; saves dictionary loading
    - */
    -size_t ZSTD_resetDStream(ZSTD_DStream* zds);
    -

    -

    Buffer-less and synchronous inner streaming functions

    -  This is an advanced API, giving full control over buffer management, for users which need direct control over memory.
    -  But it's also a complex one, with several restrictions, documented below.
    -  Prefer normal streaming API for an easier experience.
    +

    Advanced Streaming decompression functions


    +
    ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_loadDictionary, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
    +

+     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+     ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
+
+ note: no dictionary will be used if dict == NULL or dictSize < 8

    ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_refDDict, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
    +

+     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+     ZSTD_DCtx_refDDict(zds, ddict);
+
+ note : ddict is referenced, it must outlive decompression session

    ZSTD_DEPRECATED("use ZSTD_DCtx_reset, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
    +

+     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+
+ reuse decompression parameters from previous init; saves dictionary loading

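As an illustration, a minimal sketch (ours, not from the diff) of the modern equivalents of these deprecated decompression initializers:

```c
/* Migration sketch: replaces ZSTD_initDStream_usingDict() / ZSTD_resetDStream()
 * with the ZSTD_DCtx_reset() based API recommended above. */
#include <zstd.h>

static size_t startDecompression(ZSTD_DStream* zds,
                                 const void* dict, size_t dictSize)
{
    size_t const err = ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
    if (ZSTD_isError(err)) return err;
    /* As noted above, no dictionary will be used if dict == NULL or dictSize < 8.
     * The deprecated ZSTD_resetDStream() corresponds to the reset call alone. */
    return ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
}
```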
    ZSTDLIB_STATIC_API void
    +ZSTD_registerSequenceProducer(
    +  ZSTD_CCtx* cctx,
    +  void* sequenceProducerState,
    +  ZSTD_sequenceProducer_F sequenceProducer
    +);
    +

Instruct zstd to use a block-level external sequence producer function.
+
+ The sequenceProducerState must be initialized by the caller, and the caller is
+ responsible for managing its lifetime. This parameter is sticky across
+ compressions. It will remain set until the user explicitly resets compression
+ parameters.
+
+ Sequence producer registration is considered to be an "advanced parameter",
+ part of the "advanced API". This means it will only have an effect on compression
+ APIs which respect advanced parameters, such as compress2() and compressStream2().
+ Older compression APIs such as compressCCtx(), which predate the introduction of
+ "advanced parameters", will ignore any external sequence producer setting.
+
+ The sequence producer can be "cleared" by registering a NULL function pointer. This
+ removes all limitations described above in the "LIMITATIONS" section of the API docs.
+
+ The user is strongly encouraged to read the full API documentation (above) before
+ calling this function.

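As a hedged sketch, the following illustrative producer (function and names are ours, not from the diff) simply defers every block back to zstd; it assumes a libzstd recent enough to expose the experimental sequence producer API under `ZSTD_STATIC_LINKING_ONLY`:

```c
/* Sketch only: a producer that always returns ZSTD_SEQUENCE_PRODUCER_ERROR,
 * asking zstd to fall back to its internal match finder when
 * ZSTD_c_enableSeqProducerFallback is enabled. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t nullSeqProducer(void* state,
                              ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
                              const void* src, size_t srcSize,
                              const void* dict, size_t dictSize,
                              int compressionLevel, size_t windowSize)
{
    (void)state; (void)outSeqs; (void)outSeqsCapacity;
    (void)src; (void)srcSize; (void)dict; (void)dictSize;
    (void)compressionLevel; (void)windowSize;
    return ZSTD_SEQUENCE_PRODUCER_ERROR;  /* defer this block back to zstd */
}

static void enableProducer(ZSTD_CCtx* cctx)
{
    ZSTD_registerSequenceProducer(cctx, NULL, nullSeqProducer);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableSeqProducerFallback, 1);
}
```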
    ZSTDLIB_STATIC_API void
    +ZSTD_CCtxParams_registerSequenceProducer(
    +  ZSTD_CCtx_params* params,
    +  void* sequenceProducerState,
    +  ZSTD_sequenceProducer_F sequenceProducer
    +);
    +

Same as ZSTD_registerSequenceProducer(), but operates on ZSTD_CCtx_params.
+ This is used for accurate size estimation with ZSTD_estimateCCtxSize_usingCCtxParams(),
+ which is needed when creating a ZSTD_CCtx with ZSTD_initStaticCCtx().
+
+ If you are using the external sequence producer API in a scenario where ZSTD_initStaticCCtx()
+ is required, then this function is for you. Otherwise, you probably don't need it.
+
+ See tests/zstreamtest.c for example usage.

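A hedged sketch of the size-estimation flow this enables; the helper name and control flow are ours, and it assumes the experimental CCtx_params and static-allocation APIs:

```c
/* Sketch: estimate workspace size with the producer registered on the params
 * object, then create a static CCtx in caller-owned memory. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdlib.h>

static ZSTD_CCtx* makeStaticCCtxWithProducer(void* producerState,
                                             ZSTD_sequenceProducer_F producer)
{
    ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
    if (params == NULL) return NULL;
    /* Register on the params object, so the estimate accounts for it. */
    ZSTD_CCtxParams_registerSequenceProducer(params, producerState, producer);
    size_t const need = ZSTD_estimateCCtxSize_usingCCtxParams(params);
    if (ZSTD_isError(need)) { ZSTD_freeCCtxParams(params); return NULL; }
    void* const workspace = malloc(need);  /* caller owns; freeing it destroys the cctx */
    ZSTD_CCtx* const cctx = workspace ? ZSTD_initStaticCCtx(workspace, need) : NULL;
    if (cctx == NULL) { free(workspace); ZSTD_freeCCtxParams(params); return NULL; }
    ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);  /* apply the producer */
    ZSTD_freeCCtxParams(params);
    return cctx;
}
```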

    Buffer-less and synchronous inner streaming functions (DEPRECATED)

    +  This API is deprecated, and will be removed in a future version.
    +  It allows streaming (de)compression with user allocated buffers.
    +  However, it is hard to use, and not as well tested as the rest of
    +  our API.
    +
    +  Please use the normal streaming API instead: ZSTD_compressStream2,
    +  and ZSTD_decompressStream.
    +  If there is functionality that you need, but it doesn't provide,
    +  please open an issue on our GitHub.
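To make the recommendation concrete, here is a minimal sketch (ours, not part of the diff) of the suggested replacement path, assuming the destination buffer is sized with `ZSTD_compressBound()`:

```c
/* Single-shot framing with ZSTD_compressStream2() over caller-owned buffers. */
#include <zstd.h>

static size_t compressOnce(ZSTD_CCtx* cctx,
                           void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize)
{
    ZSTD_inBuffer  input  = { src, srcSize, 0 };
    ZSTD_outBuffer output = { dst, dstCapacity, 0 };
    for (;;) {
        size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;    /* pass the error through */
        if (remaining == 0) return output.pos;            /* frame fully written */
        if (output.pos == output.size) return (size_t)-1; /* dst too small (sketch) */
    }
}
```

With a dst buffer of at least `ZSTD_compressBound(srcSize)` bytes, the loop completes in a single call.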
      
     

    Buffer-less streaming compression (synchronous mode)

       A ZSTD_CCtx object is required to track streaming operations.
       Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
    -  ZSTD_CCtx object can be re-used multiple times within successive compression operations.
    +  ZSTD_CCtx object can be reused multiple times within successive compression operations.
     
       Start by initializing a context.
    -  Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
    -  or ZSTD_compressBegin_advanced(), for finer parameter control.
    -  It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
    +  Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.
     
       Then, consume your input using ZSTD_compressContinue().
       There are some important considerations to keep in mind when using this advanced function :
    @@ -1441,30 +2104,34 @@ 

    Advanced Streaming decompression functions

    /**
       It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
       Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
     
    -  `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
    +  `ZSTD_CCtx` object can be reused (ZSTD_compressBegin()) to compress again.
     
    -

    Buffer-less streaming compression functions

    size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
    -size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
    -size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
    -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
    -size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize);   /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
    -size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
    +

    Buffer-less streaming compression functions

    ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
    +ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
    +ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
    +ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
    +ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
    +ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
     

    +
    size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
    +

    +
    size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
    +

    Buffer-less streaming decompression (synchronous mode)

       A ZSTD_DCtx object is required to track streaming operations.
       Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
    -  A ZSTD_DCtx object can be re-used multiple times.
    +  A ZSTD_DCtx object can be reused multiple times.
     
       First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
       Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
       Data fragment must be large enough to ensure successful decoding.
      `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
    -  @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
    -           >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
    +  result  : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
    +           >0 : `srcSize` is too small, please provide at least result bytes on next attempt.
                errorCode, which can be tested using ZSTD_isError().
     
    -  It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
    +  It fills a ZSTD_FrameHeader structure with important information to correctly decode the frame,
       such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
       Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
       As a consequence, check that values remain within valid application range.
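For illustration, a short sketch (ours) of probing a frame header before allocating buffers; it uses the struct name `ZSTD_frameHeader` as shown in this diff, and the experimental `ZSTD_getFrameHeader()` declared under `ZSTD_STATIC_LINKING_ONLY`:

```c
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdio.h>

static int inspectFrame(const void* src, size_t srcSize)
{
    ZSTD_frameHeader zfh;
    size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
    if (ZSTD_isError(ret)) return -1;   /* not a valid zstd frame header */
    if (ret > 0) return (int)ret;       /* need at least `ret` bytes on next attempt */
    /* Values come from untrusted input : validate before trusting them. */
    printf("contentSize=%llu windowSize=%llu dictID=%u checksum=%u\n",
           zfh.frameContentSize, zfh.windowSize, zfh.dictID, zfh.checksumFlag);
    return 0;
}
```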
    @@ -1480,7 +2147,7 @@ 

    Buffer-less streaming compression functions

    size_t ZS
     
       The most memory efficient way is to use a round buffer of sufficient size.
       Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
    -  which can @return an error code if required value is too large for current system (in 32-bits mode).
    +  which can return an error code if required value is too large for current system (in 32-bits mode).
       In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
       up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
       which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.
    @@ -1500,7 +2167,7 @@ 

    Buffer-less streaming compression functions

    size_t ZS
       ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
       ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
     
    - @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
    +  result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
       It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
       It can also be an error code, which can be tested with ZSTD_isError().
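A hedged sketch (ours) of the decode loop this contract implies, assuming the whole frame is available in one contiguous `src` buffer and `dst` is large enough for the regenerated data:

```c
/* Buffer-less (deprecated) decode loop: feed exactly
 * nextSrcSizeToDecompress() bytes to each decompressContinue() call. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t decodeFrame(ZSTD_DCtx* dctx,
                          char* dst, size_t dstCapacity,
                          const char* src, size_t srcSize)
{
    size_t dstPos = 0, srcPos = 0;
    size_t const initErr = ZSTD_decompressBegin(dctx);
    if (ZSTD_isError(initErr)) return initErr;
    /* nextSrcSizeToDecompress() returns 0 once the frame is fully decoded */
    for (size_t next; (next = ZSTD_nextSrcSizeToDecompress(dctx)) != 0; ) {
        if (srcPos + next > srcSize) return (size_t)-1;  /* truncated input (sketch) */
        size_t const produced = ZSTD_decompressContinue(dctx,
                                    dst + dstPos, dstCapacity - dstPos,
                                    src + srcPos, next);
        if (ZSTD_isError(produced)) return produced;
        dstPos += produced;   /* can be 0 : a metadata item was decoded */
        srcPos += next;
    }
    return dstPos;
}
```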
     
    @@ -1522,35 +2189,26 @@ 

    Buffer-less streaming compression functions

    size_t ZS
       For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
     
    -

    Buffer-less streaming decompression functions

    typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
    -typedef struct {
    -    unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
    -    unsigned long long windowSize;       /* can be very large, up to <= frameContentSize */
    -    unsigned blockSizeMax;
    -    ZSTD_frameType_e frameType;          /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
    -    unsigned headerSize;
    -    unsigned dictID;
    -    unsigned checksumFlag;
    -} ZSTD_frameHeader;
    -

-size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
    -/*! ZSTD_getFrameHeader_advanced() :
    - *  same as ZSTD_getFrameHeader(),
    - *  with added capability to select a format (like ZSTD_f_zstd1_magicless) */
    -size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
    -size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
    -

decode Frame Header, or requires larger `srcSize`.
- @return : 0, `zfhPtr` is correctly filled,
-          >0, `srcSize` is too small, value is wanted `srcSize` amount,
-           or an error code, which can be tested using ZSTD_isError()


    Buffer-less streaming decompression functions


    +
    ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
    +

    typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
     

    -

    Block level API

    
    +

    Block level API (DEPRECATED)

    
    +
    +

You can get the frame header down to 2 bytes by setting:
+   - ZSTD_c_format = ZSTD_f_zstd1_magicless
+   - ZSTD_c_contentSizeFlag = 0
+   - ZSTD_c_checksumFlag = 0
+   - ZSTD_c_dictIDFlag = 0
+
+ This API is not as well tested as our normal API, so we recommend not using it.
+ We will be removing it in a future version. If the normal API doesn't provide
+ the functionality you need, please open a GitHub issue.
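For illustration, a sketch (ours) of that four-parameter configuration; these are experimental parameters, so `ZSTD_STATIC_LINKING_ONLY` is required for `ZSTD_c_format`:

```c
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t setMinimalHeader(ZSTD_CCtx* cctx)
{
    size_t err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_format, ZSTD_f_zstd1_magicless);
    if (ZSTD_isError(err)) return err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, 0);
    if (ZSTD_isError(err)) return err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 0);
    if (ZSTD_isError(err)) return err;
    return ZSTD_CCtx_setParameter(cctx, ZSTD_c_dictIDFlag, 0);
}
```

A decoder must then be configured symmetrically, e.g. `ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless)`.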

- Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
- User will have to take in charge required information to regenerate data, such as compressed and content sizes.
+ Block functions produce and decode raw zstd blocks, without frame metadata.
+ Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
+ But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes.
  A few rules to respect :
  - Compressing and decompressing require a context structure

@@ -1558,24 +2216,29 @@

    Buffer-less streaming decompression functions

    typedef
         - It is necessary to init context before starting
           + compression : any ZSTD_compressBegin*() variant, including with dictionary
           + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
    -      + copyCCtx() and copyDCtx() can be used too
         - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
           + If input is larger than a block size, it's necessary to split input data into multiple blocks
    -      + For inputs larger than a single block, really consider using regular ZSTD_compress() instead.
    -        Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
    -    - When a block is considered not compressible enough, ZSTD_compressBlock() result will be zero.
    -      In which case, nothing is produced into `dst` !
    -      + User must test for such outcome and deal directly with uncompressed data
    -      + ZSTD_decompressBlock() doesn't accept uncompressed data as input !!!
    +      + For inputs larger than a single block, consider using regular ZSTD_compress() instead.
    +        Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
    +    - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !
    +      ===> In which case, nothing is produced into `dst` !
    +      + User __must__ test for such outcome and deal directly with uncompressed data
    +      + A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.
    +        Doing so would mess up with statistics history, leading to potential data corruption.
    +      + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
           + In case of multiple successive blocks, should some of them be uncompressed,
             decoder must be informed of their existence in order to follow proper history.
             Use ZSTD_insertBlock() for such a case.
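To make these rules concrete, here is a hedged sketch (helper names are ours, not from the diff) of one block in each direction; it assumes a `ZSTD_compressBegin*()` / `ZSTD_decompressBegin*()` call already initialized the contexts:

```c
/* Deprecated block API sketch: a 0 return from ZSTD_compressBlock() means
 * "not compressible", so the raw bytes are stored as-is and must later be
 * replayed into the decoder's history with ZSTD_insertBlock(). */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <string.h>

/* returns the compressed size, or 0 if the block was stored raw into dst */
static size_t compressOneBlock(ZSTD_CCtx* cctx,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize)
{
    size_t const cSize = ZSTD_compressBlock(cctx, dst, dstCapacity, src, srcSize);
    if (ZSTD_isError(cSize)) return cSize;
    if (cSize == 0)                    /* not compressible : store raw */
        memcpy(dst, src, srcSize);     /* assumes dstCapacity >= srcSize */
    return cSize;
}

/* raw blocks must be declared to the decoder via ZSTD_insertBlock() */
static size_t decodeOneBlock(ZSTD_DCtx* dctx, int wasStoredRaw,
                             void* dst, size_t dstCapacity,
                             const void* blk, size_t blkSize)
{
    if (wasStoredRaw) {
        memcpy(dst, blk, blkSize);     /* assumes dstCapacity >= blkSize */
        return ZSTD_insertBlock(dctx, dst, blkSize);
    }
    return ZSTD_decompressBlock(dctx, dst, dstCapacity, blk, blkSize);
}
```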
     


    -

    Raw zstd block functions

    size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);
    -size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
    -size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
    -size_t ZSTD_insertBlock    (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
    +

    Raw zstd block functions

    ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
    +ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);
    +ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
    +ZSTDLIB_STATIC_API size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
    +ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
    +ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
    +ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
    +ZSTDLIB_STATIC_API size_t ZSTD_insertBlock    (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
     

    diff --git a/examples/Makefile b/examples/Makefile index 65ea8abad5d..31f52d35763 100644 --- a/examples/Makefile +++ b/examples/Makefile @@ -1,21 +1,22 @@ # ################################################################ -# Copyright (c) 2016-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the # LICENSE file in the root directory of this source tree) and the GPLv2 (found # in the COPYING file in the root directory of this source tree). +# You may select, at your option, one of the above-listed licenses. # ################################################################ -# This Makefile presumes libzstd is installed, using `sudo make install` +LIBDIR =../lib +CPPFLAGS += -I$(LIBDIR) +LIB = $(LIBDIR)/libzstd.a -CPPFLAGS += -I../lib -LIB = ../lib/libzstd.a - -.PHONY: default all clean test +.PHONY: default default: all +.PHONY: all all: simple_compression simple_decompression \ multiple_simple_compression\ dictionary_compression dictionary_decompression \ @@ -23,37 +24,39 @@ all: simple_compression simple_decompression \ multiple_streaming_compression streaming_memory_usage $(LIB) : - $(MAKE) -C ../lib libzstd.a + $(MAKE) -C $(LIBDIR) libzstd.a + +simple_compression.o: common.h +simple_compression : $(LIB) -simple_compression : simple_compression.c common.h $(LIB) - $(CC) $(CPPFLAGS) $(CFLAGS) $< $(LIB) $(LDFLAGS) -o $@ +simple_decompression.o: common.h +simple_decompression : $(LIB) -simple_decompression : simple_decompression.c common.h $(LIB) - $(CC) $(CPPFLAGS) $(CFLAGS) $< $(LIB) $(LDFLAGS) -o $@ +multiple_simple_compression.o: common.h +multiple_simple_compression : $(LIB) -multiple_simple_compression : multiple_simple_compression.c common.h $(LIB) - $(CC) $(CPPFLAGS) $(CFLAGS) $< $(LIB) $(LDFLAGS) -o $@ +dictionary_compression.o: common.h +dictionary_compression : $(LIB) -dictionary_compression : dictionary_compression.c common.h $(LIB) - $(CC) $(CPPFLAGS) $(CFLAGS) $< $(LIB) $(LDFLAGS) -o $@ +dictionary_decompression.o: common.h +dictionary_decompression : $(LIB) -dictionary_decompression : dictionary_decompression.c common.h $(LIB) - $(CC) $(CPPFLAGS) $(CFLAGS) $< $(LIB) $(LDFLAGS) -o $@ +streaming_compression.o: common.h +streaming_compression : $(LIB) -streaming_compression : streaming_compression.c common.h $(LIB) - $(CC) $(CPPFLAGS) $(CFLAGS) $< $(LIB) $(LDFLAGS) -o $@ +multiple_streaming_compression.o: common.h +multiple_streaming_compression : $(LIB) -multiple_streaming_compression : multiple_streaming_compression.c common.h $(LIB) - $(CC) $(CPPFLAGS) $(CFLAGS) $< $(LIB) $(LDFLAGS) -o $@ +streaming_decompression.o: common.h +streaming_decompression : $(LIB) -streaming_decompression : streaming_decompression.c common.h $(LIB) - $(CC) $(CPPFLAGS) $(CFLAGS) $< $(LIB) $(LDFLAGS) -o $@ +streaming_memory_usage.o: common.h +streaming_memory_usage : $(LIB) -streaming_memory_usage : streaming_memory_usage.c $(LIB) - $(CC) $(CPPFLAGS) $(CFLAGS) $< $(LIB) $(LDFLAGS) -o $@ +.PHONY:clean clean: - @rm -f core *.o tmp* result* *.zst \ + @$(RM) core *.o tmp* result* *.zst \ simple_compression simple_decompression \ multiple_simple_compression \ dictionary_compression dictionary_decompression \ @@ -61,6 +64,7 @@ clean: multiple_streaming_compression streaming_memory_usage @echo Cleaning completed +.PHONY:test test: all cp README.md tmp cp Makefile tmp2 diff --git a/examples/common.h b/examples/common.h index a714cbb72c3..4873e877a7a 100644 --- 
a/examples/common.h +++ b/examples/common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -21,6 +21,17 @@ #include // stat #include + +/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */ +#if defined(__GNUC__) +# define UNUSED_ATTR __attribute__((unused)) +#else +# define UNUSED_ATTR +#endif + +#define HEADER_FUNCTION static UNUSED_ATTR + + /* * Define the returned error code from utility functions. */ @@ -57,7 +68,7 @@ typedef enum { * Check the zstd error code and die if an error occurred after printing a * message. */ -#define CHECK_ZSTD(fn, ...) \ +#define CHECK_ZSTD(fn) \ do { \ size_t const err = (fn); \ CHECK(!ZSTD_isError(err), "%s", ZSTD_getErrorName(err)); \ @@ -68,7 +79,7 @@ typedef enum { * * @return The size of a given file path. */ -static size_t fsize_orDie(const char *filename) +HEADER_FUNCTION size_t fsize_orDie(const char *filename) { struct stat st; if (stat(filename, &st) != 0) { @@ -96,7 +107,7 @@ static size_t fsize_orDie(const char *filename) * @return If successful this function will return a FILE pointer to an * opened file otherwise it sends an error to stderr and exits. */ -static FILE* fopen_orDie(const char *filename, const char *instruction) +HEADER_FUNCTION FILE* fopen_orDie(const char *filename, const char *instruction) { FILE* const inFile = fopen(filename, instruction); if (inFile) return inFile; @@ -108,7 +119,7 @@ static FILE* fopen_orDie(const char *filename, const char *instruction) /*! fclose_orDie() : * Close an opened file using given FILE pointer. */ -static void fclose_orDie(FILE* file) +HEADER_FUNCTION void fclose_orDie(FILE* file) { if (!fclose(file)) { return; }; /* error */ @@ -123,7 +134,7 @@ static void fclose_orDie(FILE* file) * * @return The number of bytes read. */ -static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) +HEADER_FUNCTION size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) { size_t const readSize = fread(buffer, 1, sizeToRead, file); if (readSize == sizeToRead) return readSize; /* good */ @@ -143,7 +154,7 @@ static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) * * @return The number of bytes written. */ -static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) +HEADER_FUNCTION size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) { size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file); if (writtenSize == sizeToWrite) return sizeToWrite; /* good */ @@ -159,7 +170,7 @@ static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) * cated memory. If there is an error, this function will send that * error to stderr and exit. */ -static void* malloc_orDie(size_t size) +HEADER_FUNCTION void* malloc_orDie(size_t size) { void* const buff = malloc(size); if (buff) return buff; @@ -177,7 +188,7 @@ static void* malloc_orDie(size_t size) * @return If successful this function will load file into buffer and * return file size, otherwise it will printout an error to stderr and exit. 
*/ -static size_t loadFile_orDie(const char* fileName, void* buffer, size_t bufferSize) +HEADER_FUNCTION size_t loadFile_orDie(const char* fileName, void* buffer, size_t bufferSize) { size_t const fileSize = fsize_orDie(fileName); CHECK(fileSize <= bufferSize, "File too large!"); @@ -201,7 +212,8 @@ static size_t loadFile_orDie(const char* fileName, void* buffer, size_t bufferSi * @return If successful this function will return buffer and bufferSize(=fileSize), * otherwise it will printout an error to stderr and exit. */ -static void* mallocAndLoadFile_orDie(const char* fileName, size_t* bufferSize) { +HEADER_FUNCTION void* mallocAndLoadFile_orDie(const char* fileName, size_t* bufferSize) +{ size_t const fileSize = fsize_orDie(fileName); *bufferSize = fileSize; void* const buffer = malloc_orDie(*bufferSize); @@ -217,7 +229,7 @@ static void* mallocAndLoadFile_orDie(const char* fileName, size_t* bufferSize) { * Note: This function will send an error to stderr and exit if it * cannot write to a given file. */ -static void saveFile_orDie(const char* fileName, const void* buff, size_t buffSize) +HEADER_FUNCTION void saveFile_orDie(const char* fileName, const void* buff, size_t buffSize) { FILE* const oFile = fopen_orDie(fileName, "wb"); size_t const wSize = fwrite(buff, 1, buffSize, oFile); diff --git a/examples/dictionary_compression.c b/examples/dictionary_compression.c index 9efdb785c11..83edc1cad9b 100644 --- a/examples/dictionary_compression.c +++ b/examples/dictionary_compression.c @@ -1,12 +1,22 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. - */ +**/ + +/* This example deals with Dictionary compression, + * its counterpart is `examples/dictionary_decompression.c` . + * These examples presume that a dictionary already exists. + * The main method to create a dictionary is `zstd --train`, + * look at the CLI documentation for details. + * Another possible method is to employ dictionary training API, + * published in `lib/zdict.h` . +**/ + #include // printf #include // free #include // memset, strcat @@ -14,7 +24,7 @@ #include "common.h" // Helper functions, CHECK(), and CHECK_ZSTD() /* createDict() : - `dictFileName` is supposed to have been created using `zstd --train` */ +** `dictFileName` is supposed already created using `zstd --train` */ static ZSTD_CDict* createCDict_orDie(const char* dictFileName, int cLevel) { size_t dictSize; diff --git a/examples/dictionary_decompression.c b/examples/dictionary_decompression.c index f683bbb4380..e6c999964a2 100644 --- a/examples/dictionary_decompression.c +++ b/examples/dictionary_decompression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/multiple_simple_compression.c b/examples/multiple_simple_compression.c index a44ac8b442f..bf77ca13317 100644 --- a/examples/multiple_simple_compression.c +++ b/examples/multiple_simple_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. 
and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/multiple_streaming_compression.c b/examples/multiple_streaming_compression.c index ad98b1bd1b0..b12ad03dce1 100644 --- a/examples/multiple_streaming_compression.c +++ b/examples/multiple_streaming_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/simple_compression.c b/examples/simple_compression.c index 019a143d4c8..7c880725fc7 100644 --- a/examples/simple_compression.c +++ b/examples/simple_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/simple_decompression.c b/examples/simple_decompression.c index 1aa57c7b093..f499156f64e 100644 --- a/examples/simple_decompression.c +++ b/examples/simple_decompression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/streaming_compression.c b/examples/streaming_compression.c index d1353a684a6..063aa82a294 100644 --- a/examples/streaming_compression.c +++ b/examples/streaming_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -15,9 +15,12 @@ #include // presumes zstd library is installed #include "common.h" // Helper functions, CHECK(), and CHECK_ZSTD() - -static void compressFile_orDie(const char* fname, const char* outName, int cLevel) +static void compressFile_orDie(const char* fname, const char* outName, int cLevel, + int nbThreads) { + fprintf (stderr, "Starting compression of %s with level %d, using %d threads\n", + fname, cLevel, nbThreads); + /* Open the input and output files. */ FILE* const fin = fopen_orDie(fname, "rb"); FILE* const fout = fopen_orDie(outName, "wb"); @@ -39,13 +42,20 @@ static void compressFile_orDie(const char* fname, const char* outName, int cLeve */ CHECK_ZSTD( ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, cLevel) ); CHECK_ZSTD( ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1) ); + if (nbThreads > 1) { + size_t const r = ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, nbThreads); + if (ZSTD_isError(r)) { + fprintf (stderr, "Note: the linked libzstd library doesn't support multithreading. " + "Reverting to single-thread mode. \n"); + } + } /* This loop read from the input file, compresses that entire chunk, * and writes all output produced to the output file. */ size_t const toRead = buffInSize; - size_t read; - while ((read = fread_orDie(buffIn, toRead, fin))) { + for (;;) { + size_t read = fread_orDie(buffIn, toRead, fin); /* Select the flush mode. * If the read may not be finished (read == toRead) we use * ZSTD_e_continue. If this is the last chunk, we use ZSTD_e_end. 
@@ -76,6 +86,10 @@ static void compressFile_orDie(const char* fname, const char* outName, int cLeve } while (!finished); CHECK(input.pos == input.size, "Impossible: zstd only returns 0 when the input is completely consumed!"); + + if (lastChunk) { + break; + } } ZSTD_freeCCtx(cctx); @@ -101,19 +115,32 @@ int main(int argc, const char** argv) { const char* const exeName = argv[0]; - if (argc!=2) { + if (argc < 2) { printf("wrong arguments\n"); printf("usage:\n"); - printf("%s FILE\n", exeName); + printf("%s FILE [LEVEL] [THREADS]\n", exeName); return 1; } + int cLevel = 1; + int nbThreads = 1; + + if (argc >= 3) { + cLevel = atoi (argv[2]); + CHECK(cLevel != 0, "can't parse LEVEL!"); + } + + if (argc >= 4) { + nbThreads = atoi (argv[3]); + CHECK(nbThreads != 0, "can't parse THREADS!"); + } + const char* const inFilename = argv[1]; char* const outFilename = createOutFilename_orDie(inFilename); - compressFile_orDie(inFilename, outFilename, 1); + compressFile_orDie(inFilename, outFilename, cLevel, nbThreads); free(outFilename); /* not strictly required, since program execution stops there, - * but some static analyzer main complain otherwise */ + * but some static analyzer may complain otherwise */ return 0; } diff --git a/examples/streaming_compression_thread_pool.c b/examples/streaming_compression_thread_pool.c new file mode 100644 index 00000000000..a1a024129f2 --- /dev/null +++ b/examples/streaming_compression_thread_pool.c @@ -0,0 +1,180 @@ +/* + * Copyright (c) Martin Liska, SUSE, Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + +#include // printf +#include // free +#include // memset, strcat, strlen +#include // presumes zstd library is installed +#include "common.h" // Helper functions, CHECK(), and CHECK_ZSTD() +#include + +typedef struct compress_args +{ + const char *fname; + char *outName; + int cLevel; +#if defined(ZSTD_STATIC_LINKING_ONLY) + ZSTD_threadPool *pool; +#endif +} compress_args_t; + +static void *compressFile_orDie(void *data) +{ + const int nbThreads = 16; + + compress_args_t *args = (compress_args_t *)data; + fprintf (stderr, "Starting compression of %s with level %d, using %d threads\n", args->fname, args->cLevel, nbThreads); + /* Open the input and output files. */ + FILE* const fin = fopen_orDie(args->fname, "rb"); + FILE* const fout = fopen_orDie(args->outName, "wb"); + /* Create the input and output buffers. + * They may be any size, but we recommend using these functions to size them. + * Performance will only suffer significantly for very tiny buffers. + */ + size_t const buffInSize = ZSTD_CStreamInSize(); + void* const buffIn = malloc_orDie(buffInSize); + size_t const buffOutSize = ZSTD_CStreamOutSize(); + void* const buffOut = malloc_orDie(buffOutSize); + + /* Create the context. */ + ZSTD_CCtx* const cctx = ZSTD_createCCtx(); + CHECK(cctx != NULL, "ZSTD_createCCtx() failed!"); + +#if defined(ZSTD_STATIC_LINKING_ONLY) + size_t r = ZSTD_CCtx_refThreadPool(cctx, args->pool); + CHECK(r == 0, "ZSTD_CCtx_refThreadPool failed!"); +#endif + + /* Set any parameters you want. + * Here we set the compression level, and enable the checksum. 
+ */ + CHECK_ZSTD( ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, args->cLevel) ); + CHECK_ZSTD( ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1) ); + ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, nbThreads); + + /* This loop reads from the input file, compresses that entire chunk, + * and writes all output produced to the output file. + */ + size_t const toRead = buffInSize; + for (;;) { + size_t read = fread_orDie(buffIn, toRead, fin); + /* Select the flush mode. + * If the read may not be finished (read == toRead) we use + * ZSTD_e_continue. If this is the last chunk, we use ZSTD_e_end. + * Zstd optimizes the case where the first flush mode is ZSTD_e_end, + * since it knows it is compressing the entire source in one pass. + */ + int const lastChunk = (read < toRead); + ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue; + /* Set the input buffer to what we just read. + * We compress until the input buffer is empty, each time flushing the + * output. + */ + ZSTD_inBuffer input = { buffIn, read, 0 }; + int finished; + do { + /* Compress into the output buffer and write all of the output to + * the file so we can reuse the buffer next iteration. + */ + ZSTD_outBuffer output = { buffOut, buffOutSize, 0 }; + size_t const remaining = ZSTD_compressStream2(cctx, &output , &input, mode); + CHECK_ZSTD(remaining); + fwrite_orDie(buffOut, output.pos, fout); + /* If we're on the last chunk we're finished when zstd returns 0, + * which means its consumed all the input AND finished the frame. + * Otherwise, we're finished when we've consumed all the input. + */ + finished = lastChunk ? (remaining == 0) : (input.pos == input.size); + } while (!finished); + CHECK(input.pos == input.size, + "Impossible: zstd only returns 0 when the input is completely consumed!"); + + if (lastChunk) { + break; + } + } + + fprintf (stderr, "Finishing compression of %s\n", args->outName); + + ZSTD_freeCCtx(cctx); + fclose_orDie(fout); + fclose_orDie(fin); + free(buffIn); + free(buffOut); + free(args->outName); + + return NULL; +} + + +static char* createOutFilename_orDie(const char* filename) +{ + size_t const inL = strlen(filename); + size_t const outL = inL + 5; + void* const outSpace = malloc_orDie(outL); + memset(outSpace, 0, outL); + strcat(outSpace, filename); + strcat(outSpace, ".zst"); + return (char*)outSpace; +} + +int main(int argc, const char** argv) +{ + const char* const exeName = argv[0]; + + if (argc<=3) { + printf("wrong arguments\n"); + printf("usage:\n"); + printf("%s POOL_SIZE LEVEL FILES\n", exeName); + return 1; + } + + int pool_size = atoi (argv[1]); + CHECK(pool_size != 0, "can't parse POOL_SIZE!"); + + int level = atoi (argv[2]); + CHECK(level != 0, "can't parse LEVEL!"); + + argc -= 3; + argv += 3; + +#if defined(ZSTD_STATIC_LINKING_ONLY) + ZSTD_threadPool *pool = ZSTD_createThreadPool (pool_size); + CHECK(pool != NULL, "ZSTD_createThreadPool() failed!"); + fprintf (stderr, "Using shared thread pool of size %d\n", pool_size); +#else + fprintf (stderr, "All threads use its own thread pool\n"); +#endif + + pthread_t *threads = malloc_orDie(argc * sizeof(pthread_t)); + compress_args_t *args = malloc_orDie(argc * sizeof(compress_args_t)); + + for (unsigned i = 0; i < argc; i++) + { + args[i].fname = argv[i]; + args[i].outName = createOutFilename_orDie(args[i].fname); + args[i].cLevel = level; +#if defined(ZSTD_STATIC_LINKING_ONLY) + args[i].pool = pool; +#endif + + pthread_create (&threads[i], NULL, compressFile_orDie, &args[i]); + } + + for (unsigned i = 0; i < argc; 
i++) + pthread_join (threads[i], NULL); + +#if defined(ZSTD_STATIC_LINKING_ONLY) + ZSTD_freeThreadPool (pool); +#endif + + return 0; +} diff --git a/examples/streaming_decompression.c b/examples/streaming_decompression.c index bcd861b756c..95fa1122773 100644 --- a/examples/streaming_decompression.c +++ b/examples/streaming_decompression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -34,7 +34,10 @@ static void decompressFile_orDie(const char* fname) */ size_t const toRead = buffInSize; size_t read; + size_t lastRet = 0; + int isEmpty = 1; while ( (read = fread_orDie(buffIn, toRead, fin)) ) { + isEmpty = 0; ZSTD_inBuffer input = { buffIn, read, 0 }; /* Given a valid frame, zstd won't consume the last byte of the frame * until it has flushed all of the decompressed data of the frame. @@ -53,9 +56,24 @@ static void decompressFile_orDie(const char* fname) size_t const ret = ZSTD_decompressStream(dctx, &output , &input); CHECK_ZSTD(ret); fwrite_orDie(buffOut, output.pos, fout); + lastRet = ret; } } + if (isEmpty) { + fprintf(stderr, "input is empty\n"); + exit(1); + } + + if (lastRet != 0) { + /* The last return value from ZSTD_decompressStream did not end on a + * frame, but we reached the end of the file! We assume this is an + * error, and the input was truncated. + */ + fprintf(stderr, "EOF before end of stream: %zu\n", lastRet); + exit(1); + } + ZSTD_freeDCtx(dctx); fclose_orDie(fin); fclose_orDie(fout); diff --git a/examples/streaming_memory_usage.c b/examples/streaming_memory_usage.c index 26835788abe..957acb61a39 100644 --- a/examples/streaming_memory_usage.c +++ b/examples/streaming_memory_usage.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/BUCK b/lib/BUCK index 637c20d667c..60c6bbb54d9 100644 --- a/lib/BUCK +++ b/lib/BUCK @@ -65,9 +65,7 @@ cxx_library( name='zdict', header_namespace='', visibility=['PUBLIC'], - exported_headers=subdir_glob([ - ('dictBuilder', 'zdict.h'), - ]), + exported_headers=['zdict.h'], headers=subdir_glob([ ('dictBuilder', 'divsufsort.h'), ('dictBuilder', 'cover.h'), @@ -131,10 +129,10 @@ cxx_library( name='errors', header_namespace='', visibility=['PUBLIC'], - exported_headers=subdir_glob([ - ('common', 'error_private.h'), - ('common', 'zstd_errors.h'), - ]), + exported_headers=[ + 'zstd_errors.h', + 'common/error_private.h', + ] srcs=['common/error_private.c'], ) diff --git a/lib/Makefile b/lib/Makefile index 404f5b69210..569bd6090c9 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -1,211 +1,278 @@ # ################################################################ -# Copyright (c) 2015-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the # LICENSE file in the root directory of this source tree) and the GPLv2 (found # in the COPYING file in the root directory of this source tree). +# You may select, at your option, one of the above-listed licenses. 
# ################################################################ -# Version numbers -LIBVER_MAJOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MAJOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < ./zstd.h` -LIBVER_MINOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MINOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < ./zstd.h` -LIBVER_PATCH_SCRIPT:=`sed -n '/define ZSTD_VERSION_RELEASE/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < ./zstd.h` -LIBVER_SCRIPT:= $(LIBVER_MAJOR_SCRIPT).$(LIBVER_MINOR_SCRIPT).$(LIBVER_PATCH_SCRIPT) -LIBVER_MAJOR := $(shell echo $(LIBVER_MAJOR_SCRIPT)) -LIBVER_MINOR := $(shell echo $(LIBVER_MINOR_SCRIPT)) -LIBVER_PATCH := $(shell echo $(LIBVER_PATCH_SCRIPT)) -LIBVER := $(shell echo $(LIBVER_SCRIPT)) -VERSION?= $(LIBVER) - -CPPFLAGS+= -I. -I./common -DXXH_NAMESPACE=ZSTD_ -ifeq ($(OS),Windows_NT) # MinGW assumed -CPPFLAGS += -D__USE_MINGW_ANSI_STDIO # compatibility with %zu formatting -endif -CFLAGS ?= -O3 -DEBUGFLAGS= -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ - -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \ - -Wstrict-prototypes -Wundef -Wpointer-arith \ - -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ - -Wredundant-decls -Wmissing-prototypes -Wc++-compat -CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS) -FLAGS = $(CPPFLAGS) $(CFLAGS) - -HAVE_COLORNEVER = $(shell echo a | grep --color=never a > /dev/null 2> /dev/null && echo 1 || echo 0) -GREP_OPTIONS ?= -ifeq ($HAVE_COLORNEVER, 1) -GREP_OPTIONS += --color=never -endif -GREP = grep $(GREP_OPTIONS) +# default target (when running `make` with no argument) +lib-release: -ZSTDCOMMON_FILES := $(sort $(wildcard common/*.c)) -ZSTDCOMP_FILES := $(sort $(wildcard compress/*.c)) -ZSTDDECOMP_FILES := $(sort $(wildcard decompress/*.c)) -ZDICT_FILES := $(sort $(wildcard dictBuilder/*.c)) -ZDEPR_FILES := $(sort $(wildcard deprecated/*.c)) -ZSTD_FILES := $(ZSTDCOMMON_FILES) - -ZSTD_LEGACY_SUPPORT ?= 5 +# Modules ZSTD_LIB_COMPRESSION ?= 1 ZSTD_LIB_DECOMPRESSION ?= 1 ZSTD_LIB_DICTBUILDER ?= 1 -ZSTD_LIB_DEPRECATED ?= 1 -HUF_FORCE_DECOMPRESS_X1 ?= 0 -HUF_FORCE_DECOMPRESS_X2 ?= 0 -ZSTD_FORCE_DECOMPRESS_SHORT ?= 0 -ZSTD_FORCE_DECOMPRESS_LONG ?= 0 -ZSTD_NO_INLINE ?= 0 -ZSTD_STRIP_ERROR_STRINGS ?= 0 -ZSTD_LEGACY_MULTITHREADED_API ?= 0 +ZSTD_LIB_DEPRECATED ?= 0 +# Input variables for libzstd.mk ifeq ($(ZSTD_LIB_COMPRESSION), 0) - ZSTD_LIB_DICTBUILDER = 0 - ZSTD_LIB_DEPRECATED = 0 + ZSTD_LIB_DICTBUILDER = 0 + ZSTD_LIB_DEPRECATED = 0 endif ifeq ($(ZSTD_LIB_DECOMPRESSION), 0) - ZSTD_LEGACY_SUPPORT = 0 - ZSTD_LIB_DEPRECATED = 0 + ZSTD_LEGACY_SUPPORT = 0 + ZSTD_LIB_DEPRECATED = 0 endif +include libzstd.mk + +ZSTD_FILES := $(ZSTD_COMMON_FILES) $(ZSTD_LEGACY_FILES) + ifneq ($(ZSTD_LIB_COMPRESSION), 0) - ZSTD_FILES += $(ZSTDCOMP_FILES) + ZSTD_FILES += $(ZSTD_COMPRESS_FILES) endif ifneq ($(ZSTD_LIB_DECOMPRESSION), 0) - ZSTD_FILES += $(ZSTDDECOMP_FILES) + ZSTD_FILES += $(ZSTD_DECOMPRESS_FILES) endif ifneq ($(ZSTD_LIB_DEPRECATED), 0) - ZSTD_FILES += $(ZDEPR_FILES) + ZSTD_FILES += $(ZSTD_DEPRECATED_FILES) endif ifneq ($(ZSTD_LIB_DICTBUILDER), 0) - ZSTD_FILES += $(ZDICT_FILES) -endif - -ifneq ($(HUF_FORCE_DECOMPRESS_X1), 0) - CFLAGS += -DHUF_FORCE_DECOMPRESS_X1 + ZSTD_FILES += $(ZSTD_DICTBUILDER_FILES) endif -ifneq ($(HUF_FORCE_DECOMPRESS_X2), 0) - CFLAGS += -DHUF_FORCE_DECOMPRESS_X2 -endif +ZSTD_LOCAL_SRC := $(notdir $(ZSTD_FILES)) +ZSTD_LOCAL_OBJ0 := $(ZSTD_LOCAL_SRC:.c=.o) +ZSTD_LOCAL_OBJ := $(ZSTD_LOCAL_OBJ0:.S=.o) -ifneq ($(ZSTD_FORCE_DECOMPRESS_SHORT), 0) - CFLAGS += -DZSTD_FORCE_DECOMPRESS_SHORT -endif +VERSION := $(ZSTD_VERSION) -ifneq 
($(ZSTD_FORCE_DECOMPRESS_LONG), 0) - CFLAGS += -DZSTD_FORCE_DECOMPRESS_LONG -endif +# Note: by default, the static library is built single-threaded and dynamic library is built +# multi-threaded. It is possible to force multi or single threaded builds by appending +# -mt or -nomt to the build target (like lib-mt for multi-threaded, lib-nomt for single-threaded). -ifneq ($(ZSTD_NO_INLINE), 0) - CFLAGS += -DZSTD_NO_INLINE -endif -ifneq ($(ZSTD_STRIP_ERROR_STRINGS), 0) - CFLAGS += -DZSTD_STRIP_ERROR_STRINGS -endif +CPPFLAGS_DYNLIB += -DZSTD_MULTITHREAD # dynamic library build defaults to multi-threaded +LDFLAGS_DYNLIB += -pthread +CPPFLAGS_STATICLIB += # static library build defaults to single-threaded -ifneq ($(ZSTD_LEGACY_MULTITHREADED_API), 0) - CFLAGS += -DZSTD_LEGACY_MULTITHREADED_API -endif +# pkg-config Libs.private points to LDFLAGS_DYNLIB +PCLIB := $(LDFLAGS_DYNLIB) -ifneq ($(ZSTD_LEGACY_SUPPORT), 0) -ifeq ($(shell test $(ZSTD_LEGACY_SUPPORT) -lt 8; echo $$?), 0) - ZSTD_FILES += $(shell ls legacy/*.c | $(GREP) 'v0[$(ZSTD_LEGACY_SUPPORT)-7]') -endif - CPPFLAGS += -I./legacy +ifeq ($(findstring GCC,$(CCVER)),GCC) +decompress/zstd_decompress_block.o : CFLAGS+=-fno-tree-vectorize endif -CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT) -ZSTD_OBJ := $(patsubst %.c,%.o,$(ZSTD_FILES)) # macOS linker doesn't support -soname, and use different extension # see : https://developer.apple.com/library/mac/documentation/DeveloperTools/Conceptual/DynamicLibraries/100-Articles/DynamicLibraryDesignGuidelines.html -ifeq ($(shell uname), Darwin) - SHARED_EXT = dylib - SHARED_EXT_MAJOR = $(LIBVER_MAJOR).$(SHARED_EXT) - SHARED_EXT_VER = $(LIBVER).$(SHARED_EXT) - SONAME_FLAGS = -install_name $(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) -compatibility_version $(LIBVER_MAJOR) -current_version $(LIBVER) +UNAME_TARGET_SYSTEM ?= $(UNAME) + +ifeq ($(UNAME_TARGET_SYSTEM), Darwin) + SHARED_EXT = dylib + SHARED_EXT_MAJOR = $(LIBVER_MAJOR).$(SHARED_EXT) + SHARED_EXT_VER = $(LIBVER).$(SHARED_EXT) + SONAME_FLAGS = -install_name $(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) -compatibility_version $(LIBVER_MAJOR) -current_version $(LIBVER) else - SONAME_FLAGS = -Wl,-soname=libzstd.$(SHARED_EXT).$(LIBVER_MAJOR) - SHARED_EXT = so - SHARED_EXT_MAJOR = $(SHARED_EXT).$(LIBVER_MAJOR) - SHARED_EXT_VER = $(SHARED_EXT).$(LIBVER) + ifeq ($(UNAME_TARGET_SYSTEM), AIX) + SONAME_FLAGS = + else + SONAME_FLAGS = -Wl,-soname=libzstd.$(SHARED_EXT).$(LIBVER_MAJOR) + endif + SHARED_EXT = so + SHARED_EXT_MAJOR = $(SHARED_EXT).$(LIBVER_MAJOR) + SHARED_EXT_VER = $(SHARED_EXT).$(LIBVER) endif -.PHONY: default all clean install uninstall +.PHONY: all +all: lib + -default: lib-release +.PHONY: libzstd.a # must be run every time +libzstd.a: CPPFLAGS += $(CPPFLAGS_STATICLIB) -all: lib +SET_CACHE_DIRECTORY = \ + +$(MAKE) --no-print-directory $@ \ + BUILD_DIR=obj/$(HASH_DIR) \ + CPPFLAGS="$(CPPFLAGS)" \ + CFLAGS="$(CFLAGS)" \ + LDFLAGS="$(LDFLAGS)" -libzstd.a: ARFLAGS = rcs -libzstd.a: $(ZSTD_OBJ) - @echo compiling static library - @$(AR) $(ARFLAGS) $@ $^ +ifndef BUILD_DIR +# determine BUILD_DIR from compilation flags -libzstd.a-mt: CPPFLAGS += -DZSTD_MULTITHREAD -libzstd.a-mt: libzstd.a +libzstd.a: + $(SET_CACHE_DIRECTORY) -ifneq (,$(filter Windows%,$(OS))) +else +# BUILD_DIR is defined + +ZSTD_STATICLIB_DIR := $(BUILD_DIR)/static +ZSTD_STATICLIB := $(ZSTD_STATICLIB_DIR)/libzstd.a +ZSTD_STATICLIB_OBJ := $(addprefix $(ZSTD_STATICLIB_DIR)/,$(ZSTD_LOCAL_OBJ)) +$(ZSTD_STATICLIB): ARFLAGS = rcs +$(ZSTD_STATICLIB): | $(ZSTD_STATICLIB_DIR) +$(ZSTD_STATICLIB): 
$(ZSTD_STATICLIB_OBJ) + # Check for multithread flag at target execution time + $(if $(filter -DZSTD_MULTITHREAD,$(CPPFLAGS)),\ + @echo compiling multi-threaded static library $(LIBVER),\ + @echo compiling single-threaded static library $(LIBVER)) + $(AR) $(ARFLAGS) $@ $^ + +libzstd.a: $(ZSTD_STATICLIB) + $(CP) $< $@ + +endif -LIBZSTD = dll\libzstd.dll +ifneq (,$(filter Windows%,$(TARGET_SYSTEM))) + +LIBZSTD = dll/libzstd.dll $(LIBZSTD): $(ZSTD_FILES) @echo compiling dynamic library $(LIBVER) - $(CC) $(FLAGS) -DZSTD_DLL_EXPORT=1 -Wl,--out-implib,dll\libzstd.lib -shared $^ -o $@ + $(CC) $(FLAGS) -DZSTD_DLL_EXPORT=1 -Wl,--out-implib,dll/libzstd.dll.a -shared $^ -o $@ -else +else # not Windows LIBZSTD = libzstd.$(SHARED_EXT_VER) -$(LIBZSTD): LDFLAGS += -shared -fPIC -fvisibility=hidden -$(LIBZSTD): $(ZSTD_FILES) - @echo compiling dynamic library $(LIBVER) - @$(CC) $(FLAGS) $^ $(LDFLAGS) $(SONAME_FLAGS) -o $@ +.PHONY: $(LIBZSTD) # must be run every time +$(LIBZSTD): CPPFLAGS += $(CPPFLAGS_DYNLIB) +$(LIBZSTD): CFLAGS += -fPIC -fvisibility=hidden +$(LIBZSTD): LDFLAGS += -shared $(LDFLAGS_DYNLIB) + +ifndef BUILD_DIR +# determine BUILD_DIR from compilation flags + +$(LIBZSTD): + $(SET_CACHE_DIRECTORY) + +else +# BUILD_DIR is defined + +ZSTD_DYNLIB_DIR := $(BUILD_DIR)/dynamic +ZSTD_DYNLIB := $(ZSTD_DYNLIB_DIR)/$(LIBZSTD) +ZSTD_DYNLIB_OBJ := $(addprefix $(ZSTD_DYNLIB_DIR)/,$(ZSTD_LOCAL_OBJ)) + +$(ZSTD_DYNLIB): | $(ZSTD_DYNLIB_DIR) +$(ZSTD_DYNLIB): $(ZSTD_DYNLIB_OBJ) +# Check for multithread flag at target execution time + $(if $(filter -DZSTD_MULTITHREAD,$(CPPFLAGS)),\ + @echo compiling multi-threaded dynamic library $(LIBVER),\ + @echo compiling single-threaded dynamic library $(LIBVER)) + $(CC) $(FLAGS) $^ $(SONAME_FLAGS) -o $@ @echo creating versioned links - @ln -sf $@ libzstd.$(SHARED_EXT_MAJOR) - @ln -sf $@ libzstd.$(SHARED_EXT) + $(LN) -sf $@ libzstd.$(SHARED_EXT_MAJOR) + $(LN) -sf $@ libzstd.$(SHARED_EXT) -endif +$(LIBZSTD): $(ZSTD_DYNLIB) + $(CP) $< $@ +endif # ifndef BUILD_DIR +endif # if windows +.PHONY: libzstd libzstd : $(LIBZSTD) -libzstd-mt : CPPFLAGS += -DZSTD_MULTITHREAD -libzstd-mt : libzstd +.PHONY: lib +lib : libzstd.a libzstd + + +# note : do not define lib-mt or lib-release as .PHONY +# make does not consider implicit pattern rule for .PHONY target + +%-mt : CPPFLAGS_DYNLIB := -DZSTD_MULTITHREAD +%-mt : CPPFLAGS_STATICLIB := -DZSTD_MULTITHREAD +%-mt : LDFLAGS_DYNLIB := -pthread +%-mt : PCLIB := +%-mt : PCMTLIB := $(LDFLAGS_DYNLIB) +%-mt : % + @echo multi-threaded build completed + +%-nomt : CPPFLAGS_DYNLIB := +%-nomt : LDFLAGS_DYNLIB := +%-nomt : CPPFLAGS_STATICLIB := +%-nomt : PCLIB := +%-nomt : % + @echo single-threaded build completed + +%-release : DEBUGFLAGS := +%-release : % + @echo release build completed + + +# Generate .h dependencies automatically + +# -MMD: compiler generates dependency information as a side-effect of compilation, without system headers +# -MP: adds phony target for each dependency other than main file. 
+DEPFLAGS = -MMD -MP + +# ensure that ZSTD_DYNLIB_DIR exists prior to generating %.o +$(ZSTD_DYNLIB_DIR)/%.o : %.c | $(ZSTD_DYNLIB_DIR) + @echo CC $@ + $(COMPILE.c) $(DEPFLAGS) $(OUTPUT_OPTION) $< + +$(ZSTD_STATICLIB_DIR)/%.o : %.c | $(ZSTD_STATICLIB_DIR) + @echo CC $@ + $(COMPILE.c) $(DEPFLAGS) $(OUTPUT_OPTION) $< + +$(ZSTD_DYNLIB_DIR)/%.o : %.S | $(ZSTD_DYNLIB_DIR) + @echo AS $@ + $(COMPILE.S) $(OUTPUT_OPTION) $< -lib: libzstd.a libzstd +$(ZSTD_STATICLIB_DIR)/%.o : %.S | $(ZSTD_STATICLIB_DIR) + @echo AS $@ + $(COMPILE.S) $(OUTPUT_OPTION) $< -lib-mt: CPPFLAGS += -DZSTD_MULTITHREAD -lib-mt: lib +MKDIR ?= mkdir -p +$(BUILD_DIR) $(ZSTD_DYNLIB_DIR) $(ZSTD_STATICLIB_DIR): + $(MKDIR) $@ -lib-release lib-release-mt: DEBUGFLAGS := -lib-release: lib -lib-release-mt: lib-mt +DEPFILES := $(ZSTD_DYNLIB_OBJ:.o=.d) $(ZSTD_STATICLIB_OBJ:.o=.d) +$(DEPFILES): -# Special case : building library in single-thread mode _and_ without zstdmt_compress.c -ZSTDMT_FILES = compress/zstdmt_compress.c -ZSTD_NOMT_FILES = $(filter-out $(ZSTDMT_FILES),$(ZSTD_FILES)) -libzstd-nomt: LDFLAGS += -shared -fPIC -fvisibility=hidden +# The leading '-' means: do not fail is include fails (ex: directory does not exist yet) +-include $(wildcard $(DEPFILES)) + + +# Special case : build library in single-thread mode _and_ without zstdmt_compress.c +# Note : we still need threading.c and pool.c for the dictionary builder, +# but they will correctly behave single-threaded. +ZSTDMT_FILES = zstdmt_compress.c +ZSTD_NOMT_FILES = $(filter-out $(ZSTDMT_FILES),$(notdir $(ZSTD_FILES))) +libzstd-nomt: CFLAGS += -fPIC -fvisibility=hidden +libzstd-nomt: LDFLAGS += -shared libzstd-nomt: $(ZSTD_NOMT_FILES) @echo compiling single-thread dynamic library $(LIBVER) @echo files : $(ZSTD_NOMT_FILES) - @$(CC) $(FLAGS) $^ $(LDFLAGS) $(SONAME_FLAGS) -o $@ + @if echo "$(ZSTD_NOMT_FILES)" | tr ' ' '\n' | $(GREP) -q zstdmt; then \ + echo "Error: Found zstdmt in list."; \ + exit 1; \ + fi + $(CC) $(FLAGS) $^ $(SONAME_FLAGS) -o $@ +.PHONY: clean clean: - @$(RM) -r *.dSYM # macOS-specific - @$(RM) core *.o *.a *.gcda *.$(SHARED_EXT) *.$(SHARED_EXT).* libzstd.pc - @$(RM) dll/libzstd.dll dll/libzstd.lib libzstd-nomt* - @$(RM) common/*.o compress/*.o decompress/*.o dictBuilder/*.o legacy/*.o deprecated/*.o + $(RM) -r *.dSYM # macOS-specific + $(RM) core *.o *.a *.gcda *.$(SHARED_EXT) *.$(SHARED_EXT).* libzstd.pc + $(RM) dll/libzstd.dll dll/libzstd.lib libzstd-nomt* + $(RM) -r obj/* @echo Cleaning library completed #----------------------------------------------------------------------------- -# make install is validated only for Linux, macOS, BSD, Hurd and Solaris targets +# make install is validated only for below listed environments #----------------------------------------------------------------------------- -ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku)) +ifneq (,$(filter $(INSTALL_OS_LIST),$(UNAME))) + +lib: libzstd.pc + +HAS_EXPLICIT_EXEC_PREFIX := $(if $(or $(EXEC_PREFIX),$(exec_prefix)),1,) DESTDIR ?= # directory variables : GNU conventions prefer lowercase @@ -214,73 +281,109 @@ DESTDIR ?= prefix ?= /usr/local PREFIX ?= $(prefix) exec_prefix ?= $(PREFIX) -libdir ?= $(exec_prefix)/lib +EXEC_PREFIX ?= $(exec_prefix) +libdir ?= $(EXEC_PREFIX)/lib LIBDIR ?= $(libdir) includedir ?= $(PREFIX)/include INCLUDEDIR ?= $(includedir) -ifneq (,$(filter $(shell uname),FreeBSD NetBSD DragonFly)) -PKGCONFIGDIR ?= $(PREFIX)/libdata/pkgconfig +PCINCDIR := $(patsubst $(PREFIX)%,%,$(INCLUDEDIR)) +PCLIBDIR := 
$(patsubst $(EXEC_PREFIX)%,%,$(LIBDIR)) + +# If we successfully stripped off a prefix, we'll add a reference to the +# relevant pc variable. +PCINCPREFIX := $(if $(findstring $(INCLUDEDIR),$(PCINCDIR)),,$${prefix}) +PCLIBPREFIX := $(if $(findstring $(LIBDIR),$(PCLIBDIR)),,$${exec_prefix}) + +# If no explicit EXEC_PREFIX was set by the caller, write it out as a reference +# to PREFIX, rather than as a resolved value. +PCEXEC_PREFIX := $(if $(HAS_EXPLICIT_EXEC_PREFIX),$(EXEC_PREFIX),$${prefix}) + + +ifneq ($(MT),) + PCLIB := + PCMTLIB := $(LDFLAGS_DYNLIB) +else + PCLIB := $(LDFLAGS_DYNLIB) +endif + +ifneq (,$(filter FreeBSD NetBSD DragonFly,$(UNAME))) + PKGCONFIGDIR ?= $(PREFIX)/libdata/pkgconfig else -PKGCONFIGDIR ?= $(LIBDIR)/pkgconfig + PKGCONFIGDIR ?= $(LIBDIR)/pkgconfig endif -ifneq (,$(filter $(shell uname),SunOS)) -INSTALL ?= ginstall +ifneq (,$(filter SunOS,$(UNAME))) + INSTALL ?= ginstall else -INSTALL ?= install + INSTALL ?= install endif INSTALL_PROGRAM ?= $(INSTALL) INSTALL_DATA ?= $(INSTALL) -m 644 -libzstd.pc: +# pkg-config library define. +# For static single-threaded library declare -pthread in Libs.private +# For static multi-threaded library declare -pthread in Libs and Cflags +.PHONY: libzstd.pc libzstd.pc: libzstd.pc.in @echo creating pkgconfig - @sed -e 's|@PREFIX@|$(PREFIX)|' \ - -e 's|@LIBDIR@|$(LIBDIR)|' \ - -e 's|@INCLUDEDIR@|$(INCLUDEDIR)|' \ - -e 's|@VERSION@|$(VERSION)|' \ - $< >$@ - + @sed \ + -e 's|@PREFIX@|$(PREFIX)|' \ + -e 's|@EXEC_PREFIX@|$(PCEXEC_PREFIX)|' \ + -e 's|@INCLUDEDIR@|$(PCINCPREFIX)$(PCINCDIR)|' \ + -e 's|@LIBDIR@|$(PCLIBPREFIX)$(PCLIBDIR)|' \ + -e 's|@VERSION@|$(VERSION)|' \ + -e 's|@LIBS_MT@|$(PCMTLIB)|' \ + -e 's|@LIBS_PRIVATE@|$(PCLIB)|' \ + $< >$@ + +.PHONY: install install: install-pc install-static install-shared install-includes @echo zstd static and shared library installed +.PHONY: install-pc install-pc: libzstd.pc - @$(INSTALL) -d -m 755 $(DESTDIR)$(PKGCONFIGDIR)/ - @$(INSTALL_DATA) libzstd.pc $(DESTDIR)$(PKGCONFIGDIR)/ - -install-static: libzstd.a + [ -e $(DESTDIR)$(PKGCONFIGDIR) ] || $(INSTALL) -d -m 755 $(DESTDIR)$(PKGCONFIGDIR)/ + $(INSTALL_DATA) libzstd.pc $(DESTDIR)$(PKGCONFIGDIR)/ + +.PHONY: install-static +install-static: + # only generate libzstd.a if it's not already present + [ -e libzstd.a ] || $(MAKE) libzstd.a-release + [ -e $(DESTDIR)$(LIBDIR) ] || $(INSTALL) -d -m 755 $(DESTDIR)$(LIBDIR)/ @echo Installing static library - @$(INSTALL) -d -m 755 $(DESTDIR)$(LIBDIR)/ - @$(INSTALL_DATA) libzstd.a $(DESTDIR)$(LIBDIR) + $(INSTALL_DATA) libzstd.a $(DESTDIR)$(LIBDIR) -install-shared: libzstd +.PHONY: install-shared +install-shared: + # only generate libzstd.so if it's not already present + [ -e $(LIBZSTD) ] || $(MAKE) libzstd-release + [ -e $(DESTDIR)$(LIBDIR) ] || $(INSTALL) -d -m 755 $(DESTDIR)$(LIBDIR)/ @echo Installing shared library - @$(INSTALL) -d -m 755 $(DESTDIR)$(LIBDIR)/ - @$(INSTALL_PROGRAM) $(LIBZSTD) $(DESTDIR)$(LIBDIR) - @ln -sf $(LIBZSTD) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) - @ln -sf $(LIBZSTD) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT) + $(INSTALL_PROGRAM) $(LIBZSTD) $(DESTDIR)$(LIBDIR) + $(LN) -sf $(LIBZSTD) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) + $(LN) -sf $(LIBZSTD) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT) +.PHONY: install-includes install-includes: + [ -e $(DESTDIR)$(INCLUDEDIR) ] || $(INSTALL) -d -m 755 $(DESTDIR)$(INCLUDEDIR)/ @echo Installing includes - @$(INSTALL) -d -m 755 $(DESTDIR)$(INCLUDEDIR)/ - @$(INSTALL_DATA) zstd.h $(DESTDIR)$(INCLUDEDIR) - @$(INSTALL_DATA) 
common/zstd_errors.h $(DESTDIR)$(INCLUDEDIR) - @$(INSTALL_DATA) deprecated/zbuff.h $(DESTDIR)$(INCLUDEDIR) # prototypes generate deprecation warnings - @$(INSTALL_DATA) dictBuilder/zdict.h $(DESTDIR)$(INCLUDEDIR) + $(INSTALL_DATA) zstd.h $(DESTDIR)$(INCLUDEDIR) + $(INSTALL_DATA) zstd_errors.h $(DESTDIR)$(INCLUDEDIR) + $(INSTALL_DATA) zdict.h $(DESTDIR)$(INCLUDEDIR) +.PHONY: uninstall uninstall: - @$(RM) $(DESTDIR)$(LIBDIR)/libzstd.a - @$(RM) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT) - @$(RM) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) - @$(RM) $(DESTDIR)$(LIBDIR)/$(LIBZSTD) - @$(RM) $(DESTDIR)$(PKGCONFIGDIR)/libzstd.pc - @$(RM) $(DESTDIR)$(INCLUDEDIR)/zstd.h - @$(RM) $(DESTDIR)$(INCLUDEDIR)/zstd_errors.h - @$(RM) $(DESTDIR)$(INCLUDEDIR)/zbuff.h # Deprecated streaming functions - @$(RM) $(DESTDIR)$(INCLUDEDIR)/zdict.h + $(RM) $(DESTDIR)$(LIBDIR)/libzstd.a + $(RM) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT) + $(RM) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) + $(RM) $(DESTDIR)$(LIBDIR)/$(LIBZSTD) + $(RM) $(DESTDIR)$(PKGCONFIGDIR)/libzstd.pc + $(RM) $(DESTDIR)$(INCLUDEDIR)/zstd.h + $(RM) $(DESTDIR)$(INCLUDEDIR)/zstd_errors.h + $(RM) $(DESTDIR)$(INCLUDEDIR)/zdict.h @echo zstd libraries successfully uninstalled endif diff --git a/lib/README.md b/lib/README.md index 792729b1f9b..3974de160ec 100644 --- a/lib/README.md +++ b/lib/README.md @@ -7,30 +7,52 @@ in order to make it easier to select or exclude features. #### Building -`Makefile` script is provided, supporting [Makefile conventions](https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html#Makefile-Conventions), +A `Makefile` script is provided, supporting [Makefile conventions](https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html#Makefile-Conventions), including commands variables, staged install, directory variables and standard targets. - `make` : generates both static and dynamic libraries -- `make install` : install libraries and headers in target system directories +- `make install` : install libraries, headers and the pkg-config file in local system directories -`libzstd` default scope is pretty large, including compression, decompression, dictionary builder, -and support for decoding legacy formats >= v0.5.0. +`libzstd` default scope is extensive, including compression, decompression, dictionary builder, +and support for decoding legacy formats >= v0.5.0. The scope can be reduced on demand (see paragraph _modular build_). +#### Multiarch Support + +For multiarch systems (like Debian/Ubuntu), libraries should be installed to architecture-specific directories. +When creating packages for such systems, use the `LIBDIR` variable to specify the correct multiarch path: + +```bash +# For x86_64 systems on Ubuntu/Debian: +make install PREFIX=/usr LIBDIR=/usr/lib/x86_64-linux-gnu + +# For ARM64 systems on Ubuntu/Debian: +make install PREFIX=/usr LIBDIR=/usr/lib/aarch64-linux-gnu +``` + +This will not only install the files in the correct directories, but also generate the correct paths for `pkg-config`. #### Multithreading support -Multithreading is disabled by default when building with `make`. +When building with `make`, by default the dynamic library is multithreaded and the static library is single-threaded (for compatibility reasons).
+ Enabling multithreading requires 2 conditions : - set build macro `ZSTD_MULTITHREAD` (`-DZSTD_MULTITHREAD` for `gcc`) - for POSIX systems : compile with pthread (`-pthread` compilation flag for `gcc`) -Both conditions are automatically applied when invoking `make lib-mt` target. +For convenience, we provide build targets to generate multi- and single-threaded libraries: +- Force enable multithreading on both dynamic and static libraries by appending `-mt` to the target, e.g. `make lib-mt`. + Note that the `.pc` generated on calling `make lib-mt` will already include the required Libs and Cflags. +- Force disable multithreading on both dynamic and static libraries by appending `-nomt` to the target, e.g. `make lib-nomt`. +- By default, as mentioned before, the dynamic library is multithreaded and the static library is single-threaded, e.g. `make lib`. When linking a POSIX program with a multithreaded version of `libzstd`, -note that it's necessary to request the `-pthread` flag during link stage. +note that it's necessary to pass the `-pthread` flag at the link stage. + +The `.pc` generated from `make install` or `make install-pc` always assumes that a single-threaded static library +is compiled. To correctly generate a `.pc` for the multi-threaded static library, set `MT=1` as an environment variable. Multithreading capabilities are exposed -via the [advanced API defined in `lib/zstd.h`](https://github.com/facebook/zstd/blob/v1.3.8/lib/zstd.h#L592). +via the [advanced API defined in `lib/zstd.h`](https://github.com/facebook/zstd/blob/v1.4.3/lib/zstd.h#L351).
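As a minimal sketch of how a program can check which flavor it linked against (an illustration only; it relies on the stable API from `zstd.h`, where requesting worker threads is rejected by a single-threaded build):

```c
#include <stdio.h>
#include <zstd.h>   /* stable API: ZSTD_createCCtx, ZSTD_CCtx_setParameter */

int main(void)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cctx == NULL) return 1;
    /* Requesting >= 1 worker threads only succeeds when libzstd was built
     * with ZSTD_MULTITHREAD (and, on POSIX, linked with -pthread). */
    {   size_t const ret = ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 2);
        if (ZSTD_isError(ret)) {
            printf("single-threaded libzstd: %s\n", ZSTD_getErrorName(ret));
        } else {
            printf("multithreaded libzstd\n");
        }
    }
    ZSTD_freeCCtx(cctx);
    return 0;
}
```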
+ +- There are a number of options that can help minimize the binary size of + `libzstd`. + + The first step is to select the components needed (using the above-described + `ZSTD_LIB_COMPRESSION` etc.). + + The next step is to set `ZSTD_LIB_MINIFY` to `1` when invoking `make`. This + disables various optional components and changes the compilation flags to + prioritize space-saving. + + Detailed options: Zstandard's code and build environment is set up by default + to optimize above all else for performance. In pursuit of this goal, Zstandard + makes significant trade-offs in code size. For example, Zstandard often has + more than one implementation of a particular component, with each + implementation optimized for different scenarios. For example, the Huffman + decoder has complementary implementations that decode the stream one symbol at + a time or two symbols at a time. Zstd normally includes both (and dispatches + between them at runtime), but by defining `HUF_FORCE_DECOMPRESS_X1` or + `HUF_FORCE_DECOMPRESS_X2`, you can force the use of one or the other, avoiding compilation of the other. Similarly, `ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT` and `ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG` force the compilation and use of only one or the other of two decompression implementations. The smallest binary is achieved by using `HUF_FORCE_DECOMPRESS_X1` and - `ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT`. + `ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT` (implied by `ZSTD_LIB_MINIFY`). + + On the compressor side, Zstd's compression levels map to several internal + strategies. In environments where the higher compression levels aren't used, + it is possible to exclude all but the fastest strategy with + `ZSTD_LIB_EXCLUDE_COMPRESSORS_DFAST_AND_UP=1`. (Note that this will change + the behavior of the default compression level.) Or if you want to retain the + default compressor as well, you can set + `ZSTD_LIB_EXCLUDE_COMPRESSORS_GREEDY_AND_UP=1`, at the cost of an additional + ~20KB or so. For squeezing the last ounce of size out, you can also define `ZSTD_NO_INLINE`, which disables inlining, and `ZSTD_STRIP_ERROR_STRINGS`, which removes the error messages that are otherwise returned by - `ZSTD_getErrorName`. + `ZSTD_getErrorName` (implied by `ZSTD_LIB_MINIFY`). + + Finally, when integrating into your application, make sure you're doing link- + time optimization and unused symbol garbage collection (via some combination of, + e.g., `-flto`, `-ffat-lto-objects`, `-fuse-linker-plugin`, + `-ffunction-sections`, `-fdata-sections`, `-fmerge-all-constants`, + `-Wl,--gc-sections`, `-Wl,-z,norelro`, and an archiver that understands + the compiler's intermediate representation, e.g., `AR=gcc-ar`). Consult your + compiler's documentation. - While invoking `make libzstd`, the build macro `ZSTD_LEGACY_MULTITHREADED_API=1` will expose the deprecated `ZSTDMT` API exposed by `zstdmt_compress.h` in the shared library, which is now hidden by default. +- The build macro `STATIC_BMI2` can be set to 1 to force usage of `bmi2` instructions. + It is generally not necessary to set this build macro, + because `STATIC_BMI2` will be automatically set to 1 + on detecting the presence of the corresponding instruction set in the compilation target. + It's nonetheless available as an optional manual toggle for better control, + and can also be used to forcefully disable `bmi2` instructions by setting it to 0. 
+ +- The build macro `DYNAMIC_BMI2` can be set to 1 or 0 in order to generate binaries + which can detect at runtime the presence of BMI2 instructions, and use them only if present. + These instructions contribute to better performance, notably on the decoder side. + By default, this feature is automatically enabled on detecting + the right instruction set (x64) and compiler (clang or gcc >= 5). + It is automatically disabled on other CPUs, + or when BMI2 instruction set is _required_ by the compiler command line + (in this case, only the BMI2 code path is generated). + Setting this macro will either force generation of the BMI2 dispatcher (1) + or prevent it (0). It overrides automatic detection. + +- The build macro `ZSTD_NO_UNUSED_FUNCTIONS` can be defined to hide the definitions of functions + that zstd does not use. Not all unused functions are hidden, but they can be if needed. + Currently, this macro will hide function definitions in FSE and HUF that use an excessive + amount of stack space. + +- The build macro `ZSTD_NO_INTRINSICS` can be defined to disable all explicit intrinsics. + Compiler builtins are still used. + +- The build macro `ZSTD_DECODER_INTERNAL_BUFFER` can be set to control + the amount of extra memory used during decompression to store literals. + This defaults to 64kB. Reducing this value reduces the memory footprint of + `ZSTD_DCtx` decompression contexts, + but might also result in a small decompression speed cost. + +- The C compiler macros `ZSTDLIB_VISIBLE`, `ZSTDERRORLIB_VISIBLE` and `ZDICTLIB_VISIBLE` + can be overridden to control the visibility of zstd's API. Additionally, + `ZSTDLIB_STATIC_API` and `ZDICTLIB_STATIC_API` can be overridden to control the visibility + of zstd's static API. Specifically, it can be set to `ZSTDLIB_HIDDEN` to hide the symbols + from the shared library. These macros default to `ZSTDLIB_VISIBILITY`, + `ZSTDERRORLIB_VISIBILITY`, and `ZDICTLIB_VISIBILITY` if unset, for backwards compatibility + with the old macro names. + +- The C compiler macro `HUF_DISABLE_FAST_DECODE` disables the newer Huffman fast C + and assembly decoding loops. You may want to use this macro if these loops are + slower on your platform. + +- The macro `ZDICT_QSORT` can enforce selection of a specific sorting variant, + which is useful when autodetection fails, for example with older versions of `musl`. + For this scenario, it can be set as `ZDICT_QSORT=ZDICT_QSORT_C90`. + Other selectable suffixes are `_GNU`, `_APPLE`, `_MSVC` and `_C11`. #### Windows : using MinGW+MSYS to create DLL @@ -129,6 +228,26 @@ file it should be linked with `dll\libzstd.dll`. For example: The compiled executable will require ZSTD DLL which is available at `dll\libzstd.dll`. +#### Advanced Build options + +The build system requires a hash function in order to +separate object files created with different compilation flags. +By default, it tries to use `md5sum` or equivalent. +The hash function can be manually switched by setting the `HASH` variable. +For example : `make HASH=xxhsum` +The hash function needs to generate at least 64 bits of output, in hexadecimal format. +When no hash function is found, +the Makefile just generates all object files into the same default directory, +irrespective of compilation flags. +This functionality only matters if `libzstd` is compiled multiple times +with different build flags. + +The build directory, where object files are stored, +can also be manually controlled using the variable `BUILD_DIR`, +for example `make BUILD_DIR=objectDir/v1`.
+In which case, the hash function doesn't matter. + + #### Deprecated API Obsolete API on their way out are stored in directory `lib/deprecated`. diff --git a/lib/common/allocations.h b/lib/common/allocations.h new file mode 100644 index 00000000000..5e899550109 --- /dev/null +++ b/lib/common/allocations.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/* This file provides custom allocation primitives + */ + +#define ZSTD_DEPS_NEED_MALLOC +#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */ + +#include "compiler.h" /* MEM_STATIC */ +#define ZSTD_STATIC_LINKING_ONLY +#include "../zstd.h" /* ZSTD_customMem */ + +#ifndef ZSTD_ALLOCATIONS_H +#define ZSTD_ALLOCATIONS_H + +/* custom memory allocation functions */ + +MEM_STATIC void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem) +{ + if (customMem.customAlloc) + return customMem.customAlloc(customMem.opaque, size); + return ZSTD_malloc(size); +} + +MEM_STATIC void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem) +{ + if (customMem.customAlloc) { + /* calloc implemented as malloc+memset; + * not as efficient as calloc, but next best guess for custom malloc */ + void* const ptr = customMem.customAlloc(customMem.opaque, size); + ZSTD_memset(ptr, 0, size); + return ptr; + } + return ZSTD_calloc(1, size); +} + +MEM_STATIC void ZSTD_customFree(void* ptr, ZSTD_customMem customMem) +{ + if (ptr!=NULL) { + if (customMem.customFree) + customMem.customFree(customMem.opaque, ptr); + else + ZSTD_free(ptr); + } +} + +#endif /* ZSTD_ALLOCATIONS_H */ diff --git a/lib/common/bits.h b/lib/common/bits.h new file mode 100644 index 00000000000..910ffa38812 --- /dev/null +++ b/lib/common/bits.h @@ -0,0 +1,205 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef ZSTD_BITS_H +#define ZSTD_BITS_H + +#include "mem.h" + +MEM_STATIC unsigned ZSTD_countTrailingZeros32_fallback(U32 val) +{ + assert(val != 0); + { + static const U32 DeBruijnBytePos[32] = {0, 1, 28, 2, 29, 14, 24, 3, + 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, + 26, 12, 18, 6, 11, 5, 10, 9}; + return DeBruijnBytePos[((U32) ((val & (0-val)) * 0x077CB531U)) >> 27]; + } +} + +MEM_STATIC unsigned ZSTD_countTrailingZeros32(U32 val) +{ + assert(val != 0); +#if defined(_MSC_VER) +# if STATIC_BMI2 + return (unsigned)_tzcnt_u32(val); +# else + if (val != 0) { + unsigned long r; + _BitScanForward(&r, val); + return (unsigned)r; + } else { + __assume(0); /* Should not reach this code path */ + } +# endif +#elif defined(__GNUC__) && (__GNUC__ >= 4) + return (unsigned)__builtin_ctz(val); +#elif defined(__ICCARM__) + return (unsigned)__builtin_ctz(val); +#else + return ZSTD_countTrailingZeros32_fallback(val); +#endif +} + +MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback(U32 val) +{ + assert(val != 0); + { + static const U32 DeBruijnClz[32] = {0, 9, 1, 10, 13, 21, 2, 29, + 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, + 19, 27, 23, 6, 26, 5, 4, 31}; + val |= val >> 1; + val |= val >> 2; + val |= val >> 4; + val |= val >> 8; + val |= val >> 16; + return 31 - DeBruijnClz[(val * 0x07C4ACDDU) >> 27]; + } +} + +MEM_STATIC unsigned ZSTD_countLeadingZeros32(U32 val) +{ + assert(val != 0); +#if defined(_MSC_VER) +# if STATIC_BMI2 + return (unsigned)_lzcnt_u32(val); +# else + if (val != 0) { + unsigned long r; + _BitScanReverse(&r, val); + return (unsigned)(31 - r); + } else { + __assume(0); /* Should not reach this code path */ + } +# endif +#elif defined(__GNUC__) && (__GNUC__ >= 4) + return (unsigned)__builtin_clz(val); +#elif defined(__ICCARM__) + return (unsigned)__builtin_clz(val); +#else + return ZSTD_countLeadingZeros32_fallback(val); +#endif +} + +MEM_STATIC unsigned ZSTD_countTrailingZeros64(U64 val) +{ + assert(val != 0); +#if defined(_MSC_VER) && defined(_WIN64) +# if STATIC_BMI2 + return (unsigned)_tzcnt_u64(val); +# else + if (val != 0) { + unsigned long r; + _BitScanForward64(&r, val); + return (unsigned)r; + } else { + __assume(0); /* Should not reach this code path */ + } +# endif +#elif defined(__GNUC__) && (__GNUC__ >= 4) && defined(__LP64__) + return (unsigned)__builtin_ctzll(val); +#elif defined(__ICCARM__) + return (unsigned)__builtin_ctzll(val); +#else + { + U32 mostSignificantWord = (U32)(val >> 32); + U32 leastSignificantWord = (U32)val; + if (leastSignificantWord == 0) { + return 32 + ZSTD_countTrailingZeros32(mostSignificantWord); + } else { + return ZSTD_countTrailingZeros32(leastSignificantWord); + } + } +#endif +} + +MEM_STATIC unsigned ZSTD_countLeadingZeros64(U64 val) +{ + assert(val != 0); +#if defined(_MSC_VER) && defined(_WIN64) +# if STATIC_BMI2 + return (unsigned)_lzcnt_u64(val); +# else + if (val != 0) { + unsigned long r; + _BitScanReverse64(&r, val); + return (unsigned)(63 - r); + } else { + __assume(0); /* Should not reach this code path */ + } +# endif +#elif defined(__GNUC__) && (__GNUC__ >= 4) + return (unsigned)(__builtin_clzll(val)); +#elif defined(__ICCARM__) + return (unsigned)(__builtin_clzll(val)); +#else + { + U32 mostSignificantWord = (U32)(val >> 32); + U32 leastSignificantWord = (U32)val; + if (mostSignificantWord == 0) { + return 32 + ZSTD_countLeadingZeros32(leastSignificantWord); + } else { + return ZSTD_countLeadingZeros32(mostSignificantWord); + } + } +#endif +} + +MEM_STATIC unsigned 
ZSTD_NbCommonBytes(size_t val) +{ + if (MEM_isLittleEndian()) { + if (MEM_64bits()) { + return ZSTD_countTrailingZeros64((U64)val) >> 3; + } else { + return ZSTD_countTrailingZeros32((U32)val) >> 3; + } + } else { /* Big Endian CPU */ + if (MEM_64bits()) { + return ZSTD_countLeadingZeros64((U64)val) >> 3; + } else { + return ZSTD_countLeadingZeros32((U32)val) >> 3; + } + } +} + +MEM_STATIC unsigned ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */ +{ + assert(val != 0); + return 31 - ZSTD_countLeadingZeros32(val); +} + +/* ZSTD_rotateRight_*(): + * Rotates a bitfield to the right by "count" bits. + * https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts + */ +MEM_STATIC +U64 ZSTD_rotateRight_U64(U64 const value, U32 count) { + assert(count < 64); + count &= 0x3F; /* for fickle pattern recognition */ + return (value >> count) | (U64)(value << ((0U - count) & 0x3F)); +} + +MEM_STATIC +U32 ZSTD_rotateRight_U32(U32 const value, U32 count) { + assert(count < 32); + count &= 0x1F; /* for fickle pattern recognition */ + return (value >> count) | (U32)(value << ((0U - count) & 0x1F)); +} + +MEM_STATIC +U16 ZSTD_rotateRight_U16(U16 const value, U32 count) { + assert(count < 16); + count &= 0x0F; /* for fickle pattern recognition */ + return (value >> count) | (U16)(value << ((0U - count) & 0x0F)); +} + +#endif /* ZSTD_BITS_H */ diff --git a/lib/common/bitstream.h b/lib/common/bitstream.h index d955bd677b5..3b7ad483d9e 100644 --- a/lib/common/bitstream.h +++ b/lib/common/bitstream.h @@ -1,43 +1,19 @@ /* ****************************************************************** - bitstream - Part of FSE library - Copyright (C) 2013-present, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + * bitstream + * Part of FSE library + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * You can contact the author at : + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ #ifndef BITSTREAM_H_MODULE #define BITSTREAM_H_MODULE -#if defined (__cplusplus) -extern "C" { -#endif - /* * This API consists of small unitary functions, which must be inlined for best performance. * Since link-time-optimization is not available for all compilers, @@ -48,15 +24,20 @@ extern "C" { * Dependencies ******************************************/ #include "mem.h" /* unaligned access routines */ +#include "compiler.h" /* UNLIKELY() */ #include "debug.h" /* assert(), DEBUGLOG(), RAWLOG() */ #include "error_private.h" /* error codes and messages */ - +#include "bits.h" /* ZSTD_highbit32 */ /*========================================= * Target specific =========================================*/ -#if defined(__BMI__) && defined(__GNUC__) -# include /* support for bextr (experimental) */ +#ifndef ZSTD_NO_INTRINSICS +# if (defined(__BMI__) || defined(__BMI2__)) && defined(__GNUC__) +# include /* support for bextr (experimental)/bzhi */ +# elif defined(__ICCARM__) +# include +# endif #endif #define STREAM_ACCUMULATOR_MIN_32 25 @@ -67,12 +48,13 @@ extern "C" { /*-****************************************** * bitStream encoding API (write forward) ********************************************/ +typedef size_t BitContainerType; /* bitStream can mix input from multiple sources. * A critical property of these streams is that they encode and decode in **reverse** direction. * So the first bit sequence you add will be the last to be read, like a LIFO stack. */ typedef struct { - size_t bitContainer; + BitContainerType bitContainer; unsigned bitPos; char* startPtr; char* ptr; @@ -80,7 +62,7 @@ typedef struct { } BIT_CStream_t; MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity); -MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits); +MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, BitContainerType value, unsigned nbBits); MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC); MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); @@ -89,7 +71,7 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); * `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code. * * bits are first added to a local register. -* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems. +* Local register is BitContainerType, 64-bits on 64-bits systems, or 32-bits on 32-bits systems. * Writing data into memory is an explicit operation, performed by the flushBits function. * Hence keep track how many bits are potentially stored into local register to avoid register overflow. * After a flushBits, a maximum of 7 bits might still be stored into local register. 
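To make the encoding protocol documented above concrete, here is a minimal sketch using the internal API declared in this header; the buffer size and field values are arbitrary, and real callers also track how many bits are pending in order to respect the flush rules:

```c
/* LIFO bitstream write: the first field added is the last one read back. */
BYTE dst[16];   /* must be >= sizeof(bitC.bitContainer) */
BIT_CStream_t bitC;
size_t streamSize = 0;

if (!ERR_isError(BIT_initCStream(&bitC, dst, sizeof(dst)))) {
    BIT_addBits(&bitC, 5, 3);      /* accumulate into the local register */
    BIT_addBits(&bitC, 2, 3);
    BIT_flushBits(&bitC);          /* explicit write of the register to memory */
    streamSize = BIT_closeCStream(&bitC);   /* adds the endMark; 0 means overflow */
}
```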
@@ -106,28 +88,28 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); * bitStream decoding API (read backward) **********************************************/ typedef struct { - size_t bitContainer; + BitContainerType bitContainer; unsigned bitsConsumed; const char* ptr; const char* start; const char* limitPtr; } BIT_DStream_t; -typedef enum { BIT_DStream_unfinished = 0, - BIT_DStream_endOfBuffer = 1, - BIT_DStream_completed = 2, - BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */ - /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ +typedef enum { BIT_DStream_unfinished = 0, /* fully refilled */ + BIT_DStream_endOfBuffer = 1, /* still some bits left in bitstream */ + BIT_DStream_completed = 2, /* bitstream entirely consumed, bit-exact */ + BIT_DStream_overflow = 3 /* user requested more bits than present in bitstream */ + } BIT_DStream_status; /* result of BIT_reloadDStream() */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); -MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); +MEM_STATIC BitContainerType BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); /* Start by invoking BIT_initDStream(). * A chunk of the bitStream is then stored into a local register. -* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t). +* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (BitContainerType). * You can then retrieve bitFields stored into the local register, **in reverse order**. * Local register is explicitly reloaded from memory by the BIT_reloadDStream() method. * A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished. 
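The read side mirrors this in reverse. Continuing the hypothetical `dst`/`streamSize` from the encoding sketch above, a decoder following the protocol described here might look like:

```c
BIT_DStream_t bitD;
if (!ERR_isError(BIT_initDStream(&bitD, dst, streamSize))) {
    size_t const second = BIT_readBits(&bitD, 3);   /* 2 : last written, first read */
    size_t const first  = BIT_readBits(&bitD, 3);   /* 5 : first written, last read */
    BIT_DStream_status const status = BIT_reloadDStream(&bitD);
    /* BIT_DStream_overflow here would indicate a corrupted stream */
    (void)first; (void)second; (void)status;
}
```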
@@ -139,7 +121,7 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); /*-**************************************** * unsafe API ******************************************/ -MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits); +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, BitContainerType value, unsigned nbBits); /* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC); @@ -148,37 +130,6 @@ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC); MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); /* faster, but works only if nbBits >= 1 */ - - -/*-************************************************************** -* Internal functions -****************************************************************/ -MEM_STATIC unsigned BIT_highbit32 (U32 val) -{ - assert(val != 0); - { -# if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - _BitScanReverse ( &r, val ); - return (unsigned) r; -# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ - return 31 - __builtin_clz (val); -# else /* Software version */ - static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, - 11, 14, 16, 18, 22, 25, 3, 30, - 8, 12, 20, 28, 15, 17, 24, 7, - 19, 27, 23, 6, 26, 5, 4, 31 }; - U32 v = val; - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; -# endif - } -} - /*===== Local Constants =====*/ static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, @@ -208,16 +159,31 @@ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, return 0; } +FORCE_INLINE_TEMPLATE BitContainerType BIT_getLowerBits(BitContainerType bitContainer, U32 const nbBits) +{ +#if STATIC_BMI2 && !defined(ZSTD_NO_INTRINSICS) +# if (defined(__x86_64__) || defined(_M_X64)) && !defined(__ILP32__) + return _bzhi_u64(bitContainer, nbBits); +# else + DEBUG_STATIC_ASSERT(sizeof(bitContainer) == sizeof(U32)); + return _bzhi_u32(bitContainer, nbBits); +# endif +#else + assert(nbBits < BIT_MASK_SIZE); + return bitContainer & BIT_mask[nbBits]; +#endif +} + /*! BIT_addBits() : * can add up to 31 bits into `bitC`. * Note : does not check for register overflow ! 
*/ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, - size_t value, unsigned nbBits) + BitContainerType value, unsigned nbBits) { - MEM_STATIC_ASSERT(BIT_MASK_SIZE == 32); + DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32); assert(nbBits < BIT_MASK_SIZE); assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); - bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos; + bitC->bitContainer |= BIT_getLowerBits(value, nbBits) << bitC->bitPos; bitC->bitPos += nbBits; } @@ -225,7 +191,7 @@ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, * works only if `value` is _clean_, * meaning all high bits above nbBits are 0 */ MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, - size_t value, unsigned nbBits) + BitContainerType value, unsigned nbBits) { assert((value>>nbBits) == 0); assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); @@ -240,9 +206,9 @@ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC) { size_t const nbBytes = bitC->bitPos >> 3; assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8); + assert(bitC->ptr <= bitC->endPtr); MEM_writeLEST(bitC->ptr, bitC->bitContainer); bitC->ptr += nbBytes; - assert(bitC->ptr <= bitC->endPtr); bitC->bitPos &= 7; bitC->bitContainer >>= nbBytes*8; } @@ -256,6 +222,7 @@ MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC) { size_t const nbBytes = bitC->bitPos >> 3; assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8); + assert(bitC->ptr <= bitC->endPtr); MEM_writeLEST(bitC->ptr, bitC->bitContainer); bitC->ptr += nbBytes; if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr; @@ -271,7 +238,7 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC) BIT_addBitsFast(bitC, 1, 1); /* endMark */ BIT_flushBits(bitC); if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ - return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); + return (size_t)(bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); } @@ -286,7 +253,7 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC) */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize) { - if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } + if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } bitD->start = (const char*)srcBuffer; bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer); @@ -295,35 +262,35 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer); bitD->bitContainer = MEM_readLEST(bitD->ptr); { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; - bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */ + bitD->bitsConsumed = lastByte ? 
8 - ZSTD_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */ if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ } } else { bitD->ptr = bitD->start; bitD->bitContainer = *(const BYTE*)(bitD->start); switch(srcSize) { - case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16); - /* fall-through */ + case 7: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16); + ZSTD_FALLTHROUGH; - case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24); - /* fall-through */ + case 6: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24); + ZSTD_FALLTHROUGH; - case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32); - /* fall-through */ + case 5: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32); + ZSTD_FALLTHROUGH; - case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; - /* fall-through */ + case 4: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[3]) << 24; + ZSTD_FALLTHROUGH; - case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; - /* fall-through */ + case 3: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[2]) << 16; + ZSTD_FALLTHROUGH; - case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; - /* fall-through */ + case 2: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[1]) << 8; + ZSTD_FALLTHROUGH; default: break; } { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; - bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; + bitD->bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0; if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */ } bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8; @@ -332,23 +299,26 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si return srcSize; } -MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) +FORCE_INLINE_TEMPLATE BitContainerType BIT_getUpperBits(BitContainerType bitContainer, U32 const start) { return bitContainer >> start; } -MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) +FORCE_INLINE_TEMPLATE BitContainerType BIT_getMiddleBits(BitContainerType bitContainer, U32 const start, U32 const nbBits) { U32 const regMask = sizeof(bitContainer)*8 - 1; /* if start > regMask, bitstream is corrupted, and result is undefined */ assert(nbBits < BIT_MASK_SIZE); + /* x86 transform & ((1 << nbBits) - 1) to bzhi instruction, it is better + * than accessing memory. When bmi2 instruction is not present, we consider + * such cpus old (pre-Haswell, 2013) and their performance is not of that + * importance. + */ +#if defined(__x86_64__) || defined(_M_X64) + return (bitContainer >> (start & regMask)) & ((((U64)1) << nbBits) - 1); +#else return (bitContainer >> (start & regMask)) & BIT_mask[nbBits]; -} - -MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) -{ - assert(nbBits < BIT_MASK_SIZE); - return bitContainer & BIT_mask[nbBits]; +#endif } /*! BIT_lookBits() : @@ -357,7 +327,7 @@ MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) * On 32-bits, maxNbBits==24. * On 64-bits, maxNbBits==56. 
* @return : value extracted */ -MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) +FORCE_INLINE_TEMPLATE BitContainerType BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) { /* arbitrate between double-shift and shift+mask */ #if 1 @@ -373,14 +343,14 @@ MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) /*! BIT_lookBitsFast() : * unsafe version; only works if nbBits >= 1 */ -MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits) +MEM_STATIC BitContainerType BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits) { U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; assert(nbBits >= 1); return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask); } -MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) +FORCE_INLINE_TEMPLATE void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; } @@ -389,44 +359,77 @@ MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) * Read (consume) next n bits from local register and update. * Pay attention to not read more than nbBits contained into local register. * @return : extracted value. */ -MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits) +FORCE_INLINE_TEMPLATE BitContainerType BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits) { - size_t const value = BIT_lookBits(bitD, nbBits); + BitContainerType const value = BIT_lookBits(bitD, nbBits); BIT_skipBits(bitD, nbBits); return value; } /*! BIT_readBitsFast() : - * unsafe version; only works only if nbBits >= 1 */ -MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits) + * unsafe version; only works if nbBits >= 1 */ +MEM_STATIC BitContainerType BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits) { - size_t const value = BIT_lookBitsFast(bitD, nbBits); + BitContainerType const value = BIT_lookBitsFast(bitD, nbBits); assert(nbBits >= 1); BIT_skipBits(bitD, nbBits); return value; } +/*! BIT_reloadDStream_internal() : + * Simple variant of BIT_reloadDStream(), with two conditions: + * 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8 + * 2. look window is valid after shifted down : bitD->ptr >= bitD->start + */ +MEM_STATIC BIT_DStream_status BIT_reloadDStream_internal(BIT_DStream_t* bitD) +{ + assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8); + bitD->ptr -= bitD->bitsConsumed >> 3; + assert(bitD->ptr >= bitD->start); + bitD->bitsConsumed &= 7; + bitD->bitContainer = MEM_readLEST(bitD->ptr); + return BIT_DStream_unfinished; +} + +/*! BIT_reloadDStreamFast() : + * Similar to BIT_reloadDStream(), but with two differences: + * 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold! + * 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this + * point you must use BIT_reloadDStream() to reload. + */ +MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD) +{ + if (UNLIKELY(bitD->ptr < bitD->limitPtr)) + return BIT_DStream_overflow; + return BIT_reloadDStream_internal(bitD); +} + /*! BIT_reloadDStream() : * Refill `bitD` from buffer previously set in BIT_initDStream() . - * This function is safe, it guarantees it will not read beyond src buffer. + * This function is safe, it guarantees it will never read beyond src buffer. * @return : status of `BIT_DStream_t` internal register.
* when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ -MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) +FORCE_INLINE_TEMPLATE BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) { - if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */ + /* note : once in overflow mode, a bitstream remains in this mode until it's reset */ + if (UNLIKELY(bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))) { + static const BitContainerType zeroFilled = 0; + bitD->ptr = (const char*)&zeroFilled; /* aliasing is allowed for char */ + /* overflow detected, erroneous scenario or end of stream: no update */ return BIT_DStream_overflow; + } + + assert(bitD->ptr >= bitD->start); if (bitD->ptr >= bitD->limitPtr) { - bitD->ptr -= bitD->bitsConsumed >> 3; - bitD->bitsConsumed &= 7; - bitD->bitContainer = MEM_readLEST(bitD->ptr); - return BIT_DStream_unfinished; + return BIT_reloadDStream_internal(bitD); } if (bitD->ptr == bitD->start) { + /* reached end of bitStream => no update */ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; return BIT_DStream_completed; } - /* start < ptr < limitPtr */ + /* start < ptr < limitPtr => cautious update */ { U32 nbBytes = bitD->bitsConsumed >> 3; BIT_DStream_status result = BIT_DStream_unfinished; if (bitD->ptr - nbBytes < bitD->start) { @@ -448,8 +451,4 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); } -#if defined (__cplusplus) -} -#endif - #endif /* BITSTREAM_H_MODULE */ diff --git a/lib/common/compiler.h b/lib/common/compiler.h index 0836e3ed27a..5e70570ec7a 100644 --- a/lib/common/compiler.h +++ b/lib/common/compiler.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,19 +11,23 @@ #ifndef ZSTD_COMPILER_H #define ZSTD_COMPILER_H +#include <stddef.h> + +#include "portability_macros.h" + /*-******************************************************* * Compiler specifics *********************************************************/ /* force inlining */ #if !defined(ZSTD_NO_INLINE) -#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # define INLINE_KEYWORD inline #else # define INLINE_KEYWORD #endif -#if defined(__GNUC__) +#if defined(__GNUC__) || defined(__IAR_SYSTEMS_ICC__) # define FORCE_INLINE_ATTR __attribute__((always_inline)) #elif defined(_MSC_VER) # define FORCE_INLINE_ATTR __forceinline @@ -38,12 +42,30 @@ #endif +/** + On MSVC qsort requires that functions passed into it use the __cdecl calling convention (CC). + This explicitly marks such functions as __cdecl so that the code will still compile + if a CC other than __cdecl has been made the default. +*/ +#if defined(_MSC_VER) +# define WIN_CDECL __cdecl +#else +# define WIN_CDECL +#endif + +/* UNUSED_ATTR tells the compiler it is okay if the function is unused.
*/ +#if defined(__GNUC__) || defined(__IAR_SYSTEMS_ICC__) +# define UNUSED_ATTR __attribute__((unused)) +#else +# define UNUSED_ATTR +#endif + /** * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant * parameters. They must be inlined for the compiler to eliminate the constant * branches. */ -#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR +#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR UNUSED_ATTR /** * HINT_INLINE is used to help the compiler generate better code. It is *not* * used for "templates", so it can be tweaked based on the compilers @@ -58,74 +80,122 @@ #if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5 # define HINT_INLINE static INLINE_KEYWORD #else -# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR +# define HINT_INLINE FORCE_INLINE_TEMPLATE +#endif + +/* "soft" inline : + * The compiler is free to select if it's a good idea to inline or not. + * The main objective is to silence compiler warnings + * when a defined function is included but not used. + * + * Note : this macro is prefixed `MEM_` because it used to be provided by `mem.h` unit. + * Updating the prefix is probably preferable, but requires a fairly large codemod, + * since this name is used everywhere. + */ +#ifndef MEM_STATIC /* already defined in Linux Kernel mem.h */ +#if defined(__GNUC__) +# define MEM_STATIC static __inline UNUSED_ATTR +#elif defined(__IAR_SYSTEMS_ICC__) +# define MEM_STATIC static inline UNUSED_ATTR +#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define MEM_STATIC static inline +#elif defined(_MSC_VER) +# define MEM_STATIC static __inline +#else +# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ +#endif +#endif /* force no inlining */ #ifdef _MSC_VER # define FORCE_NOINLINE static __declspec(noinline) #else -# ifdef __GNUC__ +# if defined(__GNUC__) || defined(__IAR_SYSTEMS_ICC__) # define FORCE_NOINLINE static __attribute__((__noinline__)) # else # define FORCE_NOINLINE static # endif #endif + /* target attribute */ -#ifndef __has_attribute - #define __has_attribute(x) 0 /* Compatibility with non-clang compilers. */ -#endif -#if defined(__GNUC__) +#if defined(__GNUC__) || defined(__IAR_SYSTEMS_ICC__) # define TARGET_ATTRIBUTE(target) __attribute__((__target__(target))) #else # define TARGET_ATTRIBUTE(target) #endif -/* Enable runtime BMI2 dispatch based on the CPU. - * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default. +/* Target attribute for BMI2 dynamic dispatch. + * Enable lzcnt, bmi, and bmi2. + * We test for bmi1 & bmi2. lzcnt is included in bmi1.
*/ -#ifndef DYNAMIC_BMI2 - #if ((defined(__clang__) && __has_attribute(__target__)) \ - || (defined(__GNUC__) \ - && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \ - && (defined(__x86_64__) || defined(_M_X86)) \ - && !defined(__BMI2__) - # define DYNAMIC_BMI2 1 - #else - # define DYNAMIC_BMI2 0 - #endif -#endif +#define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2") /* prefetch * can be disabled, by declaring NO_PREFETCH build macro */ #if defined(NO_PREFETCH) -# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */ -# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */ +# define PREFETCH_L1(ptr) do { (void)(ptr); } while (0) /* disabled */ +# define PREFETCH_L2(ptr) do { (void)(ptr); } while (0) /* disabled */ #else -# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */ +# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) && !defined(_M_ARM64EC) /* _mm_prefetch() is not defined outside of x86/x64 */ # include /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ # define PREFETCH_L1(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) # define PREFETCH_L2(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T1) # elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) ) # define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) # define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */) +# elif defined(__aarch64__) +# define PREFETCH_L1(ptr) do { __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr))); } while (0) +# define PREFETCH_L2(ptr) do { __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr))); } while (0) # else -# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */ -# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */ +# define PREFETCH_L1(ptr) do { (void)(ptr); } while (0) /* disabled */ +# define PREFETCH_L2(ptr) do { (void)(ptr); } while (0) /* disabled */ # endif #endif /* NO_PREFETCH */ #define CACHELINE_SIZE 64 -#define PREFETCH_AREA(p, s) { \ - const char* const _ptr = (const char*)(p); \ - size_t const _size = (size_t)(s); \ - size_t _pos; \ - for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \ - PREFETCH_L2(_ptr + _pos); \ - } \ -} +#define PREFETCH_AREA(p, s) \ + do { \ + const char* const _ptr = (const char*)(p); \ + size_t const _size = (size_t)(s); \ + size_t _pos; \ + for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \ + PREFETCH_L2(_ptr + _pos); \ + } \ + } while (0) + +/* vectorization + * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax, + * and some compilers, like Intel ICC and MCST LCC, do not support it at all. */ +#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) && !defined(__LCC__) +# if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5) +# define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize"))) +# else +# define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")") +# endif +#else +# define DONT_VECTORIZE +#endif + +/* Tell the compiler that a branch is likely or unlikely. + * Only use these macros if it causes the compiler to generate better code. + * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc + * and clang, please do. 
+ */ +#if defined(__GNUC__) +#define LIKELY(x) (__builtin_expect((x), 1)) +#define UNLIKELY(x) (__builtin_expect((x), 0)) +#else +#define LIKELY(x) (x) +#define UNLIKELY(x) (x) +#endif + +#if __has_builtin(__builtin_unreachable) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) +# define ZSTD_UNREACHABLE do { assert(0), __builtin_unreachable(); } while (0) +#else +# define ZSTD_UNREACHABLE do { assert(0); } while (0) +#endif /* disable warnings */ #ifdef _MSC_VER /* Visual Studio */ @@ -137,4 +207,276 @@ # pragma warning(disable : 4324) /* disable: C4324: padded structure */ #endif +/* compile time determination of SIMD support */ +#if !defined(ZSTD_NO_INTRINSICS) +# if defined(__AVX2__) +# define ZSTD_ARCH_X86_AVX2 +# endif +# if defined(__SSE2__) || defined(_M_X64) || (defined (_M_IX86) && defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) +# define ZSTD_ARCH_X86_SSE2 +# endif +# if defined(__ARM_NEON) || defined(_M_ARM64) +# define ZSTD_ARCH_ARM_NEON +# endif +# if defined(__ARM_FEATURE_SVE) +# define ZSTD_ARCH_ARM_SVE +# endif +# if defined(__ARM_FEATURE_SVE2) +# define ZSTD_ARCH_ARM_SVE2 +# endif +# if defined(__riscv) && defined(__riscv_vector) +# if ((defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 14) || \ + (defined(__clang__) && __clang_major__ >= 19)) + #define ZSTD_ARCH_RISCV_RVV +# endif +#endif +# +# if defined(ZSTD_ARCH_X86_AVX2) +# include +# endif +# if defined(ZSTD_ARCH_X86_SSE2) +# include +# elif defined(ZSTD_ARCH_ARM_NEON) +# include +# endif +# if defined(ZSTD_ARCH_ARM_SVE) || defined(ZSTD_ARCH_ARM_SVE2) +# include +# endif +# if defined(ZSTD_ARCH_RISCV_RVV) +# include +# endif +#endif + +/* C-language Attributes are added in C23. */ +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute) +# define ZSTD_HAS_C_ATTRIBUTE(x) __has_c_attribute(x) +#else +# define ZSTD_HAS_C_ATTRIBUTE(x) 0 +#endif + +/* Only use C++ attributes in C++. Some compilers report support for C++ + * attributes when compiling with C. + */ +#if defined(__cplusplus) && defined(__has_cpp_attribute) +# define ZSTD_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +# define ZSTD_HAS_CPP_ATTRIBUTE(x) 0 +#endif + +/* Define ZSTD_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute. + * - C23: https://en.cppreference.com/w/c/language/attributes/fallthrough + * - CPP17: https://en.cppreference.com/w/cpp/language/attributes/fallthrough + * - Else: __attribute__((__fallthrough__)) + */ +#ifndef ZSTD_FALLTHROUGH +# if ZSTD_HAS_C_ATTRIBUTE(fallthrough) +# define ZSTD_FALLTHROUGH [[fallthrough]] +# elif ZSTD_HAS_CPP_ATTRIBUTE(fallthrough) +# define ZSTD_FALLTHROUGH [[fallthrough]] +# elif __has_attribute(__fallthrough__) +/* Leading semicolon is to satisfy gcc-11 with -pedantic. Without the semicolon + * gcc complains about: a label can only be part of a statement and a declaration is not a statement. 
+ */ +# define ZSTD_FALLTHROUGH ; __attribute__((__fallthrough__)) +# else +# define ZSTD_FALLTHROUGH +# endif +#endif + +/*-************************************************************** +* Alignment +*****************************************************************/ + +/* @return 1 if @u is a 2^n value, 0 otherwise + * useful to check a value is valid for alignment restrictions */ +MEM_STATIC int ZSTD_isPower2(size_t u) { + return (u & (u-1)) == 0; +} + +/* this test was initially positioned in mem.h, + * but this file is removed (or replaced) for linux kernel + * so it's now hosted in compiler.h, + * which remains valid for both user & kernel spaces. + */ + +#ifndef ZSTD_ALIGNOF +# if defined(__GNUC__) || defined(_MSC_VER) +/* covers gcc, clang & MSVC */ +/* note : this section must come first, before C11, + * due to a limitation in the kernel source generator */ +# define ZSTD_ALIGNOF(T) __alignof(T) + +# elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) +/* C11 support */ +# include <stdalign.h> +# define ZSTD_ALIGNOF(T) alignof(T) + +# else +/* No known support for alignof() - imperfect backup */ +# define ZSTD_ALIGNOF(T) (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T)) + +# endif +#endif /* ZSTD_ALIGNOF */ + +#ifndef ZSTD_ALIGNED +/* C90-compatible alignment macro (GCC/Clang). Adjust for other compilers if needed. */ +# if defined(__GNUC__) || defined(__clang__) +# define ZSTD_ALIGNED(a) __attribute__((aligned(a))) +# elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */ +# define ZSTD_ALIGNED(a) _Alignas(a) +#elif defined(_MSC_VER) +# define ZSTD_ALIGNED(n) __declspec(align(n)) +# else + /* this compiler will require its own alignment instruction */ +# define ZSTD_ALIGNED(...) +# endif +#endif /* ZSTD_ALIGNED */ + + +/*-************************************************************** +* Sanitizer +*****************************************************************/ + +/** + * Zstd relies on pointer overflow in its decompressor. + * We add this attribute to functions that rely on pointer overflow. + */ +#ifndef ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +# if __has_attribute(no_sanitize) +# if !defined(__clang__) && defined(__GNUC__) && __GNUC__ < 8 + /* gcc < 8 only has signed-integer-overflow, which triggers on pointer overflow */ +# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("signed-integer-overflow"))) +# else + /* older versions of clang [3.7, 5.0) will warn that pointer-overflow is ignored. */ +# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("pointer-overflow"))) +# endif +# else +# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +# endif +#endif + +/** + * Helper function to perform a wrapped pointer difference without triggering + * UBSAN. + * + * @returns lhs - rhs with wrapping + */ +MEM_STATIC +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +ptrdiff_t ZSTD_wrappedPtrDiff(unsigned char const* lhs, unsigned char const* rhs) +{ + return lhs - rhs; +} + +/** + * Helper function to perform a wrapped pointer add without triggering UBSAN. + * + * @return ptr + add with wrapping + */ +MEM_STATIC +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +const void* ZSTD_wrappedPtrAdd(const void* ptr, ptrdiff_t add) +{ + return (const char*)ptr + add; +} + +/** + * Helper function to perform a wrapped pointer subtraction without triggering + * UBSAN.
+ +/* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an + * abundance of caution, disable our custom poisoning on mingw. */ +#ifdef __MINGW32__ +#ifndef ZSTD_ASAN_DONT_POISON_WORKSPACE +#define ZSTD_ASAN_DONT_POISON_WORKSPACE 1 +#endif +#ifndef ZSTD_MSAN_DONT_POISON_WORKSPACE +#define ZSTD_MSAN_DONT_POISON_WORKSPACE 1 +#endif +#endif + +#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE) +/* Not all platforms that support msan provide sanitizers/msan_interface.h. + * We therefore declare the functions we need ourselves, rather than trying to + * include the header file... */ +#include <stddef.h> /* size_t */ +#define ZSTD_DEPS_NEED_STDINT +#include "zstd_deps.h" /* intptr_t */ + +/* Make memory region fully initialized (without changing its contents). */ +void __msan_unpoison(const volatile void *a, size_t size); + +/* Make memory region fully uninitialized (without changing its contents). + This is a legacy interface that does not update origin information. Use + __msan_allocated_memory() instead. */ +void __msan_poison(const volatile void *a, size_t size); + +/* Returns the offset of the first (at least partially) poisoned byte in the + memory range, or -1 if the whole range is good. */ +intptr_t __msan_test_shadow(const volatile void *x, size_t size); + +/* Print shadow and origin for the memory range to stderr in a human-readable + format. */ +void __msan_print_shadow(const volatile void *x, size_t size); +#endif + +#if ZSTD_ADDRESS_SANITIZER && !defined(ZSTD_ASAN_DONT_POISON_WORKSPACE) +/* Not all platforms that support asan provide sanitizers/asan_interface.h. + * We therefore declare the functions we need ourselves, rather than trying to + * include the header file... */ +#include <stddef.h> /* size_t */ + +/** + * Marks a memory region ([addr, addr+size)) as unaddressable. + * + * This memory must be previously allocated by your program. Instrumented + * code is forbidden from accessing addresses in this region until it is + * unpoisoned. This function is not guaranteed to poison the entire region - + * it could poison only a subregion of [addr, addr+size) due to ASan + * alignment restrictions. + * + * \note This function is not thread-safe because no two threads can poison or + * unpoison memory in the same memory region simultaneously. + * + * \param addr Start of memory region. + * \param size Size of memory region. */ +void __asan_poison_memory_region(void const volatile *addr, size_t size); + +/** + * Marks a memory region ([addr, addr+size)) as addressable. + * + * This memory must be previously allocated by your program. Accessing + * addresses in this region is allowed until this region is poisoned again. + * This function could unpoison a super-region of [addr, addr+size) due + * to ASan alignment restrictions. + * + * \note This function is not thread-safe because no two threads can + * poison or unpoison memory in the same memory region simultaneously. + * + * \param addr Start of memory region. + * \param size Size of memory region. */ +void __asan_unpoison_memory_region(void const volatile *addr, size_t size); +#endif + #endif /* ZSTD_COMPILER_H */
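Sketch of the intended call pattern for these declarations (hypothetical workspace helpers, guarded the same way as the declarations):

    #if ZSTD_ADDRESS_SANITIZER && !defined(ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Make a spare workspace region off-limits while unused, and
     * addressable again right before reuse: */
    static void ws_retire(void* addr, size_t size)
    {
        __asan_poison_memory_region(addr, size);
    }
    static void ws_reuse(void* addr, size_t size)
    {
        __asan_unpoison_memory_region(addr, size);
    }
    #endif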
diff --git a/lib/common/cpu.h b/lib/common/cpu.h index 5f0923fc928..3f15d560f0c 100644 --- a/lib/common/cpu.h +++ b/lib/common/cpu.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -16,8 +16,6 @@ * https://github.com/facebook/folly/blob/master/folly/CpuId.h */ -#include <string.h> - #include "mem.h" #ifdef _MSC_VER @@ -37,6 +35,7 @@ MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) { U32 f7b = 0; U32 f7c = 0; #if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) +#if !defined(_M_X64) || !defined(__clang__) || __clang_major__ >= 16 int reg[4]; __cpuid((int*)reg, 0); { @@ -52,6 +51,41 @@ MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) { f7c = (U32)reg[2]; } } +#else + /* Clang compiler has a bug (fixed in https://reviews.llvm.org/D101338) in + * which the `__cpuid` intrinsic does not save and restore `rbx` as it needs + * to due to being a reserved register. So in that case, do the `cpuid` + * ourselves. Clang supports inline assembly anyway. + */ + U32 n; + __asm__( + "pushq %%rbx\n\t" + "cpuid\n\t" + "popq %%rbx\n\t" + : "=a"(n) + : "a"(0) + : "rcx", "rdx"); + if (n >= 1) { + U32 f1a; + __asm__( + "pushq %%rbx\n\t" + "cpuid\n\t" + "popq %%rbx\n\t" + : "=a"(f1a), "=c"(f1c), "=d"(f1d) + : "a"(1) + :); + } + if (n >= 7) { + __asm__( + "pushq %%rbx\n\t" + "cpuid\n\t" + "movq %%rbx, %%rax\n\t" + "popq %%rbx" + : "=a"(f7b), "=c"(f7c) + : "a"(7), "c"(0) + : "rdx"); + } +#endif #elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__) /* The following block is like the normal cpuid branch below, but gcc * reserves ebx for use of its pic register so we must specially diff --git a/lib/common/debug.c b/lib/common/debug.c index 3ebdd1cb15a..9d0b7d229c1 100644 --- a/lib/common/debug.c +++ b/lib/common/debug.c @@ -1,35 +1,15 @@ /* ****************************************************************** - debug - Part of FSE library - Copyright (C) 2013-present, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + * debug + * Part of FSE library + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * You can contact the author at : + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ @@ -41,4 +21,10 @@ #include "debug.h" +#if !defined(ZSTD_LINUX_KERNEL) || (DEBUGLEVEL>=2) +/* We only use this when DEBUGLEVEL>=2, but we get -Werror=pedantic errors if a + * translation unit is empty. So remove this from Linux kernel builds, but + * otherwise just leave it in. + */ int g_debuglevel = DEBUGLEVEL; +#endif diff --git a/lib/common/debug.h b/lib/common/debug.h index b4fc89d4974..4b60ddf7f51 100644 --- a/lib/common/debug.h +++ b/lib/common/debug.h @@ -1,35 +1,15 @@ /* ****************************************************************** - debug - Part of FSE library - Copyright (C) 2013-present, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + * debug + * Part of FSE library + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * You can contact the author at : + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ @@ -52,10 +32,6 @@ #ifndef DEBUG_H_12987983217 #define DEBUG_H_12987983217 -#if defined (__cplusplus) -extern "C" { -#endif - /* static assert is triggered at compile time, leaving no runtime artefact. * static assert only works with compile-time constants. @@ -71,15 +47,6 @@ extern "C" { #endif -/* DEBUGFILE can be defined externally, - * typically through compiler command line. - * note : currently useless. - * Value must be stderr or stdout */ -#ifndef DEBUGFILE -# define DEBUGFILE stderr -#endif - - /* recommended values for DEBUGLEVEL : * 0 : release mode, no debug, all run-time checks disabled * 1 : enables assert() only, no display @@ -96,7 +63,8 @@ */ #if (DEBUGLEVEL>=1) -# include <assert.h> +# define ZSTD_DEPS_NEED_ASSERT +# include "zstd_deps.h" #else # ifndef assert /* assert may be already defined, due to prior #include <assert.h> */ # define assert(condition) ((void)0) /* disable assert (default) */ @@ -104,7 +72,8 @@ #endif #if (DEBUGLEVEL>=2) -# include <stdio.h> +# define ZSTD_DEPS_NEED_IO +# include "zstd_deps.h" extern int g_debuglevel; /* the variable is only declared, it actually lives in debug.c, and is shared by the whole process. @@ -112,23 +81,27 @@ extern int g_debuglevel; /* the variable is only declared, It's useful when enabling very verbose levels on selective conditions (such as position in src) */ -# define RAWLOG(l, ...) { \ - if (l<=g_debuglevel) { \ - fprintf(stderr, __VA_ARGS__); \ - } } -# define DEBUGLOG(l, ...) { \ - if (l<=g_debuglevel) { \ - fprintf(stderr, __FILE__ ": " __VA_ARGS__); \ - fprintf(stderr, " \n"); \ - } } +# define RAWLOG(l, ...) \ + do { \ + if (l<=g_debuglevel) { \ + ZSTD_DEBUG_PRINT(__VA_ARGS__); \ + } \ + } while (0) + +#define STRINGIFY(x) #x +#define TOSTRING(x) STRINGIFY(x) +#define LINE_AS_STRING TOSTRING(__LINE__) + +# define DEBUGLOG(l, ...) \ + do { \ + if (l<=g_debuglevel) { \ + ZSTD_DEBUG_PRINT(__FILE__ ":" LINE_AS_STRING ": " __VA_ARGS__); \ + ZSTD_DEBUG_PRINT(" \n"); \ + } \ + } while (0) #else -# define RAWLOG(l, ...) {} /* disabled */ -# define DEBUGLOG(l, ...) {} /* disabled */ -#endif - - -#if defined (__cplusplus) -} +# define RAWLOG(l, ...) do { } while (0) /* disabled */ +# define DEBUGLOG(l, ...) do { } while (0) /* disabled */ #endif #endif /* DEBUG_H_12987983217 */
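Typical call sites for these logging macros look like this (illustrative function; any translation unit compiled with DEBUGLEVEL >= 2 can use them):

    static size_t doCompress(const void* src, size_t srcSize)
    {
        DEBUGLOG(4, "doCompress: srcSize=%u", (unsigned)srcSize); /* prefixed with file:line */
        assert(src != NULL || srcSize == 0);   /* active when DEBUGLEVEL>=1 */
        RAWLOG(6, ".");                        /* raw output, no prefix */
        return srcSize;
    }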
diff --git a/lib/common/entropy_common.c b/lib/common/entropy_common.c index b12944e1de9..e2173afb0a8 100644 --- a/lib/common/entropy_common.c +++ b/lib/common/entropy_common.c @@ -1,36 +1,16 @@ -/* - Common functions of New Generation Entropy library - Copyright (C) 2016, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy - - Public forum : https://groups.google.com/forum/#!forum/lz4c -*************************************************************************** */ +/* ****************************************************************** + * Common functions of New Generation Entropy library + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * You can contact the author at : + * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + * - Public forum : https://groups.google.com/forum/#!forum/lz4c + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */ /* ************************************* * Dependencies ***************************************/ #include "error_private.h" /* ERR_*, ERROR */ #define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */ #include "fse.h" -#define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */ #include "huf.h" +#include "bits.h" /* ZSTD_highbit32, ZSTD_countTrailingZeros32 */ /*=== Version ===*/ @@ -58,8 +38,9 @@ const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); } /*-************************************************************** * FSE NCount encoding-decoding ****************************************************************/ -size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, - const void* headerBuffer, size_t hbSize) +FORCE_INLINE_TEMPLATE +size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, + const void* headerBuffer, size_t hbSize) { const BYTE* const istart = (const BYTE*) headerBuffer; const BYTE* const iend = istart + hbSize; @@ -70,23 +51,23 @@ U32 bitStream; int bitCount; unsigned charnum = 0; + unsigned const maxSV1 = *maxSVPtr + 1; int previous0 = 0; - if (hbSize < 4) { - /* This function only works when hbSize >= 4 */ - char buffer[4]; - memset(buffer, 0, sizeof(buffer)); - memcpy(buffer, headerBuffer, hbSize); + if (hbSize < 8) { + /* This function only works when hbSize >= 8 */ + char buffer[8] = {0}; + ZSTD_memcpy(buffer, headerBuffer, hbSize); { size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr, buffer, sizeof(buffer)); if (FSE_isError(countSize)) return countSize; if (countSize > hbSize) return ERROR(corruption_detected); return countSize; } } - assert(hbSize >= 4); + assert(hbSize >= 8); /* init */ - memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0])); /* all symbols not present in NCount have a frequency of 0 */ + ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0])); /* all symbols not present in NCount have a frequency of 0 */ bitStream = MEM_readLE32(ip); nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */ if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge); @@ -97,36 +78,58 @@ threshold = 1<<nbBits; remaining = (1<<nbBits)+1; nbBits++; - while ((remaining>1) & (charnum<=*maxSVPtr)) { + for (;;) { if (previous0) { - unsigned n0 = charnum; - while ((bitStream & 0xFFFF) == 0xFFFF) { - n0 += 24; - if (ip < iend-5) { - ip += 2; - bitStream = MEM_readLE32(ip) >> bitCount; + /* Count the number of repeats. Each time the + * 2-bit repeat code is 0b11 there is another + * repeat. + * Avoid UB by setting the high bit to 1. + */ + int repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1; + while (repeats >= 12) { + charnum += 3 * 12; + if (LIKELY(ip <= iend-7)) { + ip += 3; } else { - bitStream >>= 16; - bitCount += 16; - } } - while ((bitStream & 3) == 3) { - n0 += 3; - bitStream >>= 2; - bitCount += 2; + bitCount -= (int)(8 * (iend - 7 - ip)); + bitCount &= 31; + ip = iend - 4; + } + bitStream = MEM_readLE32(ip) >> bitCount; + repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1; } - n0 += bitStream & 3; + charnum += 3 * repeats; + bitStream >>= 2 * repeats; + bitCount += 2 * repeats; +
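A worked example of the repeat-count trick (values illustrative): each 0b11 pair in the low bits of bitStream is one repeat code, so counting trailing zeros of ~bitStream and halving yields the number of repeats in a single step; OR-ing with 0x80000000 keeps the argument non-zero, since counting trailing zeros of 0 is undefined for the underlying builtin.

    U32 const bits = 0x2FF;  /* low bits ...10 1111 1111 : four 0b11 codes, then 0b10 */
    int const repeats = ZSTD_countTrailingZeros32(~bits | 0x80000000) >> 1;
    /* ~bits ends in ...01 0000 0000 -> 8 trailing zeros -> repeats == 4 */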
+ /* Add the final repeat which isn't 0b11. */ + assert((bitStream & 3) < 3); + charnum += bitStream & 3; bitCount += 2; - if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall); - while (charnum < n0) normalizedCounter[charnum++] = 0; - if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { + + /* This is an error, but break and return an error + * at the end, because returning out of a loop makes + * it harder for the compiler to optimize. + */ + if (charnum >= maxSV1) break; + + /* We don't need to set the normalized count to 0 + * because we already memset the whole buffer to 0. + */ + + if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { assert((bitCount >> 3) <= 3); /* For first condition to work */ ip += bitCount>>3; bitCount &= 7; - bitStream = MEM_readLE32(ip) >> bitCount; } else { - bitStream >>= 2; - } } - { int const max = (2*threshold-1) - remaining; + bitCount -= (int)(8 * (iend - 7 - ip)); + bitCount &= 31; + ip = iend - 4; + } + bitStream = MEM_readLE32(ip) >> bitCount; + } + { + int const max = (2*threshold-1) - remaining; int count; if ((bitStream & (threshold-1)) < (U32)max) { @@ -139,24 +142,43 @@ } count--; /* extra accuracy */ - remaining -= count < 0 ? -count : count; /* -1 means +1 */ + /* When it matters (small blocks), this is a + * predictable branch, because we don't use -1. + */ + if (count >= 0) { + remaining -= count; + } else { + assert(count == -1); + remaining += count; + } normalizedCounter[charnum++] = (short)count; previous0 = !count; - while (remaining < threshold) { - nbBits--; - threshold >>= 1; + + assert(threshold > 1); + if (remaining < threshold) { + /* This branch can be folded into the + * threshold update condition because we + * know that threshold > 1. + */ + if (remaining <= 1) break; + nbBits = ZSTD_highbit32(remaining) + 1; + threshold = 1 << (nbBits - 1); } + if (charnum >= maxSV1) break; - if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { + if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; } else { bitCount -= (int)(8 * (iend - 4 - ip)); + bitCount &= 31; ip = iend - 4; } - bitStream = MEM_readLE32(ip) >> (bitCount & 31); - } } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */ + bitStream = MEM_readLE32(ip) >> bitCount; + } } if (remaining != 1) return ERROR(corruption_detected); + /* Only possible when there are too many zeros. */ + if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall); if (bitCount > 32) return ERROR(corruption_detected); *maxSVPtr = charnum-1; @@ -164,6 +186,43 @@ return ip-istart; }
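The wrappers that follow implement zstd's DYNAMIC_BMI2 dispatch pattern; schematically (hypothetical function f, same macros):

    FORCE_INLINE_TEMPLATE size_t f_body(const void* src, size_t n, int bmi2);

    /* one out-of-line instantiation per target, each inlining f_body() */
    static size_t f_default(const void* src, size_t n) { return f_body(src, n, 0); }
    #if DYNAMIC_BMI2
    BMI2_TARGET_ATTRIBUTE static size_t f_bmi2(const void* src, size_t n) { return f_body(src, n, 1); }
    #endif

    size_t f(const void* src, size_t n, int bmi2)
    {
    #if DYNAMIC_BMI2
        if (bmi2) return f_bmi2(src, n);
    #endif
        (void)bmi2;
        return f_default(src, n);
    }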
+ +/* Avoids the FORCE_INLINE of the _body() function. */ +static size_t FSE_readNCount_body_default( + short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, + const void* headerBuffer, size_t hbSize) +{ + return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize); +} + +#if DYNAMIC_BMI2 +BMI2_TARGET_ATTRIBUTE static size_t FSE_readNCount_body_bmi2( + short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, + const void* headerBuffer, size_t hbSize) +{ + return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize); +} +#endif + +size_t FSE_readNCount_bmi2( + short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, + const void* headerBuffer, size_t hbSize, int bmi2) +{ +#if DYNAMIC_BMI2 + if (bmi2) { + return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize); + } +#endif + (void)bmi2; + return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize); +} + +size_t FSE_readNCount( + short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, + const void* headerBuffer, size_t hbSize) +{ + return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0); +} + /*! HUF_readStats() : Read compact Huffman tree, saved by HUF_writeCTable(). @@ -175,6 +234,17 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize) +{ + U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32]; + return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* flags */ 0); +} + +FORCE_INLINE_TEMPLATE size_t +HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats, + U32* nbSymbolsPtr, U32* tableLogPtr, + const void* src, size_t srcSize, + void* workSpace, size_t wkspSize, + int bmi2) { U32 weightTotal; const BYTE* ip = (const BYTE*) src; @@ -183,7 +253,7 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, if (!srcSize) return ERROR(srcSize_wrong); iSize = ip[0]; - /* memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzer complain ... */ + /* ZSTD_memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzer complain ... */
if (iSize >= 128) { /* special header */ oSize = iSize - 127; @@ -197,31 +267,31 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, huffWeight[n+1] = ip[n/2] & 15; } } } else { /* header compressed with FSE (normal case) */ - FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)]; /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */ if (iSize+1 > srcSize) return ERROR(srcSize_wrong); - oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6); /* max (hwSize-1) values decoded, as last one is implied */ + /* max (hwSize-1) values decoded, as last one is implied */ + oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2); if (FSE_isError(oSize)) return oSize; } /* collect weight stats */ - memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32)); + ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32)); weightTotal = 0; { U32 n; for (n=0; n<oSize; n++) { - if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected); + if (huffWeight[n] > HUF_TABLELOG_MAX) return ERROR(corruption_detected); rankStats[huffWeight[n]]++; weightTotal += (1 << huffWeight[n]) >> 1; } } if (weightTotal == 0) return ERROR(corruption_detected); /* get last non-null symbol weight (implied, total must be 2^n) */ - { U32 const tableLog = BIT_highbit32(weightTotal) + 1; + { U32 const tableLog = ZSTD_highbit32(weightTotal) + 1; if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected); *tableLogPtr = tableLog; /* determine last weight */ { U32 const total = 1 << tableLog; U32 const rest = total - weightTotal; - U32 const verif = 1 << BIT_highbit32(rest); - U32 const lastWeight = BIT_highbit32(rest) + 1; + U32 const verif = 1 << ZSTD_highbit32(rest); + U32 const lastWeight = ZSTD_highbit32(rest) + 1; if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ huffWeight[oSize] = (BYTE)lastWeight; rankStats[lastWeight]++; @@ -234,3 +304,37 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, *nbSymbolsPtr = (U32)(oSize+1); return iSize+1; }
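A worked example of the implied-last-weight computation above (numbers illustrative): if the explicit weights sum to weightTotal = 96, then tableLog = ZSTD_highbit32(96) + 1 = 7 and total = 128, so rest = 32, which is a clean power of 2; the implied symbol therefore has lastWeight = ZSTD_highbit32(32) + 1 = 6 and contributes (1 << 6) >> 1 = 32, completing the power of two:

    U32 const weightTotal = 96;                              /* sum of (1<<w)>>1 over explicit weights */
    U32 const tableLog    = ZSTD_highbit32(weightTotal) + 1; /* 7 */
    U32 const rest        = (1u << tableLog) - weightTotal;  /* 128 - 96 = 32 */
    U32 const lastWeight  = ZSTD_highbit32(rest) + 1;        /* 6 : weight of the implied symbol */
    /* valid only because rest (32) is a clean power of 2 */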
+ +/* Avoids the FORCE_INLINE of the _body() function. */ +static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats, + U32* nbSymbolsPtr, U32* tableLogPtr, + const void* src, size_t srcSize, + void* workSpace, size_t wkspSize) +{ + return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0); +} + +#if DYNAMIC_BMI2 +static BMI2_TARGET_ATTRIBUTE size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats, + U32* nbSymbolsPtr, U32* tableLogPtr, + const void* src, size_t srcSize, + void* workSpace, size_t wkspSize) +{ + return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1); +} +#endif + +size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats, + U32* nbSymbolsPtr, U32* tableLogPtr, + const void* src, size_t srcSize, + void* workSpace, size_t wkspSize, + int flags) +{ +#if DYNAMIC_BMI2 + if (flags & HUF_flags_bmi2) { + return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize); + } +#endif + (void)flags; + return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize); +} diff --git a/lib/common/error_private.c b/lib/common/error_private.c index 7c1bb67a23f..7e7f24f6756 100644 --- a/lib/common/error_private.c +++ b/lib/common/error_private.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -27,9 +27,11 @@ const char* ERR_getErrorString(ERR_enum code) case PREFIX(version_unsupported): return "Version not supported"; case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter"; case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding"; - case PREFIX(corruption_detected): return "Corrupted block detected"; + case PREFIX(corruption_detected): return "Data corruption detected"; case PREFIX(checksum_wrong): return "Restored data doesn't match checksum"; + case PREFIX(literals_headerWrong): return "Header of Literals' block doesn't respect format specification"; case PREFIX(parameter_unsupported): return "Unsupported parameter"; + case PREFIX(parameter_combination_unsupported): return "Unsupported combination of parameters"; case PREFIX(parameter_outOfBound): return "Parameter is out of bound"; case PREFIX(init_missing): return "Context should be init first"; case PREFIX(memory_allocation): return "Allocation error : not enough memory"; @@ -38,15 +40,23 @@ const char* ERR_getErrorString(ERR_enum code) case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported"; case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large"; case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small"; + case PREFIX(cannotProduce_uncompressedBlock): return "This mode cannot generate an uncompressed block"; + case PREFIX(stabilityCondition_notRespected): return "pledged buffer stability condition is not respected"; case PREFIX(dictionary_corrupted): return "Dictionary is corrupted"; case PREFIX(dictionary_wrong): return "Dictionary mismatch"; case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples"; case PREFIX(dstSize_tooSmall): return "Destination buffer is too small"; case
PREFIX(srcSize_wrong): return "Src size is incorrect"; case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer"; + case PREFIX(noForwardProgress_destFull): return "Operation made no progress over multiple calls, due to output buffer being full"; + case PREFIX(noForwardProgress_inputEmpty): return "Operation made no progress over multiple calls, due to input being empty"; /* following error codes are not stable and may be removed or changed in a future version */ case PREFIX(frameIndex_tooLarge): return "Frame index is too large"; case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking"; + case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong"; + case PREFIX(srcBuffer_wrong): return "Source buffer is wrong"; + case PREFIX(sequenceProducer_failed): return "Block-level external sequence producer returned an error code"; + case PREFIX(externalSequences_invalid): return "External sequences are not valid"; case PREFIX(maxCode): default: return notErrorCode; } diff --git a/lib/common/error_private.h b/lib/common/error_private.h index 0d2fa7e34b0..9dcc8595123 100644 --- a/lib/common/error_private.h +++ b/lib/common/error_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -13,17 +13,13 @@ #ifndef ERROR_H_MODULE #define ERROR_H_MODULE -#if defined (__cplusplus) -extern "C" { -#endif - - /* **************************************** * Dependencies ******************************************/ -#include <stddef.h> /* size_t */ -#include "zstd_errors.h" /* enum list */ - +#include "../zstd_errors.h" /* enum list */ +#include "compiler.h" +#include "debug.h" +#include "zstd_deps.h" /* size_t */ /* **************************************** * Compiler-specific @@ -49,7 +45,7 @@ typedef ZSTD_ErrorCode ERR_enum; /*-**************************************** * Error codes handling ******************************************/ -#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */ +#undef ERROR /* already defined on Visual Studio */ #define ERROR(name) ZSTD_ERROR(name) #define ZSTD_ERROR(name) ((size_t)-PREFIX(name)) @@ -57,6 +53,15 @@ ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); } +/* check and forward error code */ +#define CHECK_V_F(e, f) \ + size_t const e = f; \ + do { \ + if (ERR_isError(e)) \ + return e; \ + } while (0) +#define CHECK_F(f) do { CHECK_V_F(_var_err__, f); } while (0) +
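Usage sketch for the forwarding macros just defined (hypothetical step function returning a size_t error code):

    static size_t someStep(void* dst, size_t dstCapacity);  /* illustrative */

    static size_t pipeline(void* dst, size_t dstCapacity)
    {
        CHECK_F(someStep(dst, dstCapacity));    /* early-return on error */
        {   CHECK_V_F(written, someStep(dst, dstCapacity)); /* bind result, then check it */
            return written;
        }
    }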
/*-**************************************** * Error Strings ******************************************/ @@ -69,8 +74,85 @@ ERR_STATIC const char* ERR_getErrorName(size_t code) return ERR_getErrorString(ERR_getErrorCode(code)); } -#if defined (__cplusplus) +/** + * Ignore: this is an internal helper. + * + * This is a helper function to help force C99-correctness during compilation. + * Under strict compilation modes, variadic macro arguments can't be empty. + * However, variadic function arguments can be. Using a function therefore lets + * us statically check that at least one (string) argument was passed, + * independent of the compilation flags. + */ +static INLINE_KEYWORD UNUSED_ATTR +void _force_has_format_string(const char *format, ...) { + (void)format; } -#endif + +/** + * Ignore: this is an internal helper. + * + * We want to force this function invocation to be syntactically correct, but + * we don't want to force runtime evaluation of its arguments. + */ +#define _FORCE_HAS_FORMAT_STRING(...) \ + do { \ + if (0) { \ + _force_has_format_string(__VA_ARGS__); \ + } \ + } while (0) + +#define ERR_QUOTE(str) #str + +/** + * Return the specified error if the condition evaluates to true. + * + * In debug modes, prints additional information. + * In order to do that (particularly, printing the conditional that failed), + * this can't just wrap RETURN_ERROR(). + */ +#define RETURN_ERROR_IF(cond, err, ...) \ + do { \ + if (cond) { \ + RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \ + __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \ + _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ + RAWLOG(3, ": " __VA_ARGS__); \ + RAWLOG(3, "\n"); \ + return ERROR(err); \ + } \ + } while (0) + +/** + * Unconditionally return the specified error. + * + * In debug modes, prints additional information. + */ +#define RETURN_ERROR(err, ...) \ + do { \ + RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \ + __FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \ + _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ + RAWLOG(3, ": " __VA_ARGS__); \ + RAWLOG(3, "\n"); \ + return ERROR(err); \ + } while(0) + +/** + * If the provided expression evaluates to an error code, returns that error code. + * + * In debug modes, prints additional information. + */ +#define FORWARD_IF_ERROR(err, ...) \ + do { \ + size_t const err_code = (err); \ + if (ERR_isError(err_code)) { \ + RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \ + __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \ + _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ + RAWLOG(3, ": " __VA_ARGS__); \ + RAWLOG(3, "\n"); \ + return err_code; \ + } \ + } while(0) #endif /* ERROR_H_MODULE */ diff --git a/lib/common/fse.h b/lib/common/fse.h index 811c670bddc..b6c2a3e9ccc 100644 --- a/lib/common/fse.h +++ b/lib/common/fse.h @@ -1,41 +1,16 @@ /* ****************************************************************** - FSE : Finite State Entropy codec - Public Prototypes declaration - Copyright (C) 2013-2016, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + * FSE : Finite State Entropy codec + * Public Prototypes declaration + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * You can contact the author at : + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ - -#if defined (__cplusplus) -extern "C" { -#endif - #ifndef FSE_H #define FSE_H @@ -43,8 +18,7 @@ extern "C" { /*-***************************************** * Dependencies ******************************************/ -#include <stddef.h> /* size_t, ptrdiff_t */ - +#include "zstd_deps.h" /* size_t, ptrdiff_t */ /*-***************************************** * FSE_PUBLIC_API : control library symbols visibility ******************************************/ FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ -/*-**************************************** -* FSE simple functions -******************************************/ -/*! FSE_compress() : - Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'. - 'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= FSE_compressBound(srcSize). - @return : size of compressed data (<= dstCapacity). - Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! - if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead. - if FSE_isError(return), compression failed (more details using FSE_getErrorName()) -*/ -FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity, - const void* src, size_t srcSize); - -/*! FSE_decompress(): - Decompress FSE data from buffer 'cSrc', of size 'cSrcSize', - into already allocated destination buffer 'dst', of size 'dstCapacity'. - @return : size of regenerated data (<= maxDstSize), - or an error code, which can be tested using FSE_isError() . - - ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!! - Why ? : making this distinction requires a header. - Header management is intentionally delegated to the user layer, which can better manage special cases. -*/ -FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity, - const void* cSrc, size_t cSrcSize); - - /*-***************************************** * Tool functions ******************************************/ FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */ -/*-***************************************** -* FSE advanced functions -******************************************/ -/*! FSE_compress2() : - Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog' - Both parameters can be defined as '0' to mean : use default value - @return : size of compressed data - Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!! - if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
- if FSE_isError(return), it's an error code. -*/ -FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); - - /*-***************************************** * FSE detailed API ******************************************/ @@ -157,10 +89,16 @@ FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize /*! FSE_normalizeCount(): normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). + useLowProbCount is a boolean parameter which trades off compressed size for + faster header decoding. When it is set to 1, the compressed data will be slightly + smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be + faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0 + is a good default, since header deserialization makes a big speed difference. + Otherwise, useLowProbCount=1 is a good default, since the speed difference is small. @return : tableLog, or an errorCode, which can be tested using FSE_isError() */ FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, - const unsigned* count, size_t srcSize, unsigned maxSymbolValue); + const unsigned* count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount); /*! FSE_NCountWriteBound(): Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'. @@ -178,8 +116,6 @@ FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, /*! Constructor and Destructor of FSE_CTable. Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */ -FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog); -FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct); /*! FSE_buildCTable(): Builds `ct`, which must be already allocated, using FSE_createCTable(). @@ -248,23 +184,14 @@ FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize); -/*! Constructor and Destructor of FSE_DTable. - Note that its size depends on 'tableLog' */ +/*! FSE_readNCount_bmi2(): + * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise. + */ +FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter, + unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, + const void* rBuffer, size_t rBuffSize, int bmi2); + typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ -FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog); -FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt); - -/*! FSE_buildDTable(): - Builds 'dt', which must be already allocated, using FSE_createDTable(). - return : 0, or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); - -/*! FSE_decompress_usingDTable(): - Decompress compressed source `cSrc` of size `cSrcSize` using `dt` - into `dst` which must be already allocated. 
- @return : size of regenerated data (necessarily <= `dstCapacity`), - or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt); /*! Tutorial : @@ -296,24 +223,22 @@ If there is an error, the function will return an error code, which can be teste #endif /* FSE_H */ + #if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY) #define FSE_H_FSE_STATIC_LINKING_ONLY - -/* *** Dependency *** */ #include "bitstream.h" - /* ***************************************** * Static allocation *******************************************/ /* FSE buffer bounds */ #define FSE_NCOUNTBOUND 512 -#define FSE_BLOCKBOUND(size) (size + (size>>7)) +#define FSE_BLOCKBOUND(size) ((size) + ((size)>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */) #define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ /* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */ -#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2)) -#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog)) +#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2)) +#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<(maxTableLog))) -#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) ) -size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); - -size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits); -/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */ - size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue); /**< build a fake FSE_CTable, designed to compress always the same symbolValue */ /* FSE_buildCTable_wksp() : * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). - * `wkspSize` must be >= `(1<<tableLog)`. + * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` of `unsigned`. + * See FSE_buildCTable_wksp() for breakdown of workspace usage.
*/ +#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (((maxSymbolValue + 2) + (1ull << (tableLog)))/2 + sizeof(U64)/sizeof(U32) /* additional 8 bytes for potential table overwrite */) +#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)) size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); -size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits); -/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */ +#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8) +#define FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned)) +FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); +/**< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue)` */ -size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue); -/**< build a fake FSE_DTable, designed to always generate the same symbolValue */ - -size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog); -/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */ +#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + 1 + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1) +#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned)) +size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2); +/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)`. + * Set bmi2 to 1 if your CPU supports BMI2 or 0 if it doesn't */ typedef enum { FSE_repeat_none, /**< Cannot use the previous table */ @@ -536,20 +456,20 @@ MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, un FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; const U16* const stateTable = (const U16*)(statePtr->stateTable); U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16); - BIT_addBits(bitC, statePtr->value, nbBitsOut); + BIT_addBits(bitC, (BitContainerType)statePtr->value, nbBitsOut); statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; } MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr) { - BIT_addBits(bitC, statePtr->value, statePtr->stateLog); + BIT_addBits(bitC, (BitContainerType)statePtr->value, statePtr->stateLog); BIT_flushBits(bitC); } /* FSE_getMaxNbBits() : * Approximate maximum cost of a symbol, in bits. - * Fractional get rounded up (i.e : a symbol with a normalized frequency of 3 gives the same result as a frequency of 2) + * Fractional get rounded up (i.e. 
a symbol with a normalized frequency of 3 gives the same result as a frequency of 2) * note 1 : assume symbolValue is valid (<= maxSymbolValue) * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue) @@ -664,6 +584,9 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) #ifndef FSE_DEFAULT_MEMORY_USAGE # define FSE_DEFAULT_MEMORY_USAGE 13 #endif +#if (FSE_DEFAULT_MEMORY_USAGE > FSE_MAX_MEMORY_USAGE) +# error "FSE_DEFAULT_MEMORY_USAGE must be <= FSE_MAX_MEMORY_USAGE" +#endif /*!FSE_MAX_SYMBOL_VALUE : * Maximum symbol value authorized. @@ -697,12 +620,6 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) # error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" #endif -#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3) - +#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3) #endif /* FSE_STATIC_LINKING_ONLY */ - - -#if defined (__cplusplus) -} -#endif diff --git a/lib/common/fse_decompress.c b/lib/common/fse_decompress.c index 72bbead5bee..c8f1bb0cf23 100644 --- a/lib/common/fse_decompress.c +++ b/lib/common/fse_decompress.c @@ -1,48 +1,29 @@ /* ****************************************************************** - FSE : Finite State Entropy decoder - Copyright (C) 2013-2015, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - - Public forum : https://groups.google.com/forum/#!forum/lz4c + * FSE : Finite State Entropy decoder + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * You can contact the author at : + * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy + * - Public forum : https://groups.google.com/forum/#!forum/lz4c + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
****************************************************************** */ /* ************************************************************** * Includes ****************************************************************/ -#include <stdlib.h> /* malloc, free, qsort */ -#include <string.h> /* memcpy, memset */ +#include "debug.h" /* assert */ #include "bitstream.h" #include "compiler.h" #define FSE_STATIC_LINKING_ONLY #include "fse.h" #include "error_private.h" +#include "zstd_deps.h" /* ZSTD_memcpy */ +#include "bits.h" /* ZSTD_highbit32 */ /* ************************************************************** @@ -51,9 +32,6 @@ #define FSE_isError ERR_isError #define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ -/* check and forward error code */ -#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; } - /* ************************************************************** * Templates @@ -77,30 +55,19 @@ #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) - -/* Function templates */ -FSE_DTable* FSE_createDTable (unsigned tableLog) -{ - if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; - return (FSE_DTable*)malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) ); -} - -void FSE_freeDTable (FSE_DTable* dt) -{ - free(dt); -} - -size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) +static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize) { void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */ FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr); - U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; + U16* symbolNext = (U16*)workSpace; + BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1); U32 const maxSV1 = maxSymbolValue + 1; U32 const tableSize = 1 << tableLog; U32 highThreshold = tableSize-1; /* Sanity Checks */ + if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge); if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); @@ -116,13 +83,57 @@ size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned symbolNext[s] = 1; } else { if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0; - symbolNext[s] = normalizedCounter[s]; + symbolNext[s] = (U16)normalizedCounter[s]; } } } - memcpy(dt, &DTableH, sizeof(DTableH)); + ZSTD_memcpy(dt, &DTableH, sizeof(DTableH)); } /* Spread symbols */ - { U32 const tableMask = tableSize-1; + if (highThreshold == tableSize - 1) { + size_t const tableMask = tableSize-1; + size_t const step = FSE_TABLESTEP(tableSize); + /* First lay down the symbols in order. + * We use a uint64_t to lay down 8 bytes at a time. This reduces branch + * misses since small blocks generally have small table logs, so nearly + * all symbols have counts <= 8. We ensure we have 8 bytes at the end of + * our buffer to handle the over-write. + */
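Illustration of the lay-down just described (counts are made up): for normalizedCounter = {3, 2}, each MEM_write64() stores eight copies of the current symbol byte (the byte-replicated sv), while pos advances by only the symbol's count, so the next symbol simply overwrites the surplus bytes:

    /* s=0 : sv = 0x0000000000000000, write 8 x 00 at pos 0, pos += 3
     * s=1 : sv = 0x0101010101010101, write 8 x 01 at pos 3, pos += 2
     * spread[] = 00 00 00 01 01 .. (bytes past pos are scratch, overwritten later or ignored)
     */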
+ { U64 const add = 0x0101010101010101ull; + size_t pos = 0; + U64 sv = 0; + U32 s; + for (s=0; s<maxSV1; ++s, sv += add) { - DTableH->tableLog = 0; - DTableH->fastMode = 0; - - cell->newState = 0; - cell->symbol = symbolValue; - cell->nbBits = 0; - - return 0; -} - - -size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) -{ - void* ptr = dt; - FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; - void* dPtr = dt + 1; - FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr; - const unsigned tableSize = 1 << nbBits; - const unsigned tableMask = tableSize - 1; - const unsigned maxSV1 = tableMask+1; - unsigned s; - - /* Sanity checks */ - if (nbBits < 1) return ERROR(GENERIC); /* min size */ - - /* Build Decoding Table */ - DTableH->tableLog = (U16)nbBits; - DTableH->fastMode = 1; - for (s=0; s<maxSV1; s++) + assert(op >= ostart); + return (size_t)(op-ostart); } +typedef struct { + short ncount[FSE_MAX_SYMBOL_VALUE + 1]; +} FSE_DecompressWksp; -size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, - const void* cSrc, size_t cSrcSize, - const FSE_DTable* dt) -{ - const void* ptr = dt; - const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr; - const U32 fastMode = DTableH->fastMode; - /* select fast mode (static) */ - if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); - return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); -} - - -size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog) +FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body( + void* dst, size_t dstCapacity, + const void* cSrc, size_t cSrcSize, + unsigned maxLog, void* workSpace, size_t wkspSize, + int bmi2) { const BYTE* const istart = (const BYTE*)cSrc; const BYTE* ip = istart; - short counting[FSE_MAX_SYMBOL_VALUE+1]; unsigned tableLog; unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; + FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace; + size_t const dtablePos = sizeof(FSE_DecompressWksp) / sizeof(FSE_DTable); + FSE_DTable* const dtable = (FSE_DTable*)workSpace + dtablePos; + + FSE_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0); + if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC); + + /* correct offset to dtable depends on this property */ + FSE_STATIC_ASSERT(sizeof(FSE_DecompressWksp) % sizeof(FSE_DTable) == 0); /* normal FSE decoding mode */ - size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); - if (FSE_isError(NCountLength)) return NCountLength; - //if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */ - if (tableLog > maxLog) return ERROR(tableLog_tooLarge); - ip += NCountLength; - cSrcSize -= NCountLength; + { size_t const NCountLength = + FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2); + if (FSE_isError(NCountLength)) return NCountLength; + if (tableLog > maxLog) return ERROR(tableLog_tooLarge); + assert(NCountLength <= cSrcSize); + ip += NCountLength; + cSrcSize -= NCountLength; + } - CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) ); + if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge); + assert(sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog) <= wkspSize); + workSpace = (BYTE*)workSpace + sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog); + wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog); - return
FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace); /* always return, even if it is an error code */ -} + CHECK_F( FSE_buildDTable_internal(dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) ); + { + const void* ptr = dtable; + const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr; + const U32 fastMode = DTableH->fastMode; -typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; + /* select fast mode (static) */ + if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 1); + return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 0); + } +} -size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize) +/* Avoids the FORCE_INLINE of the _body() function. */ +static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize) { - DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ - return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG); + return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0); } +#if DYNAMIC_BMI2 +BMI2_TARGET_ATTRIBUTE static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize) +{ + return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1); +} +#endif +size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2) +{ +#if DYNAMIC_BMI2 + if (bmi2) { + return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize); + } +#endif + (void)bmi2; + return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize); +} #endif /* FSE_COMMONDEFS_ONLY */ diff --git a/lib/common/huf.h b/lib/common/huf.h index 6b572c448d9..4b142c4f996 100644 --- a/lib/common/huf.h +++ b/lib/common/huf.h @@ -1,144 +1,44 @@ /* ****************************************************************** - huff0 huffman codec, - part of Finite State Entropy library - Copyright (C) 2013-present, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
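The FSE_decompress_wksp_body_default / FSE_decompress_wksp_body_bmi2 pair above is an instance of a recurring dispatch idiom: one force-inlined body is stamped out twice, and a thin public entry point selects the BMI2 copy at runtime. A minimal sketch of the pattern, assuming a GCC/Clang toolchain and invented names (work, work_body):

#include <stddef.h>

#ifndef DYNAMIC_BMI2
# define DYNAMIC_BMI2 0   /* normally provided by portability_macros.h */
#endif

/* The shared body; force-inlining it into each wrapper lets the compiler
 * specialize the whole function for the wrapper's target attribute. */
static inline size_t work_body(const void* src, size_t srcSize, int bmi2)
{
    (void)src; (void)bmi2;   /* a real body would branch to BMI2 code paths here */
    return srcSize;
}

static size_t work_default(const void* src, size_t srcSize)
{
    return work_body(src, srcSize, 0);
}

#if DYNAMIC_BMI2
__attribute__((target("bmi2")))   /* a plausible expansion of BMI2_TARGET_ATTRIBUTE */
static size_t work_bmi2(const void* src, size_t srcSize)
{
    return work_body(src, srcSize, 1);
}
#endif

size_t work(const void* src, size_t srcSize, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) return work_bmi2(src, srcSize);
#endif
    (void)bmi2;
    return work_default(src, srcSize);
}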
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + * huff0 huffman codec, + * part of Finite State Entropy library + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * You can contact the author at : + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ -#if defined (__cplusplus) -extern "C" { -#endif - #ifndef HUF_H_298734234 #define HUF_H_298734234 /* *** Dependencies *** */ -#include <stddef.h> /* size_t */ - - -/* *** library symbols visibility *** */ -/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual, - * HUF symbols remain "private" (internal symbols for library only). - * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */ -#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) -# define HUF_PUBLIC_API __attribute__ ((visibility ("default"))) -#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ -# define HUF_PUBLIC_API __declspec(dllexport) -#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) -# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */ -#else -# define HUF_PUBLIC_API -#endif - - -/* ========================== */ -/* *** simple functions *** */ -/* ========================== */ - -/** HUF_compress() : - * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'. - * 'dst' buffer must be already allocated. - * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize). - * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB. - * @return : size of compressed data (<= `dstCapacity`). - * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! - * if HUF_isError(return), compression failed (more details using HUF_getErrorName()) - */ -HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity, - const void* src, size_t srcSize); - -/** HUF_decompress() : - * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize', - * into already allocated buffer 'dst', of minimum size 'dstSize'. - * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data. - * Note : in contrast with FSE, HUF_decompress can regenerate - * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, - * because it knows size to regenerate (originalSize).
- * @return : size of regenerated data (== originalSize), - * or an error code, which can be tested using HUF_isError() - */ -HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize, - const void* cSrc, size_t cSrcSize); - +#include "zstd_deps.h" /* size_t */ +#include "mem.h" /* U32 */ +#define FSE_STATIC_LINKING_ONLY +#include "fse.h" /* *** Tool functions *** */ -#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ -HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ +#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ +size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ /* Error Management */ -HUF_PUBLIC_API unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ -HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ - - -/* *** Advanced function *** */ - -/** HUF_compress2() : - * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`. - * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX . - * `tableLog` must be `<= HUF_TABLELOG_MAX` . */ -HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned tableLog); - -/** HUF_compress4X_wksp() : - * Same as HUF_compress2(), but uses externally allocated `workSpace`. - * `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */ -#define HUF_WORKSPACE_SIZE (6 << 10) -#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32)) -HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned tableLog, - void* workSpace, size_t wkspSize); - -#endif /* HUF_H_298734234 */ - -/* ****************************************************************** - * WARNING !! - * The following section contains advanced and experimental definitions - * which shall never be used in the context of a dynamic library, - * because they are not guaranteed to remain stable in the future. - * Only consider them in association with static linking. - * *****************************************************************/ -#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY) -#define HUF_H_HUF_STATIC_LINKING_ONLY +unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ +const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ -/* *** Dependencies *** */ -#include "mem.h" /* U32 */ +#define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */) +#define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64)) /* *** Constants *** */ -#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ +#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */ #define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */ #define HUF_SYMBOLVALUE_MAX 255 -#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ +#define HUF_TABLELOG_ABSOLUTEMAX 12 /* absolute limit of HUF_MAX_TABLELOG. 
Beyond that value, code does not work */ #if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX) # error "HUF_TABLELOG_MAX is too large !" #endif @@ -153,12 +53,12 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, #define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ /* static allocation of HUF's Compression Table */ -#define HUF_CTABLE_SIZE_U32(maxSymbolValue) ((maxSymbolValue)+1) /* Use tables of U32, for proper alignment */ -#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32)) +/* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */ +typedef size_t HUF_CElt; /* consider it an incomplete type */ +#define HUF_CTABLE_SIZE_ST(maxSymbolValue) ((maxSymbolValue)+2) /* Use tables of size_t, for proper alignment */ +#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_ST(maxSymbolValue) * sizeof(size_t)) #define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \ - U32 name##hb[HUF_CTABLE_SIZE_U32(maxSymbolValue)]; \ - void* name##hv = &(name##hb); \ - HUF_CElt* name = (HUF_CElt*)(name##hv) /* no final ; */ + HUF_CElt name[HUF_CTABLE_SIZE_ST(maxSymbolValue)] /* no final ; */ /* static allocation of HUF's DTable */ typedef U32 HUF_DTable; @@ -172,25 +72,49 @@ typedef U32 HUF_DTable; /* **************************************** * Advanced decompression functions ******************************************/ -size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ -#endif -size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */ -size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */ -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */ -size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ -size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ -#endif +/** + * Huffman flags bitset. + * For all flags, 0 is the default value. + */ +typedef enum { + /** + * If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime. + * Otherwise: Ignored. + */ + HUF_flags_bmi2 = (1 << 0), + /** + * If set: Test possible table depths to find the one that produces the smallest header + encoded size. + * If unset: Use heuristic to find the table depth. + */ + HUF_flags_optimalDepth = (1 << 1), + /** + * If set: If the previous table can encode the input, always reuse the previous table. 
+ * If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output. + */ + HUF_flags_preferRepeat = (1 << 2), + /** + * If set: Sample the input and check if the sample is uncompressible, if it is then don't attempt to compress. + * If unset: Always histogram the entire input. + */ + HUF_flags_suspectUncompressible = (1 << 3), + /** + * If set: Don't use assembly implementations + * If unset: Allow using assembly implementations + */ + HUF_flags_disableAsm = (1 << 4), + /** + * If set: Don't use the fast decoding loop, always use the fallback decoding loop. + * If unset: Use the fast decoding loop when possible. + */ + HUF_flags_disableFast = (1 << 5) +} HUF_flags_e; /* **************************************** * HUF detailed API * ****************************************/ +#define HUF_OPTIMAL_DEPTH_THRESHOLD ZSTD_btultra /*! HUF_compress() does the following: * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h") @@ -203,33 +127,38 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, * For example, it's possible to compress several blocks using the same 'CTable', * or to save and regenerate 'CTable' using external methods. */ -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); -typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */ -size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */ -size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog); -size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); +unsigned HUF_minTableLog(unsigned symbolCardinality); +unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue); +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, void* workSpace, + size_t wkspSize, HUF_CElt* table, const unsigned* count, int flags); /* table is used as scratch space for building and testing tables, not a return value */ +size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize); +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags); +size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); +int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); typedef enum { HUF_repeat_none, /**< Cannot use the previous table */ HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */ } HUF_repeat; + /** HUF_compress4X_repeat() : * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. - * If preferRepeat then the old table will always be used if valid. */ + * If preferRepeat then the old table will always be used if valid. 
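The HUF_flags_e values above are meant to be OR-ed into a single int and passed as the trailing flags argument of the compression and decompression entry points. A hedged sketch (the include path and the cpuHasBMI2 probe are assumptions, not part of the header):

#include "huf.h"

/* Assemble a flags word; cpuHasBMI2 stands in for zstd's runtime CPU detection. */
static int make_huf_flags(int cpuHasBMI2)
{
    int flags = 0;
    if (cpuHasBMI2) flags |= HUF_flags_bmi2;   /* allow the BMI2 fast paths */
    flags |= HUF_flags_optimalDepth;           /* search table depths for smallest output */
    return flags;   /* e.g. HUF_compress4X_usingCTable(dst, dstSize, src, srcSize, ctable, flags) */
}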
+ * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2); + HUF_CElt* hufTable, HUF_repeat* repeat, int flags); /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE. */ -#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1) +#define HUF_CTABLE_WORKSPACE_SIZE_U32 ((4 * (HUF_SYMBOLVALUE_MAX + 1)) + 192) #define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned)) size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, @@ -244,15 +173,40 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize); +/*! HUF_readStats_wksp() : + * Same as HUF_readStats() but takes an external workspace which must be + * 4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE. + * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. + */ +#define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1) +#define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned)) +size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, + U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, + const void* src, size_t srcSize, + void* workspace, size_t wkspSize, + int flags); + /** HUF_readCTable() : * Loading a CTable saved with HUF_writeCTable() */ -size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); +size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights); -/** HUF_getNbBits() : +/** HUF_getNbBitsFromCTable() : * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX - * Note 1 : is not inlined, as HUF_CElt definition is private - * Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */ -U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue); + * Note 1 : If symbolValue > HUF_readCTableHeader(symbolTable).maxSymbolValue, returns 0 + * Note 2 : is not inlined, as HUF_CElt definition is private + */ +U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue); + +typedef struct { + BYTE tableLog; + BYTE maxSymbolValue; + BYTE unused[sizeof(size_t) - 2]; +} HUF_CTableHeader; + +/** HUF_readCTableHeader() : + * @returns The header from the CTable specifying the tableLog and the maxSymbolValue. + */ +HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable); /* * HUF_decompress() does the following: @@ -278,81 +232,46 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize); * a required workspace size greater than that specified in the following * macro. 
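Since HUF_CElt is deliberately opaque, a CTable's parameters are recovered through the header accessor declared above rather than by inspecting the array. A minimal sketch (include path assumed):

#include "huf.h"

static unsigned ctable_log(const HUF_CElt* ctable)
{
    HUF_CTableHeader const h = HUF_readCTableHeader(ctable);
    return h.tableLog;   /* h.maxSymbolValue is read the same way */
}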
*/ -#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10) +#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9)) #define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize); -size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize); -size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); -#endif - -size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif - /* ====================== */ /* single stream variants */ /* ====================== */ -size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); -size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ -size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags); /** HUF_compress1X_repeat() : * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. - * If preferRepeat then the old table will always be used if valid. */ + * If preferRepeat then the old table will always be used if valid. 
+ * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2); + HUF_CElt* hufTable, HUF_repeat* repeat, int flags); -size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */ +size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); #ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */ +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); /**< double-symbols decoder */ #endif -size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); -size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); +/* BMI2 variants. + * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. + */ +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags); #ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ -size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); #endif - -size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of single or double symbol decoder, based on DTable */ +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags); size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); #ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags); #endif #ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif - -/* BMI2 variants.
- * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. - */ -size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); +size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags); #endif -size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); -size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); -#endif /* HUF_STATIC_LINKING_ONLY */ - -#if defined (__cplusplus) -} -#endif +#endif /* HUF_H_298734234 */ diff --git a/lib/common/mem.h b/lib/common/mem.h index 5da248756ff..e66a2eaeb27 100644 --- a/lib/common/mem.h +++ b/lib/common/mem.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,15 +11,13 @@ #ifndef MEM_H_MODULE #define MEM_H_MODULE -#if defined (__cplusplus) -extern "C" { -#endif - /*-**************************************** * Dependencies ******************************************/ -#include <stddef.h> /* size_t, ptrdiff_t */ -#include <string.h> /* memcpy */ +#include <stddef.h> /* size_t, ptrdiff_t */ +#include "compiler.h" /* __has_builtin */ +#include "debug.h" /* DEBUG_STATIC_ASSERT */ +#include "zstd_deps.h" /* ZSTD_memcpy */ /*-**************************************** @@ -28,32 +26,22 @@ extern "C" { #if defined(_MSC_VER) /* Visual Studio */ # include <stdlib.h> /* _byteswap_ulong */ # include <intrin.h> /* _byteswap_* */ -#endif -#if defined(__GNUC__) -# define MEM_STATIC static __inline __attribute__((unused)) -#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define MEM_STATIC static inline -#elif defined(_MSC_VER) -# define MEM_STATIC static __inline -#else -# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ +#elif defined(__ICCARM__) +# include <intrinsics.h> #endif -#ifndef __has_builtin -# define __has_builtin(x) 0 /* compat.
with non-clang compilers */ -#endif - -/* code only tested on 32 and 64 bits systems */ -#define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; } -MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); } - - /*-************************************************************** * Basic Types *****************************************************************/ #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include <stdint.h> +# if defined(_AIX) +# include <inttypes.h> +# else +# include <stdint.h> /* intptr_t */ +# endif typedef uint8_t BYTE; + typedef uint8_t U8; + typedef int8_t S8; typedef uint16_t U16; typedef int16_t S16; typedef uint32_t U32; @@ -66,6 +54,8 @@ MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (size # error "this implementation requires char to be exactly 8-bit type" #endif typedef unsigned char BYTE; + typedef unsigned char U8; + typedef signed char S8; #if USHRT_MAX != 65535 # error "this implementation requires short to be exactly 16-bit type" #endif @@ -82,27 +72,64 @@ MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (size typedef signed long long S64; #endif +/*-************************************************************** +* Memory I/O API +*****************************************************************/ +/*=== Static platform detection ===*/ +MEM_STATIC unsigned MEM_32bits(void); +MEM_STATIC unsigned MEM_64bits(void); +MEM_STATIC unsigned MEM_isLittleEndian(void); + +/*=== Native unaligned read/write ===*/ +MEM_STATIC U16 MEM_read16(const void* memPtr); +MEM_STATIC U32 MEM_read32(const void* memPtr); +MEM_STATIC U64 MEM_read64(const void* memPtr); +MEM_STATIC size_t MEM_readST(const void* memPtr); + +MEM_STATIC void MEM_write16(void* memPtr, U16 value); +MEM_STATIC void MEM_write32(void* memPtr, U32 value); +MEM_STATIC void MEM_write64(void* memPtr, U64 value); + +/*=== Little endian unaligned read/write ===*/ +MEM_STATIC U16 MEM_readLE16(const void* memPtr); +MEM_STATIC U32 MEM_readLE24(const void* memPtr); +MEM_STATIC U32 MEM_readLE32(const void* memPtr); +MEM_STATIC U64 MEM_readLE64(const void* memPtr); +MEM_STATIC size_t MEM_readLEST(const void* memPtr); + +MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val); +MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val); +MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32); +MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64); +MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val); + +/*=== Big endian unaligned read/write ===*/ +MEM_STATIC U32 MEM_readBE32(const void* memPtr); +MEM_STATIC U64 MEM_readBE64(const void* memPtr); +MEM_STATIC size_t MEM_readBEST(const void* memPtr); + +MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32); +MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64); +MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val); + +/*=== Byteswap ===*/ +MEM_STATIC U32 MEM_swap32(U32 in); +MEM_STATIC U64 MEM_swap64(U64 in); +MEM_STATIC size_t MEM_swapST(size_t in); + /*-************************************************************** -* Memory I/O +* Memory I/O Implementation *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
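For concreteness, a hedged sketch of what method 0 (the memcpy route) amounts to; compilers typically lower this to a single load on targets that permit unaligned access:

#include <stdint.h>
#include <string.h>

static uint32_t read32_portable(const void* p)
{
    uint32_t v;
    memcpy(&v, p, sizeof(v));   /* safe for any alignment; usually folds to one load */
    return v;
}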
- The switch below allows selecting different access methods for improved performance. - Method 0 (default) : use `memcpy()`. Safe and portable. - Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable). - This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. +/* MEM_FORCE_MEMORY_ACCESS : For accessing unaligned memory: + * Method 0 : always use `memcpy()`. Safe and portable. + * Method 1 : Use compiler extension to set unaligned access. * Method 2 : direct access. This method is portable but violates the C standard. * It can generate buggy code on targets depending on alignment. - In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6) - See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - Prefer these methods in priority order (0 > 1 > 2) + Default : method 1 if supported, else method 0 */ #ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) -# define MEM_FORCE_MEMORY_ACCESS 2 -# elif defined(__INTEL_COMPILER) || defined(__GNUC__) +# ifdef __GNUC__ # define MEM_FORCE_MEMORY_ACCESS 1 # endif #endif @@ -112,8 +139,24 @@ MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } MEM_STATIC unsigned MEM_isLittleEndian(void) { +#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) + return 1; +#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) + return 0; +#elif defined(__clang__) && __LITTLE_ENDIAN__ + return 1; +#elif defined(__clang__) && __BIG_ENDIAN__ + return 0; +#elif defined(_MSC_VER) && (_M_X64 || _M_IX86) + return 1; +#elif defined(__DMC__) && defined(_M_IX86) + return 1; +#elif defined(__IAR_SYSTEMS_ICC__) && __LITTLE_ENDIAN__ + return 1; +#else const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ return one.c[0]; +#endif } #if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) @@ -131,30 +174,19 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; } #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32)) - __pragma( pack(push, 1) ) - typedef struct { U16 v; } unalign16; - typedef struct { U32 v; } unalign32; - typedef struct { U64 v; } unalign64; - typedef struct { size_t v; } unalignArch; - __pragma( pack(pop) ) -#else - typedef struct { U16 v; } __attribute__((packed)) unalign16; - typedef struct { U32 v; } __attribute__((packed)) unalign32; - typedef struct { U64 v; } __attribute__((packed)) unalign64; - typedef struct { size_t v; } __attribute__((packed)) unalignArch; -#endif +typedef __attribute__((aligned(1))) U16 unalign16; +typedef __attribute__((aligned(1))) U32 unalign32; +typedef __attribute__((aligned(1))) U64 unalign64; +typedef __attribute__((aligned(1))) size_t unalignArch; -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const
unalign32*)ptr)->v; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; } -MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; } +MEM_STATIC U16 MEM_read16(const void* ptr) { return *(const unalign16*)ptr; } +MEM_STATIC U32 MEM_read32(const void* ptr) { return *(const unalign32*)ptr; } +MEM_STATIC U64 MEM_read64(const void* ptr) { return *(const unalign64*)ptr; } +MEM_STATIC size_t MEM_readST(const void* ptr) { return *(const unalignArch*)ptr; } -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; } -MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; } -MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; } +MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(unalign16*)memPtr = value; } +MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(unalign32*)memPtr = value; } +MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(unalign64*)memPtr = value; } #else @@ -163,41 +195,49 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = MEM_STATIC U16 MEM_read16(const void* memPtr) { - U16 val; memcpy(&val, memPtr, sizeof(val)); return val; + U16 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U32 MEM_read32(const void* memPtr) { - U32 val; memcpy(&val, memPtr, sizeof(val)); return val; + U32 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U64 MEM_read64(const void* memPtr) { - U64 val; memcpy(&val, memPtr, sizeof(val)); return val; + U64 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC size_t MEM_readST(const void* memPtr) { - size_t val; memcpy(&val, memPtr, sizeof(val)); return val; + size_t val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { - memcpy(memPtr, &value, sizeof(value)); + ZSTD_memcpy(memPtr, &value, sizeof(value)); } MEM_STATIC void MEM_write32(void* memPtr, U32 value) { - memcpy(memPtr, &value, sizeof(value)); + ZSTD_memcpy(memPtr, &value, sizeof(value)); } MEM_STATIC void MEM_write64(void* memPtr, U64 value) { - memcpy(memPtr, &value, sizeof(value)); + ZSTD_memcpy(memPtr, &value, sizeof(value)); } #endif /* MEM_FORCE_MEMORY_ACCESS */ +MEM_STATIC U32 MEM_swap32_fallback(U32 in) +{ + return ((in << 24) & 0xff000000 ) | + ((in << 8) & 0x00ff0000 ) | + ((in >> 8) & 0x0000ff00 ) | + ((in >> 24) & 0x000000ff ); +} + MEM_STATIC U32 MEM_swap32(U32 in) { #if defined(_MSC_VER) /* Visual Studio */ @@ -205,23 +245,16 @@ MEM_STATIC U32 MEM_swap32(U32 in) #elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \ || (defined(__clang__) && __has_builtin(__builtin_bswap32)) return __builtin_bswap32(in); +#elif defined(__ICCARM__) + return __REV(in); #else - return ((in << 24) & 0xff000000 ) | - ((in << 8) & 0x00ff0000 ) | - ((in >> 8) & 0x0000ff00 ) | - ((in >> 24) & 0x000000ff ); + return MEM_swap32_fallback(in); #endif } -MEM_STATIC U64 MEM_swap64(U64 in) +MEM_STATIC U64 MEM_swap64_fallback(U64 in) { -#if defined(_MSC_VER) /* Visual Studio */ - return _byteswap_uint64(in); -#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \ - || (defined(__clang__) && __has_builtin(__builtin_bswap64)) - return __builtin_bswap64(in); -#else - return ((in << 56) & 0xff00000000000000ULL) | + return ((in << 56) & 0xff00000000000000ULL) | ((in << 40) & 0x00ff000000000000ULL) | ((in << 24) & 0x0000ff0000000000ULL) | ((in << 8) & 
0x000000ff00000000ULL) | @@ -229,6 +262,17 @@ MEM_STATIC U64 MEM_swap64(U64 in) ((in >> 24) & 0x0000000000ff0000ULL) | ((in >> 40) & 0x000000000000ff00ULL) | ((in >> 56) & 0x00000000000000ffULL); +} + +MEM_STATIC U64 MEM_swap64(U64 in) +{ +#if defined(_MSC_VER) /* Visual Studio */ + return _byteswap_uint64(in); +#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \ + || (defined(__clang__) && __has_builtin(__builtin_bswap64)) + return __builtin_bswap64(in); +#else + return MEM_swap64_fallback(in); #endif } @@ -265,7 +309,7 @@ MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) MEM_STATIC U32 MEM_readLE24(const void* memPtr) { - return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16); + return (U32)MEM_readLE16(memPtr) + ((U32)(((const BYTE*)memPtr)[2]) << 16); } MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val) @@ -372,9 +416,7 @@ MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val) MEM_writeBE64(memPtr, (U64)val); } - -#if defined (__cplusplus) -} -#endif +/* code only tested on 32 and 64 bits systems */ +MEM_STATIC void MEM_check(void) { DEBUG_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); } #endif /* MEM_H_MODULE */ diff --git a/lib/common/pool.c b/lib/common/pool.c index 7a829454328..3adcefc9a50 100644 --- a/lib/common/pool.c +++ b/lib/common/pool.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -10,9 +10,9 @@ /* ====== Dependencies ======= */ -#include <stddef.h> /* size_t */ +#include "../common/allocations.h" /* ZSTD_customCalloc, ZSTD_customFree */ +#include "zstd_deps.h" /* size_t */ #include "debug.h" /* assert */ -#include "zstd_internal.h" /* ZSTD_malloc, ZSTD_free */ #include "pool.h" /* ====== Compiler specifics ====== */ @@ -86,7 +86,7 @@ static void* POOL_thread(void* opaque) { { POOL_job const job = ctx->queue[ctx->queueHead]; ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize; ctx->numThreadsBusy++; - ctx->queueEmpty = ctx->queueHead == ctx->queueTail; + ctx->queueEmpty = (ctx->queueHead == ctx->queueTail); /* Unlock the mutex, signal a pusher, and run the job */ ZSTD_pthread_cond_signal(&ctx->queuePushCond); ZSTD_pthread_mutex_unlock(&ctx->queueMutex); @@ -96,43 +96,51 @@ static void* POOL_thread(void* opaque) { /* If the intended queue size was 0, signal after finishing job */ ZSTD_pthread_mutex_lock(&ctx->queueMutex); ctx->numThreadsBusy--; - if (ctx->queueSize == 1) { - ZSTD_pthread_cond_signal(&ctx->queuePushCond); - } + ZSTD_pthread_cond_signal(&ctx->queuePushCond); ZSTD_pthread_mutex_unlock(&ctx->queueMutex); } } /* for (;;) */ assert(0); /* Unreachable */ } +/* ZSTD_createThreadPool() : public access point */ +POOL_ctx* ZSTD_createThreadPool(size_t numThreads) { + return POOL_create (numThreads, 0); +} + POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); } POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, - ZSTD_customMem customMem) { + ZSTD_customMem customMem) +{ POOL_ctx* ctx; /* Check parameters */ if (!numThreads) { return NULL; } /* Allocate the context and zero initialize */ - ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem); + ctx = (POOL_ctx*)ZSTD_customCalloc(sizeof(POOL_ctx), customMem); if (!ctx) { return NULL; } /* Initialize the job queue.
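The hardened MEM_readLE24() above builds a 24-bit little-endian value from a 16-bit read plus the third byte shifted into bits 16-23. A standalone sketch of the same composition:

#include <assert.h>
#include <stdint.h>

static uint32_t readLE24(const uint8_t* p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16);
}

static void le24_demo(void)
{
    uint8_t const buf[3] = { 0x56, 0x34, 0x12 };
    assert(readLE24(buf) == 0x123456);   /* bytes come back in LE order */
}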
* It needs one extra space since one space is wasted to differentiate * empty and full queues. */ ctx->queueSize = queueSize + 1; - ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem); + ctx->queue = (POOL_job*)ZSTD_customCalloc(ctx->queueSize * sizeof(POOL_job), customMem); ctx->queueHead = 0; ctx->queueTail = 0; ctx->numThreadsBusy = 0; ctx->queueEmpty = 1; - (void)ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL); - (void)ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL); - (void)ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL); + { + int error = 0; + error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL); + error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL); + error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL); + if (error) { POOL_free(ctx); return NULL; } + } ctx->shutdown = 0; /* Allocate space for the thread handles */ - ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem); + ctx->threads = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), customMem); ctx->threadCapacity = 0; ctx->customMem = customMem; /* Check for errors */ @@ -165,7 +173,7 @@ static void POOL_join(POOL_ctx* ctx) { /* Join all of the threads */ { size_t i; for (i = 0; i < ctx->threadCapacity; ++i) { - ZSTD_pthread_join(ctx->threads[i], NULL); /* note : could fail */ + ZSTD_pthread_join(ctx->threads[i]); /* note : could fail */ } } } @@ -175,14 +183,27 @@ void POOL_free(POOL_ctx *ctx) { ZSTD_pthread_mutex_destroy(&ctx->queueMutex); ZSTD_pthread_cond_destroy(&ctx->queuePushCond); ZSTD_pthread_cond_destroy(&ctx->queuePopCond); - ZSTD_free(ctx->queue, ctx->customMem); - ZSTD_free(ctx->threads, ctx->customMem); - ZSTD_free(ctx, ctx->customMem); + ZSTD_customFree(ctx->queue, ctx->customMem); + ZSTD_customFree(ctx->threads, ctx->customMem); + ZSTD_customFree(ctx, ctx->customMem); } +/*! POOL_joinJobs() : + * Waits for all queued jobs to finish executing. 
+ */ +void POOL_joinJobs(POOL_ctx* ctx) { + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + while(!ctx->queueEmpty || ctx->numThreadsBusy > 0) { + ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex); + } + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); +} +void ZSTD_freeThreadPool (ZSTD_threadPool* pool) { + POOL_free (pool); +} -size_t POOL_sizeof(POOL_ctx *ctx) { +size_t POOL_sizeof(const POOL_ctx* ctx) { if (ctx==NULL) return 0; /* supports sizeof NULL */ return sizeof(*ctx) + ctx->queueSize * sizeof(POOL_job) @@ -199,11 +220,11 @@ static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads) return 0; } /* numThreads > threadCapacity */ - { ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem); + { ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem); if (!threadPool) return 1; /* replace existing thread pool */ - memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool)); - ZSTD_free(ctx->threads, ctx->customMem); + ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(ZSTD_pthread_t)); + ZSTD_customFree(ctx->threads, ctx->customMem); ctx->threads = threadPool; /* Initialize additional threads */ { size_t threadId; @@ -247,9 +268,12 @@ static int isQueueFull(POOL_ctx const* ctx) { } -static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque) +static void +POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque) { - POOL_job const job = {function, opaque}; + POOL_job job; + job.function = function; + job.opaque = opaque; assert(ctx != NULL); if (ctx->shutdown) return; @@ -297,21 +321,28 @@ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) struct POOL_ctx_s { int dummy; }; -static POOL_ctx g_ctx; +static POOL_ctx g_poolCtx; POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); } -POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) { +POOL_ctx* +POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) +{ (void)numThreads; (void)queueSize; (void)customMem; - return &g_ctx; + return &g_poolCtx; } void POOL_free(POOL_ctx* ctx) { - assert(!ctx || ctx == &g_ctx); + assert(!ctx || ctx == &g_poolCtx); + (void)ctx; +} + +void POOL_joinJobs(POOL_ctx* ctx){ + assert(!ctx || ctx == &g_poolCtx); (void)ctx; } @@ -331,9 +362,9 @@ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) { return 1; } -size_t POOL_sizeof(POOL_ctx* ctx) { +size_t POOL_sizeof(const POOL_ctx* ctx) { if (ctx==NULL) return 0; /* supports sizeof NULL */ - assert(ctx == &g_ctx); + assert(ctx == &g_poolCtx); return sizeof(*ctx); } diff --git a/lib/common/pool.h b/lib/common/pool.h index 458d37f13c3..f39b7f1eb99 100644 --- a/lib/common/pool.h +++ b/lib/common/pool.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
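Tying the pool API together, a hedged usage sketch of POOL_create(), POOL_add(), the new POOL_joinJobs(), and POOL_free(); the job body and the thread/queue counts are illustrative:

#include "pool.h"

static void do_work(void* opaque) { (void)opaque; /* job body goes here */ }

static void pool_demo(void)
{
    POOL_ctx* const pool = POOL_create(4 /* numThreads */, 8 /* queueSize */);
    int i;
    if (pool == NULL) return;
    for (i = 0; i < 16; ++i) POOL_add(pool, do_work, NULL);
    POOL_joinJobs(pool);   /* blocks until the queue is empty and no worker is busy */
    POOL_free(pool);
}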
* * * This source code is licensed under both the BSD-style license (found in the @@ -11,14 +11,10 @@ #ifndef POOL_H #define POOL_H -#if defined (__cplusplus) -extern "C" { -#endif - -#include <stddef.h> /* size_t */ +#include "zstd_deps.h" #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */ -#include "zstd.h" +#include "../zstd.h" typedef struct POOL_ctx_s POOL_ctx; @@ -38,10 +34,16 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, */ void POOL_free(POOL_ctx* ctx); + +/*! POOL_joinJobs() : + * Waits for all queued jobs to finish executing. + */ +void POOL_joinJobs(POOL_ctx* ctx); + /*! POOL_resize() : * Expands or shrinks pool's number of threads. * This is more efficient than releasing + creating a new context, - * since it tries to preserve and re-use existing threads. + * since it tries to preserve and reuse existing threads. * `numThreads` must be at least 1. * @return : 0 when resize was successful, * !0 (typically 1) if there is an error. @@ -53,7 +55,7 @@ int POOL_resize(POOL_ctx* ctx, size_t numThreads); * @return threadpool memory usage * note : compatible with NULL (returns 0 in this case) */ -size_t POOL_sizeof(POOL_ctx* ctx); +size_t POOL_sizeof(const POOL_ctx* ctx); /*! POOL_function : * The function type that can be added to a thread pool. @@ -70,15 +72,10 @@ void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque); /*! POOL_tryAdd() : - * Add the job `function(opaque)` to thread pool _if_ a worker is available. + * Add the job `function(opaque)` to thread pool _if_ a queue slot is available. * Returns immediately even if not (does not block). * @return : 1 if successful, 0 if not. */ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque); - -#if defined (__cplusplus) -} -#endif - #endif diff --git a/lib/common/portability_macros.h b/lib/common/portability_macros.h new file mode 100644 index 00000000000..bcca634e419 --- /dev/null +++ b/lib/common/portability_macros.h @@ -0,0 +1,190 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_PORTABILITY_MACROS_H +#define ZSTD_PORTABILITY_MACROS_H + +/** + * This header file contains macro definitions to support portability. + * This header is shared between C and ASM code, so it MUST only + * contain macro definitions. It MUST not contain any C code. + * + * This header ONLY defines macros to detect platforms/feature support. + * + */ + + +/* compat. with non-clang compilers */ +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif + +/* compat. with non-clang compilers */ +#ifndef __has_builtin +# define __has_builtin(x) 0 +#endif + +/* compat.
with non-clang compilers */ +#ifndef __has_feature +# define __has_feature(x) 0 +#endif + +/* detects whether we are being compiled under msan */ +#ifndef ZSTD_MEMORY_SANITIZER +# if __has_feature(memory_sanitizer) +# define ZSTD_MEMORY_SANITIZER 1 +# else +# define ZSTD_MEMORY_SANITIZER 0 +# endif +#endif + +/* detects whether we are being compiled under asan */ +#ifndef ZSTD_ADDRESS_SANITIZER +# if __has_feature(address_sanitizer) +# define ZSTD_ADDRESS_SANITIZER 1 +# elif defined(__SANITIZE_ADDRESS__) +# define ZSTD_ADDRESS_SANITIZER 1 +# else +# define ZSTD_ADDRESS_SANITIZER 0 +# endif +#endif + +/* detects whether we are being compiled under dfsan */ +#ifndef ZSTD_DATAFLOW_SANITIZER +# if __has_feature(dataflow_sanitizer) +# define ZSTD_DATAFLOW_SANITIZER 1 +# else +# define ZSTD_DATAFLOW_SANITIZER 0 +# endif +#endif + +/* Mark the internal assembly functions as hidden */ +#ifdef __ELF__ +# define ZSTD_HIDE_ASM_FUNCTION(func) .hidden func +#elif defined(__APPLE__) +# define ZSTD_HIDE_ASM_FUNCTION(func) .private_extern func +#else +# define ZSTD_HIDE_ASM_FUNCTION(func) +#endif + +/* Compile time determination of BMI2 support */ +#ifndef STATIC_BMI2 +# if defined(__BMI2__) +# define STATIC_BMI2 1 +# elif defined(_MSC_VER) && defined(__AVX2__) +# define STATIC_BMI2 1 /* MSVC does not have a BMI2 specific flag, but every CPU that supports AVX2 also supports BMI2 */ +# endif +#endif + +#ifndef STATIC_BMI2 +# define STATIC_BMI2 0 +#endif + +/* Enable runtime BMI2 dispatch based on the CPU. + * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default. + */ +#ifndef DYNAMIC_BMI2 +# if ((defined(__clang__) && __has_attribute(__target__)) \ + || (defined(__GNUC__) \ + && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \ + && (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)) \ + && !defined(__BMI2__) +# define DYNAMIC_BMI2 1 +# else +# define DYNAMIC_BMI2 0 +# endif +#endif + +/** + * Only enable assembly for GNU C compatible compilers, + * because other platforms may not support GAS assembly syntax. + * + * Only enable assembly for Linux / MacOS / Win32, other platforms may + * work, but they haven't been tested. This could likely be + * extended to BSD systems. + * + * Disable assembly when MSAN is enabled, because MSAN requires + * 100% of code to be instrumented to work. + */ +#if defined(__GNUC__) +# if defined(__linux__) || defined(__linux) || defined(__APPLE__) || defined(_WIN32) +# if ZSTD_MEMORY_SANITIZER +# define ZSTD_ASM_SUPPORTED 0 +# elif ZSTD_DATAFLOW_SANITIZER +# define ZSTD_ASM_SUPPORTED 0 +# else +# define ZSTD_ASM_SUPPORTED 1 +# endif +# else +# define ZSTD_ASM_SUPPORTED 0 +# endif +#else +# define ZSTD_ASM_SUPPORTED 0 +#endif + +/** + * Determines whether we should enable assembly for x86-64 + * with BMI2. + * + * Enable if all of the following conditions hold: + * - ASM hasn't been explicitly disabled by defining ZSTD_DISABLE_ASM + * - Assembly is supported + * - We are compiling for x86-64 and either: + * - DYNAMIC_BMI2 is enabled + * - BMI2 is supported at compile time + */ +#if !defined(ZSTD_DISABLE_ASM) && \ + ZSTD_ASM_SUPPORTED && \ + defined(__x86_64__) && \ + (DYNAMIC_BMI2 || defined(__BMI2__)) +# define ZSTD_ENABLE_ASM_X86_64_BMI2 1 +#else +# define ZSTD_ENABLE_ASM_X86_64_BMI2 0 +#endif + +/* + * For x86 ELF targets, add .note.gnu.property section for Intel CET in + * assembly sources when CET is enabled. + * + * Additionally, any function that may be called indirectly must begin + * with ZSTD_CET_ENDBRANCH. 
+ */ +#if defined(__ELF__) && (defined(__x86_64__) || defined(__i386__)) \ + && defined(__has_include) +# if __has_include(<cet.h>) +# include <cet.h> +# define ZSTD_CET_ENDBRANCH _CET_ENDBR +# endif +#endif + +#ifndef ZSTD_CET_ENDBRANCH +# define ZSTD_CET_ENDBRANCH +#endif + +/** + * ZSTD_IS_DETERMINISTIC_BUILD must be set to 0 if any compilation macro is + * active that impacts the compressed output. + * + * NOTE: ZSTD_MULTITHREAD is allowed to be set or unset. + */ +#if defined(ZSTD_CLEVEL_DEFAULT) \ + || defined(ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR) \ + || defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \ + || defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \ + || defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \ + || defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \ + || defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \ + || defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR) +# define ZSTD_IS_DETERMINISTIC_BUILD 0 +#else +# define ZSTD_IS_DETERMINISTIC_BUILD 1 +#endif + +#endif /* ZSTD_PORTABILITY_MACROS_H */ diff --git a/lib/common/threading.c b/lib/common/threading.c index f3d4fa84184..1d5c97d3cea 100644 --- a/lib/common/threading.c +++ b/lib/common/threading.c @@ -2,68 +2,126 @@ * Copyright (c) 2016 Tino Reichardt * All rights reserved. * + * You can contact the author at: + * - zstdmt source repository: https://github.com/mcmilk/zstdmt + * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). - * - * You can contact the author at: - * - zstdmt source repository: https://github.com/mcmilk/zstdmt + * You may select, at your option, one of the above-listed licenses. */ /** * This file will hold wrappers for systems which do not support pthreads */ +#include "threading.h" + /* create fake symbol to avoid empty translation unit warning */ int g_ZSTD_threading_useless_symbol; #if defined(ZSTD_MULTITHREAD) && defined(_WIN32) /** - * Windows minimalist Pthread Wrapper, based on : - * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + * Windows minimalist Pthread Wrapper */ /* === Dependencies === */ #include <process.h> #include <errno.h> -#include "threading.h" /* === Implementation === */ +typedef struct { + void* (*start_routine)(void*); + void* arg; + int initialized; + ZSTD_pthread_cond_t initialized_cond; + ZSTD_pthread_mutex_t initialized_mutex; +} ZSTD_thread_params_t; + static unsigned __stdcall worker(void *arg) { - ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg; - thread->arg = thread->start_routine(thread->arg); + void* (*start_routine)(void*); + void* thread_arg; + + /* Initialize thread_arg and start_routine, and signal the main thread that it doesn't need + * to wait any longer.
+ */ + { + ZSTD_thread_params_t* thread_param = (ZSTD_thread_params_t*)arg; + thread_arg = thread_param->arg; + start_routine = thread_param->start_routine; + + /* Signal main thread that we are running and do not depend on its memory anymore */ + ZSTD_pthread_mutex_lock(&thread_param->initialized_mutex); + thread_param->initialized = 1; + ZSTD_pthread_cond_signal(&thread_param->initialized_cond); + ZSTD_pthread_mutex_unlock(&thread_param->initialized_mutex); + } + + start_routine(thread_arg); + return 0; } int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, void* (*start_routine) (void*), void* arg) { + ZSTD_thread_params_t thread_param; (void)unused; - thread->arg = arg; - thread->start_routine = start_routine; - thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL); - if (!thread->handle) + if (thread==NULL) return -1; + *thread = NULL; + + thread_param.start_routine = start_routine; + thread_param.arg = arg; + thread_param.initialized = 0; + + /* Setup thread initialization synchronization */ + if(ZSTD_pthread_cond_init(&thread_param.initialized_cond, NULL)) { + /* Should never happen on Windows */ + return -1; + } + if(ZSTD_pthread_mutex_init(&thread_param.initialized_mutex, NULL)) { + /* Should never happen on Windows */ + ZSTD_pthread_cond_destroy(&thread_param.initialized_cond); + return -1; + } + + /* Spawn thread */ + *thread = (HANDLE)_beginthreadex(NULL, 0, worker, &thread_param, 0, NULL); + if (*thread==NULL) { + ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex); + ZSTD_pthread_cond_destroy(&thread_param.initialized_cond); return errno; - else - return 0; + } + + /* Wait for thread to be initialized */ + ZSTD_pthread_mutex_lock(&thread_param.initialized_mutex); + while(!thread_param.initialized) { + ZSTD_pthread_cond_wait(&thread_param.initialized_cond, &thread_param.initialized_mutex); + } + ZSTD_pthread_mutex_unlock(&thread_param.initialized_mutex); + ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex); + ZSTD_pthread_cond_destroy(&thread_param.initialized_cond); + + return 0; } -int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr) +int ZSTD_pthread_join(ZSTD_pthread_t thread) { DWORD result; - if (!thread.handle) return 0; + if (!thread) return 0; + + result = WaitForSingleObject(thread, INFINITE); + CloseHandle(thread); - result = WaitForSingleObject(thread.handle, INFINITE); switch (result) { case WAIT_OBJECT_0: - if (value_ptr) *value_ptr = thread.arg; return 0; case WAIT_ABANDONED: return EINVAL; @@ -73,3 +131,66 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr) } #endif /* ZSTD_MULTITHREAD */ + +#if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32) + +#define ZSTD_DEPS_NEED_MALLOC +#include "zstd_deps.h" + +int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr) +{ + assert(mutex != NULL); + *mutex = (pthread_mutex_t*)ZSTD_malloc(sizeof(pthread_mutex_t)); + if (!*mutex) + return 1; + { + int const ret = pthread_mutex_init(*mutex, attr); + if (ret != 0) { + ZSTD_free(*mutex); + *mutex = NULL; + } + return ret; + } +} + +int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex) +{ + assert(mutex != NULL); + if (!*mutex) + return 0; + { + int const ret = pthread_mutex_destroy(*mutex); + ZSTD_free(*mutex); + return ret; + } +} + +int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr) +{ + assert(cond != NULL); + *cond = (pthread_cond_t*)ZSTD_malloc(sizeof(pthread_cond_t)); + if (!*cond) + return 1; + { + int 
const ret = pthread_cond_init(*cond, attr); + if (ret != 0) { + ZSTD_free(*cond); + *cond = NULL; + } + return ret; + } +} + +int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond) +{ + assert(cond != NULL); + if (!*cond) + return 0; + { + int const ret = pthread_cond_destroy(*cond); + ZSTD_free(*cond); + return ret; + } +} + +#endif diff --git a/lib/common/threading.h b/lib/common/threading.h index d806c89d01c..e123cdf14a3 100644 --- a/lib/common/threading.h +++ b/lib/common/threading.h @@ -2,26 +2,24 @@ * Copyright (c) 2016 Tino Reichardt * All rights reserved. * + * You can contact the author at: + * - zstdmt source repository: https://github.com/mcmilk/zstdmt + * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). - * - * You can contact the author at: - * - zstdmt source repository: https://github.com/mcmilk/zstdmt + * You may select, at your option, one of the above-listed licenses. */ #ifndef THREADING_H_938743 #define THREADING_H_938743 -#if defined (__cplusplus) -extern "C" { -#endif +#include "debug.h" #if defined(ZSTD_MULTITHREAD) && defined(_WIN32) /** - * Windows minimalist Pthread Wrapper, based on : - * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + * Windows minimalist Pthread Wrapper */ #ifdef WINVER # undef WINVER @@ -59,26 +57,23 @@ extern "C" { #define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a)) /* ZSTD_pthread_create() and ZSTD_pthread_join() */ -typedef struct { - HANDLE handle; - void* (*start_routine)(void*); - void* arg; -} ZSTD_pthread_t; +typedef HANDLE ZSTD_pthread_t; int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, void* (*start_routine) (void*), void* arg); -int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr); +int ZSTD_pthread_join(ZSTD_pthread_t thread); /** * add here more wrappers as required */ - -#elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */ +#elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */ /* === POSIX Systems === */ # include <pthread.h> +#if DEBUGLEVEL < 1 + #define ZSTD_pthread_mutex_t pthread_mutex_t #define ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b)) #define ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a)) @@ -94,7 +89,34 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr); #define ZSTD_pthread_t pthread_t #define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d)) -#define ZSTD_pthread_join(a, b) pthread_join((a),(b)) +#define ZSTD_pthread_join(a) pthread_join((a),NULL) + +#else /* DEBUGLEVEL >= 1 */ + +/* Debug implementation of threading. + * In this implementation we use pointers for mutexes and condition variables. + * This way, if we forget to init/destroy them the program will crash or ASAN + * will report leaks.
+ */ + +#define ZSTD_pthread_mutex_t pthread_mutex_t* +int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr); +int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex); +#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock(*(a)) +#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock(*(a)) + +#define ZSTD_pthread_cond_t pthread_cond_t* +int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr); +int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond); +#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait(*(a), *(b)) +#define ZSTD_pthread_cond_signal(a) pthread_cond_signal(*(a)) +#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast(*(a)) + +#define ZSTD_pthread_t pthread_t +#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d)) +#define ZSTD_pthread_join(a) pthread_join((a),NULL) + +#endif #else /* ZSTD_MULTITHREAD not defined */ /* No multithreading support */ @@ -116,8 +138,5 @@ typedef int ZSTD_pthread_cond_t; #endif /* ZSTD_MULTITHREAD */ -#if defined (__cplusplus) -} -#endif #endif /* THREADING_H_938743 */ diff --git a/lib/common/xxhash.c b/lib/common/xxhash.c index 30599aaae41..052cd522824 100644 --- a/lib/common/xxhash.c +++ b/lib/common/xxhash.c @@ -1,876 +1,18 @@ /* -* xxHash - Fast Hash algorithm -* Copyright (C) 2012-2016, Yann Collet -* -* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are -* met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following disclaimer -* in the documentation and/or other materials provided with the -* distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -* -* You can contact the author at : -* - xxHash homepage: http://www.xxhash.com -* - xxHash source repository : https://github.com/Cyan4973/xxHash -*/ - - -/* ************************************* -* Tuning parameters -***************************************/ -/*!XXH_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). 
- * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method doesn't depend on compiler but violate C standard. - * It can generate buggy code on targets which do not support unaligned memory accesses. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://stackoverflow.com/a/32095106/646947 for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) -# define XXH_FORCE_MEMORY_ACCESS 2 -# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ - (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) -# define XXH_FORCE_MEMORY_ACCESS 1 -# endif -#endif - -/*!XXH_ACCEPT_NULL_INPUT_POINTER : - * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer. - * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input. - * By default, this option is disabled. To enable it, uncomment below define : - */ -/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */ - -/*!XXH_FORCE_NATIVE_FORMAT : - * By default, xxHash library provides endian-independent Hash values, based on little-endian convention. - * Results are therefore identical for little-endian and big-endian CPU. - * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. - * Should endian-independence be of no importance for your application, you may set the #define below to 1, - * to improve speed for Big-endian CPU. - * This option has no impact on Little_Endian CPU. + * xxHash - Extremely Fast Hash algorithm + * Copyright (c) Yann Collet - Meta Platforms, Inc + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ -#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ -# define XXH_FORCE_NATIVE_FORMAT 0 -#endif -/*!XXH_FORCE_ALIGN_CHECK : - * This is a minor performance trick, only useful with lots of very small keys. - * It means : check for aligned/unaligned input. - * The check costs one initial branch per hash; set to 0 when the input data - * is guaranteed to be aligned. 
+/* + * xxhash.c instantiates functions defined in xxhash.h */ -#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ -# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) -# define XXH_FORCE_ALIGN_CHECK 0 -# else -# define XXH_FORCE_ALIGN_CHECK 1 -# endif -#endif - -/* ************************************* -* Includes & Memory related functions -***************************************/ -/* Modify the local functions below should you wish to use some other memory routines */ -/* for malloc(), free() */ -#include <stdlib.h> -#include <stddef.h> /* size_t */ -static void* XXH_malloc(size_t s) { return malloc(s); } -static void XXH_free (void* p) { free(p); } -/* for memcpy() */ -#include <string.h> -static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } +#define XXH_STATIC_LINKING_ONLY /* access advanced declarations */ +#define XXH_IMPLEMENTATION /* access definitions */ -#ifndef XXH_STATIC_LINKING_ONLY -# define XXH_STATIC_LINKING_ONLY -#endif #include "xxhash.h" - - -/* ************************************* -* Compiler Specific Options -***************************************/ -#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ -# define INLINE_KEYWORD inline -#else -# define INLINE_KEYWORD -#endif - -#if defined(__GNUC__) -# define FORCE_INLINE_ATTR __attribute__((always_inline)) -#elif defined(_MSC_VER) -# define FORCE_INLINE_ATTR __forceinline -#else -# define FORCE_INLINE_ATTR -#endif - -#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR - - -#ifdef _MSC_VER -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -#endif - - -/* ************************************* -* Basic Types -***************************************/ -#ifndef MEM_MODULE -# define MEM_MODULE -# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include <stdint.h> - typedef uint8_t BYTE; - typedef uint16_t U16; - typedef uint32_t U32; - typedef int32_t S32; - typedef uint64_t U64; -# else - typedef unsigned char BYTE; - typedef unsigned short U16; - typedef unsigned int U32; - typedef signed int S32; - typedef unsigned long long U64; /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */ -# endif -#endif - - -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) - -/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ -static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } -static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } - -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign; - -static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -#else - -/* portable and safe solution. Generally efficient.
- * see : http://stackoverflow.com/a/32095106/646947 - */ - -static U32 XXH_read32(const void* memPtr) -{ - U32 val; - memcpy(&val, memPtr, sizeof(val)); - return val; -} - -static U64 XXH_read64(const void* memPtr) -{ - U64 val; - memcpy(&val, memPtr, sizeof(val)); - return val; -} - -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ - - -/* **************************************** -* Compiler-specific Functions and Macros -******************************************/ -#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) - -/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ -#if defined(_MSC_VER) -# define XXH_rotl32(x,r) _rotl(x,r) -# define XXH_rotl64(x,r) _rotl64(x,r) -#else -# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) -# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) -#endif - -#if defined(_MSC_VER) /* Visual Studio */ -# define XXH_swap32 _byteswap_ulong -# define XXH_swap64 _byteswap_uint64 -#elif GCC_VERSION >= 403 -# define XXH_swap32 __builtin_bswap32 -# define XXH_swap64 __builtin_bswap64 -#else -static U32 XXH_swap32 (U32 x) -{ - return ((x << 24) & 0xff000000 ) | - ((x << 8) & 0x00ff0000 ) | - ((x >> 8) & 0x0000ff00 ) | - ((x >> 24) & 0x000000ff ); -} -static U64 XXH_swap64 (U64 x) -{ - return ((x << 56) & 0xff00000000000000ULL) | - ((x << 40) & 0x00ff000000000000ULL) | - ((x << 24) & 0x0000ff0000000000ULL) | - ((x << 8) & 0x000000ff00000000ULL) | - ((x >> 8) & 0x00000000ff000000ULL) | - ((x >> 24) & 0x0000000000ff0000ULL) | - ((x >> 40) & 0x000000000000ff00ULL) | - ((x >> 56) & 0x00000000000000ffULL); -} -#endif - - -/* ************************************* -* Architecture Macros -***************************************/ -typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; - -/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ -#ifndef XXH_CPU_LITTLE_ENDIAN - static const int g_one = 1; -# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one)) -#endif - - -/* *************************** -* Memory reads -*****************************/ -typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; - -FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) -{ - if (align==XXH_unaligned) - return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); - else - return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); -} - -FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) -{ - return XXH_readLE32_align(ptr, endian, XXH_unaligned); -} - -static U32 XXH_readBE32(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); -} - -FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) -{ - if (align==XXH_unaligned) - return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); - else - return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); -} - -FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) -{ - return XXH_readLE64_align(ptr, endian, XXH_unaligned); -} - -static U64 XXH_readBE64(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); -} - - -/* ************************************* -* Macros -***************************************/ -#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ - - -/* ************************************* -* Constants -***************************************/ -static const U32 PRIME32_1 = 2654435761U; -static const U32 PRIME32_2 = 2246822519U; -static const U32 PRIME32_3 = 3266489917U; -static const U32 PRIME32_4 = 668265263U; -static const U32 PRIME32_5 = 374761393U; - -static const U64 PRIME64_1 = 11400714785074694791ULL; -static const U64 PRIME64_2 = 14029467366897019727ULL; -static const U64 PRIME64_3 = 1609587929392839161ULL; -static const U64 PRIME64_4 = 9650029242287828579ULL; -static const U64 PRIME64_5 = 2870177450012600261ULL; - -XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } - - -/* ************************** -* Utils -****************************/ -XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState) -{ - memcpy(dstState, srcState, sizeof(*dstState)); -} - -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState) -{ - memcpy(dstState, srcState, sizeof(*dstState)); -} - - -/* *************************** -* Simple Hash Functions -*****************************/ - -static U32 XXH32_round(U32 seed, U32 input) -{ - seed += input * PRIME32_2; - seed = XXH_rotl32(seed, 13); - seed *= PRIME32_1; - return seed; -} - -FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* bEnd = p + len; - U32 h32; -#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (p==NULL) { - len=0; - bEnd=p=(const BYTE*)(size_t)16; - } -#endif - - if (len>=16) { - const BYTE* const limit = bEnd - 16; - U32 v1 = seed + PRIME32_1 + PRIME32_2; - U32 v2 = seed + PRIME32_2; - U32 v3 = seed + 0; - U32 v4 = seed - PRIME32_1; - - do { - v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; - v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; - v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; - v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; - } while (p<=limit); - - h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); - } else { - h32 = seed + PRIME32_5; - } - - h32 += (U32) len; - - while (p+4<=bEnd) { - h32 += XXH_get32bits(p) * PRIME32_3; - h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; - p+=4; - } - - while (p<bEnd) { - h32 += (*p) * PRIME32_5; - h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; - p++; - } - - h32 ^= h32 >> 15; - h32 *= PRIME32_2; - h32 ^= h32 >> 13; - h32 *= PRIME32_3; - h32 ^= h32 >> 16; - - return h32; -} - - -XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) -{ -#if 0 - /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ - XXH32_CREATESTATE_STATIC(state); - XXH32_reset(state, seed); - XXH32_update(state, input, len); - return XXH32_digest(state); -#else - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); - else - return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); - } } - - if
((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); - else - return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); -#endif -} - - -static U64 XXH64_round(U64 acc, U64 input) -{ - acc += input * PRIME64_2; - acc = XXH_rotl64(acc, 31); - acc *= PRIME64_1; - return acc; -} - -static U64 XXH64_mergeRound(U64 acc, U64 val) -{ - val = XXH64_round(0, val); - acc ^= val; - acc = acc * PRIME64_1 + PRIME64_4; - return acc; -} - -FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - U64 h64; -#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (p==NULL) { - len=0; - bEnd=p=(const BYTE*)(size_t)32; - } -#endif - - if (len>=32) { - const BYTE* const limit = bEnd - 32; - U64 v1 = seed + PRIME64_1 + PRIME64_2; - U64 v2 = seed + PRIME64_2; - U64 v3 = seed + 0; - U64 v4 = seed - PRIME64_1; - - do { - v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; - v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; - v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; - v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; - } while (p<=limit); - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - - } else { - h64 = seed + PRIME64_5; - } - - h64 += (U64) len; - - while (p+8<=bEnd) { - U64 const k1 = XXH64_round(0, XXH_get64bits(p)); - h64 ^= k1; - h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; - p+=8; - } - - if (p+4<=bEnd) { - h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; - h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; - p+=4; - } - - while (p<bEnd) { - h64 ^= (*p) * PRIME64_5; - h64 = XXH_rotl64(h64, 11) * PRIME64_1; - p++; - } - - h64 ^= h64 >> 33; - h64 *= PRIME64_2; - h64 ^= h64 >> 29; - h64 *= PRIME64_3; - h64 ^= h64 >> 32; - - return h64; -} - - -XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) -{ -#if 0 - /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ - XXH64_CREATESTATE_STATIC(state); - XXH64_reset(state, seed); - XXH64_update(state, input, len); - return XXH64_digest(state); -#else - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); - else - return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); - } } - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); - else - return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); -#endif -} - - -/* ************************************************** -* Advanced Hash Functions -****************************************************/ - -XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) -{ - return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); -} -XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; -} - -XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) -{ - return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); -}
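The byte-tail loops and the xorshift/multiply "avalanche" sequences restored in the two removed routines above are the heart of both hash functions: after the wide striped loop, any remaining bytes are folded in one at a time, then the accumulator is scrambled so that every input bit can influence every output bit. A standalone sketch of the 64-bit avalanche step (illustrative only; the function name is hypothetical and the primes are written inline):

#include <stdint.h>

/* Final mixing step, mirroring the tail of the removed XXH64 code above:
 * alternate xorshifts with multiplications by the XXH64 primes. */
static uint64_t xxh64_avalanche_sketch(uint64_t h64)
{
    h64 ^= h64 >> 33;
    h64 *= 14029467366897019727ULL; /* PRIME64_2 */
    h64 ^= h64 >> 29;
    h64 *= 1609587929392839161ULL;  /* PRIME64_3 */
    h64 ^= h64 >> 32;               /* final shift, no multiply after it */
    return h64;
}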
-XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; -} - - -/*** Hash feed ***/ - -XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) -{ - XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ - memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */ - state.v1 = seed + PRIME32_1 + PRIME32_2; - state.v2 = seed + PRIME32_2; - state.v3 = seed + 0; - state.v4 = seed - PRIME32_1; - memcpy(statePtr, &state, sizeof(state)); - return XXH_OK; -} - - -XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) -{ - XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ - memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */ - state.v1 = seed + PRIME64_1 + PRIME64_2; - state.v2 = seed + PRIME64_2; - state.v3 = seed + 0; - state.v4 = seed - PRIME64_1; - memcpy(statePtr, &state, sizeof(state)); - return XXH_OK; -} - - -FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (input==NULL) return XXH_ERROR; -#endif - - state->total_len_32 += (unsigned)len; - state->large_len |= (len>=16) | (state->total_len_32>=16); - - if (state->memsize + len < 16) { /* fill in tmp buffer */ - XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); - state->memsize += (unsigned)len; - return XXH_OK; - } - - if (state->memsize) { /* some data left from previous update */ - XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); - { const U32* p32 = state->mem32; - state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; - state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; - state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; - state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++; - } - p += 16-state->memsize; - state->memsize = 0; - } - - if (p <= bEnd-16) { - const BYTE* const limit = bEnd - 16; - U32 v1 = state->v1; - U32 v2 = state->v2; - U32 v3 = state->v3; - U32 v4 = state->v4; - - do { - v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; - v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; - v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; - v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; - } while (p<=limit); - - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } - - if (p < bEnd) { - XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } - - return XXH_OK; -} - -XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_update_endian(state_in, input, len, XXH_littleEndian); - else - return XXH32_update_endian(state_in, input, len, XXH_bigEndian); -} - - - -FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) -{ - const BYTE * p = (const BYTE*)state->mem32; - const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize; - U32 h32; - - if (state->large_len) { - h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + 
XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18); - } else { - h32 = state->v3 /* == seed */ + PRIME32_5; - } - - h32 += state->total_len_32; - - while (p+4<=bEnd) { - h32 += XXH_readLE32(p, endian) * PRIME32_3; - h32 = XXH_rotl32(h32, 17) * PRIME32_4; - p+=4; - } - - while (p<bEnd) { - h32 += (*p) * PRIME32_5; - h32 = XXH_rotl32(h32, 11) * PRIME32_1; - p++; - } - - h32 ^= h32 >> 15; - h32 *= PRIME32_2; - h32 ^= h32 >> 13; - h32 *= PRIME32_3; - h32 ^= h32 >> 16; - - return h32; -} - - -XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_digest_endian(state_in, XXH_littleEndian); - else - return XXH32_digest_endian(state_in, XXH_bigEndian); -} - - - -/* **** XXH64 **** */ - -FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (input==NULL) return XXH_ERROR; -#endif - - state->total_len += len; - - if (state->memsize + len < 32) { /* fill in tmp buffer */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); - state->memsize += (U32)len; - return XXH_OK; - } - - if (state->memsize) { /* tmp buffer is full */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); - state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); - state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); - state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); - state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); - p += 32-state->memsize; - state->memsize = 0; - } - - if (p+32 <= bEnd) { - const BYTE* const limit = bEnd - 32; - U64 v1 = state->v1; - U64 v2 = state->v2; - U64 v3 = state->v3; - U64 v4 = state->v4; - - do { - v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; - v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; - v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; - v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; - } while (p<=limit); - - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } - - if (p < bEnd) { - XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } - - return XXH_OK; -} - -XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_update_endian(state_in, input, len, XXH_littleEndian); - else - return XXH64_update_endian(state_in, input, len, XXH_bigEndian); -} - - - -FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) -{ - const BYTE * p = (const BYTE*)state->mem64; - const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize; - U64 h64; - - if (state->total_len >= 32) { - U64 const v1 = state->v1; - U64 const v2 = state->v2; - U64 const v3 = state->v3; - U64 const v4 = state->v4; - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - } else { - h64 = state->v3 + PRIME64_5; - } - - h64 += (U64) state->total_len; - - while (p+8<=bEnd) { - U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian)); - h64 ^= k1;
- h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; - p+=8; - } - - if (p+4<=bEnd) { - h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1; - h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; - p+=4; - } - - while (p<bEnd) { - h64 ^= (*p) * PRIME64_5; - h64 = XXH_rotl64(h64, 11) * PRIME64_1; - p++; - } - - h64 ^= h64 >> 33; - h64 *= PRIME64_2; - h64 ^= h64 >> 29; - h64 *= PRIME64_3; - h64 ^= h64 >> 32; - - return h64; -} - - -XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_digest_endian(state_in, XXH_littleEndian); - else - return XXH64_digest_endian(state_in, XXH_bigEndian); -} - - -/* ************************** -* Canonical representation -****************************/ - -/*! Default XXH result types are basic unsigned 32 and 64 bits. -* The canonical representation follows human-readable write convention, aka big-endian (large digits first). -* These functions allow transformation of hash result into and from its canonical format. -* This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs. -*/ - -XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); - memcpy(dst, &hash, sizeof(*dst)); -} - -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); - memcpy(dst, &hash, sizeof(*dst)); -} - -XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) -{ - return XXH_readBE32(src); -} - -XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) -{ - return XXH_readBE64(src); -} diff --git a/lib/common/xxhash.h b/lib/common/xxhash.h index 9bad1f59f63..b6af402fdf4 100644 --- a/lib/common/xxhash.h +++ b/lib/common/xxhash.h @@ -1,100 +1,319 @@ /* - xxHash - Extremely Fast Hash algorithm - Header File - Copyright (C) 2012-2016, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- - You can contact the author at : - - xxHash source repository : https://github.com/Cyan4973/xxHash -*/ - -/* Notice extracted from xxHash homepage : - -xxHash is an extremely fast Hash algorithm, running at RAM speed limits. -It also successfully passes all tests from the SMHasher suite. - -Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) - -Name Speed Q.Score Author -xxHash 5.4 GB/s 10 -CrapWow 3.2 GB/s 2 Andrew -MumurHash 3a 2.7 GB/s 10 Austin Appleby -SpookyHash 2.0 GB/s 10 Bob Jenkins -SBox 1.4 GB/s 9 Bret Mulvey -Lookup3 1.2 GB/s 9 Bob Jenkins -SuperFastHash 1.2 GB/s 1 Paul Hsieh -CityHash64 1.05 GB/s 10 Pike & Alakuijala -FNV 0.55 GB/s 5 Fowler, Noll, Vo -CRC32 0.43 GB/s 9 -MD5-32 0.33 GB/s 10 Ronald L. Rivest -SHA1-32 0.28 GB/s 10 - -Q.Score is a measure of quality of the hash function. -It depends on successfully passing SMHasher test set. -10 is a perfect score. - -A 64-bits version, named XXH64, is available since r35. -It offers much better speed, but for 64-bits applications only. -Name Speed on 64 bits Speed on 32 bits -XXH64 13.8 GB/s 1.9 GB/s -XXH32 6.8 GB/s 6.0 GB/s -*/ + * xxHash - Extremely Fast Hash algorithm + * Header File + * Copyright (c) Yann Collet - Meta Platforms, Inc + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ -#if defined (__cplusplus) -extern "C" { +/* Local adaptations for Zstandard */ + +#ifndef XXH_NO_XXH3 +# define XXH_NO_XXH3 #endif -#ifndef XXHASH_H_5627135585666179 -#define XXHASH_H_5627135585666179 1 +#ifndef XXH_NAMESPACE +# define XXH_NAMESPACE ZSTD_ +#endif +/*! + * @mainpage xxHash + * + * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed + * limits. + * + * It is proposed in four flavors, in three families: + * 1. @ref XXH32_family + * - Classic 32-bit hash function. Simple, compact, and runs on almost all + * 32-bit and 64-bit systems. + * 2. @ref XXH64_family + * - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most + * 64-bit systems (but _not_ 32-bit systems). + * 3. @ref XXH3_family + * - Modern 64-bit and 128-bit hash function family which features improved + * strength and performance across the board, especially on smaller data. + * It benefits greatly from SIMD and 64-bit without requiring it. + * + * Benchmarks + * --- + * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04. + * The open source benchmark program is compiled with clang v10.0 using -O3 flag. 
+ * + * | Hash Name | ISA ext | Width | Large Data Speed | Small Data Velocity | + * | -------------------- | ------- | ----: | ---------------: | ------------------: | + * | XXH3_64bits() | @b AVX2 | 64 | 59.4 GB/s | 133.1 | + * | MeowHash | AES-NI | 128 | 58.2 GB/s | 52.5 | + * | XXH3_128bits() | @b AVX2 | 128 | 57.9 GB/s | 118.1 | + * | CLHash | PCLMUL | 64 | 37.1 GB/s | 58.1 | + * | XXH3_64bits() | @b SSE2 | 64 | 31.5 GB/s | 133.1 | + * | XXH3_128bits() | @b SSE2 | 128 | 29.6 GB/s | 118.1 | + * | RAM sequential read | | N/A | 28.0 GB/s | N/A | + * | ahash | AES-NI | 64 | 22.5 GB/s | 107.2 | + * | City64 | | 64 | 22.0 GB/s | 76.6 | + * | T1ha2 | | 64 | 22.0 GB/s | 99.0 | + * | City128 | | 128 | 21.7 GB/s | 57.7 | + * | FarmHash | AES-NI | 64 | 21.3 GB/s | 71.9 | + * | XXH64() | | 64 | 19.4 GB/s | 71.0 | + * | SpookyHash | | 64 | 19.3 GB/s | 53.2 | + * | Mum | | 64 | 18.0 GB/s | 67.0 | + * | CRC32C | SSE4.2 | 32 | 13.0 GB/s | 57.9 | + * | XXH32() | | 32 | 9.7 GB/s | 71.9 | + * | City32 | | 32 | 9.1 GB/s | 66.0 | + * | Blake3* | @b AVX2 | 256 | 4.4 GB/s | 8.1 | + * | Murmur3 | | 32 | 3.9 GB/s | 56.1 | + * | SipHash* | | 64 | 3.0 GB/s | 43.2 | + * | Blake3* | @b SSE2 | 256 | 2.4 GB/s | 8.1 | + * | HighwayHash | | 64 | 1.4 GB/s | 6.0 | + * | FNV64 | | 64 | 1.2 GB/s | 62.7 | + * | Blake2* | | 256 | 1.1 GB/s | 5.1 | + * | SHA1* | | 160 | 0.8 GB/s | 5.6 | + * | MD5* | | 128 | 0.6 GB/s | 7.8 | + * @note + * - Hashes which require a specific ISA extension are noted. SSE2 is also noted, + * even though it is mandatory on x64. + * - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic + * by modern standards. + * - Small data velocity is a rough average of algorithm's efficiency for small + * data. For more accurate information, see the wiki. + * - More benchmarks and strength tests are found on the wiki: + * https://github.com/Cyan4973/xxHash/wiki + * + * Usage + * ------ + * All xxHash variants use a similar API. Changing the algorithm is a trivial + * substitution. + * + * @pre + * For functions which take an input and length parameter, the following + * requirements are assumed: + * - The range from [`input`, `input + length`) is valid, readable memory. + * - The only exception is if the `length` is `0`, `input` may be `NULL`. + * - For C++, the objects must have the *TriviallyCopyable* property, as the + * functions access bytes directly as if it was an array of `unsigned char`. + * + * @anchor single_shot_example + * **Single Shot** + * + * These functions are stateless functions which hash a contiguous block of memory, + * immediately returning the result. They are the easiest and usually the fastest + * option. + * + * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits() + * + * @code{.c} + * #include <string.h> + * #include "xxhash.h" + * + * // Example for a function which hashes a null terminated string with XXH32(). + * XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed) + * { + * // NULL pointers are only valid if the length is zero + * size_t length = (string == NULL) ? 0 : strlen(string); + * return XXH32(string, length, seed); + * } + * @endcode + * + * + * @anchor streaming_example + * **Streaming** + * + * These groups of functions allow incremental hashing of unknown size, even + * more than what would fit in a size_t.
+ * + * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset() + * + * @code{.c} + * #include <stdio.h> + * #include <assert.h> + * #include "xxhash.h" + * // Example for a function which hashes a FILE incrementally with XXH3_64bits(). + * XXH64_hash_t hashFile(FILE* f) + * { + * // Allocate a state struct. Do not just use malloc() or new. + * XXH3_state_t* state = XXH3_createState(); + * assert(state != NULL && "Out of memory!"); + * // Reset the state to start a new hashing session. + * XXH3_64bits_reset(state); + * char buffer[4096]; + * size_t count; + * // Read the file in chunks + * while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) { + * // Run update() as many times as necessary to process the data + * XXH3_64bits_update(state, buffer, count); + * } + * // Retrieve the finalized hash. This will not change the state. + * XXH64_hash_t result = XXH3_64bits_digest(state); + * // Free the state. Do not use free(). + * XXH3_freeState(state); + * return result; + * } + * @endcode + * + * Streaming functions generate the xxHash value from an incremental input. + * This method is slower than single-call functions, due to state management. + * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. + * + * An XXH state must first be allocated using `XXH*_createState()`. + * + * Start a new hash by initializing the state with a seed using `XXH*_reset()`. + * + * Then, feed the hash state by calling `XXH*_update()` as many times as necessary. + * + * The function returns an error code, with 0 meaning OK, and any other value + * meaning there is an error. + * + * Finally, a hash value can be produced anytime, by using `XXH*_digest()`. + * This function returns the nn-bits hash as an int or long long. + * + * It's still possible to continue inserting input into the hash state after a + * digest, and generate new hash values later on by invoking `XXH*_digest()`. + * + * When done, release the state using `XXH*_freeState()`. + * + * + * @anchor canonical_representation_example + * **Canonical Representation** + * + * The default return values from XXH functions are unsigned 32, 64 and 128 bit + * integers. + * This is the simplest and fastest format for further post-processing. + * + * However, this leaves open the question of what is the order on the byte level, + * since little and big endian conventions will store the same number differently. + * + * The canonical representation settles this issue by mandating big-endian + * convention, the same convention as human-readable numbers (large digits first). + * + * When writing hash values to storage, sending them over a network, or printing + * them, it's highly recommended to use the canonical representation to ensure + * portability across a wider range of systems, present and future. + * + * The following functions allow transformation of hash values to and from + * canonical format.
+ * + * XXH32_canonicalFromHash(), XXH32_hashFromCanonical(), + * XXH64_canonicalFromHash(), XXH64_hashFromCanonical(), + * XXH128_canonicalFromHash(), XXH128_hashFromCanonical(), + * + * @code{.c} + * #include <stdio.h> + * #include "xxhash.h" + * + * // Example for a function which prints XXH32_hash_t in human readable format + * void printXxh32(XXH32_hash_t hash) + * { + * XXH32_canonical_t cano; + * XXH32_canonicalFromHash(&cano, hash); + * size_t i; + * for(i = 0; i < sizeof(cano.digest); ++i) { + * printf("%02x", cano.digest[i]); + * } + * printf("\n"); + * } + * + * // Example for a function which converts XXH32_canonical_t to XXH32_hash_t + * XXH32_hash_t convertCanonicalToXxh32(XXH32_canonical_t cano) + * { + * XXH32_hash_t hash = XXH32_hashFromCanonical(&cano); + * return hash; + * } + * @endcode + * + * + * @file xxhash.h + * xxHash prototypes and implementation + */ /* **************************** -* Definitions -******************************/ -#include <stddef.h> /* size_t */ -typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; + * INLINE mode + ******************************/ +/*! + * @defgroup public Public API + * Contains details on the public xxHash functions. + * @{ + */ +#ifdef XXH_DOXYGEN +/*! + * @brief Gives access to internal state declaration, required for static allocation. + * + * Incompatible with dynamic linking, due to risks of ABI changes. + * + * Usage: + * @code{.c} + * #define XXH_STATIC_LINKING_ONLY + * #include "xxhash.h" + * @endcode + */ +# define XXH_STATIC_LINKING_ONLY +/* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */ +/*! + * @brief Gives access to internal definitions. + * + * Usage: + * @code{.c} + * #define XXH_STATIC_LINKING_ONLY + * #define XXH_IMPLEMENTATION + * #include "xxhash.h" + * @endcode + */ +# define XXH_IMPLEMENTATION +/* Do not undef XXH_IMPLEMENTATION for Doxygen */ /* **************************** -* API modifier -******************************/ -/** XXH_PRIVATE_API -* This is useful if you want to include xxhash functions in `static` mode -* in order to inline them, and remove their symbol from the public list. -* Methodology : -* #define XXH_PRIVATE_API -* #include "xxhash.h" -* `xxhash.c` is automatically included. -* It's not useful to compile and link it as a separate module anymore. -*/ -#ifdef XXH_PRIVATE_API -# ifndef XXH_STATIC_LINKING_ONLY -# define XXH_STATIC_LINKING_ONLY -# endif +/*! + * @brief Exposes the implementation and marks all functions as `inline`. + * + * Use these build macros to inline xxhash into the target unit. + * Inlining improves performance on small inputs, especially when the length is + * expressed as a compile-time constant: + * + * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html + * + * It also keeps xxHash symbols private to the unit, so they are not exported. + * + * Usage: + * @code{.c} + * #define XXH_INLINE_ALL + * #include "xxhash.h" + * @endcode + * Do not compile and link xxhash.o as a separate object, as it is not useful. + */ +# define XXH_INLINE_ALL +# undef XXH_INLINE_ALL +/*! + * @brief Exposes the implementation without marking functions as inline. + */ +# define XXH_PRIVATE_API +# undef XXH_PRIVATE_API +/*! + * @brief Emulate a namespace by transparently prefixing all symbols.
+ * + * If you want to include _and expose_ xxHash functions from within your own + * library, but also want to avoid symbol collisions with other libraries which + * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix + * any public symbol from xxhash library with the value of @ref XXH_NAMESPACE + * (therefore, avoid empty or numeric values). + * + * Note that no change is required within the calling program as long as it + * includes `xxhash.h`: Regular symbol names will be automatically translated + * by this header. + */ +# define XXH_NAMESPACE /* YOUR NAME HERE */ +# undef XXH_NAMESPACE +#endif + +#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \ + && !defined(XXH_INLINE_ALL_31684351384) + /* this section should be traversed only once */ +# define XXH_INLINE_ALL_31684351384 + /* give access to the advanced API, required to compile implementations */ +# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */ +# define XXH_STATIC_LINKING_ONLY + /* make all functions private */ +# undef XXH_PUBLIC_API # if defined(__GNUC__) # define XXH_PUBLIC_API static __inline __attribute__((unused)) # elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) @@ -102,204 +321,6774 @@ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; # elif defined(_MSC_VER) # define XXH_PUBLIC_API static __inline # else -# define XXH_PUBLIC_API static /* this version may generate warnings for unused static functions; disable the relevant warning */ + /* note: this version may generate warnings for unused static functions */ +# define XXH_PUBLIC_API static # endif -#else -# define XXH_PUBLIC_API /* do nothing */ -#endif /* XXH_PRIVATE_API */ -/*!XXH_NAMESPACE, aka Namespace Emulation : + /* + * This part deals with the special case where a unit wants to inline xxHash, + * but "xxhash.h" has previously been included without XXH_INLINE_ALL, + * such as part of some previously included *.h header file. + * Without further action, the new include would just be ignored, + * and functions would effectively _not_ be inlined (silent failure). + * The following macros solve this situation by prefixing all inlined names, + * avoiding naming collision with previous inclusions. + */ + /* Before that, we unconditionally #undef all symbols, + * in case they were already defined with XXH_NAMESPACE. 
+ * They will then be redefined for XXH_INLINE_ALL + */ +# undef XXH_versionNumber + /* XXH32 */ +# undef XXH32 +# undef XXH32_createState +# undef XXH32_freeState +# undef XXH32_reset +# undef XXH32_update +# undef XXH32_digest +# undef XXH32_copyState +# undef XXH32_canonicalFromHash +# undef XXH32_hashFromCanonical + /* XXH64 */ +# undef XXH64 +# undef XXH64_createState +# undef XXH64_freeState +# undef XXH64_reset +# undef XXH64_update +# undef XXH64_digest +# undef XXH64_copyState +# undef XXH64_canonicalFromHash +# undef XXH64_hashFromCanonical + /* XXH3_64bits */ +# undef XXH3_64bits +# undef XXH3_64bits_withSecret +# undef XXH3_64bits_withSeed +# undef XXH3_64bits_withSecretandSeed +# undef XXH3_createState +# undef XXH3_freeState +# undef XXH3_copyState +# undef XXH3_64bits_reset +# undef XXH3_64bits_reset_withSeed +# undef XXH3_64bits_reset_withSecret +# undef XXH3_64bits_update +# undef XXH3_64bits_digest +# undef XXH3_generateSecret + /* XXH3_128bits */ +# undef XXH128 +# undef XXH3_128bits +# undef XXH3_128bits_withSeed +# undef XXH3_128bits_withSecret +# undef XXH3_128bits_reset +# undef XXH3_128bits_reset_withSeed +# undef XXH3_128bits_reset_withSecret +# undef XXH3_128bits_reset_withSecretandSeed +# undef XXH3_128bits_update +# undef XXH3_128bits_digest +# undef XXH128_isEqual +# undef XXH128_cmp +# undef XXH128_canonicalFromHash +# undef XXH128_hashFromCanonical + /* Finally, free the namespace itself */ +# undef XXH_NAMESPACE + + /* employ the namespace for XXH_INLINE_ALL */ +# define XXH_NAMESPACE XXH_INLINE_ + /* + * Some identifiers (enums, type names) are not symbols, + * but they must nonetheless be renamed to avoid redeclaration. + * Alternative solution: do not redeclare them. + * However, this requires some #ifdefs, and has a more dispersed impact. + * Meanwhile, renaming can be achieved in a single place. + */ +# define XXH_IPREF(Id) XXH_NAMESPACE ## Id +# define XXH_OK XXH_IPREF(XXH_OK) +# define XXH_ERROR XXH_IPREF(XXH_ERROR) +# define XXH_errorcode XXH_IPREF(XXH_errorcode) +# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t) +# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t) +# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t) +# define XXH32_state_s XXH_IPREF(XXH32_state_s) +# define XXH32_state_t XXH_IPREF(XXH32_state_t) +# define XXH64_state_s XXH_IPREF(XXH64_state_s) +# define XXH64_state_t XXH_IPREF(XXH64_state_t) +# define XXH3_state_s XXH_IPREF(XXH3_state_s) +# define XXH3_state_t XXH_IPREF(XXH3_state_t) +# define XXH128_hash_t XXH_IPREF(XXH128_hash_t) + /* Ensure the header is parsed again, even if it was previously included */ +# undef XXHASH_H_5627135585666179 +# undef XXHASH_H_STATIC_13879238742 +#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ -If you want to include _and expose_ xxHash functions from within your own library, -but also want to avoid symbol collisions with another library which also includes xxHash, +/* **************************************************************** + * Stable API + *****************************************************************/ +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 -you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library -with the value of XXH_NAMESPACE (so avoid to keep it NULL and avoid numeric values). +/*! @brief Marks a global symbol. 
*/ +#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) +# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT)) +# ifdef XXH_EXPORT +# define XXH_PUBLIC_API __declspec(dllexport) +# elif XXH_IMPORT +# define XXH_PUBLIC_API __declspec(dllimport) +# endif +# else +# define XXH_PUBLIC_API /* do nothing */ +# endif +#endif -Note that no change is required within the calling program as long as it includes `xxhash.h` : -regular symbol name will be automatically translated by this header. -*/ #ifdef XXH_NAMESPACE # define XXH_CAT(A,B) A##B # define XXH_NAME2(A,B) XXH_CAT(A,B) -# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) -# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) # define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +/* XXH32 */ +# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) # define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) -# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) # define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) -# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) # define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) -# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) # define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) -# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) # define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) -# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) # define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) -# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) # define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) -# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) # define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) +/* XXH64 */ +# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) +# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) +# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) +# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) +# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) +# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) +# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) +# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) # define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) +/* XXH3_64bits */ +# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits) +# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret) +# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed) +# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed) +# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState) +# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState) +# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState) +# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset) +# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed) +# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret) +# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed) +# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update) +# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest) +# define 
XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
+# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
+/* XXH3_128bits */
+# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
+# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
+# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
+# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
+# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
+# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
+# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
+# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
+# define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
+# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
+# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
+# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
+# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
+# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
+# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
+#endif
+
+
+/* *************************************
+* Compiler specifics
+***************************************/
+
+/* specific declaration modes for Windows */
+#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
+# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
+# ifdef XXH_EXPORT
+# define XXH_PUBLIC_API __declspec(dllexport)
+# elif XXH_IMPORT
+# define XXH_PUBLIC_API __declspec(dllimport)
+# endif
+# else
+# define XXH_PUBLIC_API /* do nothing */
+# endif
#endif
+#if defined (__GNUC__)
+# define XXH_CONSTF __attribute__((const))
+# define XXH_PUREF __attribute__((pure))
+# define XXH_MALLOCF __attribute__((malloc))
+#else
+# define XXH_CONSTF /* disable */
+# define XXH_PUREF
+# define XXH_MALLOCF
+#endif
/* *************************************
* Version
***************************************/
#define XXH_VERSION_MAJOR 0
-#define XXH_VERSION_MINOR 6
+#define XXH_VERSION_MINOR 8
#define XXH_VERSION_RELEASE 2
+/*! @brief Version number, encoded as two digits each */
#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
-XXH_PUBLIC_API unsigned XXH_versionNumber (void);
+#if defined (__cplusplus)
+extern "C" {
+#endif
+/*!
+ * @brief Obtains the xxHash version.
+ *
+ * This is mostly useful when xxHash is compiled as a shared library,
+ * since the returned value comes from the library, as opposed to header file.
+ *
+ * @return @ref XXH_VERSION_NUMBER of the invoked library.
+ */
+XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);
+
+#if defined (__cplusplus)
+}
+#endif
/* ****************************
-* Simple Hash Functions
+* Common basic types
******************************/
-typedef unsigned int XXH32_hash_t;
-typedef unsigned long long XXH64_hash_t;
+#include <stddef.h> /* size_t */
+/*!
+ * @brief Exit code for the streaming API.
+ */
+typedef enum {
+ XXH_OK = 0, /*!< OK */
+ XXH_ERROR /*!< Error */
+} XXH_errorcode;
-XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);
-XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);
+/*-**********************************************************************
+* 32-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
/*!
-XXH32() :
- Calculate the 32-bits hash of sequence "length" bytes stored at memory address "input".
- The memory between input & input+length must be valid (allocated and read-accessible).
- "seed" can be used to alter the result predictably.
- Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
-XXH64() :
- Calculate the 64-bits hash of sequence of length "len" stored at memory address "input".
- "seed" can be used to alter the result predictably.
- This function runs 2x faster on 64-bits systems, but slower on 32-bits systems (see benchmark).
-*/
+ * @brief An unsigned 32-bit integer.
+ *
+ * Not necessarily defined to `uint32_t` but functionally equivalent.
+ */
+typedef uint32_t XXH32_hash_t;
+#elif !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# ifdef _AIX
+# include <inttypes.h>
+# else
+# include <stdint.h>
+# endif
+ typedef uint32_t XXH32_hash_t;
-/* ****************************
-* Streaming Hash Functions
-******************************/
-typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */
-typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
+#else
+# include <limits.h>
+# if UINT_MAX == 0xFFFFFFFFUL
+ typedef unsigned int XXH32_hash_t;
+# elif ULONG_MAX == 0xFFFFFFFFUL
+ typedef unsigned long XXH32_hash_t;
+# else
+# error "unsupported platform: need a 32-bit type"
+# endif
+#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
-/*! State allocation, compatible with dynamic libraries */
+/*!
+ * @}
+ *
+ * @defgroup XXH32_family XXH32 family
+ * @ingroup public
+ * Contains functions used in the classic 32-bit xxHash algorithm.
+ *
+ * @note
+ * XXH32 is useful for older platforms, with no or poor 64-bit performance.
+ * Note that the @ref XXH3_family provides competitive speed for both 32-bit
+ * and 64-bit systems, and offers true 64/128 bit hash results.
+ *
+ * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families
+ * @see @ref XXH32_impl for implementation details
+ * @{
+ */
-XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
-XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
+/*!
+ * @brief Calculates the 32-bit hash of @p input using xxHash32.
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 32-bit seed to alter the hash's output predictably.
+ *
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 32-bit xxHash32 value.
+ *
+ * @see @ref single_shot_example "Single Shot Example" for an example.
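+ *
+ * As an illustrative sketch (the buffer contents and seed below are
+ * arbitrary, not mandated by the API):
+ * @code{.c}
+ * #include "xxhash.h"
+ * XXH32_hash_t hash_of_literal(void)
+ * {
+ *     const char buf[] = "example data";
+ *     return XXH32(buf, sizeof(buf) - 1, 0);  // seed = 0, NUL byte excluded
+ * }
+ * @endcode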
+ */ +XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed); -XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); -XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); +#ifndef XXH_NO_STREAM +/*! + * @typedef struct XXH32_state_s XXH32_state_t + * @brief The opaque state struct for the XXH32 streaming API. + * + * @see XXH32_state_s for details. + */ +typedef struct XXH32_state_s XXH32_state_t; +/*! + * @brief Allocates an @ref XXH32_state_t. + * + * @return An allocated pointer of @ref XXH32_state_t on success. + * @return `NULL` on failure. + * + * @note Must be freed with XXH32_freeState(). + */ +XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void); +/*! + * @brief Frees an @ref XXH32_state_t. + * + * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState(). + * + * @return @ref XXH_OK. + * + * @note @p statePtr must be allocated with XXH32_createState(). + * + */ +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); +/*! + * @brief Copies one @ref XXH32_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. + */ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); -/* hash streaming */ +/*! + * @brief Resets an @ref XXH32_state_t to begin a new hash. + * + * @param statePtr The state struct to reset. + * @param seed The 32-bit seed to alter the hash result predictably. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note This function resets and seeds a state. Call it before @ref XXH32_update(). + */ +XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed); -XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); +/*! + * @brief Consumes a block of @p input to an @ref XXH32_state_t. + * + * @param statePtr The state struct to update. + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note Call this to incrementally consume blocks of data. + */ XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); -XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); -XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); -XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); -XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); +/*! + * @brief Returns the calculated hash value from an @ref XXH32_state_t. + * + * @param statePtr The state struct to calculate the hash from. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return The calculated 32-bit xxHash32 value from that state. + * + * @note + * Calling XXH32_digest() will not affect @p statePtr, so you can update, + * digest, and update again. 
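+ *
+ * A minimal streaming sketch (error handling elided; `chunk1`/`chunk2` and
+ * their sizes are hypothetical placeholders):
+ * @code{.c}
+ * XXH32_state_t* const state = XXH32_createState();
+ * XXH32_reset(state, 0);                        // seed the fresh state
+ * XXH32_update(state, chunk1, chunk1Size);
+ * XXH32_update(state, chunk2, chunk2Size);
+ * XXH32_hash_t const h = XXH32_digest(state);   // state remains usable
+ * XXH32_freeState(state);
+ * @endcode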
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/******* Canonical representation *******/
+
+/*!
+ * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
+ */
+typedef struct {
+ unsigned char digest[4]; /*!< Hash bytes, big endian */
+} XXH32_canonical_t;
+
+/*!
+ * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
+ *
+ * @param dst The @ref XXH32_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH32_hash_t to be converted.
+ *
+ * @pre
+ * @p dst must not be `NULL`.
+ *
+ * @see @ref canonical_representation_example "Canonical Representation Example"
+ */
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
+
+/*!
+ * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
+ *
+ * @param src The @ref XXH32_canonical_t to convert.
+ *
+ * @pre
+ * @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ *
+ * @see @ref canonical_representation_example "Canonical Representation Example"
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
+
+
+/*! @cond Doxygen ignores this part */
+#ifdef __has_attribute
+# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
+#else
+# define XXH_HAS_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+/*! @cond Doxygen ignores this part */
/*
-These functions generate the xxHash of an input provided in multiple segments.
-Note that, for small input, they are slower than single-call functions, due to state management.
-For small input, prefer `XXH32()` and `XXH64()` .
+ * C23 __STDC_VERSION__ number hasn't been specified yet. For now
+ * leave as `201711L` (C17 + 1).
+ * TODO: Update to correct value when it's been specified.
+ */
+#define XXH_C23_VN 201711L
+/*! @endcond */
-XXH state must first be allocated, using XXH*_createState() .
+/*! @cond Doxygen ignores this part */
+/* C-language Attributes are added in C23. */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute)
+# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
+#else
+# define XXH_HAS_C_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
-Start a new hash by initializing state with a seed, using XXH*_reset().
+/*! @cond Doxygen ignores this part */
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+# define XXH_HAS_CPP_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
-Then, feed the hash state by calling XXH*_update() as many times as necessary.
-Obviously, input must be allocated and read accessible.
-The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute
+ * introduced in CPP17 and C23.
+ * CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
+ * C23 : https://en.cppreference.com/w/c/language/attributes/fallthrough
+ */
+#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
+# define XXH_FALLTHROUGH [[fallthrough]]
+#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
+# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
+#else
+# define XXH_FALLTHROUGH /* fallthrough */
+#endif
+/*! @endcond */
-Finally, a hash value can be produced anytime, by using XXH*_digest().
-This function returns the nn-bits hash as an int or long long.
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_NOESCAPE for annotated pointers in public API.
+ * https://clang.llvm.org/docs/AttributeReference.html#noescape
+ * As of writing this, only supported by clang.
+ */
+#if XXH_HAS_ATTRIBUTE(noescape)
+# define XXH_NOESCAPE __attribute__((noescape))
+#else
+# define XXH_NOESCAPE
+#endif
+/*! @endcond */
-It's still possible to continue inserting input into the hash state after a digest,
-and generate some new hashes later on, by calling again XXH*_digest().
+#if defined (__cplusplus)
+} /* end of extern "C" */
+#endif
-When done, free XXH state space if it was allocated dynamically.
-*/
+/*!
+ * @}
+ * @ingroup public
+ * @{
+ */
+#ifndef XXH_NO_LONG_LONG
+/*-**********************************************************************
+* 64-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
+/*!
+ * @brief An unsigned 64-bit integer.
+ *
+ * Not necessarily defined to `uint64_t` but functionally equivalent.
+ */
+typedef uint64_t XXH64_hash_t;
+#elif !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# ifdef _AIX
+# include <inttypes.h>
+# else
+# include <stdint.h>
+# endif
+ typedef uint64_t XXH64_hash_t;
+#else
+# include <limits.h>
+# if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
+ /* LP64 ABI says uint64_t is unsigned long */
+ typedef unsigned long XXH64_hash_t;
+# else
+ /* the following type must have a width of 64-bit */
+ typedef unsigned long long XXH64_hash_t;
+# endif
+#endif
-/* **************************
-* Utils
-****************************/
-#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* ! C99 */
-# define restrict /* disable restrict */
+#if defined (__cplusplus)
+extern "C" {
#endif
+/*!
+ * @}
+ *
+ * @defgroup XXH64_family XXH64 family
+ * @ingroup public
+ * @{
+ * Contains functions used in the classic 64-bit xxHash algorithm.
+ *
+ * @note
+ * XXH3 provides competitive speed for both 32-bit and 64-bit systems,
+ * and offers true 64/128 bit hash results.
+ * It provides better speed for systems with vector processing capabilities.
+ */
+
+/*!
+ * @brief Calculates the 64-bit hash of @p input using xxHash64.
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 64-bit seed to alter the hash's output predictably.
+ *
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 64-bit xxHash64 value.
+ *
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
-XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state);
-XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state);
+/******* Streaming *******/
+#ifndef XXH_NO_STREAM
+/*!
+ * @brief The opaque state struct for the XXH64 streaming API.
+ *
+ * @see XXH64_state_s for details.
+ */
+typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
+/*!
+ * @brief Allocates an @ref XXH64_state_t.
+ *
+ * @return An allocated pointer of @ref XXH64_state_t on success.
+ * @return `NULL` on failure. + * + * @note Must be freed with XXH64_freeState(). + */ +XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void); -/* ************************** -* Canonical representation -****************************/ -/* Default result type for XXH functions are primitive unsigned 32 and 64 bits. -* The canonical representation uses human-readable write convention, aka big-endian (large digits first). -* These functions allow transformation of hash result into and from its canonical format. -* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. -*/ -typedef struct { unsigned char digest[4]; } XXH32_canonical_t; -typedef struct { unsigned char digest[8]; } XXH64_canonical_t; +/*! + * @brief Frees an @ref XXH64_state_t. + * + * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref XXH64_createState(). + * + * @return @ref XXH_OK. + * + * @note @p statePtr must be allocated with XXH64_createState(). + */ +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); -XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); +/*! + * @brief Copies one @ref XXH64_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. + */ +XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state); + +/*! + * @brief Resets an @ref XXH64_state_t to begin a new hash. + * + * @param statePtr The state struct to reset. + * @param seed The 64-bit seed to alter the hash result predictably. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note This function resets and seeds a state. Call it before @ref XXH64_update(). + */ +XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed); + +/*! + * @brief Consumes a block of @p input to an @ref XXH64_state_t. + * + * @param statePtr The state struct to update. + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note Call this to incrementally consume blocks of data. + */ +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length); + +/*! + * @brief Returns the calculated hash value from an @ref XXH64_state_t. + * + * @param statePtr The state struct to calculate the hash from. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return The calculated 64-bit xxHash64 value from that state. + * + * @note + * Calling XXH64_digest() will not affect @p statePtr, so you can update, + * digest, and update again. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr); +#endif /* !XXH_NO_STREAM */ +/******* Canonical representation *******/ + +/*! 
+ * @brief Canonical (big endian) representation of @ref XXH64_hash_t. + */ +typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t; + +/*! + * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t. + * + * @param dst The @ref XXH64_canonical_t pointer to be stored to. + * @param hash The @ref XXH64_hash_t to be converted. + * + * @pre + * @p dst must not be `NULL`. + * + * @see @ref canonical_representation_example "Canonical Representation Example" + */ +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash); + +/*! + * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t. + * + * @param src The @ref XXH64_canonical_t to convert. + * + * @pre + * @p src must not be `NULL`. + * + * @return The converted hash. + * + * @see @ref canonical_representation_example "Canonical Representation Example" + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src); + +#ifndef XXH_NO_XXH3 + +/*! + * @} + * ************************************************************************ + * @defgroup XXH3_family XXH3 family + * @ingroup public + * @{ + * + * XXH3 is a more recent hash algorithm featuring: + * - Improved speed for both small and large inputs + * - True 64-bit and 128-bit outputs + * - SIMD acceleration + * - Improved 32-bit viability + * + * Speed analysis methodology is explained here: + * + * https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html + * + * Compared to XXH64, expect XXH3 to run approximately + * ~2x faster on large inputs and >3x faster on small ones, + * exact differences vary depending on platform. + * + * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic, + * but does not require it. + * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3 + * at competitive speeds, even without vector support. Further details are + * explained in the implementation. + * + * XXH3 has a fast scalar implementation, but it also includes accelerated SIMD + * implementations for many common platforms: + * - AVX512 + * - AVX2 + * - SSE2 + * - ARM NEON + * - WebAssembly SIMD128 + * - POWER8 VSX + * - s390x ZVector + * This can be controlled via the @ref XXH_VECTOR macro, but it automatically + * selects the best version according to predefined macros. For the x86 family, an + * automatic runtime dispatcher is included separately in @ref xxh_x86dispatch.c. + * + * XXH3 implementation is portable: + * it has a generic C90 formulation that can be compiled on any platform, + * all implementations generate exactly the same hash value on all platforms. + * Starting from v0.8.0, it's also labelled "stable", meaning that + * any future version will also generate the same hash value. + * + * XXH3 offers 2 variants, _64bits and _128bits. + * + * When only 64 bits are needed, prefer invoking the _64bits variant, as it + * reduces the amount of mixing, resulting in faster speed on small inputs. + * It's also generally simpler to manipulate a scalar return type than a struct. + * + * The API supports one-shot hashing, streaming mode, and custom secrets. + */ +/*-********************************************************************** +* XXH3 64-bit variant +************************************************************************/ + +/*! + * @brief Calculates 64-bit unseeded variant of XXH3 hash of @p input. + * + * @param input The block of data to be hashed, at least @p length bytes in size. 
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 64-bit XXH3 hash value.
+ *
+ * @note
+ * This is equivalent to @ref XXH3_64bits_withSeed() with a seed of `0`, however
+ * it may have slightly better performance due to constant propagation of the
+ * defaults.
+ *
+ * @see
+ * XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Calculates 64-bit seeded variant of XXH3 hash of @p input.
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 64-bit XXH3 hash value.
+ *
+ * @note
+ * seed == 0 produces the same results as @ref XXH3_64bits().
+ *
+ * This variant generates a custom secret on the fly based on default secret
+ * altered using the @p seed value.
+ *
+ * While this operation is decently fast, note that it's not completely free.
+ *
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
+
+/*!
+ * The bare minimum size for a custom secret.
+ *
+ * @see
+ * XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
+ * XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
+ */
+#define XXH3_SECRET_SIZE_MIN 136
+
+/*!
+ * @brief Calculates 64-bit variant of XXH3 with a custom "secret".
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in size.
+ * @param len The length of @p data, in bytes.
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ *
+ * @return The calculated 64-bit XXH3 hash value.
+ *
+ * @pre
+ * The memory between @p data and @p data + @p len must be valid,
+ * readable, contiguous memory. However, if @p len is `0`, @p data may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * It's possible to provide any blob of bytes as a "secret" to generate the hash.
+ * This makes it more difficult for an external actor to prepare an intentional collision.
+ * The main condition is that @p secretSize *must* be large enough (>= @ref XXH3_SECRET_SIZE_MIN).
+ * However, the quality of the secret impacts the dispersion of the hash algorithm.
+ * Therefore, the secret _must_ look like a bunch of random bytes.
+ * Avoid "trivial" or structured data such as repeated sequences or a text document.
+ * Whenever in doubt about the "randomness" of the blob of bytes,
+ * consider employing @ref XXH3_generateSecret() instead (see below).
+ * It will generate a proper high entropy secret derived from the blob of bytes.
+ * Another advantage of using XXH3_generateSecret() is that
+ * it guarantees that all bits within the initial blob of bytes
+ * will impact every bit of the output.
+ * This is not necessarily the case when using the blob of bytes directly
+ * because, when hashing _small_ inputs, only a portion of the secret is employed.
+ *
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+
+/******* Streaming *******/
+#ifndef XXH_NO_STREAM
+/*
+ * Streaming requires state maintenance.
+ * This operation costs memory and CPU.
+ * As a consequence, streaming is slower than one-shot hashing.
+ * For better performance, prefer one-shot functions whenever applicable.
+ */
+
+/*!
+ * @brief The opaque state struct for the XXH3 streaming API.
+ *
+ * @see XXH3_state_s for details.
+ */
+typedef struct XXH3_state_s XXH3_state_t;
+XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
+
+/*!
+ * @brief Copies one @ref XXH3_state_t to another.
+ *
+ * @param dst_state The state to copy to.
+ * @param src_state The state to copy from.
+ * @pre
+ * @p dst_state and @p src_state must not be `NULL` and must not overlap.
+ */
+XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t to begin a new hash.
+ *
+ * @param statePtr The state struct to reset.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note
+ * - This function resets `statePtr` and generates a secret with default parameters.
+ * - Call this function before @ref XXH3_64bits_update().
+ * - Digest will be equivalent to `XXH3_64bits()`.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note
+ * - This function resets `statePtr` and generates a secret from `seed`.
+ * - Call this function before @ref XXH3_64bits_update().
+ * - Digest will be equivalent to `XXH3_64bits_withSeed()`.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
+ *
+ * @param statePtr The state struct to reset.
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note
+ * `secret` is referenced, it _must outlive_ the hash streaming session.
+ *
+ * Similar to one-shot API, `secretSize` must be >= @ref XXH3_SECRET_SIZE_MIN,
+ * and the quality of produced hash values depends on secret's entropy
+ * (secret's content should look like a bunch of random bytes).
+ * When in doubt about the randomness of a candidate `secret`,
+ * consider employing `XXH3_generateSecret()` instead (see below).
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note Call this to incrementally consume blocks of data.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated XXH3 64-bit hash value from an @ref XXH3_state_t.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 64-bit hash value from that state.
+ *
+ * @note
+ * Calling XXH3_64bits_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/* note : canonical representation of XXH3 is the same as XXH64
+ * since they both produce XXH64_hash_t values */
+
+
+/*-**********************************************************************
+* XXH3 128-bit variant
+************************************************************************/
+
+/*!
+ * @brief The return value from 128-bit hashes.
+ *
+ * Stored in little endian order, although the fields themselves are in native
+ * endianness.
+ */
+typedef struct {
+ XXH64_hash_t low64; /*!< `value & 0xFFFFFFFFFFFFFFFF` */
+ XXH64_hash_t high64; /*!< `value >> 64` */
+} XXH128_hash_t;
+
+/*!
+ * @brief Calculates 128-bit unseeded variant of XXH3 of @p data.
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in size.
+ * @param len The length of @p data, in bytes.
+ *
+ * @return The calculated 128-bit variant of XXH3 value.
+ *
+ * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead
+ * for shorter inputs.
+ *
+ * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of `0`, however
+ * it may have slightly better performance due to constant propagation of the
+ * defaults.
+ *
+ * @see XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len);
+/*! @brief Calculates 128-bit seeded variant of XXH3 hash of @p data.
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in size.
+ * @param len The length of @p data, in bytes.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * @return The calculated 128-bit variant of XXH3 value.
+ *
+ * @note
+ * seed == 0 produces the same results as @ref XXH3_128bits().
+ *
+ * This variant generates a custom secret on the fly based on default secret
+ * altered using the @p seed value.
+ *
+ * While this operation is decently fast, note that it's not completely free.
+ *
+ * @see XXH3_128bits(), XXH3_128bits_withSecret(): other seeding variants
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
+/*!
+ * @brief Calculates 128-bit variant of XXH3 with a custom "secret".
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in size.
+ * @param len The length of @p data, in bytes.
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ *
+ * @return The calculated 128-bit variant of XXH3 value.
+ *
+ * It's possible to provide any blob of bytes as a "secret" to generate the hash.
+ * This makes it more difficult for an external actor to prepare an intentional collision.
+ * The main condition is that @p secretSize *must* be large enough (>= @ref XXH3_SECRET_SIZE_MIN).
+ * However, the quality of the secret impacts the dispersion of the hash algorithm.
+ * Therefore, the secret _must_ look like a bunch of random bytes.
+ * Avoid "trivial" or structured data such as repeated sequences or a text document.
+ * Whenever in doubt about the "randomness" of the blob of bytes,
+ * consider employing @ref XXH3_generateSecret() instead (see below).
+ * It will generate a proper high entropy secret derived from the blob of bytes.
+ * Another advantage of using XXH3_generateSecret() is that
+ * it guarantees that all bits within the initial blob of bytes
+ * will impact every bit of the output.
+ * This is not necessarily the case when using the blob of bytes directly
+ * because, when hashing _small_ inputs, only a portion of the secret is employed.
+ *
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/******* Streaming *******/
+#ifndef XXH_NO_STREAM
+/*
+ * Streaming requires state maintenance.
+ * This operation costs memory and CPU.
+ * As a consequence, streaming is slower than one-shot hashing.
+ * For better performance, prefer one-shot functions whenever applicable.
+ *
+ * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
+ * Use already declared XXH3_createState() and XXH3_freeState().
+ *
+ * All reset and streaming functions have the same meaning as their 64-bit counterparts.
+ */
+
+/*!
+ * @brief Resets an @ref XXH3_state_t to begin a new hash.
+ *
+ * @param statePtr The state struct to reset.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note
+ * - This function resets `statePtr` and generates a secret with default parameters.
+ * - Call it before @ref XXH3_128bits_update().
+ * - Digest will be equivalent to `XXH3_128bits()`.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note
+ * - This function resets `statePtr` and generates a secret from `seed`.
+ * - Call it before @ref XXH3_128bits_update().
+ * - Digest will be equivalent to `XXH3_128bits_withSeed()`.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
+/*!
+ * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
+ *
+ * @param statePtr The state struct to reset.
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * `secret` is referenced, it _must outlive_ the hash streaming session.
+ * Similar to one-shot API, `secretSize` must be >= @ref XXH3_SECRET_SIZE_MIN,
+ * and the quality of produced hash values depends on secret's entropy
+ * (secret's content should look like a bunch of random bytes).
+ * When in doubt about the randomness of a candidate `secret`,
+ * consider employing `XXH3_generateSecret()` instead (see below).
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated XXH3 128-bit hash value from an @ref XXH3_state_t.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 128-bit hash value from that state.
+ *
+ * @note
+ * Calling XXH3_128bits_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ *
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/* Following helper functions make it possible to compare XXH128_hash_t values.
+ * Since XXH128_hash_t is a structure, this capability is not offered by the language.
+ * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */
+
+/*!
+ * @brief Check equality of two XXH128_hash_t values
+ *
+ * @param h1 The 128-bit hash value.
+ * @param h2 Another 128-bit hash value.
+ *
+ * @return `1` if `h1` and `h2` are equal.
+ * @return `0` if they are not.
+ */
+XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
+
+/*!
+ * @brief Compares two @ref XXH128_hash_t
+ *
+ * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
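+ *
+ * For instance, an array of hashes could be sorted as follows
+ * (a sketch; `hashes` and `nbHashes` are assumed to exist):
+ * @code{.c}
+ * #include <stdlib.h>  // qsort
+ * qsort(hashes, nbHashes, sizeof(XXH128_hash_t), XXH128_cmp);
+ * @endcode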
+ * + * @param h128_1 Left-hand side value + * @param h128_2 Right-hand side value + * + * @return >0 if @p h128_1 > @p h128_2 + * @return =0 if @p h128_1 == @p h128_2 + * @return <0 if @p h128_1 < @p h128_2 + */ +XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2); -XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); -XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); +/******* Canonical representation *******/ +typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t; + + +/*! + * @brief Converts an @ref XXH128_hash_t to a big endian @ref XXH128_canonical_t. + * + * @param dst The @ref XXH128_canonical_t pointer to be stored to. + * @param hash The @ref XXH128_hash_t to be converted. + * + * @pre + * @p dst must not be `NULL`. + * @see @ref canonical_representation_example "Canonical Representation Example" + */ +XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash); + +/*! + * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t. + * + * @param src The @ref XXH128_canonical_t to convert. + * + * @pre + * @p src must not be `NULL`. + * + * @return The converted hash. + * @see @ref canonical_representation_example "Canonical Representation Example" + */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src); + + +#endif /* !XXH_NO_XXH3 */ + +#if defined (__cplusplus) +} /* extern "C" */ +#endif + +#endif /* XXH_NO_LONG_LONG */ + +/*! + * @} + */ #endif /* XXHASH_H_5627135585666179 */ -/* ================================================================================================ - This section contains definitions which are not guaranteed to remain stable. - They may change in future versions, becoming incompatible with a different version of the library. - They shall only be used with static linking. - Never use these definitions in association with dynamic linking ! -=================================================================================================== */ -#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345) -#define XXH_STATIC_H_3543687687345 - -/* These definitions are only meant to allow allocation of XXH state - statically, on stack, or in a struct for example. - Do not use members directly. 
*/ - - struct XXH32_state_s { - unsigned total_len_32; - unsigned large_len; - unsigned v1; - unsigned v2; - unsigned v3; - unsigned v4; - unsigned mem32[4]; /* buffer defined as U32 for alignment */ - unsigned memsize; - unsigned reserved; /* never read nor write, will be removed in a future version */ - }; /* typedef'd to XXH32_state_t */ - - struct XXH64_state_s { - unsigned long long total_len; - unsigned long long v1; - unsigned long long v2; - unsigned long long v3; - unsigned long long v4; - unsigned long long mem64[4]; /* buffer defined as U64 for alignment */ - unsigned memsize; - unsigned reserved[2]; /* never read nor write, will be removed in a future version */ - }; /* typedef'd to XXH64_state_t */ - - -# ifdef XXH_PRIVATE_API -# include "xxhash.c" /* include xxhash functions as `static`, for inlining */ -# endif +#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) +#define XXHASH_H_STATIC_13879238742 +/* **************************************************************************** + * This section contains declarations which are not guaranteed to remain stable. + * They may change in future versions, becoming incompatible with a different + * version of the library. + * These declarations should only be used with static linking. + * Never use them in association with dynamic linking! + ***************************************************************************** */ + +/* + * These definitions are only present to allow static allocation + * of XXH states, on stack or in a struct, for example. + * Never **ever** access their members directly. + */ + +/*! + * @internal + * @brief Structure for XXH32 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is + * an opaque type. This allows fields to safely be changed. + * + * Typedef'd to @ref XXH32_state_t. + * Do not access the members of this struct directly. + * @see XXH64_state_s, XXH3_state_s + */ +struct XXH32_state_s { + XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */ + XXH32_hash_t large_len; /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */ + XXH32_hash_t v[4]; /*!< Accumulator lanes */ + XXH32_hash_t mem32[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */ + XXH32_hash_t memsize; /*!< Amount of data in @ref mem32 */ + XXH32_hash_t reserved; /*!< Reserved field. Do not read nor write to it. */ +}; /* typedef'd to XXH32_state_t */ + + +#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */ + +/*! + * @internal + * @brief Structure for XXH64 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is + * an opaque type. This allows fields to safely be changed. + * + * Typedef'd to @ref XXH64_state_t. + * Do not access the members of this struct directly. + * @see XXH32_state_s, XXH3_state_s + */ +struct XXH64_state_s { + XXH64_hash_t total_len; /*!< Total length hashed. This is always 64-bit. */ + XXH64_hash_t v[4]; /*!< Accumulator lanes */ + XXH64_hash_t mem64[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */ + XXH32_hash_t memsize; /*!< Amount of data in @ref mem64 */ + XXH32_hash_t reserved32; /*!< Reserved field, needed for padding anyways*/ + XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it. 
*/
+}; /* typedef'd to XXH64_state_t */
+
+#ifndef XXH_NO_XXH3
+
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
+# include <stdalign.h>
+# define XXH_ALIGN(n) alignas(n)
+#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
+/* In C++ alignas() is a keyword */
+# define XXH_ALIGN(n) alignas(n)
+#elif defined(__GNUC__)
+# define XXH_ALIGN(n) __attribute__ ((aligned(n)))
+#elif defined(_MSC_VER)
+# define XXH_ALIGN(n) __declspec(align(n))
+#else
+# define XXH_ALIGN(n) /* disabled */
+#endif
+
+/* Old GCC versions only accept the attribute after the type in structures. */
+#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \
+ && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
+ && defined(__GNUC__)
+# define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
+#else
+# define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
+#endif
+
+/*!
+ * @brief The size of the internal XXH3 buffer.
+ *
+ * This is the optimal update size for incremental hashing.
+ *
+ * @see XXH3_64b_update(), XXH3_128b_update().
+ */
+#define XXH3_INTERNALBUFFER_SIZE 256
+
+/*!
+ * @internal
+ * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
+ *
+ * This is the size used in @ref XXH3_kSecret and the seeded functions.
+ *
+ * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
+ */
+#define XXH3_SECRET_DEFAULT_SIZE 192
+
+/*!
+ * @internal
+ * @brief Structure for XXH3 streaming API.
+ *
+ * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
+ * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
+ * Otherwise it is an opaque type.
+ * Never use this definition in combination with dynamic library.
+ * This allows fields to safely be changed in the future.
+ *
+ * @note ** This structure has a strict alignment requirement of 64 bytes!! **
+ * Do not allocate this with `malloc()` or `new`,
+ * it will not be sufficiently aligned.
+ * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
+ *
+ * Typedef'd to @ref XXH3_state_t.
+ * Never access the members of this struct directly.
+ *
+ * @see XXH3_INITSTATE() for stack initialization.
+ * @see XXH3_createState(), XXH3_freeState().
+ * @see XXH32_state_s, XXH64_state_s
+ */
+struct XXH3_state_s {
+ XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
+ /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */
+ XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
+ /*!< Used to store a custom secret generated from a seed. */
+ XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
+ /*!< The internal buffer. @see XXH32_state_s::mem32 */
+ XXH32_hash_t bufferedSize;
+ /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
+ XXH32_hash_t useSeed;
+ /*!< Reserved field. Needed for padding on 64-bit. */
+ size_t nbStripesSoFar;
+ /*!< Number of stripes processed. */
+ XXH64_hash_t totalLen;
+ /*!< Total length hashed. 64-bit even on 32-bit targets. */
+ size_t nbStripesPerBlock;
+ /*!< Number of stripes per block. */
+ size_t secretLimit;
+ /*!< Size of @ref customSecret or @ref extSecret */
+ XXH64_hash_t seed;
+ /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
+ XXH64_hash_t reserved64;
+ /*!< Reserved field. */
+ const unsigned char* extSecret;
+ /*!< Reference to an external secret for the _withSecret variants, NULL
+ * for other variants.
*/
+ /* note: there may be some padding at the end due to alignment on 64 bytes */
+}; /* typedef'd to XXH3_state_t */
+
+#undef XXH_ALIGN_MEMBER
-#endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */
+/*!
+ * @brief Initializes a stack-allocated `XXH3_state_s`.
+ *
+ * When the @ref XXH3_state_t structure is merely emplaced on stack,
+ * it should be initialized with XXH3_INITSTATE() or a memset()
+ * in case its first reset uses XXH3_NNbits_reset_withSeed().
+ * This init can be omitted if the first reset uses default or _withSecret mode.
+ * This operation isn't necessary when the state is created with XXH3_createState().
+ * Note that this doesn't prepare the state for a streaming operation,
+ * it's still necessary to use XXH3_NNbits_reset*() afterwards.
+ */
+#define XXH3_INITSTATE(XXH3_state_ptr) \
+ do { \
+ XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \
+ tmp_xxh3_state_ptr->seed = 0; \
+ tmp_xxh3_state_ptr->extSecret = NULL; \
+ } while(0)
#if defined (__cplusplus)
-}
+extern "C" {
+#endif
+
+/*!
+ * @brief Calculates the 128-bit hash of @p data using XXH3.
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in size.
+ * @param len The length of @p data, in bytes.
+ * @param seed The 64-bit seed to alter the hash's output predictably.
+ *
+ * @pre
+ * The memory between @p data and @p data + @p len must be valid,
+ * readable, contiguous memory. However, if @p len is `0`, @p data may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 128-bit XXH3 value.
+ *
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
+
+
+/* === Experimental API === */
+/* Symbols defined below must be considered tied to a specific library version. */
+
+/*!
+ * @brief Derive a high-entropy secret from any user-defined content, named customSeed.
+ *
+ * @param secretBuffer A writable buffer for derived high-entropy secret data.
+ * @param secretSize Size of secretBuffer, in bytes. Must be >= XXH3_SECRET_SIZE_MIN.
+ * @param customSeed A user-defined content.
+ * @param customSeedSize Size of customSeed, in bytes.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * The generated secret can be used in combination with `*_withSecret()` functions.
+ * The `_withSecret()` variants are useful to provide a higher level of protection
+ * than 64-bit seed, as it becomes much more difficult for an external actor to
+ * guess how to impact the calculation logic.
+ *
+ * The function accepts as input a custom seed of any length and any content,
+ * and derives from it a high-entropy secret of length @p secretSize into an
+ * already allocated buffer @p secretBuffer.
+ *
+ * The generated secret can then be used with any `*_withSecret()` variant.
+ * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(),
+ * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret()
+ * are part of this list. They all accept a `secret` parameter
+ * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN)
+ * _and_ feature very high entropy (consist of random-looking bytes).
+ * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can
+ * be employed to ensure proper quality.
+ *
+ * @p customSeed can be anything. It can have any size, even small ones,
+ * and its content can be anything, even "poor entropy" sources such as a bunch
+ * of zeroes. The resulting `secret` will nonetheless provide all required qualities.
+ *
+ * @pre
+ * - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN
+ * - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
+ *
+ * Example code:
+ * @code{.c}
+ * #include <stdio.h>
+ * #include <string.h>
+ * #include <stdlib.h>
+ * #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ * #include "xxhash.h"
+ * // Hashes argv[2] using the entropy from argv[1].
+ * int main(int argc, char* argv[])
+ * {
+ * char secret[XXH3_SECRET_SIZE_MIN];
+ * if (argc != 3) { return 1; }
+ * XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1]));
+ * XXH64_hash_t h = XXH3_64bits_withSecret(
+ * argv[2], strlen(argv[2]),
+ * secret, sizeof(secret)
+ * );
+ * printf("%016llx\n", (unsigned long long) h);
+ * }
+ * @endcode
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize);
+
+/*!
+ * @brief Generate the same secret as the _withSeed() variants.
+ *
+ * @param secretBuffer A writable buffer of @ref XXH3_SECRET_DEFAULT_SIZE bytes
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * The generated secret can be used in combination with
+ * `*_withSecret()` and `_withSecretandSeed()` variants.
+ *
+ * Example C++ `std::string` hash class:
+ * @code{.cpp}
+ * #include <string>
+ * #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ * #include "xxhash.h"
+ * // Slow, seeds each time
+ * class HashSlow {
+ * XXH64_hash_t seed;
+ * public:
+ * HashSlow(XXH64_hash_t s) : seed{s} {}
+ * size_t operator()(const std::string& x) const {
+ * return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)};
+ * }
+ * };
+ * // Fast, caches the seeded secret for future uses.
+ * class HashFast {
+ * unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
+ * public:
+ * HashFast(XXH64_hash_t s) {
+ * XXH3_generateSecret_fromSeed(secret, s);
+ * }
+ * size_t operator()(const std::string& x) const {
+ * return size_t{
+ * XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret))
+ * };
+ * }
+ * };
+ * @endcode
+ */
+XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed);
+
+/*!
+ * @brief Calculates 64/128-bit seeded variant of XXH3 hash of @p data.
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in size.
+ * @param len The length of @p data, in bytes.
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * These variants generate hash values using either
+ * @p seed for "short" keys (< @ref XXH3_MIDSIZE_MAX = 240 bytes)
+ * or @p secret for "large" keys (>= @ref XXH3_MIDSIZE_MAX).
+ *
+ * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
+ * `_withSeed()` has to generate the secret on the fly for "large" keys.
+ * It's fast, but can be perceptible for "not so large" keys (< 1 KB).
+ * `_withSecret()` has to generate the masks on the fly for "small" keys,
+ * which requires more instructions than _withSeed() variants.
+ * Therefore, the _withSecretandSeed() variant combines the best of both worlds.
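+ *
+ * A possible usage pattern (a sketch; `msg` and `msgSize` are placeholders):
+ * @code{.c}
+ * unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
+ * XXH3_generateSecret_fromSeed(secret, 42);  // derive the secret once
+ * // Matches XXH3_64bits_withSeed(msg, msgSize, 42), without regenerating
+ * // the secret for every large input:
+ * XXH64_hash_t const h = XXH3_64bits_withSecretandSeed(
+ *         msg, msgSize, secret, sizeof(secret), 42);
+ * @endcode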
+ * + * When @p secret has been generated by XXH3_generateSecret_fromSeed(), + * this variant produces *exactly* the same results as `_withSeed()` variant, + * hence offering only a pure speed benefit on "large" input, + * by skipping the need to regenerate the secret for every large input. + * + * Another usage scenario is to hash the secret to a 64-bit hash value, + * for example with XXH3_64bits(), which then becomes the seed, + * and then employ both the seed and the secret in _withSecretandSeed(). + * On top of speed, an added benefit is that each bit in the secret + * has a 50% chance to swap each bit in the output, via its impact on the seed. + * + * This is not guaranteed when using the secret directly in "small data" scenarios, + * because only portions of the secret are employed for small data. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t +XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len, + XXH_NOESCAPE const void* secret, size_t secretSize, + XXH64_hash_t seed); +/*! + * @brief Calculates 128-bit seeded variant of XXH3 hash of @p input. + * + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * @param secret The secret data. + * @param secretSize The length of @p secret, in bytes. + * @param seed64 The 64-bit seed to alter the hash result predictably. + * + * @return The calculated 128-bit XXH3 value. + * + * @see XXH3_64bits_withSecretandSeed() + */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t +XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, + XXH_NOESCAPE const void* secret, size_t secretSize, + XXH64_hash_t seed64); +#ifndef XXH_NO_STREAM +/*! + * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash. + * + * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState(). + * @param secret The secret data. + * @param secretSize The length of @p secret, in bytes. + * @param seed64 The 64-bit seed to alter the hash result predictably. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @see XXH3_64bits_withSecretandSeed() + */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, + XXH_NOESCAPE const void* secret, size_t secretSize, + XXH64_hash_t seed64); +/*! + * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash. + * + * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState(). + * @param secret The secret data. + * @param secretSize The length of @p secret, in bytes. + * @param seed64 The 64-bit seed to alter the hash result predictably. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure.
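+ *
+ * A hedged streaming sketch (assumes a prepared secret as described above;
+ * return-code checks elided for brevity):
+ * @code{.c}
+ * XXH128_hash_t hash_two_parts(const void* part1, size_t size1,
+ *                              const void* part2, size_t size2,
+ *                              const void* secret, size_t secretSize,
+ *                              XXH64_hash_t seed)
+ * {
+ *     XXH3_state_t* const state = XXH3_createState();
+ *     (void)XXH3_128bits_reset_withSecretandSeed(state, secret, secretSize, seed);
+ *     (void)XXH3_128bits_update(state, part1, size1);
+ *     (void)XXH3_128bits_update(state, part2, size2);
+ *     { XXH128_hash_t const result = XXH3_128bits_digest(state);
+ *       XXH3_freeState(state);
+ *       return result; }
+ * }
+ * @endcode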
+ * + * @see XXH3_64bits_withSecretandSeed() + */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, + XXH_NOESCAPE const void* secret, size_t secretSize, + XXH64_hash_t seed64); +#endif /* !XXH_NO_STREAM */ + +#if defined (__cplusplus) +} /* extern "C" */ +#endif + +#endif /* !XXH_NO_XXH3 */ +#endif /* XXH_NO_LONG_LONG */ + +#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) +# define XXH_IMPLEMENTATION +#endif + +#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */ + + +/* ======================================================================== */ +/* ======================================================================== */ +/* ======================================================================== */ + + +/*-********************************************************************** + * xxHash implementation + *-********************************************************************** + * xxHash's implementation used to be hosted inside xxhash.c. + * + * However, inlining requires the implementation to be visible to the compiler, + * hence it must be included alongside the header. + * Previously, the implementation was hosted inside xxhash.c, + * which was then #included when inlining was activated. + * This construction created issues with a few build and install systems, + * as it required xxhash.c to be stored in the /include directory. + * + * The xxHash implementation is now directly integrated within xxhash.h. + * As a consequence, xxhash.c is no longer needed in /include. + * + * xxhash.c is still available and is still useful. + * In a "normal" setup, when xxhash is not inlined, + * xxhash.h only exposes the prototypes and public symbols, + * while xxhash.c can be built into an object file xxhash.o + * which can then be linked into the final binary. + ************************************************************************/ + +#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \ + || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387) +# define XXH_IMPLEM_13a8737387 + +/* ************************************* +* Tuning parameters +***************************************/ + +/*! + * @defgroup tuning Tuning parameters + * @{ + * + * Various macros to control xxHash's behavior. + */ +#ifdef XXH_DOXYGEN +/*! + * @brief Define this to disable 64-bit code. + * + * Useful if only using the @ref XXH32_family and you have a strict C90 compiler. + */ +# define XXH_NO_LONG_LONG +# undef XXH_NO_LONG_LONG /* don't actually */ +/*! + * @brief Controls how unaligned memory is accessed. + * + * By default, access to unaligned memory is controlled by `memcpy()`, which is + * safe and portable. + * + * Unfortunately, on some target/compiler combinations, the generated assembly + * is sub-optimal. + * + * The switch below allows selection of a different access method + * in the search for improved performance. + * + * @par Possible options: + * + * - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy` + * @par + * Use `memcpy()`. Safe and portable. Note that most modern compilers will + * eliminate the function call and treat it as an unaligned access. + * + * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))` + * @par + * Depends on compiler extensions and is therefore not portable. + * This method is safe _if_ your compiler supports it, + * and *generally* as fast or faster than `memcpy`. + * + * - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast + * @par + * Casts directly and dereferences.
This method doesn't depend on the + * compiler, but it violates the C standard as it directly dereferences an + * unaligned pointer. It can generate buggy code on targets which do not + * support unaligned memory accesses, but in some circumstances, it's the + * only known way to get the most performance. + * + * - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift + * @par + * Also portable. This can generate the best code on old compilers which don't + * inline small `memcpy()` calls, and it might also be faster on big-endian + * systems which lack a native byteswap instruction. However, some compilers + * will emit literal byteshifts even if the target supports unaligned access. + * + * + * @warning + * Methods 1 and 2 rely on implementation-defined behavior. Use these with + * care, as what works on one compiler/platform/optimization level may cause + * another to read garbage data or even crash. + * + * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details. + * + * Prefer these methods in priority order (0 > 3 > 1 > 2) + */ +# define XXH_FORCE_MEMORY_ACCESS 0 + +/*! + * @def XXH_SIZE_OPT + * @brief Controls how much xxHash optimizes for size. + * + * xxHash, when compiled, tends to result in a rather large binary size. This + * is mostly due to heavy usage of forced inlining and constant folding of the + * @ref XXH3_family to increase performance. + * + * However, some developers prefer size over speed. This option can + * significantly reduce the size of the generated code. When using the `-Os` + * or `-Oz` options on GCC or Clang, this is defined to 1 by default, + * otherwise it is defined to 0. + * + * Most of these size optimizations can be controlled manually. + * + * This is a number from 0-2. + * - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed + * comes first. + * - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more + * conservative and disables hacks that increase code size. It implies the + * options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0, + * and @ref XXH3_NEON_LANES == 8 if they are not already defined. + * - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible. + * Performance may cry. For example, the single shot functions just use the + * streaming API. + */ +# define XXH_SIZE_OPT 0 + +/*! + * @def XXH_FORCE_ALIGN_CHECK + * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32() + * and XXH64() only). + * + * This is an important performance trick for architectures without decent + * unaligned memory access performance. + * + * It checks for input alignment, and when conditions are met, uses a "fast + * path" employing direct 32-bit/64-bit reads, resulting in _dramatically + * faster_ read speed. + * + * The check costs one initial branch per hash, which is generally negligible, + * but not zero. + * + * Moreover, it's not useful to generate an additional code path if memory + * access uses the same instruction for both aligned and unaligned + * addresses (e.g. x86 and aarch64). + * + * In these cases, the alignment check can be removed by setting this macro to 0. + * Then the code will always use unaligned memory access. + * The align check is automatically disabled on x86, x64, ARM64, and some ARM chips, + * which are platforms known to offer good unaligned memory access performance. + * + * It is also disabled by default when @ref XXH_SIZE_OPT >= 1. + * + * This option does not affect XXH3 (only XXH32 and XXH64).
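+ *
+ * As an illustration, tuning macros such as these are defined before the header
+ * is included; a hedged sketch of one possible translation unit (the chosen
+ * values are examples, not recommendations):
+ * @code{.c}
+ * #define XXH_FORCE_MEMORY_ACCESS 3 // byteshift loads
+ * #define XXH_FORCE_ALIGN_CHECK   0 // skip the aligned fast path
+ * #define XXH_INLINE_ALL            // pull the implementation into this unit
+ * #include "xxhash.h"
+ * @endcode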
+ */ +# define XXH_FORCE_ALIGN_CHECK 0 + +/*! + * @def XXH_NO_INLINE_HINTS + * @brief When non-zero, sets all functions to `static`. + * + * By default, xxHash tries to force the compiler to inline almost all internal + * functions. + * + * This can usually improve performance due to reduced jumping and improved + * constant folding, but significantly increases the size of the binary which + * might not be favorable. + * + * Additionally, sometimes the forced inlining can be detrimental to performance, + * depending on the architecture. + * + * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the + * compiler full control on whether to inline or not. + * + * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if + * @ref XXH_SIZE_OPT >= 1, this will automatically be defined. + */ +# define XXH_NO_INLINE_HINTS 0 + +/*! + * @def XXH3_INLINE_SECRET + * @brief Determines whether to inline the XXH3 withSecret code. + * + * When the secret size is known, the compiler can improve the performance + * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret(). + * + * However, if the secret size is not known, it doesn't have any benefit. This + * happens when xxHash is compiled into a global symbol. Therefore, if + * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0. + * + * Additionally, this defaults to 0 on GCC 12+, which has an issue with function pointers + * that are *sometimes* force inline on -Og, and it is impossible to automatically + * detect this optimization level. + */ +# define XXH3_INLINE_SECRET 0 + +/*! + * @def XXH32_ENDJMP + * @brief Whether to use a jump for `XXH32_finalize`. + * + * For performance, `XXH32_finalize` uses multiple branches in the finalizer. + * This is generally preferable for performance, + * but depending on exact architecture, a jmp may be preferable. + * + * This setting is only possibly making a difference for very small inputs. + */ +# define XXH32_ENDJMP 0 + +/*! + * @internal + * @brief Redefines old internal names. + * + * For compatibility with code that uses xxHash's internals before the names + * were changed to improve namespacing. There is no other reason to use this. + */ +# define XXH_OLD_NAMES +# undef XXH_OLD_NAMES /* don't actually use, it is ugly. */ + +/*! + * @def XXH_NO_STREAM + * @brief Disables the streaming API. + * + * When xxHash is not inlined and the streaming functions are not used, disabling + * the streaming functions can improve code size significantly, especially with + * the @ref XXH3_family which tends to make constant folded copies of itself. + */ +# define XXH_NO_STREAM +# undef XXH_NO_STREAM /* don't actually */ +#endif /* XXH_DOXYGEN */ +/*! + * @} + */ + +#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ + /* prefer __packed__ structures (method 1) for GCC + * < ARMv7 with unaligned access (e.g. Raspbian armhf) still uses byte shifting, so we use memcpy + * which for some reason does unaligned loads. 
*/ +# if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED)) +# define XXH_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +#ifndef XXH_SIZE_OPT + /* default to 1 for -Os or -Oz */ +# if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__) +# define XXH_SIZE_OPT 1 +# else +# define XXH_SIZE_OPT 0 +# endif +#endif + +#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ + /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */ +# if XXH_SIZE_OPT >= 1 || \ + defined(__i386) || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \ + || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) || defined(_M_ARM) /* visual */ +# define XXH_FORCE_ALIGN_CHECK 0 +# else +# define XXH_FORCE_ALIGN_CHECK 1 +# endif +#endif + +#ifndef XXH_NO_INLINE_HINTS +# if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__) /* -O0, -fno-inline */ +# define XXH_NO_INLINE_HINTS 1 +# else +# define XXH_NO_INLINE_HINTS 0 +# endif +#endif + +#ifndef XXH3_INLINE_SECRET +# if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \ + || !defined(XXH_INLINE_ALL) +# define XXH3_INLINE_SECRET 0 +# else +# define XXH3_INLINE_SECRET 1 +# endif +#endif + +#ifndef XXH32_ENDJMP +/* generally preferable for performance */ +# define XXH32_ENDJMP 0 +#endif + +/*! + * @defgroup impl Implementation + * @{ + */ + +/* ************************************* +* Includes & Memory related functions +***************************************/ +#include <string.h> /* memcmp, memcpy */ +#include <limits.h> /* ULLONG_MAX */ + +#if defined(XXH_NO_STREAM) +/* nothing */ +#elif defined(XXH_NO_STDLIB) + +/* When requesting to disable any mention of stdlib, + * the library loses the ability to invoke malloc() / free(). + * In practice, it means that functions like `XXH*_createState()` + * will always fail, and return NULL. + * This flag is useful in situations where + * xxhash.h is integrated into some kernel, embedded or limited environment + * without access to dynamic allocation. + */ + +#if defined (__cplusplus) +extern "C" { +#endif + +static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; } +static void XXH_free(void* p) { (void)p; } + +#if defined (__cplusplus) +} /* extern "C" */ +#endif + +#else + +/* + * Modify the local functions below should you wish to use + * different memory routines for malloc() and free() + */ +#include <stdlib.h> + +#if defined (__cplusplus) +extern "C" { +#endif +/*! + * @internal + * @brief Modify this function to use a different routine than malloc(). + */ +static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); } + +/*! + * @internal + * @brief Modify this function to use a different routine than free(). + */ +static void XXH_free(void* p) { free(p); } + +#if defined (__cplusplus) +} /* extern "C" */ +#endif + +#endif /* XXH_NO_STDLIB */ + +#if defined (__cplusplus) +extern "C" { +#endif +/*! + * @internal + * @brief Modify this function to use a different routine than memcpy().
+ */ +static void* XXH_memcpy(void* dest, const void* src, size_t size) +{ + return memcpy(dest,src,size); +} + +#if defined (__cplusplus) +} /* extern "C" */ +#endif + +/* ************************************* +* Compiler Specific Options +***************************************/ +#ifdef _MSC_VER /* Visual Studio warning fix */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +#endif + +#if XXH_NO_INLINE_HINTS /* disable inlining hints */ +# if defined(__GNUC__) || defined(__clang__) +# define XXH_FORCE_INLINE static __attribute__((unused)) +# else +# define XXH_FORCE_INLINE static +# endif +# define XXH_NO_INLINE static +/* enable inlining hints */ +#elif defined(__GNUC__) || defined(__clang__) +# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused)) +# define XXH_NO_INLINE static __attribute__((noinline)) +#elif defined(_MSC_VER) /* Visual Studio */ +# define XXH_FORCE_INLINE static __forceinline +# define XXH_NO_INLINE static __declspec(noinline) +#elif defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */ +# define XXH_FORCE_INLINE static inline +# define XXH_NO_INLINE static +#else +# define XXH_FORCE_INLINE static +# define XXH_NO_INLINE static +#endif + +#if XXH3_INLINE_SECRET +# define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE +#else +# define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE +#endif + + +/* ************************************* +* Debug +***************************************/ +/*! + * @ingroup tuning + * @def XXH_DEBUGLEVEL + * @brief Sets the debugging level. + * + * XXH_DEBUGLEVEL is expected to be defined externally, typically via the + * compiler's command line options. The value must be a number. + */ +#ifndef XXH_DEBUGLEVEL +# ifdef DEBUGLEVEL /* backwards compat */ +# define XXH_DEBUGLEVEL DEBUGLEVEL +# else +# define XXH_DEBUGLEVEL 0 +# endif +#endif + +#if (XXH_DEBUGLEVEL>=1) +# include <assert.h> /* note: can still be disabled with NDEBUG */ +# define XXH_ASSERT(c) assert(c) +#else +# if defined(__INTEL_COMPILER) +# define XXH_ASSERT(c) XXH_ASSUME((unsigned char) (c)) +# else +# define XXH_ASSERT(c) XXH_ASSUME(c) +# endif +#endif + +/* note: use after variable declarations */ +#ifndef XXH_STATIC_ASSERT +# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */ +# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0) +# elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */ +# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0) +# else +# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0) +# endif +# define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c) +#endif + +/*! + * @internal + * @def XXH_COMPILER_GUARD(var) + * @brief Used to prevent unwanted optimizations for @p var. + * + * It uses an empty GCC inline assembly statement with a register constraint + * which forces @p var into a general purpose register (e.g. eax, ebx, ecx + * on x86) and marks it as modified. + * + * This is used in a few places to avoid unwanted autovectorization (e.g. + * XXH32_round()). All vectorization we want is explicit via intrinsics, + * and _usually_ isn't wanted elsewhere. + * + * We also use it to prevent unwanted constant folding for AArch64 in + * XXH3_initCustomSecret_scalar().
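+ *
+ * For intuition, a minimal sketch of the pattern in isolation (GCC/Clang
+ * inline assembly; the function and constant are illustrative only):
+ * @code{.c}
+ * static unsigned mix(unsigned acc, unsigned input)
+ * {
+ *     acc += input * 2654435761U;
+ *     __asm__("" : "+r"(acc)); // opaque to the optimizer: acc may have changed,
+ *                              // so loops around this cannot be autovectorized
+ *     return acc;
+ * }
+ * @endcode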
+ */ +#if defined(__GNUC__) || defined(__clang__) +# define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var)) +#else +# define XXH_COMPILER_GUARD(var) ((void)0) +#endif + +/* Specifically for NEON vectors which use the "w" constraint, on + * Clang. */ +#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__) +# define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var)) +#else +# define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0) +#endif + +/* ************************************* +* Basic Types +***************************************/ +#if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# ifdef _AIX +# include <inttypes.h> +# else +# include <stdint.h> +# endif + typedef uint8_t xxh_u8; +#else + typedef unsigned char xxh_u8; +#endif +typedef XXH32_hash_t xxh_u32; + +#ifdef XXH_OLD_NAMES +# warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly" +# define BYTE xxh_u8 +# define U8 xxh_u8 +# define U32 xxh_u32 +#endif + +#if defined (__cplusplus) +extern "C" { +#endif + +/* *** Memory access *** */ + +/*! + * @internal + * @fn xxh_u32 XXH_read32(const void* ptr) + * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * + * @param ptr The pointer to read from. + * @return The 32-bit native endian integer from the bytes at @p ptr. + */ + +/*! + * @internal + * @fn xxh_u32 XXH_readLE32(const void* ptr) + * @brief Reads an unaligned 32-bit little endian integer from @p ptr. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * + * @param ptr The pointer to read from. + * @return The 32-bit little endian integer from the bytes at @p ptr. + */ + +/*! + * @internal + * @fn xxh_u32 XXH_readBE32(const void* ptr) + * @brief Reads an unaligned 32-bit big endian integer from @p ptr. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * + * @param ptr The pointer to read from. + * @return The 32-bit big endian integer from the bytes at @p ptr. + */ + +/*! + * @internal + * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align) + * @brief Like @ref XXH_readLE32(), but has an option for aligned reads. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is + * always @ref XXH_alignment::XXH_unaligned. + * + * @param ptr The pointer to read from. + * @param align Whether @p ptr is aligned. + * @pre + * If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte + * aligned. + * @return The 32-bit little endian integer from the bytes at @p ptr. + */ + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) +/* + * Manual byteshift. Best for old compilers which don't inline memcpy. + * We actually directly use XXH_readLE32 and XXH_readBE32. + */ +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* + * Force direct memory access. Only works on CPU which support unaligned memory + * access in hardware. + */ +static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* + * __attribute__((aligned(1))) is supported by gcc and clang.
Originally the + * documentation claimed that it only increased the alignment, but actually it + * can decrease it on gcc, clang, and icc: + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, + * https://gcc.godbolt.org/z/xYez1j67Y. + */ +#ifdef XXH_OLD_NAMES +typedef union { xxh_u32 u32; } __attribute__((packed)) unalign; +#endif +static xxh_u32 XXH_read32(const void* ptr) +{ + typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32; + return *((const xxh_unalign32*)ptr); +} + +#else + +/* + * Portable and safe solution. Generally efficient. + * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + */ +static xxh_u32 XXH_read32(const void* memPtr) +{ + xxh_u32 val; + XXH_memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + + +/* *** Endianness *** */ + +/*! + * @ingroup tuning + * @def XXH_CPU_LITTLE_ENDIAN + * @brief Whether the target is little endian. + * + * Defined to 1 if the target is little endian, or 0 if it is big endian. + * It can be defined externally, for example on the compiler command line. + * + * If it is not defined, + * a runtime check (which is usually constant folded) is used instead. + * + * @note + * This is not necessarily defined to an integer constant. + * + * @see XXH_isLittleEndian() for the runtime check. + */ +#ifndef XXH_CPU_LITTLE_ENDIAN +/* + * Try to detect endianness automatically, to avoid the nonstandard behavior + * in `XXH_isLittleEndian()` + */ +# if defined(_WIN32) /* Windows is always little endian */ \ + || defined(__LITTLE_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +# define XXH_CPU_LITTLE_ENDIAN 1 +# elif defined(__BIG_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +# define XXH_CPU_LITTLE_ENDIAN 0 +# else +/*! + * @internal + * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN. + * + * Most compilers will constant fold this. + */ +static int XXH_isLittleEndian(void) +{ + /* + * Portable and well-defined behavior. + * Don't use static: it is detrimental to performance. + */ + const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 }; + return one.c[0]; +} +# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() +# endif +#endif + + + + +/* **************************************** +* Compiler-specific Functions and Macros +******************************************/ +#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + +#ifdef __has_builtin +# define XXH_HAS_BUILTIN(x) __has_builtin(x) +#else +# define XXH_HAS_BUILTIN(x) 0 +#endif + + + +/* + * C23 and future versions have standard "unreachable()". + * Once it has been implemented reliably we can add it as an + * additional case: + * + * ``` + * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) + * # include <stddef.h> + * # ifdef unreachable + * # define XXH_UNREACHABLE() unreachable() + * # endif + * #endif + * ``` + * + * Note C++23 also has std::unreachable() which can be detected + * as follows: + * ``` + * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L) + * # include <utility> + * # define XXH_UNREACHABLE() std::unreachable() + * #endif + * ``` + * NB: `__cpp_lib_unreachable` is defined in the `<version>` header.
+ * We don't use that as including `<utility>` in `extern "C"` blocks + * doesn't work on GCC 12 + */ + +#if XXH_HAS_BUILTIN(__builtin_unreachable) +# define XXH_UNREACHABLE() __builtin_unreachable() + +#elif defined(_MSC_VER) +# define XXH_UNREACHABLE() __assume(0) + +#else +# define XXH_UNREACHABLE() +#endif + +#if XXH_HAS_BUILTIN(__builtin_assume) +# define XXH_ASSUME(c) __builtin_assume(c) +#else +# define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); } +#endif + +/*! + * @internal + * @def XXH_rotl32(x,r) + * @brief 32-bit rotate left. + * + * @param x The 32-bit integer to be rotated. + * @param r The number of bits to rotate. + * @pre + * @p r > 0 && @p r < 32 + * @note + * @p x and @p r may be evaluated multiple times. + * @return The rotated result. + */ +#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \ + && XXH_HAS_BUILTIN(__builtin_rotateleft64) +# define XXH_rotl32 __builtin_rotateleft32 +# define XXH_rotl64 __builtin_rotateleft64 +/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */ +#elif defined(_MSC_VER) +# define XXH_rotl32(x,r) _rotl(x,r) +# define XXH_rotl64(x,r) _rotl64(x,r) +#else +# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r)))) +# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r)))) +#endif + +/*! + * @internal + * @fn xxh_u32 XXH_swap32(xxh_u32 x) + * @brief A 32-bit byteswap. + * + * @param x The 32-bit integer to byteswap. + * @return @p x, byteswapped. + */ +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap32 _byteswap_ulong +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap32 __builtin_bswap32 +#else +static xxh_u32 XXH_swap32 (xxh_u32 x) +{ + return ((x << 24) & 0xff000000 ) | + ((x << 8) & 0x00ff0000 ) | + ((x >> 8) & 0x0000ff00 ) | + ((x >> 24) & 0x000000ff ); +} +#endif + + +/* *************************** +* Memory reads +*****************************/ + +/*! + * @internal + * @brief Enum to indicate whether a pointer is aligned. + */ +typedef enum { + XXH_aligned, /*!< Aligned */ + XXH_unaligned /*!< Possibly unaligned */ +} XXH_alignment; + +/* + * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. + * + * This is ideal for older compilers which don't inline memcpy. + */ +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) + +XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[0] + | ((xxh_u32)bytePtr[1] << 8) + | ((xxh_u32)bytePtr[2] << 16) + | ((xxh_u32)bytePtr[3] << 24); +} + +XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[3] + | ((xxh_u32)bytePtr[2] << 8) + | ((xxh_u32)bytePtr[1] << 16) + | ((xxh_u32)bytePtr[0] << 24); +} + +#else +XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); +} + +static xxh_u32 XXH_readBE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); +} +#endif + +XXH_FORCE_INLINE xxh_u32 +XXH_readLE32_align(const void* ptr, XXH_alignment align) +{ + if (align==XXH_unaligned) { + return XXH_readLE32(ptr); + } else { + return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr); + } +} + + +/* ************************************* +* Misc +***************************************/ +/*!
@ingroup public */ +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } + + +/* ******************************************************************* +* 32-bit hash functions +*********************************************************************/ +/*! + * @} + * @defgroup XXH32_impl XXH32 implementation + * @ingroup impl + * + * Details on the XXH32 implementation. + * @{ + */ + /* #define instead of static const, to be used as initializers */ +#define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */ +#define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */ +#define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */ +#define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */ +#define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */ + +#ifdef XXH_OLD_NAMES +# define PRIME32_1 XXH_PRIME32_1 +# define PRIME32_2 XXH_PRIME32_2 +# define PRIME32_3 XXH_PRIME32_3 +# define PRIME32_4 XXH_PRIME32_4 +# define PRIME32_5 XXH_PRIME32_5 +#endif + +/*! + * @internal + * @brief Normal stripe processing routine. + * + * This shuffles the bits so that any bit from @p input impacts several bits in + * @p acc. + * + * @param acc The accumulator lane. + * @param input The stripe of input to mix. + * @return The mixed accumulator lane. + */ +static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) +{ + acc += input * XXH_PRIME32_2; + acc = XXH_rotl32(acc, 13); + acc *= XXH_PRIME32_1; +#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE) + /* + * UGLY HACK: + * A compiler fence is the only thing that prevents GCC and Clang from + * autovectorizing the XXH32 loop (pragmas and attributes don't work for some + * reason) without globally disabling SSE4.1. + * + * The reason we want to avoid vectorization is because despite working on + * 4 integers at a time, there are multiple factors slowing XXH32 down on + * SSE4: + * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on + * newer chips!) making it slightly slower to multiply four integers at + * once compared to four integers independently. Even when pmulld was + * fastest (Sandy/Ivy Bridge), it is still not worth it to go into SSE + * just to multiply unless doing a long operation. + * + * - Four instructions are required to rotate, + * movdqa tmp, v // not required with VEX encoding + * pslld tmp, 13 // tmp <<= 13 + * psrld v, 19 // x >>= 19 + * por v, tmp // x |= tmp + * compared to one for scalar: + * roll v, 13 // reliably fast across the board + * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason + * + * - Instruction level parallelism is actually more beneficial here because + * the SIMD actually serializes this operation: While v1 is rotating, v2 + * can load data, while v3 can multiply. SSE forces them to operate + * together. + * + * This is also enabled on AArch64, as Clang is *very aggressive* in vectorizing + * the loop. NEON is only faster on the A53, and with the newer cores, it is less + * than half the speed. + * + * Additionally, this is used on WASM SIMD128 because it JITs to the same + * SIMD instructions and has the same issue. + */ + XXH_COMPILER_GUARD(acc); +#endif + return acc; +} + +/*! + * @internal + * @brief Mixes all bits to finalize the hash. + * + * The final mix ensures that all input bits have a chance to impact any bit in + * the output digest, resulting in an unbiased distribution.
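+ *
+ * For intuition, a hedged sketch measuring this diffusion through the public
+ * XXH32() entry point (a single-bit input flip should change roughly half of
+ * the 32 output bits on average; the function name is illustrative):
+ * @code{.c}
+ * static unsigned diffused_bits(xxh_u32 value, int bit)
+ * {
+ *     xxh_u32 const flipped = value ^ ((xxh_u32)1 << bit);
+ *     xxh_u32 const delta = XXH32(&value, sizeof(value), 0)
+ *                         ^ XXH32(&flipped, sizeof(flipped), 0);
+ *     unsigned count = 0;
+ *     int i;
+ *     for (i = 0; i < 32; i++) count += (delta >> i) & 1;
+ *     return count; // expected to hover around 16 for a well-avalanched hash
+ * }
+ * @endcode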
+ * + * @param hash The hash to avalanche. + * @return The avalanched hash. + */ +static xxh_u32 XXH32_avalanche(xxh_u32 hash) +{ + hash ^= hash >> 15; + hash *= XXH_PRIME32_2; + hash ^= hash >> 13; + hash *= XXH_PRIME32_3; + hash ^= hash >> 16; + return hash; +} + +#define XXH_get32bits(p) XXH_readLE32_align(p, align) + +/*! + * @internal + * @brief Processes the last 0-15 bytes of @p ptr. + * + * There may be up to 15 bytes remaining to consume from the input. + * This final stage will digest them to ensure that all input bytes are present + * in the final mix. + * + * @param hash The hash to finalize. + * @param ptr The pointer to the remaining input. + * @param len The remaining length, modulo 16. + * @param align Whether @p ptr is aligned. + * @return The finalized hash. + * @see XXH64_finalize(). + */ +static XXH_PUREF xxh_u32 +XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) +{ +#define XXH_PROCESS1 do { \ + hash += (*ptr++) * XXH_PRIME32_5; \ + hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \ +} while (0) + +#define XXH_PROCESS4 do { \ + hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \ + ptr += 4; \ + hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \ +} while (0) + + if (ptr==NULL) XXH_ASSERT(len == 0); + + /* Compact rerolled version; generally faster */ + if (!XXH32_ENDJMP) { + len &= 15; + while (len >= 4) { + XXH_PROCESS4; + len -= 4; + } + while (len > 0) { + XXH_PROCESS1; + --len; + } + return XXH32_avalanche(hash); + } else { + switch(len&15) /* or switch(bEnd - p) */ { + case 12: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 8: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 4: XXH_PROCESS4; + return XXH32_avalanche(hash); + + case 13: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 9: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 5: XXH_PROCESS4; + XXH_PROCESS1; + return XXH32_avalanche(hash); + + case 14: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 10: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 6: XXH_PROCESS4; + XXH_PROCESS1; + XXH_PROCESS1; + return XXH32_avalanche(hash); + + case 15: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 11: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 7: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 3: XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 2: XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 1: XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 0: return XXH32_avalanche(hash); + } + XXH_ASSERT(0); + return hash; /* reaching this point is deemed impossible */ + } +} + +#ifdef XXH_OLD_NAMES +# define PROCESS1 XXH_PROCESS1 +# define PROCESS4 XXH_PROCESS4 +#else +# undef XXH_PROCESS1 +# undef XXH_PROCESS4 +#endif + +/*! + * @internal + * @brief The implementation for @ref XXH32(). + * + * @param input , len , seed Directly passed from @ref XXH32(). + * @param align Whether @p input is aligned. + * @return The calculated hash. 
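+ *
+ * For reference, the one-shot path implemented below is expected to agree with
+ * the streaming API; a hedged sketch of that equivalence (a stack-allocated
+ * state needs XXH_STATIC_LINKING_ONLY or an inlined build in user code):
+ * @code{.c}
+ * static int paths_agree(const void* buf, size_t len, XXH32_hash_t seed)
+ * {
+ *     XXH32_state_t state;
+ *     XXH32_reset(&state, seed);
+ *     XXH32_update(&state, buf, len);
+ *     return XXH32_digest(&state) == XXH32(buf, len, seed); // expected: 1
+ * }
+ * @endcode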
+ */ +XXH_FORCE_INLINE XXH_PUREF xxh_u32 +XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align) +{ + xxh_u32 h32; + + if (input==NULL) XXH_ASSERT(len == 0); + + if (len>=16) { + const xxh_u8* const bEnd = input + len; + const xxh_u8* const limit = bEnd - 15; + xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + xxh_u32 v2 = seed + XXH_PRIME32_2; + xxh_u32 v3 = seed + 0; + xxh_u32 v4 = seed - XXH_PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4; + v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4; + v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4; + v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4; + } while (input < limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + XXH_PRIME32_5; + } + + h32 += (xxh_u32)len; + + return XXH32_finalize(h32, input, len&15, align); +} + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed) +{ +#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH32_state_t state; + XXH32_reset(&state, seed); + XXH32_update(&state, (const xxh_u8*)input, len); + return XXH32_digest(&state); +#else + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); + } } + + return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); +#endif +} + + + +/******* Hash streaming *******/ +#ifndef XXH_NO_STREAM +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) +{ + return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); +} +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) +{ + XXH_memcpy(dstState, srcState, sizeof(*dstState)); +} + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed) +{ + XXH_ASSERT(statePtr != NULL); + memset(statePtr, 0, sizeof(*statePtr)); + statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + statePtr->v[1] = seed + XXH_PRIME32_2; + statePtr->v[2] = seed + 0; + statePtr->v[3] = seed - XXH_PRIME32_1; + return XXH_OK; +} + + +/*! 
@ingroup XXH32_family */ +XXH_PUBLIC_API XXH_errorcode +XXH32_update(XXH32_state_t* state, const void* input, size_t len) +{ + if (input==NULL) { + XXH_ASSERT(len == 0); + return XXH_OK; + } + + { const xxh_u8* p = (const xxh_u8*)input; + const xxh_u8* const bEnd = p + len; + + state->total_len_32 += (XXH32_hash_t)len; + state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16)); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len); + state->memsize += (XXH32_hash_t)len; + return XXH_OK; + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const xxh_u32* p32 = state->mem32; + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32)); + } + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= bEnd-16) { + const xxh_u8* const limit = bEnd - 16; + + do { + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4; + } while (p<=limit); + + } + + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state) +{ + xxh_u32 h32; + + if (state->large_len) { + h32 = XXH_rotl32(state->v[0], 1) + + XXH_rotl32(state->v[1], 7) + + XXH_rotl32(state->v[2], 12) + + XXH_rotl32(state->v[3], 18); + } else { + h32 = state->v[2] /* == seed */ + XXH_PRIME32_5; + } + + h32 += state->total_len_32; + + return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned); +} +#endif /* !XXH_NO_STREAM */ + +/******* Canonical representation *******/ + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); + XXH_memcpy(dst, &hash, sizeof(*dst)); +} +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) +{ + return XXH_readBE32(src); +} + + +#ifndef XXH_NO_LONG_LONG + +/* ******************************************************************* +* 64-bit hash functions +*********************************************************************/ +/*! + * @} + * @ingroup impl + * @{ + */ +/******* Memory access *******/ + +typedef XXH64_hash_t xxh_u64; + +#ifdef XXH_OLD_NAMES +# define U64 xxh_u64 +#endif + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) +/* + * Manual byteshift. Best for old compilers which don't inline memcpy. + * We actually directly use XXH_readLE64 and XXH_readBE64. + */ +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. 
Only works on CPU which support unaligned memory access in hardware */ +static xxh_u64 XXH_read64(const void* memPtr) +{ + return *(const xxh_u64*) memPtr; +} + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* + * __attribute__((aligned(1))) is supported by gcc and clang. Originally the + * documentation claimed that it only increased the alignment, but actually it + * can decrease it on gcc, clang, and icc: + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, + * https://gcc.godbolt.org/z/xYez1j67Y. + */ +#ifdef XXH_OLD_NAMES +typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64; +#endif +static xxh_u64 XXH_read64(const void* ptr) +{ + typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64; + return *((const xxh_unalign64*)ptr); +} + +#else + +/* + * Portable and safe solution. Generally efficient. + * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + */ +static xxh_u64 XXH_read64(const void* memPtr) +{ + xxh_u64 val; + XXH_memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap64 _byteswap_uint64 +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap64 __builtin_bswap64 +#else +static xxh_u64 XXH_swap64(xxh_u64 x) +{ + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); +} +#endif + + +/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */ +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) + +XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[0] + | ((xxh_u64)bytePtr[1] << 8) + | ((xxh_u64)bytePtr[2] << 16) + | ((xxh_u64)bytePtr[3] << 24) + | ((xxh_u64)bytePtr[4] << 32) + | ((xxh_u64)bytePtr[5] << 40) + | ((xxh_u64)bytePtr[6] << 48) + | ((xxh_u64)bytePtr[7] << 56); +} + +XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[7] + | ((xxh_u64)bytePtr[6] << 8) + | ((xxh_u64)bytePtr[5] << 16) + | ((xxh_u64)bytePtr[4] << 24) + | ((xxh_u64)bytePtr[3] << 32) + | ((xxh_u64)bytePtr[2] << 40) + | ((xxh_u64)bytePtr[1] << 48) + | ((xxh_u64)bytePtr[0] << 56); +} + +#else +XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); +} + +static xxh_u64 XXH_readBE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +} +#endif + +XXH_FORCE_INLINE xxh_u64 +XXH_readLE64_align(const void* ptr, XXH_alignment align) +{ + if (align==XXH_unaligned) + return XXH_readLE64(ptr); + else + return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr); +} + + +/******* xxh64 *******/ +/*! + * @} + * @defgroup XXH64_impl XXH64 implementation + * @ingroup impl + * + * Details on the XXH64 implementation. 
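+ *
+ * For intuition, a hedged sketch of the little-endian read contract used
+ * throughout this section, independent of host endianness (illustrative bytes):
+ * @code{.c}
+ * static const unsigned char buf[8] =
+ *     { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef };
+ * // buf[0] is the least significant byte, so on every platform:
+ * // XXH_readLE64(buf) == 0xefcdab8967452301ULL
+ * @endcode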
+ * @{ + */ +/* #define rather than static const, to be used as initializers */ +#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */ +#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */ +#define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */ +#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */ +#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */ + +#ifdef XXH_OLD_NAMES +# define PRIME64_1 XXH_PRIME64_1 +# define PRIME64_2 XXH_PRIME64_2 +# define PRIME64_3 XXH_PRIME64_3 +# define PRIME64_4 XXH_PRIME64_4 +# define PRIME64_5 XXH_PRIME64_5 +#endif + +/*! @copydoc XXH32_round */ +static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input) +{ + acc += input * XXH_PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= XXH_PRIME64_1; +#if (defined(__AVX512F__)) && !defined(XXH_ENABLE_AUTOVECTORIZE) + /* + * DISABLE AUTOVECTORIZATION: + * A compiler fence is used to prevent GCC and Clang from + * autovectorizing the XXH64 loop (pragmas and attributes don't work for some + * reason) without globally disabling AVX512. + * + * Autovectorization of XXH64 tends to be detrimental, + * though the exact outcome may change depending on the exact CPU and compiler version. + * For information, it has been reported as detrimental for Skylake-X, + * but possibly beneficial for Zen4. + * + * The default is to disable auto-vectorization, + * but it can be re-enabled by defining the `XXH_ENABLE_AUTOVECTORIZE` build variable. + */ + XXH_COMPILER_GUARD(acc); +#endif + return acc; +} + +static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val) +{ + val = XXH64_round(0, val); + acc ^= val; + acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4; + return acc; +} + +/*! @copydoc XXH32_avalanche */ +static xxh_u64 XXH64_avalanche(xxh_u64 hash) +{ + hash ^= hash >> 33; + hash *= XXH_PRIME64_2; + hash ^= hash >> 29; + hash *= XXH_PRIME64_3; + hash ^= hash >> 32; + return hash; +} + + +#define XXH_get64bits(p) XXH_readLE64_align(p, align) + +/*! + * @internal + * @brief Processes the last 0-31 bytes of @p ptr. + * + * There may be up to 31 bytes remaining to consume from the input. + * This final stage will digest them to ensure that all input bytes are present + * in the final mix. + * + * @param hash The hash to finalize. + * @param ptr The pointer to the remaining input. + * @param len The remaining length, modulo 32. + * @param align Whether @p ptr is aligned. + * @return The finalized hash. + * @see XXH32_finalize().
+ */ +static XXH_PUREF xxh_u64 +XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) +{ + if (ptr==NULL) XXH_ASSERT(len == 0); + len &= 31; + while (len >= 8) { + xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); + ptr += 8; + hash ^= k1; + hash = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4; + len -= 8; + } + if (len >= 4) { + hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; + ptr += 4; + hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; + len -= 4; + } + while (len > 0) { + hash ^= (*ptr++) * XXH_PRIME64_5; + hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1; + --len; + } + return XXH64_avalanche(hash); +} + +#ifdef XXH_OLD_NAMES +# define PROCESS1_64 XXH_PROCESS1_64 +# define PROCESS4_64 XXH_PROCESS4_64 +# define PROCESS8_64 XXH_PROCESS8_64 +#else +# undef XXH_PROCESS1_64 +# undef XXH_PROCESS4_64 +# undef XXH_PROCESS8_64 +#endif + +/*! + * @internal + * @brief The implementation for @ref XXH64(). + * + * @param input , len , seed Directly passed from @ref XXH64(). + * @param align Whether @p input is aligned. + * @return The calculated hash. + */ +XXH_FORCE_INLINE XXH_PUREF xxh_u64 +XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align) +{ + xxh_u64 h64; + if (input==NULL) XXH_ASSERT(len == 0); + + if (len>=32) { + const xxh_u8* const bEnd = input + len; + const xxh_u8* const limit = bEnd - 31; + xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2; + xxh_u64 v2 = seed + XXH_PRIME64_2; + xxh_u64 v3 = seed + 0; + xxh_u64 v4 = seed - XXH_PRIME64_1; + + do { + v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8; + v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8; + v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8; + v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8; + } while (input < limit); + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + } else { + h64 = seed + XXH_PRIME64_5; + } + + h64 += (xxh_u64) len; + + return XXH64_finalize(h64, input, len, align); +} + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) +{ +#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH64_state_t state; + XXH64_reset(&state, seed); + XXH64_update(&state, (const xxh_u8*)input, len); + return XXH64_digest(&state); +#else + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ + return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); + } } + + return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); + +#endif +} + +/******* Hash Streaming *******/ +#ifndef XXH_NO_STREAM +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) +{ + return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); +} +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState) +{ + XXH_memcpy(dstState, srcState, sizeof(*dstState)); +} + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed) +{ + XXH_ASSERT(statePtr != NULL); + memset(statePtr, 0, sizeof(*statePtr)); + statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2; + statePtr->v[1] = seed + XXH_PRIME64_2; + statePtr->v[2] = seed + 0; + statePtr->v[3] = seed - XXH_PRIME64_1; + return XXH_OK; +} + + +/*!
@ingroup XXH64_family */ +XXH_PUBLIC_API XXH_errorcode +XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len) +{ + if (input==NULL) { + XXH_ASSERT(len == 0); + return XXH_OK; + } + + { const xxh_u8* p = (const xxh_u8*)input; + const xxh_u8* const bEnd = p + len; + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len); + state->memsize += (xxh_u32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0)); + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1)); + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2)); + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3)); + p += 32 - state->memsize; + state->memsize = 0; + } + + if (p+32 <= bEnd) { + const xxh_u8* const limit = bEnd - 32; + + do { + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8; + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8; + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8; + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8; + } while (p<=limit); + + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state) +{ + xxh_u64 h64; + + if (state->total_len >= 32) { + h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18); + h64 = XXH64_mergeRound(h64, state->v[0]); + h64 = XXH64_mergeRound(h64, state->v[1]); + h64 = XXH64_mergeRound(h64, state->v[2]); + h64 = XXH64_mergeRound(h64, state->v[3]); + } else { + h64 = state->v[2] /*seed*/ + XXH_PRIME64_5; + } + + h64 += (xxh_u64) state->total_len; + + return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned); +} +#endif /* !XXH_NO_STREAM */ + +/******* Canonical representation *******/ + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + XXH_memcpy(dst, &hash, sizeof(*dst)); +} + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); +} + +#if defined (__cplusplus) +} +#endif + +#ifndef XXH_NO_XXH3 + +/* ********************************************************************* +* XXH3 +* New generation hash designed for speed on small keys and vectorization +************************************************************************ */ +/*! + * @} + * @defgroup XXH3_impl XXH3 implementation + * @ingroup impl + * @{ + */ + +/* === Compiler specifics === */ + +#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. 
Tested with GCC 5.5 */ +# define XXH_RESTRICT /* disable */ +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */ +# define XXH_RESTRICT restrict +#elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \ + || (defined (__clang__)) \ + || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \ + || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300)) +/* + * There are a LOT more compilers that recognize __restrict but this + * covers the major ones. + */ +# define XXH_RESTRICT __restrict +#else +# define XXH_RESTRICT /* disable */ +#endif + +#if (defined(__GNUC__) && (__GNUC__ >= 3)) \ + || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \ + || defined(__clang__) +# define XXH_likely(x) __builtin_expect(x, 1) +# define XXH_unlikely(x) __builtin_expect(x, 0) +#else +# define XXH_likely(x) (x) +# define XXH_unlikely(x) (x) +#endif + +#ifndef XXH_HAS_INCLUDE +# ifdef __has_include +/* + * Not defined as XXH_HAS_INCLUDE(x) (function-like) because + * this causes segfaults in Apple Clang 4.2 (on Mac OS X 10.7 Lion) + */ +# define XXH_HAS_INCLUDE __has_include +# else +# define XXH_HAS_INCLUDE(x) 0 +# endif +#endif + +#if defined(__GNUC__) || defined(__clang__) +# if defined(__ARM_FEATURE_SVE) +# include <arm_sve.h> +# endif +# if defined(__ARM_NEON__) || defined(__ARM_NEON) \ + || (defined(_M_ARM) && _M_ARM >= 7) \ + || defined(_M_ARM64) || defined(_M_ARM64EC) \ + || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<simde/wasm/simd128.h>)) /* WASM SIMD128 via SIMDe */ +# define inline __inline__ /* circumvent a clang bug */ +# include <arm_neon.h> +# undef inline +# elif defined(__AVX2__) +# include <immintrin.h> +# elif defined(__SSE2__) +# include <emmintrin.h> +# endif +#endif + +#if defined(_MSC_VER) +# include <intrin.h> +#endif + +/* + * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while + * remaining a true 64-bit/128-bit hash function. + * + * This is done by prioritizing a subset of 64-bit operations that can be + * emulated without too many steps on the average 32-bit machine. + * + * For example, these two lines seem similar, and run equally fast on 64-bit: + * + * xxh_u64 x; + * x ^= (x >> 47); // good + * x ^= (x >> 13); // bad + * + * However, to a 32-bit machine, there is a major difference. + * + * x ^= (x >> 47) looks like this: + * + * x.lo ^= (x.hi >> (47 - 32)); + * + * while x ^= (x >> 13) looks like this: + * + * // note: funnel shifts are not usually cheap. + * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13)); + * x.hi ^= (x.hi >> 13); + * + * The first one is significantly faster than the second, simply because the + * shift is larger than 32. This means: + * - All the bits we need are in the upper 32 bits, so we can ignore the lower + * 32 bits in the shift. + * - The shift result will always fit in the lower 32 bits, and therefore, + * we can ignore the upper 32 bits in the xor. + * + * Thanks to this optimization, XXH3 only requires these features to be efficient: + * + * - Usable unaligned access + * - A 32-bit or 64-bit ALU + * - If 32-bit, a decent ADC instruction + * - A 32 or 64-bit multiply with a 64-bit result + * - For the 128-bit variant, a decent byteswap helps short inputs. + * + * The first two are already required by XXH32, and almost all 32-bit and 64-bit + * platforms which can run XXH32 can run XXH3 efficiently. + * + * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one + * notable exception. + * + * First of all, Thumb-1 lacks support for the UMULL instruction which + * performs the important long multiply.
This means numerous __aeabi_lmul + * calls. + * + * Second of all, the 8 functional registers are just not enough. + * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need + * Lo registers, and this shuffling results in thousands more MOVs than A32. + * + * A32 and T32 don't have this limitation. They can access all 14 registers, + * do a 32->64 multiply with UMULL, and the flexible operand allowing free + * shifts is helpful, too. + * + * Therefore, we do a quick sanity check. + * + * If compiling Thumb-1 for a target which supports ARM instructions, we will + * emit a warning, as it is not a "sane" platform to compile for. + * + * Usually, if this happens, it is because of an accident and you probably need + * to specify -march, as you likely meant to compile for a newer architecture. + * + * Credit: large sections of the vectorial and asm source code paths + * have been contributed by @easyaspi314 + */ +#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM) +# warning "XXH3 is highly inefficient without ARM or Thumb-2." +#endif + +/* ========================================== + * Vectorization detection + * ========================================== */ + +#ifdef XXH_DOXYGEN +/*! + * @ingroup tuning + * @brief Overrides the vectorization implementation chosen for XXH3. + * + * Can be defined to 0 to disable SIMD or any of the values mentioned in + * @ref XXH_VECTOR_TYPE. + * + * If this is not defined, it uses predefined macros to determine the best + * implementation. + */ +# define XXH_VECTOR XXH_SCALAR +/*! + * @ingroup tuning + * @brief Possible values for @ref XXH_VECTOR. + * + * Note that these are actually implemented as macros. + * + * If this is not defined, it is detected automatically. + * internal macro XXH_X86DISPATCH overrides this. + */ +enum XXH_VECTOR_TYPE /* fake enum */ { + XXH_SCALAR = 0, /*!< Portable scalar version */ + XXH_SSE2 = 1, /*!< + * SSE2 for Pentium 4, Opteron, all x86_64. + * + * @note SSE2 is also guaranteed on Windows 10, macOS, and + * Android x86. + */ + XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */ + XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */ + XXH_NEON = 4, /*!< + * NEON for most ARMv7-A, all AArch64, and WASM SIMD128 + * via the SIMDeverywhere polyfill provided with the + * Emscripten SDK. + */ + XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */ + XXH_SVE = 6, /*!< SVE for some ARMv8-A and ARMv9-A */ +}; +/*! + * @ingroup tuning + * @brief Selects the minimum alignment for XXH3's accumulators. + * + * When using SIMD, this should match the alignment required for said vector + * type, so, for example, 32 for AVX2. + * + * Default: Auto detected. 
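+ *
+ * For example, a build that wants to force the SSE2 code path could define
+ * both macros on the command line (an illustrative invocation, not a
+ * requirement of the library):
+ *
+ *   cc -O3 -DXXH_VECTOR=XXH_SSE2 -DXXH_ACC_ALIGN=16 -c xxhash.c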
+ */ +# define XXH_ACC_ALIGN 8 +#endif + +/* Actual definition */ +#ifndef XXH_DOXYGEN +# define XXH_SCALAR 0 +# define XXH_SSE2 1 +# define XXH_AVX2 2 +# define XXH_AVX512 3 +# define XXH_NEON 4 +# define XXH_VSX 5 +# define XXH_SVE 6 +#endif + +#ifndef XXH_VECTOR /* can be defined on command line */ +# if defined(__ARM_FEATURE_SVE) +# define XXH_VECTOR XXH_SVE +# elif ( \ + defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \ + || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \ + || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<simde/arm/neon.h>)) /* wasm simd128 via SIMDe */ \ + ) && ( \ + defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \ + ) +# define XXH_VECTOR XXH_NEON +# elif defined(__AVX512F__) +# define XXH_VECTOR XXH_AVX512 +# elif defined(__AVX2__) +# define XXH_VECTOR XXH_AVX2 +# elif defined(__SSE2__) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2)) +# define XXH_VECTOR XXH_SSE2 +# elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \ + || (defined(__s390x__) && defined(__VEC__)) \ + && defined(__GNUC__) /* TODO: IBM XL */ +# define XXH_VECTOR XXH_VSX +# else +# define XXH_VECTOR XXH_SCALAR +# endif +#endif + +/* __ARM_FEATURE_SVE is only supported by GCC & Clang. */ +#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE) +# ifdef _MSC_VER +# pragma warning(once : 4606) +# else +# warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead." +# endif +# undef XXH_VECTOR +# define XXH_VECTOR XXH_SCALAR +#endif + +/* + * Controls the alignment of the accumulator, + * for compatibility with aligned vector loads, which are usually faster. + */ +#ifndef XXH_ACC_ALIGN +# if defined(XXH_X86DISPATCH) +# define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */ +# elif XXH_VECTOR == XXH_SCALAR /* scalar */ +# define XXH_ACC_ALIGN 8 +# elif XXH_VECTOR == XXH_SSE2 /* sse2 */ +# define XXH_ACC_ALIGN 16 +# elif XXH_VECTOR == XXH_AVX2 /* avx2 */ +# define XXH_ACC_ALIGN 32 +# elif XXH_VECTOR == XXH_NEON /* neon */ +# define XXH_ACC_ALIGN 16 +# elif XXH_VECTOR == XXH_VSX /* vsx */ +# define XXH_ACC_ALIGN 16 +# elif XXH_VECTOR == XXH_AVX512 /* avx512 */ +# define XXH_ACC_ALIGN 64 +# elif XXH_VECTOR == XXH_SVE /* sve */ +# define XXH_ACC_ALIGN 64 +# endif +#endif + +#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \ + || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512 +# define XXH_SEC_ALIGN XXH_ACC_ALIGN +#elif XXH_VECTOR == XXH_SVE +# define XXH_SEC_ALIGN XXH_ACC_ALIGN +#else +# define XXH_SEC_ALIGN 8 +#endif + +#if defined(__GNUC__) || defined(__clang__) +# define XXH_ALIASING __attribute__((may_alias)) +#else +# define XXH_ALIASING /* nothing */ +#endif + +/* + * UGLY HACK: + * GCC usually generates the best code with -O3 for xxHash. + * + * However, when targeting AVX2, it is overzealous in its unrolling resulting + * in code roughly 3/4 the speed of Clang. + * + * There are other issues, such as GCC splitting _mm256_loadu_si256 into + * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which + * only applies to Sandy and Ivy Bridge... which don't even support AVX2. + * + * That is why when compiling the AVX2 version, it is recommended to use either + * -O2 -mavx2 -march=haswell + * or + * -O2 -mavx2 -mno-avx256-split-unaligned-load + * for decent performance, or to use Clang instead. 
+ * + * Fortunately, we can control the first one with a pragma that forces GCC into + * -O2, but the other one we can't control without "failed to inline always + * inline function due to target mismatch" warnings. + */ +#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ + && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ + && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */ +# pragma GCC push_options +# pragma GCC optimize("-O2") +#endif + +#if defined (__cplusplus) +extern "C" { +#endif + +#if XXH_VECTOR == XXH_NEON + +/* + * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC -O3 + * optimizes out the entire hashLong loop because of the aliasing violation. + * + * However, GCC is also inefficient at load-store optimization with vld1q/vst1q, + * so the only option is to mark it as aliasing. + */ +typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING; + +/*! + * @internal + * @brief `vld1q_u64` but faster and alignment-safe. + * + * On AArch64, unaligned access is always safe, but on ARMv7-a, it is only + * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86). + * + * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it + * prohibits load-store optimizations. Therefore, a direct dereference is used. + * + * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe + * unaligned load. + */ +#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) +XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */ +{ + return *(xxh_aliasing_uint64x2_t const *)ptr; +} +#else +XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) +{ + return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr)); +} +#endif + +/*! + * @internal + * @brief `vmlal_u32` on low and high halves of a vector. + * + * This is a workaround for AArch64 GCC < 11 which implemented arm_neon.h with + * inline assembly and were therefore incapable of merging the `vget_{low, high}_u32` + * with `vmlal_u32`. + */ +#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11 +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + /* Inline assembly is the only way */ + __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs)); + return acc; +} +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + /* This intrinsic works as expected */ + return vmlal_high_u32(acc, lhs, rhs); +} +#else +/* Portable intrinsic versions */ +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs)); +} +/*! @copydoc XXH_vmlal_low_u32 + * Assume the compiler converts this to vmlal_high_u32 on aarch64 */ +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs)); +} +#endif + +/*! + * @ingroup tuning + * @brief Controls the NEON to scalar ratio for XXH3 + * + * This can be set to 2, 4, 6, or 8. + * + * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used. + * + * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of those + * can be NEON. If you are only using NEON instructions, you are only using 2/3 of the CPU + * bandwidth. 
+ * + * This is even more noticeable on the more advanced cores like the Cortex-A76 which + * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once. + * + * Therefore, to make the most out of the pipeline, it is beneficial to run 6 NEON lanes + * and 2 scalar lanes, which is chosen by default. + * + * This does not apply to Apple processors or 32-bit processors, which run better with + * full NEON. These will default to 8. Additionally, size-optimized builds run 8 lanes. + * + * This change benefits CPUs with large micro-op buffers without negatively affecting + * most other CPUs: + * + * | Chipset | Dispatch type | NEON only | 6:2 hybrid | Diff. | + * |:----------------------|:--------------------|----------:|-----------:|------:| + * | Snapdragon 730 (A76) | 2 NEON/8 micro-ops | 8.8 GB/s | 10.1 GB/s | ~16% | + * | Snapdragon 835 (A73) | 2 NEON/3 micro-ops | 5.1 GB/s | 5.3 GB/s | ~5% | + * | Marvell PXA1928 (A53) | In-order dual-issue | 1.9 GB/s | 1.9 GB/s | 0% | + * | Apple M1 | 4 NEON/8 micro-ops | 37.3 GB/s | 36.1 GB/s | ~-3% | + * + * It also seems to fix some bad codegen on GCC, making it almost as fast as clang. + * + * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of the lanes, meaning + * it effectively becomes a worse 4. + * + * @see XXH3_accumulate_512_neon() + */ +# ifndef XXH3_NEON_LANES +# if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \ + && !defined(__APPLE__) && XXH_SIZE_OPT <= 0 +# define XXH3_NEON_LANES 6 +# else +# define XXH3_NEON_LANES XXH_ACC_NB +# endif +# endif +#endif /* XXH_VECTOR == XXH_NEON */ + +#if defined (__cplusplus) +} /* extern "C" */ +#endif + +/* + * VSX and Z Vector helpers. + * + * This is very messy, and any pull requests to clean this up are welcome. + * + * There are a lot of problems with supporting VSX and s390x, due to + * inconsistent intrinsics, spotty coverage, and multiple endiannesses. + */ +#if XXH_VECTOR == XXH_VSX +/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`, + * and `pixel`. This is a problem for obvious reasons. + * + * These keywords are unnecessary; the spec literally says they are + * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd + * after including the header. + * + * We use pragma push_macro/pop_macro to keep the namespace clean. */ +# pragma push_macro("bool") +# pragma push_macro("vector") +# pragma push_macro("pixel") +/* silence potential macro redefined warnings */ +# undef bool +# undef vector +# undef pixel + +# if defined(__s390x__) +# include <s390intrin.h> +# else +# include <altivec.h> +# endif + +/* Restore the original macro values, if applicable. */ +# pragma pop_macro("pixel") +# pragma pop_macro("vector") +# pragma pop_macro("bool") + +typedef __vector unsigned long long xxh_u64x2; +typedef __vector unsigned char xxh_u8x16; +typedef __vector unsigned xxh_u32x4; + +/* + * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing issue. + */ +typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING; + +# ifndef XXH_VSX_BE +# if defined(__BIG_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +# define XXH_VSX_BE 1 +# elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__ +# warning "-maltivec=be is not recommended. Please use native endianness." 
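+/* Note: when XXH_VSX_BE == 1, XXH_vec_loadu() below byteswaps each 64-bit
+ * lane after loading, so lanes are consumed in the same little-endian order
+ * as on x86. */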
+# define XXH_VSX_BE 1 +# else +# define XXH_VSX_BE 0 +# endif +# endif /* !defined(XXH_VSX_BE) */ + +# if XXH_VSX_BE +# if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__)) +# define XXH_vec_revb vec_revb +# else +#if defined (__cplusplus) +extern "C" { +#endif +/*! + * A polyfill for POWER9's vec_revb(). + */ +XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val) +{ + xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, + 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 }; + return vec_perm(val, val, vByteSwap); +} +#if defined (__cplusplus) +} /* extern "C" */ +#endif +# endif +# endif /* XXH_VSX_BE */ + +#if defined (__cplusplus) +extern "C" { +#endif +/*! + * Performs an unaligned vector load and byte swaps it on big endian. + */ +XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) +{ + xxh_u64x2 ret; + XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2)); +# if XXH_VSX_BE + ret = XXH_vec_revb(ret); +# endif + return ret; +} + +/* + * vec_mulo and vec_mule are very problematic intrinsics on PowerPC + * + * These intrinsics weren't added until GCC 8, despite existing for a while, + * and they are endian dependent. Also, their meaning swap depending on version. + * */ +# if defined(__s390x__) + /* s390x is always big endian, no issue on this platform */ +# define XXH_vec_mulo vec_mulo +# define XXH_vec_mule vec_mule +# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__) +/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */ + /* The IBM XL Compiler (which defined __clang__) only implements the vec_* operations */ +# define XXH_vec_mulo __builtin_altivec_vmulouw +# define XXH_vec_mule __builtin_altivec_vmuleuw +# else +/* gcc needs inline assembly */ +/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. 
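+ * For reference: vmulouw multiplies the odd-numbered 32-bit elements of its
+ * two inputs into full 64-bit products, while vmuleuw does the same for the
+ * even-numbered elements; which elements count as "odd" depends on the
+ * target's element numbering, which is exactly the endianness/version swap
+ * noted above.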
*/ +XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b) +{ + xxh_u64x2 result; + __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); + return result; +} +XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) +{ + xxh_u64x2 result; + __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); + return result; +} +# endif /* XXH_vec_mulo, XXH_vec_mule */ + +#if defined (__cplusplus) +} /* extern "C" */ +#endif + +#endif /* XXH_VECTOR == XXH_VSX */ + +#if XXH_VECTOR == XXH_SVE +#define ACCRND(acc, offset) \ +do { \ + svuint64_t input_vec = svld1_u64(mask, xinput + offset); \ + svuint64_t secret_vec = svld1_u64(mask, xsecret + offset); \ + svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec); \ + svuint64_t swapped = svtbl_u64(input_vec, kSwap); \ + svuint64_t mixed_lo = svextw_u64_x(mask, mixed); \ + svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32); \ + svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \ + acc = svadd_u64_x(mask, acc, mul); \ +} while (0) +#endif /* XXH_VECTOR == XXH_SVE */ + +/* prefetch + * can be disabled, by declaring XXH_NO_PREFETCH build macro */ +#if defined(XXH_NO_PREFETCH) +# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ +#else +# if XXH_SIZE_OPT >= 1 +# define XXH_PREFETCH(ptr) (void)(ptr) +# elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */ +# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ +# define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) +# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) ) +# define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) +# else +# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ +# endif +#endif /* XXH_NO_PREFETCH */ + +#if defined (__cplusplus) +extern "C" { +#endif +/* ========================================== + * XXH3 default settings + * ========================================== */ + +#define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */ + +#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN) +# error "default keyset is not large enough" +#endif + +/*! Pseudorandom secret taken directly from FARSH. 
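+ *
+ * This array serves the seedless and seeded variants. A caller who wants a
+ * private secret can derive one through the public API instead; a minimal
+ * sketch (buffer name and seed material are illustrative):
+ *
+ *   unsigned char sec[XXH3_SECRET_SIZE_MIN];
+ *   XXH3_generateSecret(sec, sizeof(sec), "seed material", 13);
+ *   XXH64_hash_t h = XXH3_64bits_withSecret(data, len, sec, sizeof(sec));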
*/ +XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { + 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c, + 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, + 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21, + 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c, + 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, + 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8, + 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d, + 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, + 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb, + 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e, + 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, + 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, +}; + +static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< 0b0001011001010110011001111001000110011110001101110111100111111001 */ +static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< 0b1001111110110010000111000110010100011110100110001101111100100101 */ + +#ifdef XXH_OLD_NAMES +# define kSecret XXH3_kSecret +#endif + +#ifdef XXH_DOXYGEN +/*! + * @brief Calculates a 32-bit to 64-bit long multiply. + * + * Implemented as a macro. + * + * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't + * need to (but it shouldn't need to anyways, it is about 7 instructions to do + * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we + * use that instead of the normal method. + * + * If you are compiling for platforms like Thumb-1 and don't have a better option, + * you may also want to write your own long multiply routine here. + * + * @param x, y Numbers to be multiplied + * @return 64-bit product of the low 32 bits of @p x and @p y. + */ +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64(xxh_u64 x, xxh_u64 y) +{ + return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF); +} +#elif defined(_MSC_VER) && defined(_M_IX86) +# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y)) +#else +/* + * Downcast + upcast is usually better than masking on older compilers like + * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers. + * + * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands + * and perform a full 64x64 multiply -- entirely redundant on 32-bit. + */ +# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y)) +#endif + +/*! + * @brief Calculates a 64->128-bit long multiply. + * + * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar + * version. + * + * @param lhs , rhs The 64-bit integers to be multiplied + * @return The 128-bit result represented in an @ref XXH128_hash_t. + */ +static XXH128_hash_t +XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) +{ + /* + * GCC/Clang __uint128_t method. + * + * On most 64-bit targets, GCC and Clang define a __uint128_t type. + * This is usually the best way as it usually uses a native long 64-bit + * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64. + * + * Usually. 
+ * + * Despite being a 32-bit platform, Clang (and emscripten) define this type + * despite not having the arithmetic for it. This results in a laggy + * compiler builtin call which calculates a full 128-bit multiply. + * In that case it is best to use the portable one. + * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677 + */ +#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \ + && defined(__SIZEOF_INT128__) \ + || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + + __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs; + XXH128_hash_t r128; + r128.low64 = (xxh_u64)(product); + r128.high64 = (xxh_u64)(product >> 64); + return r128; + + /* + * MSVC for x64's _umul128 method. + * + * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct); + * + * This compiles to single operand MUL on x64. + */ +#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC) + +#ifndef _MSC_VER +# pragma intrinsic(_umul128) +#endif + xxh_u64 product_high; + xxh_u64 const product_low = _umul128(lhs, rhs, &product_high); + XXH128_hash_t r128; + r128.low64 = product_low; + r128.high64 = product_high; + return r128; + + /* + * MSVC for ARM64's __umulh method. + * + * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method. + */ +#elif defined(_M_ARM64) || defined(_M_ARM64EC) + +#ifndef _MSC_VER +# pragma intrinsic(__umulh) +#endif + XXH128_hash_t r128; + r128.low64 = lhs * rhs; + r128.high64 = __umulh(lhs, rhs); + return r128; + +#else + /* + * Portable scalar method. Optimized for 32-bit and 64-bit ALUs. + * + * This is a fast and simple grade school multiply, which is shown below + * with base 10 arithmetic instead of base 0x100000000. + * + * 9 3 // D2 lhs = 93 + * x 7 5 // D2 rhs = 75 + * ---------- + * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15 + * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45 + * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21 + * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63 + * --------- + * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27 + * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67 + * --------- + * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975 + * + * The reasons for adding the products like this are: + * 1. It avoids manual carry tracking. Just like how + * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX. + * This avoids a lot of complexity. + * + * 2. It hints for, and on Clang, compiles to, the powerful UMAAL + * instruction available in ARM's Digital Signal Processing extension + * in 32-bit ARMv6 and later, which is shown below: + * + * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm) + * { + * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm; + * *RdLo = (xxh_u32)(product & 0xFFFFFFFF); + * *RdHi = (xxh_u32)(product >> 32); + * } + * + * This instruction was designed for efficient long multiplication, and + * allows this to be calculated in only 4 instructions at speeds + * comparable to some 64-bit ALUs. + * + * 3. It isn't terrible on other platforms. Usually this will be a couple + * of 32-bit ADD/ADCs. + */ + + /* First calculate all of the cross products. */ + xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF); + xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF); + xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32); + xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32); + + /* Now add the products together. 
These will never overflow. */ + xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; + xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi; + xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF); + + XXH128_hash_t r128; + r128.low64 = lower; + r128.high64 = upper; + return r128; +#endif +} + +/*! + * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it. + * + * The reason for the separate function is to prevent passing too many structs + * around by value. This will hopefully inline the multiply, but we don't force it. + * + * @param lhs , rhs The 64-bit integers to multiply + * @return The low 64 bits of the product XOR'd by the high 64 bits. + * @see XXH_mult64to128() + */ +static xxh_u64 +XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) +{ + XXH128_hash_t product = XXH_mult64to128(lhs, rhs); + return product.low64 ^ product.high64; +} + +/*! Seems to produce slightly better code on GCC for some reason. */ +XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) +{ + XXH_ASSERT(0 <= shift && shift < 64); + return v64 ^ (v64 >> shift); +} + +/* + * This is a fast avalanche stage, + * suitable when input bits are already partially mixed + */ +static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) +{ + h64 = XXH_xorshift64(h64, 37); + h64 *= PRIME_MX1; + h64 = XXH_xorshift64(h64, 32); + return h64; +} + +/* + * This is a stronger avalanche, + * inspired by Pelle Evensen's rrmxmx + * preferable when input has not been previously mixed + */ +static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) +{ + /* this mix is inspired by Pelle Evensen's rrmxmx */ + h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24); + h64 *= PRIME_MX2; + h64 ^= (h64 >> 35) + len ; + h64 *= PRIME_MX2; + return XXH_xorshift64(h64, 28); +} + + +/* ========================================== + * Short keys + * ========================================== + * One of the shortcomings of XXH32 and XXH64 was that their performance was + * sub-optimal on short lengths. It used an iterative algorithm which strongly + * favored lengths that were a multiple of 4 or 8. + * + * Instead of iterating over individual inputs, we use a set of single shot + * functions which piece together a range of lengths and operate in constant time. + * + * Additionally, the number of multiplies has been significantly reduced. This + * reduces latency, especially when emulating 64-bit multiplies on 32-bit. + * + * Depending on the platform, this may or may not be faster than XXH32, but it + * is almost guaranteed to be faster than XXH64. + */ + +/* + * At very short lengths, there isn't enough input to fully hide secrets, or use + * the entire secret. + * + * There is also only a limited amount of mixing we can do before significantly + * impacting performance. + * + * Therefore, we use different sections of the secret and always mix two secret + * samples with an XOR. This should have no effect on performance on the + * seedless or withSeed variants because everything _should_ be constant folded + * by modern compilers. + * + * The XOR mixing hides individual parts of the secret and increases entropy. + * + * This adds an extra layer of strength for custom secrets. 
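+ *
+ * To illustrate the constant folding claim: in the seedless 4..8 byte path
+ * below, the secret sample
+ *
+ *   bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed
+ *
+ * reads only the compile-time constant default secret when seed == 0, so an
+ * optimizing compiler can usually collapse the whole expression into a
+ * single 64-bit immediate.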
+ */ +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + /* + * len = 1: combined = { input[0], 0x01, input[0], input[0] } + * len = 2: combined = { input[1], 0x02, input[0], input[1] } + * len = 3: combined = { input[2], 0x03, input[0], input[1] } + */ + { xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) + | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); + xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; + xxh_u64 const keyed = (xxh_u64)combined ^ bitflip; + return XXH64_avalanche(keyed); + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len <= 8); + seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; + { xxh_u32 const input1 = XXH_readLE32(input); + xxh_u32 const input2 = XXH_readLE32(input + len - 4); + xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed; + xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32); + xxh_u64 const keyed = input64 ^ bitflip; + return XXH3_rrmxmx(keyed, len); + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(9 <= len && len <= 16); + { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed; + xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed; + xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1; + xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2; + xxh_u64 const acc = len + + XXH_swap64(input_lo) + input_hi + + XXH3_mul128_fold64(input_lo, input_hi); + return XXH3_avalanche(acc); + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(len <= 16); + { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed); + if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed); + if (len) return XXH3_len_1to3_64b(input, len, secret, seed); + return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64))); + } +} + +/* + * DISCLAIMER: There are known *seed-dependent* multicollisions here due to + * multiplication by zero, affecting hashes of lengths 17 to 240. + * + * However, they are very unlikely. + * + * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all + * unseeded non-cryptographic hashes, it does not attempt to defend itself + * against specially crafted inputs, only random inputs. + * + * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes + * cancelling out the secret is taken an arbitrary number of times (addressed + * in XXH3_accumulate_512), this collision is very unlikely with random inputs + * and/or proper seeding: + * + * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a + * function that is only called up to 16 times per hash with up to 240 bytes of + * input. 
+ * + * This is not too bad for a non-cryptographic hash function, especially with + * only 64 bit outputs. + * + * The 128-bit variant (which trades some speed for strength) is NOT affected + * by this, although it is always a good idea to use a proper seed if you care + * about strength. + */ +XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input, + const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64) +{ +#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ + && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \ + && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */ + /* + * UGLY HACK: + * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in + * slower code. + * + * By forcing seed64 into a register, we disrupt the cost model and + * cause it to scalarize. See `XXH32_round()` + * + * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600, + * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on + * GCC 9.2, despite both emitting scalar code. + * + * GCC generates much better scalar code than Clang for the rest of XXH3, + * which is why finding a more optimal codepath is an interest. + */ + XXH_COMPILER_GUARD(seed64); +#endif + { xxh_u64 const input_lo = XXH_readLE64(input); + xxh_u64 const input_hi = XXH_readLE64(input+8); + return XXH3_mul128_fold64( + input_lo ^ (XXH_readLE64(secret) + seed64), + input_hi ^ (XXH_readLE64(secret+8) - seed64) + ); + } +} + +/* For mid range keys, XXH3 uses a Mum-hash variant. */ +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(16 < len && len <= 128); + + { xxh_u64 acc = len * XXH_PRIME64_1; +#if XXH_SIZE_OPT >= 1 + /* Smaller and cleaner, but slightly slower. */ + unsigned int i = (unsigned int)(len - 1) / 32; + do { + acc += XXH3_mix16B(input+16 * i, secret+32*i, seed); + acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed); + } while (i-- != 0); +#else + if (len > 32) { + if (len > 64) { + if (len > 96) { + acc += XXH3_mix16B(input+48, secret+96, seed); + acc += XXH3_mix16B(input+len-64, secret+112, seed); + } + acc += XXH3_mix16B(input+32, secret+64, seed); + acc += XXH3_mix16B(input+len-48, secret+80, seed); + } + acc += XXH3_mix16B(input+16, secret+32, seed); + acc += XXH3_mix16B(input+len-32, secret+48, seed); + } + acc += XXH3_mix16B(input+0, secret+0, seed); + acc += XXH3_mix16B(input+len-16, secret+16, seed); +#endif + return XXH3_avalanche(acc); + } +} + +/*! + * @brief Maximum size of "short" key in bytes. 
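+ *
+ * Roughly, the 64-bit one-shot dispatch then looks like this (a sketch, not
+ * the literal code):
+ *
+ *   if (len <= 16)               return XXH3_len_0to16_64b(input, len, secret, seed);
+ *   if (len <= 128)              return XXH3_len_17to128_64b(input, len, secret, secretSize, seed);
+ *   if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_64b(input, len, secret, secretSize, seed);
+ *   return hashLong(input, len, ...);   /* striped bulk loop */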
+ */ +#define XXH3_MIDSIZE_MAX 240 + +XXH_NO_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); + + #define XXH3_MIDSIZE_STARTOFFSET 3 + #define XXH3_MIDSIZE_LASTOFFSET 17 + + { xxh_u64 acc = len * XXH_PRIME64_1; + xxh_u64 acc_end; + unsigned int const nbRounds = (unsigned int)len / 16; + unsigned int i; + XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); + for (i=0; i<8; i++) { + acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed); + } + /* last bytes */ + acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed); + XXH_ASSERT(nbRounds >= 8); + acc = XXH3_avalanche(acc); +#if defined(__clang__) /* Clang */ \ + && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ + && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ + /* + * UGLY HACK: + * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86. + * Everywhere else, it uses scalar code. + * + * For 64->128-bit multiplies, even if the NEON was 100% optimal, it + * would still be slower than UMAAL (see XXH_mult64to128). + * + * Unfortunately, Clang doesn't handle the long multiplies properly and + * converts them to the nonexistent "vmulq_u64" intrinsic, which is then + * scalarized into an ugly mess of VMOV.32 instructions. + * + * This mess is difficult to avoid without turning autovectorization + * off completely, but they are usually relatively minor and/or not + * worth it to fix. + * + * This loop is the easiest to fix, as unlike XXH32, this pragma + * _actually works_ because it is a loop vectorization instead of an + * SLP vectorization. + */ + #pragma clang loop vectorize(disable) +#endif + for (i=8 ; i < nbRounds; i++) { + /* + * Prevents clang from unrolling the acc loop and interleaving with this one. + */ + XXH_COMPILER_GUARD(acc); + acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed); + } + return XXH3_avalanche(acc + acc_end); + } +} + + +/* ======= Long Keys ======= */ + +#define XXH_STRIPE_LEN 64 +#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */ +#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64)) + +#ifdef XXH_OLD_NAMES +# define STRIPE_LEN XXH_STRIPE_LEN +# define ACC_NB XXH_ACC_NB +#endif + +#ifndef XXH_PREFETCH_DIST +# ifdef __clang__ +# define XXH_PREFETCH_DIST 320 +# else +# if (XXH_VECTOR == XXH_AVX512) +# define XXH_PREFETCH_DIST 512 +# else +# define XXH_PREFETCH_DIST 384 +# endif +# endif /* __clang__ */ +#endif /* XXH_PREFETCH_DIST */ + +/* + * These macros are to generate an XXH3_accumulate() function. + * The argument selects the name suffix; the target attribute is applied + * where the template is instantiated. + * + * The name of this symbol is XXH3_accumulate_<name>() and it calls + * XXH3_accumulate_512_<name>(). + * + * It may be useful to hand implement this function if the compiler fails to + * optimize the inline function. 
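+ *
+ * For illustration, XXH3_ACCUMULATE_TEMPLATE(scalar) expands to roughly:
+ *
+ *   void XXH3_accumulate_scalar(xxh_u64* XXH_RESTRICT acc,
+ *                               const xxh_u8* XXH_RESTRICT input,
+ *                               const xxh_u8* XXH_RESTRICT secret,
+ *                               size_t nbStripes)
+ *   {
+ *       size_t n;
+ *       for (n = 0; n < nbStripes; n++) {
+ *           const xxh_u8* const in = input + n*XXH_STRIPE_LEN;
+ *           XXH_PREFETCH(in + XXH_PREFETCH_DIST);
+ *           XXH3_accumulate_512_scalar(acc, in,
+ *                                      secret + n*XXH_SECRET_CONSUME_RATE);
+ *       }
+ *   }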
+ */ +#define XXH3_ACCUMULATE_TEMPLATE(name) \ +void \ +XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc, \ + const xxh_u8* XXH_RESTRICT input, \ + const xxh_u8* XXH_RESTRICT secret, \ + size_t nbStripes) \ +{ \ + size_t n; \ + for (n = 0; n < nbStripes; n++ ) { \ + const xxh_u8* const in = input + n*XXH_STRIPE_LEN; \ + XXH_PREFETCH(in + XXH_PREFETCH_DIST); \ + XXH3_accumulate_512_##name( \ + acc, \ + in, \ + secret + n*XXH_SECRET_CONSUME_RATE); \ + } \ +} + + +XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64) +{ + if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64); + XXH_memcpy(dst, &v64, sizeof(v64)); +} + +/* Several intrinsic functions below are supposed to accept __int64 as argument, + * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ . + * However, several environments do not define __int64 type, + * requiring a workaround. + */ +#if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) + typedef int64_t xxh_i64; +#else + /* the following type must have a width of 64-bit */ + typedef long long xxh_i64; +#endif + + +/* + * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized. + * + * It is a hardened version of UMAC, based off of FARSH's implementation. + * + * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD + * implementations, and it is ridiculously fast. + * + * We harden it by mixing the original input to the accumulators as well as the product. + * + * This means that in the (relatively likely) case of a multiply by zero, the + * original input is preserved. + * + * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve + * cross-pollination, as otherwise the upper and lower halves would be + * essentially independent. + * + * This doesn't matter on 64-bit hashes since they all get merged together in + * the end, so we skip the extra step. + * + * Both XXH3_64bits and XXH3_128bits use this subroutine. + */ + +#if (XXH_VECTOR == XXH_AVX512) \ + || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0) + +#ifndef XXH_TARGET_AVX512 +# define XXH_TARGET_AVX512 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + __m512i* const xacc = (__m512i *) acc; + XXH_ASSERT((((size_t)acc) & 63) == 0); + XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); + + { + /* data_vec = input[0]; */ + __m512i const data_vec = _mm512_loadu_si512 (input); + /* key_vec = secret[0]; */ + __m512i const key_vec = _mm512_loadu_si512 (secret); + /* data_key = data_vec ^ key_vec; */ + __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo); + /* xacc[0] += swap(data_vec); */ + __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2)); + __m512i const sum = _mm512_add_epi64(*xacc, data_swap); + /* xacc[0] += product; */ + *xacc = _mm512_add_epi64(product, sum); + } +} +XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512) + +/* + * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing. 
+ * + * Multiplication isn't perfect, as explained by Google in HighwayHash: + * + * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to + * // varying degrees. In descending order of goodness, bytes + * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32. + * // As expected, the upper and lower bytes are much worse. + * + * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291 + * + * Since our algorithm uses a pseudorandom secret to add some variance into the + * mix, we don't need to (or want to) mix as often or as much as HighwayHash does. + * + * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid + * extraction. + * + * Both XXH3_64bits and XXH3_128bits use this subroutine. + */ + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 63) == 0); + XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); + { __m512i* const xacc = (__m512i*) acc; + const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1); + + /* xacc[0] ^= (xacc[0] >> 47) */ + __m512i const acc_vec = *xacc; + __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47); + /* xacc[0] ^= secret; */ + __m512i const key_vec = _mm512_loadu_si512 (secret); + __m512i const data_key = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */); + + /* xacc[0] *= XXH_PRIME32_1; */ + __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32); + __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32); + __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32); + *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32)); + } +} + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0); + XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64); + XXH_ASSERT(((size_t)customSecret & 63) == 0); + (void)(&XXH_writeLE64); + { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i); + __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64); + __m512i const seed = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos); + + const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret); + __m512i* const dest = ( __m512i*) customSecret; + int i; + XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dest & 63) == 0); + for (i=0; i < nbRounds; ++i) { + dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_AVX2) \ + || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0) + +#ifndef XXH_TARGET_AVX2 +# define XXH_TARGET_AVX2 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void +XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 31) == 0); + { __m256i* const xacc = (__m256i *) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ + const __m256i* const xinput = (const __m256i *) input; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. 
*/ + const __m256i* const xsecret = (const __m256i *) secret; + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) { + /* data_vec = xinput[i]; */ + __m256i const data_vec = _mm256_loadu_si256 (xinput+i); + /* key_vec = xsecret[i]; */ + __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); + /* data_key = data_vec ^ key_vec; */ + __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m256i const data_key_lo = _mm256_srli_epi64 (data_key, 32); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo); + /* xacc[i] += swap(data_vec); */ + __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2)); + __m256i const sum = _mm256_add_epi64(xacc[i], data_swap); + /* xacc[i] += product; */ + xacc[i] = _mm256_add_epi64(product, sum); + } } +} +XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2) + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void +XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 31) == 0); + { __m256i* const xacc = (__m256i*) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ + const __m256i* const xsecret = (const __m256i *) secret; + const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1); + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) { + /* xacc[i] ^= (xacc[i] >> 47) */ + __m256i const acc_vec = xacc[i]; + __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47); + __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted); + /* xacc[i] ^= xsecret; */ + __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); + __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); + + /* xacc[i] *= XXH_PRIME32_1; */ + __m256i const data_key_hi = _mm256_srli_epi64 (data_key, 32); + __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32); + __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32); + xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32)); + } + } +} + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0); + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6); + XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64); + (void)(&XXH_writeLE64); + XXH_PREFETCH(customSecret); + { __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64); + + const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret); + __m256i* dest = ( __m256i*) customSecret; + +# if defined(__GNUC__) || defined(__clang__) + /* + * On GCC & Clang, marking 'dest' as modified will cause the compiler: + * - do not extract the secret from sse registers in the internal loop + * - use less common registers, and avoid pushing these reg into stack + */ + XXH_COMPILER_GUARD(dest); +# endif + XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dest & 31) == 0); + + /* GCC -O2 need unroll loop manually */ + dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed); + dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed); + dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed); + dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed); + dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed); + dest[5] 
= _mm256_add_epi64(_mm256_load_si256(src+5), seed); + } +} + +#endif + +/* x86dispatch always generates SSE2 */ +#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH) + +#ifndef XXH_TARGET_SSE2 +# define XXH_TARGET_SSE2 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void +XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + /* SSE2 is just a half-scale version of the AVX2 version. */ + XXH_ASSERT((((size_t)acc) & 15) == 0); + { __m128i* const xacc = (__m128i *) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ + const __m128i* const xinput = (const __m128i *) input; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ + const __m128i* const xsecret = (const __m128i *) secret; + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { + /* data_vec = xinput[i]; */ + __m128i const data_vec = _mm_loadu_si128 (xinput+i); + /* key_vec = xsecret[i]; */ + __m128i const key_vec = _mm_loadu_si128 (xsecret+i); + /* data_key = data_vec ^ key_vec; */ + __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m128i const product = _mm_mul_epu32 (data_key, data_key_lo); + /* xacc[i] += swap(data_vec); */ + __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2)); + __m128i const sum = _mm_add_epi64(xacc[i], data_swap); + /* xacc[i] += product; */ + xacc[i] = _mm_add_epi64(product, sum); + } } +} +XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2) + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void +XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + { __m128i* const xacc = (__m128i*) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. 
*/ + const __m128i* const xsecret = (const __m128i *) secret; + const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1); + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { + /* xacc[i] ^= (xacc[i] >> 47) */ + __m128i const acc_vec = xacc[i]; + __m128i const shifted = _mm_srli_epi64 (acc_vec, 47); + __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted); + /* xacc[i] ^= xsecret[i]; */ + __m128i const key_vec = _mm_loadu_si128 (xsecret+i); + __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); + + /* xacc[i] *= XXH_PRIME32_1; */ + __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32); + __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32); + xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32)); + } + } +} + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); + (void)(&XXH_writeLE64); + { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i); + +# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900 + /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */ + XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) }; + __m128i const seed = _mm_load_si128((__m128i const*)seed64x2); +# else + __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64); +# endif + int i; + + const void* const src16 = XXH3_kSecret; + __m128i* dst16 = (__m128i*) customSecret; +# if defined(__GNUC__) || defined(__clang__) + /* + * On GCC & Clang, marking 'dest' as modified will cause the compiler: + * - do not extract the secret from sse registers in the internal loop + * - use less common registers, and avoid pushing these reg into stack + */ + XXH_COMPILER_GUARD(dst16); +# endif + XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dst16 & 15) == 0); + + for (i=0; i < nbRounds; ++i) { + dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_NEON) + +/* forward declarations for the scalar routines */ +XXH_FORCE_INLINE void +XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input, + void const* XXH_RESTRICT secret, size_t lane); + +XXH_FORCE_INLINE void +XXH3_scalarScrambleRound(void* XXH_RESTRICT acc, + void const* XXH_RESTRICT secret, size_t lane); + +/*! + * @internal + * @brief The bulk processing loop for NEON and WASM SIMD128. + * + * The NEON code path is actually partially scalar when running on AArch64. This + * is to optimize the pipelining and can have up to 15% speedup depending on the + * CPU, and it also mitigates some GCC codegen issues. + * + * @see XXH3_NEON_LANES for configuring this and details about this optimization. + * + * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit + * integers instead of the other platforms which mask full 64-bit vectors, + * so the setup is more complicated than just shifting right. + * + * Additionally, there is an optimization for 4 lanes at once noted below. + * + * Since, as stated, the most optimal amount of lanes for Cortexes is 6, + * there needs to be *three* versions of the accumulate operation used + * for the remaining 2 lanes. + * + * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics overlap + * nearly perfectly. 
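+ *
+ * Concretely, with the default XXH3_NEON_LANES == 6 on AArch64, one 64-byte
+ * stripe is split as follows (derived from the loops below):
+ *
+ *   lanes 0..3 : main NEON loop, two uint64x2_t pairs per iteration
+ *   lanes 4..5 : two-lane NEON tail using vmovn_u64/vshrn_n_u64
+ *   lanes 6..7 : XXH3_scalarRound(), issued alongside on the scalar pipeline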
+ */ + +XXH_FORCE_INLINE void +XXH3_accumulate_512_neon( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0); + { /* GCC for darwin arm64 does not like aliasing here */ + xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc; + /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */ + uint8_t const* xinput = (const uint8_t *) input; + uint8_t const* xsecret = (const uint8_t *) secret; + + size_t i; +#ifdef __wasm_simd128__ + /* + * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret + * is constant propagated, which results in it converting it to this + * inside the loop: + * + * a = v128.load(XXH3_kSecret + 0 + $secret_offset, offset = 0) + * b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0) + * ... + * + * This requires a full 32-bit address immediate (and therefore a 6 byte + * instruction) as well as an add for each offset. + * + * Putting an asm guard prevents it from folding (at the cost of losing + * the alignment hint), and uses the free offset in `v128.load` instead + * of adding secret_offset each time which overall reduces code size by + * about a kilobyte and improves performance. + */ + XXH_COMPILER_GUARD(xsecret); +#endif + /* Scalar lanes use the normal scalarRound routine */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + XXH3_scalarRound(acc, input, secret, i); + } + i = 0; + /* 4 NEON lanes at a time. */ + for (; i+1 < XXH3_NEON_LANES / 2; i+=2) { + /* data_vec = xinput[i]; */ + uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput + (i * 16)); + uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput + ((i+1) * 16)); + /* key_vec = xsecret[i]; */ + uint64x2_t key_vec_1 = XXH_vld1q_u64(xsecret + (i * 16)); + uint64x2_t key_vec_2 = XXH_vld1q_u64(xsecret + ((i+1) * 16)); + /* data_swap = swap(data_vec) */ + uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1); + uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1); + /* data_key = data_vec ^ key_vec; */ + uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1); + uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2); + + /* + * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a + * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to + * get one vector with the low 32 bits of each lane, and one vector + * with the high 32 bits of each lane. + * + * The intrinsic returns a double vector because the original ARMv7-a + * instruction modified both arguments in place. AArch64 and SIMD128 emit + * two instructions from this intrinsic. + * + * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ] + * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ] + */ + uint32x4x2_t unzipped = vuzpq_u32( + vreinterpretq_u32_u64(data_key_1), + vreinterpretq_u32_u64(data_key_2) + ); + /* data_key_lo = data_key & 0xFFFFFFFF */ + uint32x4_t data_key_lo = unzipped.val[0]; + /* data_key_hi = data_key >> 32 */ + uint32x4_t data_key_hi = unzipped.val[1]; + /* + * Then, we can split the vectors horizontally and multiply which, as for most + * widening intrinsics, have a variant that works on both high half vectors + * for free on AArch64. A similar instruction is available on SIMD128. 
+ * + * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi + */ + uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi); + uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi); + /* + * Clang reorders + * a += b * c; // umlal swap.2d, dkl.2s, dkh.2s + * c += a; // add acc.2d, acc.2d, swap.2d + * to + * c += a; // add acc.2d, acc.2d, swap.2d + * c += b * c; // umlal acc.2d, dkl.2s, dkh.2s + * + * While it would make sense in theory since the addition is faster, + * for reasons likely related to umlal being limited to certain NEON + * pipelines, this is worse. A compiler guard fixes this. + */ + XXH_COMPILER_GUARD_CLANG_NEON(sum_1); + XXH_COMPILER_GUARD_CLANG_NEON(sum_2); + /* xacc[i] = acc_vec + sum; */ + xacc[i] = vaddq_u64(xacc[i], sum_1); + xacc[i+1] = vaddq_u64(xacc[i+1], sum_2); + } + /* Operate on the remaining NEON lanes 2 at a time. */ + for (; i < XXH3_NEON_LANES / 2; i++) { + /* data_vec = xinput[i]; */ + uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16)); + /* key_vec = xsecret[i]; */ + uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); + /* acc_vec_2 = swap(data_vec) */ + uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1); + /* data_key = data_vec ^ key_vec; */ + uint64x2_t data_key = veorq_u64(data_vec, key_vec); + /* For two lanes, just use VMOVN and VSHRN. */ + /* data_key_lo = data_key & 0xFFFFFFFF; */ + uint32x2_t data_key_lo = vmovn_u64(data_key); + /* data_key_hi = data_key >> 32; */ + uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32); + /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */ + uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi); + /* Same Clang workaround as before */ + XXH_COMPILER_GUARD_CLANG_NEON(sum); + /* xacc[i] = acc_vec + sum; */ + xacc[i] = vaddq_u64 (xacc[i], sum); + } + } +} +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon) + +XXH_FORCE_INLINE void +XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { xxh_aliasing_uint64x2_t* xacc = (xxh_aliasing_uint64x2_t*) acc; + uint8_t const* xsecret = (uint8_t const*) secret; + + size_t i; + /* WASM uses operator overloads and doesn't need these. 
*/ +#ifndef __wasm_simd128__ + /* { prime32_1, prime32_1 } */ + uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1); + /* { 0, prime32_1, 0, prime32_1 } */ + uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32)); +#endif + + /* AArch64 uses both scalar and neon at the same time */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + XXH3_scalarScrambleRound(acc, secret, i); + } + for (i=0; i < XXH3_NEON_LANES / 2; i++) { + /* xacc[i] ^= (xacc[i] >> 47); */ + uint64x2_t acc_vec = xacc[i]; + uint64x2_t shifted = vshrq_n_u64(acc_vec, 47); + uint64x2_t data_vec = veorq_u64(acc_vec, shifted); + + /* xacc[i] ^= xsecret[i]; */ + uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); + uint64x2_t data_key = veorq_u64(data_vec, key_vec); + /* xacc[i] *= XXH_PRIME32_1 */ +#ifdef __wasm_simd128__ + /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */ + xacc[i] = data_key * XXH_PRIME32_1; +#else + /* + * Expanded version with portable NEON intrinsics + * + * lo(x) * lo(y) + (hi(x) * lo(y) << 32) + * + * prod_hi = hi(data_key) * lo(prime) << 32 + * + * Since we only need 32 bits of this multiply a trick can be used, reinterpreting the vector + * as a uint32x4_t and multiplying by { 0, prime, 0, prime } to cancel out the unwanted bits + * and avoid the shift. + */ + uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi); + /* Extract low bits for vmlal_u32 */ + uint32x2_t data_key_lo = vmovn_u64(data_key); + /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */ + xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo); +#endif + } + } +} +#endif + +#if (XXH_VECTOR == XXH_VSX) + +XXH_FORCE_INLINE void +XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + /* presumed aligned */ + xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; + xxh_u8 const* const xinput = (xxh_u8 const*) input; /* no alignment restriction */ + xxh_u8 const* const xsecret = (xxh_u8 const*) secret; /* no alignment restriction */ + xxh_u64x2 const v32 = { 32, 32 }; + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { + /* data_vec = xinput[i]; */ + xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i); + /* key_vec = xsecret[i]; */ + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); + xxh_u64x2 const data_key = data_vec ^ key_vec; + /* shuffled = (data_key << 32) | (data_key >> 32); */ + xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32); + /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */ + xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled); + /* acc_vec = xacc[i]; */ + xxh_u64x2 acc_vec = xacc[i]; + acc_vec += product; + + /* swap high and low halves */ +#ifdef __s390x__ + acc_vec += vec_permi(data_vec, data_vec, 2); +#else + acc_vec += vec_xxpermdi(data_vec, data_vec, 2); +#endif + xacc[i] = acc_vec; + } +} +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx) + +XXH_FORCE_INLINE void +XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; + const xxh_u8* const xsecret = (const xxh_u8*) secret; + /* constants */ + xxh_u64x2 const v32 = { 32, 32 }; + xxh_u64x2 const v47 = { 47, 47 }; + xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 }; + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / 
sizeof(xxh_u64x2); i++) { + /* xacc[i] ^= (xacc[i] >> 47); */ + xxh_u64x2 const acc_vec = xacc[i]; + xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47); + + /* xacc[i] ^= xsecret[i]; */ + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); + xxh_u64x2 const data_key = data_vec ^ key_vec; + + /* xacc[i] *= XXH_PRIME32_1 */ + /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */ + xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime); + /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */ + xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime); + xacc[i] = prod_odd + (prod_even << v32); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_SVE) + +XXH_FORCE_INLINE void +XXH3_accumulate_512_sve( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + uint64_t *xacc = (uint64_t *)acc; + const uint64_t *xinput = (const uint64_t *)(const void *)input; + const uint64_t *xsecret = (const uint64_t *)(const void *)secret; + svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); + uint64_t element_count = svcntd(); + if (element_count >= 8) { + svbool_t mask = svptrue_pat_b64(SV_VL8); + svuint64_t vacc = svld1_u64(mask, xacc); + ACCRND(vacc, 0); + svst1_u64(mask, xacc, vacc); + } else if (element_count == 2) { /* sve128 */ + svbool_t mask = svptrue_pat_b64(SV_VL2); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 2); + svuint64_t acc2 = svld1_u64(mask, xacc + 4); + svuint64_t acc3 = svld1_u64(mask, xacc + 6); + ACCRND(acc0, 0); + ACCRND(acc1, 2); + ACCRND(acc2, 4); + ACCRND(acc3, 6); + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 2, acc1); + svst1_u64(mask, xacc + 4, acc2); + svst1_u64(mask, xacc + 6, acc3); + } else { + svbool_t mask = svptrue_pat_b64(SV_VL4); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 4); + ACCRND(acc0, 0); + ACCRND(acc1, 4); + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 4, acc1); + } +} + +XXH_FORCE_INLINE void +XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc, + const xxh_u8* XXH_RESTRICT input, + const xxh_u8* XXH_RESTRICT secret, + size_t nbStripes) +{ + if (nbStripes != 0) { + uint64_t *xacc = (uint64_t *)acc; + const uint64_t *xinput = (const uint64_t *)(const void *)input; + const uint64_t *xsecret = (const uint64_t *)(const void *)secret; + svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); + uint64_t element_count = svcntd(); + if (element_count >= 8) { + svbool_t mask = svptrue_pat_b64(SV_VL8); + svuint64_t vacc = svld1_u64(mask, xacc + 0); + do { + /* svprfd(svbool_t, void *, enum svfprop); */ + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(vacc, 0); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, vacc); + } else if (element_count == 2) { /* sve128 */ + svbool_t mask = svptrue_pat_b64(SV_VL2); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 2); + svuint64_t acc2 = svld1_u64(mask, xacc + 4); + svuint64_t acc3 = svld1_u64(mask, xacc + 6); + do { + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(acc0, 0); + ACCRND(acc1, 2); + ACCRND(acc2, 4); + ACCRND(acc3, 6); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 2, acc1); + svst1_u64(mask, xacc + 4, acc2); + svst1_u64(mask, xacc + 6, acc3); + } else { + svbool_t mask = 
svptrue_pat_b64(SV_VL4); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 4); + do { + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(acc0, 0); + ACCRND(acc1, 4); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 4, acc1); + } + } +} + +#endif + +/* scalar variants - universal */ + +#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__)) +/* + * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they + * emit an excess mask and a full 64-bit multiply-add (MADD X-form). + * + * While this might not seem like much, as AArch64 is a 64-bit architecture, only + * big Cortex designs have a full 64-bit multiplier. + * + * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit + * multiplies expand to 2-3 multiplies in microcode. This has a major penalty + * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline. + * + * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does + * not have this penalty and does the mask automatically. + */ +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) +{ + xxh_u64 ret; + /* note: %x = 64-bit register, %w = 32-bit register */ + __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc)); + return ret; +} +#else +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) +{ + return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc; +} +#endif + +/*! + * @internal + * @brief Scalar round for @ref XXH3_accumulate_512_scalar(). + * + * This is extracted to its own function because the NEON path uses a combination + * of NEON and scalar. + */ +XXH_FORCE_INLINE void +XXH3_scalarRound(void* XXH_RESTRICT acc, + void const* XXH_RESTRICT input, + void const* XXH_RESTRICT secret, + size_t lane) +{ + xxh_u64* xacc = (xxh_u64*) acc; + xxh_u8 const* xinput = (xxh_u8 const*) input; + xxh_u8 const* xsecret = (xxh_u8 const*) secret; + XXH_ASSERT(lane < XXH_ACC_NB); + XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0); + { + xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8); + xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8); + xacc[lane ^ 1] += data_val; /* swap adjacent lanes */ + xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]); + } +} + +/*! + * @internal + * @brief Processes a 64 byte block of data using the scalar path. + */ +XXH_FORCE_INLINE void +XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + size_t i; + /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */ +#if defined(__GNUC__) && !defined(__clang__) \ + && (defined(__arm__) || defined(__thumb2__)) \ + && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \ + && XXH_SIZE_OPT <= 0 +# pragma GCC unroll 8 +#endif + for (i=0; i < XXH_ACC_NB; i++) { + XXH3_scalarRound(acc, input, secret, i); + } +} +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar) + +/*! + * @internal + * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar(). + * + * This is extracted to its own function because the NEON path uses a combination + * of NEON and scalar. 
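+ * (XXH3_scrambleAcc_neon() calls this for the scalar lanes past XXH3_NEON_LANES.)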
+ */
+XXH_FORCE_INLINE void
+XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
+                         void const* XXH_RESTRICT secret,
+                         size_t lane)
+{
+    xxh_u64* const xacc = (xxh_u64*) acc;   /* presumed aligned */
+    const xxh_u8* const xsecret = (const xxh_u8*) secret;   /* no alignment restriction */
+    XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
+    XXH_ASSERT(lane < XXH_ACC_NB);
+    {
+        xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
+        xxh_u64 acc64 = xacc[lane];
+        acc64 = XXH_xorshift64(acc64, 47);
+        acc64 ^= key64;
+        acc64 *= XXH_PRIME32_1;
+        xacc[lane] = acc64;
+    }
+}
+
+/*!
+ * @internal
+ * @brief Scrambles the accumulators after a large chunk has been read
+ */
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+    size_t i;
+    for (i=0; i < XXH_ACC_NB; i++) {
+        XXH3_scalarScrambleRound(acc, secret, i);
+    }
+}
+
+XXH_FORCE_INLINE void
+XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+    /*
+     * We need a separate pointer for the hack below,
+     * which requires a non-const pointer.
+     * Any decent compiler will optimize this out otherwise.
+     */
+    const xxh_u8* kSecretPtr = XXH3_kSecret;
+    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
+
+#if defined(__GNUC__) && defined(__aarch64__)
+    /*
+     * UGLY HACK:
+     * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are
+     * placed sequentially, in order, at the top of the unrolled loop.
+     *
+     * While MOVK is great for generating constants (2 cycles for a 64-bit
+     * constant compared to 4 cycles for LDR), it fights for bandwidth with
+     * the arithmetic instructions.
+     *
+     *   I   L   S
+     *  MOVK
+     *  MOVK
+     *  MOVK
+     *  MOVK
+     *  ADD
+     *  SUB     STR
+     *          STR
+     * By forcing loads from memory (as the asm line causes the compiler to assume
+     * that kSecretPtr has been changed), the pipelines are used more
+     * efficiently:
+     *   I   L   S
+     *      LDR
+     *  ADD LDR
+     *  SUB     STR
+     *          STR
+     *
+     * See XXH3_NEON_LANES for details on the pipeline.
+     *
+     * XXH3_64bits_withSeed, len == 256, Snapdragon 835
+     *   without hack: 2654.4 MB/s
+     *   with hack:    3202.9 MB/s
+     */
+    XXH_COMPILER_GUARD(kSecretPtr);
+#endif
+    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
+        int i;
+        for (i=0; i < nbRounds; i++) {
+            /*
+             * The asm hack causes the compiler to assume that kSecretPtr aliases with
+             * customSecret, and on aarch64, this prevents LDP from merging two
+             * loads together for free. Putting the loads together before the stores
+             * properly generates LDP.
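+             * (LDP loads a pair of 64-bit registers in a single instruction.)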
+ */ + xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64; + xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64; + XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo); + XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi); + } } +} + + +typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t); +typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*); +typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64); + + +#if (XXH_VECTOR == XXH_AVX512) + +#define XXH3_accumulate_512 XXH3_accumulate_512_avx512 +#define XXH3_accumulate XXH3_accumulate_avx512 +#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512 +#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512 + +#elif (XXH_VECTOR == XXH_AVX2) + +#define XXH3_accumulate_512 XXH3_accumulate_512_avx2 +#define XXH3_accumulate XXH3_accumulate_avx2 +#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2 +#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2 + +#elif (XXH_VECTOR == XXH_SSE2) + +#define XXH3_accumulate_512 XXH3_accumulate_512_sse2 +#define XXH3_accumulate XXH3_accumulate_sse2 +#define XXH3_scrambleAcc XXH3_scrambleAcc_sse2 +#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2 + +#elif (XXH_VECTOR == XXH_NEON) + +#define XXH3_accumulate_512 XXH3_accumulate_512_neon +#define XXH3_accumulate XXH3_accumulate_neon +#define XXH3_scrambleAcc XXH3_scrambleAcc_neon +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#elif (XXH_VECTOR == XXH_VSX) + +#define XXH3_accumulate_512 XXH3_accumulate_512_vsx +#define XXH3_accumulate XXH3_accumulate_vsx +#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#elif (XXH_VECTOR == XXH_SVE) +#define XXH3_accumulate_512 XXH3_accumulate_512_sve +#define XXH3_accumulate XXH3_accumulate_sve +#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#else /* scalar */ + +#define XXH3_accumulate_512 XXH3_accumulate_512_scalar +#define XXH3_accumulate XXH3_accumulate_scalar +#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#endif + +#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */ +# undef XXH3_initCustomSecret +# define XXH3_initCustomSecret XXH3_initCustomSecret_scalar +#endif + +XXH_FORCE_INLINE void +XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc, + const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble) +{ + size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE; + size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock; + size_t const nb_blocks = (len - 1) / block_len; + + size_t n; + + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + + for (n = 0; n < nb_blocks; n++) { + f_acc(acc, input + n*block_len, secret, nbStripesPerBlock); + f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN); + } + + /* last partial block */ + XXH_ASSERT(len > XXH_STRIPE_LEN); + { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN; + XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE)); + f_acc(acc, input + nb_blocks*block_len, secret, nbStripes); + + /* last stripe */ + { const xxh_u8* const p = input + len - XXH_STRIPE_LEN; +#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */ + XXH3_accumulate_512(acc, p, 
                              secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
+    }   }
+}
+
+XXH_FORCE_INLINE xxh_u64
+XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
+{
+    return XXH3_mul128_fold64(
+               acc[0] ^ XXH_readLE64(secret),
+               acc[1] ^ XXH_readLE64(secret+8) );
+}
+
+static XXH64_hash_t
+XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
+{
+    xxh_u64 result64 = start;
+    size_t i = 0;
+
+    for (i = 0; i < 4; i++) {
+        result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
+#if defined(__clang__)                                /* Clang */ \
+    && (defined(__arm__) || defined(__thumb__))       /* ARMv7 */ \
+    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
+    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
+        /*
+         * UGLY HACK:
+         * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
+         * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
+         * XXH3_64bits, len == 256, Snapdragon 835:
+         *   without hack: 2063.7 MB/s
+         *   with hack:    2560.7 MB/s
+         */
+        XXH_COMPILER_GUARD(result64);
+#endif
+    }
+
+    return XXH3_avalanche(result64);
+}
+
+#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
+                        XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
+
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
+                           const void* XXH_RESTRICT secret, size_t secretSize,
+                           XXH3_f_accumulate f_acc,
+                           XXH3_f_scrambleAcc f_scramble)
+{
+    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble);
+
+    /* converge into final hash */
+    XXH_STATIC_ASSERT(sizeof(acc) == 64);
+    /* do not align on 8, so that the secret is different from the accumulator */
+#define XXH_SECRET_MERGEACCS_START 11
+    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+    return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
+}
+
+/*
+ * It's important for performance to transmit the secret's size (when it's static)
+ * so that the compiler can properly optimize the vectorized loop.
+ * This makes a big performance difference for "medium" keys (<1 KB) when using
+ * the AVX instruction set.
+ * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
+ * breaks -Og, this is XXH_NO_INLINE.
+ */
+XXH3_WITH_SECRET_INLINE XXH64_hash_t
+XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
+                             XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
+{
+    (void)seed64;
+    return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+/*
+ * It's preferable for performance that XXH3_hashLong is not inlined,
+ * as it results in a smaller function for small data, which is easier on
+ * the instruction cache.
+ * Note that inside this no_inline function, we do inline the internal loop,
+ * and provide a statically defined secret size to allow optimization of the
+ * vector loop.
+ */ +XXH_NO_INLINE XXH_PUREF XXH64_hash_t +XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; (void)secret; (void)secretLen; + return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc); +} + +/* + * XXH3_hashLong_64b_withSeed(): + * Generate a custom key based on alteration of default XXH3_kSecret with the seed, + * and then use this key for long mode hashing. + * + * This operation is decently fast but nonetheless costs a little bit of time. + * Try to avoid it whenever possible (typically when seed==0). + * + * It's important for performance that XXH3_hashLong is not inlined. Not sure + * why (uop cache maybe?), but the difference is large and easily measurable. + */ +XXH_FORCE_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len, + XXH64_hash_t seed, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble, + XXH3_f_initCustomSecret f_initSec) +{ +#if XXH_SIZE_OPT <= 0 + if (seed == 0) + return XXH3_hashLong_64b_internal(input, len, + XXH3_kSecret, sizeof(XXH3_kSecret), + f_acc, f_scramble); +#endif + { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + f_initSec(secret, seed); + return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret), + f_acc, f_scramble); + } +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. + */ +XXH_NO_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) +{ + (void)secret; (void)secretLen; + return XXH3_hashLong_64b_withSeed_internal(input, len, seed, + XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret); +} + + +typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t, + XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t); + +XXH_FORCE_INLINE XXH64_hash_t +XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, + XXH3_hashLong64_f f_hashLong) +{ + XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); + /* + * If an action is to be taken if `secretLen` condition is not respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash. + * Also, note that function signature doesn't offer room to return an error. + */ + if (len <= 16) + return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); + if (len <= 128) + return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen); +} + + +/* === Public entry point === */ + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length) +{ + return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret); +} + +/*! 
@ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed)
+{
+    return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
+}
+
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+    if (length <= XXH3_MIDSIZE_MAX)
+        return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
+    return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize);
+}
+
+
+/* === XXH3 streaming === */
+#ifndef XXH_NO_STREAM
+/*
+ * Allocates a pointer that is always aligned to `align`.
+ *
+ * This must be freed with `XXH_alignedFree()`.
+ *
+ * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
+ * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2,
+ * or, on 32-bit, for the 16 byte aligned loads in SSE2 and NEON.
+ *
+ * This underalignment previously caused a rather obvious crash which went
+ * completely unnoticed due to XXH3_createState() not actually being tested.
+ * Credit to RedSpah for noticing this bug.
+ *
+ * The alignment is done manually: functions like posix_memalign or _mm_malloc
+ * are avoided to maintain portability, since we would have to write a fallback
+ * like this anyways, and testing for the existence of library functions without
+ * relying on external build tools is impossible.
+ *
+ * The method is simple: overallocate, manually align, and store the offset
+ * to the original behind the returned pointer.
+ *
+ * Align must be a power of 2 and 8 <= align <= 128.
+ */
+static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align)
+{
+    XXH_ASSERT(align <= 128 && align >= 8); /* range check */
+    XXH_ASSERT((align & (align-1)) == 0);   /* power of 2 */
+    XXH_ASSERT(s != 0 && s < (s + align));  /* empty/overflow */
+    {   /* Overallocate to make room for manual realignment and an offset byte */
+        xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
+        if (base != NULL) {
+            /*
+             * Get the offset needed to align this pointer.
+             *
+             * Even if the returned pointer is aligned, there will always be
+             * at least one byte to store the offset to the original pointer.
+             */
+            size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
+            /* Add the offset for the now-aligned pointer */
+            xxh_u8* ptr = base + offset;
+
+            XXH_ASSERT((size_t)ptr % align == 0);
+
+            /* Store the offset immediately before the returned pointer. */
+            ptr[-1] = (xxh_u8)offset;
+            return ptr;
+        }
+        return NULL;
+    }
+}
+/*
+ * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
+ * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
+ */
+static void XXH_alignedFree(void* p)
+{
+    if (p != NULL) {
+        xxh_u8* ptr = (xxh_u8*)p;
+        /* Get the offset byte we added in XXH_alignedMalloc. */
+        xxh_u8 offset = ptr[-1];
+        /* Free the original malloc'd pointer */
+        xxh_u8* base = ptr - offset;
+        XXH_free(base);
+    }
+}
+/*! @ingroup XXH3_family */
+/*!
+ * @brief Allocate an @ref XXH3_state_t.
+ *
+ * @return An allocated pointer of @ref XXH3_state_t on success.
+ * @return `NULL` on failure.
+ *
+ * @note Must be freed with XXH3_freeState().
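+ *
+ * @note Minimal streaming sketch (illustrative only; `data`/`size` are
+ *       assumed to exist and error handling is elided):
+ * @code
+ *   XXH3_state_t* const state = XXH3_createState();
+ *   if (state != NULL) {
+ *       XXH3_64bits_reset(state);
+ *       XXH3_64bits_update(state, data, size);
+ *       {   XXH64_hash_t const hash = XXH3_64bits_digest(state);
+ *           (void)hash;
+ *       }
+ *       XXH3_freeState(state);
+ *   }
+ * @endcode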
+ */ +XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void) +{ + XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64); + if (state==NULL) return NULL; + XXH3_INITSTATE(state); + return state; +} + +/*! @ingroup XXH3_family */ +/*! + * @brief Frees an @ref XXH3_state_t. + * + * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState(). + * + * @return @ref XXH_OK. + * + * @note Must be allocated with XXH3_createState(). + */ +XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr) +{ + XXH_alignedFree(statePtr); + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API void +XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state) +{ + XXH_memcpy(dst_state, src_state, sizeof(*dst_state)); +} + +static void +XXH3_reset_internal(XXH3_state_t* statePtr, + XXH64_hash_t seed, + const void* secret, size_t secretSize) +{ + size_t const initStart = offsetof(XXH3_state_t, bufferedSize); + size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart; + XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart); + XXH_ASSERT(statePtr != NULL); + /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */ + memset((char*)statePtr + initStart, 0, initLength); + statePtr->acc[0] = XXH_PRIME32_3; + statePtr->acc[1] = XXH_PRIME64_1; + statePtr->acc[2] = XXH_PRIME64_2; + statePtr->acc[3] = XXH_PRIME64_3; + statePtr->acc[4] = XXH_PRIME64_4; + statePtr->acc[5] = XXH_PRIME32_2; + statePtr->acc[6] = XXH_PRIME64_5; + statePtr->acc[7] = XXH_PRIME32_1; + statePtr->seed = seed; + statePtr->useSeed = (seed != 0); + statePtr->extSecret = (const unsigned char*)secret; + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + statePtr->secretLimit = secretSize - XXH_STRIPE_LEN; + statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3_reset_internal(statePtr, 0, secret, secretSize); + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed) +{ + if (statePtr == NULL) return XXH_ERROR; + if (seed==0) return XXH3_64bits_reset(statePtr); + if ((seed != statePtr->seed) || (statePtr->extSecret != NULL)) + XXH3_initCustomSecret(statePtr->customSecret, seed); + XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64) +{ + if (statePtr == NULL) return XXH_ERROR; + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + XXH3_reset_internal(statePtr, seed64, secret, secretSize); + statePtr->useSeed = 1; /* always, even if seed64==0 */ + return XXH_OK; +} + +/*! 
+ * @internal
+ * @brief Processes a large input for XXH3_update() and XXH3_digest_long().
+ *
+ * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a block.
+ *
+ * @param acc                Pointer to the 8 accumulator lanes
+ * @param nbStripesSoFarPtr  In/out pointer to the number of leftover stripes in the block
+ * @param nbStripesPerBlock  Number of stripes in a block
+ * @param input              Input pointer
+ * @param nbStripes          Number of stripes to process
+ * @param secret             Secret pointer
+ * @param secretLimit        Offset of the last block in @p secret
+ * @param f_acc              Pointer to an XXH3_accumulate implementation
+ * @param f_scramble         Pointer to an XXH3_scrambleAcc implementation
+ * @return                   Pointer past the end of @p input after processing
+ */
+XXH_FORCE_INLINE const xxh_u8 *
+XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
+                    size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
+                    const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
+                    const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
+                    XXH3_f_accumulate f_acc,
+                    XXH3_f_scrambleAcc f_scramble)
+{
+    const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE;
+    /* Process full blocks */
+    if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) {
+        /* Process the initial partial block... */
+        size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr;
+
+        do {
+            /* Accumulate and scramble */
+            f_acc(acc, input, initialSecret, nbStripesThisIter);
+            f_scramble(acc, secret + secretLimit);
+            input += nbStripesThisIter * XXH_STRIPE_LEN;
+            nbStripes -= nbStripesThisIter;
+            /* Then continue the loop with the full block size */
+            nbStripesThisIter = nbStripesPerBlock;
+            initialSecret = secret;
+        } while (nbStripes >= nbStripesPerBlock);
+        *nbStripesSoFarPtr = 0;
+    }
+    /* Process a partial block */
+    if (nbStripes > 0) {
+        f_acc(acc, input, initialSecret, nbStripes);
+        input += nbStripes * XXH_STRIPE_LEN;
+        *nbStripesSoFarPtr += nbStripes;
+    }
+    /* Return end pointer */
+    return input;
+}
+
+#ifndef XXH3_STREAM_USE_STACK
+# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */
+#   define XXH3_STREAM_USE_STACK 1
+# endif
+#endif
+/*
+ * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
+ */
+XXH_FORCE_INLINE XXH_errorcode
+XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
+            const xxh_u8* XXH_RESTRICT input, size_t len,
+            XXH3_f_accumulate f_acc,
+            XXH3_f_scrambleAcc f_scramble)
+{
+    if (input==NULL) {
+        XXH_ASSERT(len == 0);
+        return XXH_OK;
+    }
+
+    XXH_ASSERT(state != NULL);
+    {   const xxh_u8* const bEnd = input + len;
+        const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
+        /* For some reason, gcc and MSVC seem to suffer greatly
+         * when operating on the accumulators directly in the state.
+         * Operating in stack space seems to enable proper optimization.
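+         * (A local copy likely lets the compiler keep all 8 lanes in registers
+         * for the whole loop.)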
+ * clang, on the other hand, doesn't seem to need this trick */ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; + XXH_memcpy(acc, state->acc, sizeof(acc)); +#else + xxh_u64* XXH_RESTRICT const acc = state->acc; +#endif + state->totalLen += len; + XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE); + + /* small input : just fill in tmp buffer */ + if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) { + XXH_memcpy(state->buffer + state->bufferedSize, input, len); + state->bufferedSize += (XXH32_hash_t)len; + return XXH_OK; + } + + /* total input is now > XXH3_INTERNALBUFFER_SIZE */ + #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN) + XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */ + + /* + * Internal buffer is partially filled (always, except at beginning) + * Complete it, then consume it. + */ + if (state->bufferedSize) { + size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize; + XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize); + input += loadSize; + XXH3_consumeStripes(acc, + &state->nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, XXH3_INTERNALBUFFER_STRIPES, + secret, state->secretLimit, + f_acc, f_scramble); + state->bufferedSize = 0; + } + XXH_ASSERT(input < bEnd); + if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) { + size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN; + input = XXH3_consumeStripes(acc, + &state->nbStripesSoFar, state->nbStripesPerBlock, + input, nbStripes, + secret, state->secretLimit, + f_acc, f_scramble); + XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); + + } + /* Some remaining input (always) : buffer it */ + XXH_ASSERT(input < bEnd); + XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE); + XXH_ASSERT(state->bufferedSize == 0); + XXH_memcpy(state->buffer, input, (size_t)(bEnd-input)); + state->bufferedSize = (XXH32_hash_t)(bEnd-input); +#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 + /* save stack accumulators into state */ + XXH_memcpy(state->acc, acc, sizeof(acc)); +#endif + } + + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len) +{ + return XXH3_update(state, (const xxh_u8*)input, len, + XXH3_accumulate, XXH3_scrambleAcc); +} + + +XXH_FORCE_INLINE void +XXH3_digest_long (XXH64_hash_t* acc, + const XXH3_state_t* state, + const unsigned char* secret) +{ + xxh_u8 lastStripe[XXH_STRIPE_LEN]; + const xxh_u8* lastStripePtr; + + /* + * Digest on a local copy. This way, the state remains unaltered, and it can + * continue ingesting more input afterwards. 
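+     * (This is what makes it valid to take a digest mid-stream and then keep
+     * updating the same state.)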
+ */ + XXH_memcpy(acc, state->acc, sizeof(state->acc)); + if (state->bufferedSize >= XXH_STRIPE_LEN) { + /* Consume remaining stripes then point to remaining data in buffer */ + size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN; + size_t nbStripesSoFar = state->nbStripesSoFar; + XXH3_consumeStripes(acc, + &nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, nbStripes, + secret, state->secretLimit, + XXH3_accumulate, XXH3_scrambleAcc); + lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN; + } else { /* bufferedSize < XXH_STRIPE_LEN */ + /* Copy to temp buffer */ + size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize; + XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */ + XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize); + XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); + lastStripePtr = lastStripe; + } + /* Last stripe */ + XXH3_accumulate_512(acc, + lastStripePtr, + secret + state->secretLimit - XXH_SECRET_LASTACC_START); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state) +{ + const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; + if (state->totalLen > XXH3_MIDSIZE_MAX) { + XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB]; + XXH3_digest_long(acc, state, secret); + return XXH3_mergeAccs(acc, + secret + XXH_SECRET_MERGEACCS_START, + (xxh_u64)state->totalLen * XXH_PRIME64_1); + } + /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */ + if (state->useSeed) + return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); + return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen), + secret, state->secretLimit + XXH_STRIPE_LEN); +} +#endif /* !XXH_NO_STREAM */ + + +/* ========================================== + * XXH3 128 bits (a.k.a XXH128) + * ========================================== + * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant, + * even without counting the significantly larger output size. + * + * For example, extra steps are taken to avoid the seed-dependent collisions + * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B). + * + * This strength naturally comes at the cost of some speed, especially on short + * lengths. Note that longer hashes are about as fast as the 64-bit version + * due to it using only a slight modification of the 64-bit loop. + * + * XXH128 is also more oriented towards 64-bit machines. It is still extremely + * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64). + */ + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + /* A doubled version of 1to3_64b with different constants. 
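+     * The low word mixes { c1, c2, c3, len } exactly like the 64-bit path; the
+     * high word reuses the same bytes via XXH_rotl32(XXH_swap32(combinedl), 13).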
     */
+    XXH_ASSERT(input != NULL);
+    XXH_ASSERT(1 <= len && len <= 3);
+    XXH_ASSERT(secret != NULL);
+    /*
+     * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
+     * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
+     * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
+     */
+    {   xxh_u8 const c1 = input[0];
+        xxh_u8 const c2 = input[len >> 1];
+        xxh_u8 const c3 = input[len - 1];
+        xxh_u32 const combinedl = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24)
+                                | ((xxh_u32)c3 <<  0) | ((xxh_u32)len <<  8);
+        xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
+        xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
+        xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
+        xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
+        xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
+        XXH128_hash_t h128;
+        h128.low64  = XXH64_avalanche(keyed_lo);
+        h128.high64 = XXH64_avalanche(keyed_hi);
+        return h128;
+    }
+}
+
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+    XXH_ASSERT(input != NULL);
+    XXH_ASSERT(secret != NULL);
+    XXH_ASSERT(4 <= len && len <= 8);
+    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
+    {   xxh_u32 const input_lo = XXH_readLE32(input);
+        xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
+        xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
+        xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
+        xxh_u64 const keyed = input_64 ^ bitflip;
+
+        /* Shift len left so that the added term is even, which keeps the
+         * multiplier (XXH_PRIME64_1 + (len << 2)) odd and avoids an even
+         * multiplier. */
+        XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
+
+        m128.high64 += (m128.low64 << 1);
+        m128.low64  ^= (m128.high64 >> 3);
+
+        m128.low64   = XXH_xorshift64(m128.low64, 35);
+        m128.low64  *= PRIME_MX2;
+        m128.low64   = XXH_xorshift64(m128.low64, 28);
+        m128.high64  = XXH3_avalanche(m128.high64);
+        return m128;
+    }
+}
+
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+    XXH_ASSERT(input != NULL);
+    XXH_ASSERT(secret != NULL);
+    XXH_ASSERT(9 <= len && len <= 16);
+    {   xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
+        xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
+        xxh_u64 const input_lo = XXH_readLE64(input);
+        xxh_u64       input_hi = XXH_readLE64(input + len - 8);
+        XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
+        /*
+         * Put len in the middle of m128 to ensure that the length gets mixed to
+         * both the low and high bits in the 128x64 multiply below.
+         */
+        m128.low64 += (xxh_u64)(len - 1) << 54;
+        input_hi   ^= bitfliph;
+        /*
+         * Add the high 32 bits of input_hi to the high 32 bits of m128, then
+         * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
+         * the high 64 bits of m128.
+         *
+         * The best approach to this operation is different on 32-bit and 64-bit.
+         */
+        if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
+            /*
+             * 32-bit optimized version, which is more readable.
+             *
+             * On 32-bit, it removes an ADC and delays a dependency between the two
+             * halves of m128.high64, but it generates an extra mask on 64-bit.
+             */
+            m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
+        } else {
+            /*
+             * 64-bit optimized (albeit more confusing) version.
+ * + * Uses some properties of addition and multiplication to remove the mask: + * + * Let: + * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF) + * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000) + * c = XXH_PRIME32_2 + * + * a + (b * c) + * Inverse Property: x + y - x == y + * a + (b * (1 + c - 1)) + * Distributive Property: x * (y + z) == (x * y) + (x * z) + * a + (b * 1) + (b * (c - 1)) + * Identity Property: x * 1 == x + * a + b + (b * (c - 1)) + * + * Substitute a, b, and c: + * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) + * + * Since input_hi.hi + input_hi.lo == input_hi, we get this: + * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) + */ + m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1); + } + /* m128 ^= XXH_swap64(m128 >> 64); */ + m128.low64 ^= XXH_swap64(m128.high64); + + { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */ + XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2); + h128.high64 += m128.high64 * XXH_PRIME64_2; + + h128.low64 = XXH3_avalanche(h128.low64); + h128.high64 = XXH3_avalanche(h128.high64); + return h128; + } } +} + +/* + * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN + */ +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(len <= 16); + { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed); + if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed); + if (len) return XXH3_len_1to3_128b(input, len, secret, seed); + { XXH128_hash_t h128; + xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72); + xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88); + h128.low64 = XXH64_avalanche(seed ^ bitflipl); + h128.high64 = XXH64_avalanche( seed ^ bitfliph); + return h128; + } } +} + +/* + * A bit slower than XXH3_mix16B, but handles multiply by zero better. + */ +XXH_FORCE_INLINE XXH128_hash_t +XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2, + const xxh_u8* secret, XXH64_hash_t seed) +{ + acc.low64 += XXH3_mix16B (input_1, secret+0, seed); + acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8); + acc.high64 += XXH3_mix16B (input_2, secret+16, seed); + acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8); + return acc; +} + + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(16 < len && len <= 128); + + { XXH128_hash_t acc; + acc.low64 = len * XXH_PRIME64_1; + acc.high64 = 0; + +#if XXH_SIZE_OPT >= 1 + { + /* Smaller, but slightly slower. 
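+         * It walks the same XXH128_mix32B ladder as the unrolled branch below,
+         * just driven by a loop counter.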
         */
+        unsigned int i = (unsigned int)(len - 1) / 32;
+        do {
+            acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed);
+        } while (i-- != 0);
+    }
+#else
+        if (len > 32) {
+            if (len > 64) {
+                if (len > 96) {
+                    acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
+                }
+                acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
+            }
+            acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
+        }
+        acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
+#endif
+        {   XXH128_hash_t h128;
+            h128.low64  = acc.low64 + acc.high64;
+            h128.high64 = (acc.low64    * XXH_PRIME64_1)
+                        + (acc.high64   * XXH_PRIME64_4)
+                        + ((len - seed) * XXH_PRIME64_2);
+            h128.low64  = XXH3_avalanche(h128.low64);
+            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+            return h128;
+        }
+    }
+}
+
+XXH_NO_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
+                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+                       XXH64_hash_t seed)
+{
+    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+
+    {   XXH128_hash_t acc;
+        unsigned i;
+        acc.low64 = len * XXH_PRIME64_1;
+        acc.high64 = 0;
+        /*
+         * We set `i` to offset + 32, so that the unchanged `len` can be used
+         * as the upper bound. This reaches a sweet spot where both x86 and
+         * aarch64 get simple address generation and good codegen for the loop.
+         */
+        for (i = 32; i < 160; i += 32) {
+            acc = XXH128_mix32B(acc,
+                                input  + i - 32,
+                                input  + i - 16,
+                                secret + i - 32,
+                                seed);
+        }
+        acc.low64 = XXH3_avalanche(acc.low64);
+        acc.high64 = XXH3_avalanche(acc.high64);
+        /*
+         * NB: `i <= len` will duplicate the last 32 bytes if
+         * len % 32 was zero. This is an unfortunate necessity to keep
+         * the hash result stable.
+         */
+        for (i=160; i <= len; i += 32) {
+            acc = XXH128_mix32B(acc,
+                                input + i - 32,
+                                input + i - 16,
+                                secret + XXH3_MIDSIZE_STARTOFFSET + i - 160,
+                                seed);
+        }
+        /* last bytes */
+        acc = XXH128_mix32B(acc,
+                            input + len - 16,
+                            input + len - 32,
+                            secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
+                            (XXH64_hash_t)0 - seed);
+
+        {   XXH128_hash_t h128;
+            h128.low64  = acc.low64 + acc.high64;
+            h128.high64 = (acc.low64    * XXH_PRIME64_1)
+                        + (acc.high64   * XXH_PRIME64_4)
+                        + ((len - seed) * XXH_PRIME64_2);
+            h128.low64  = XXH3_avalanche(h128.low64);
+            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+            return h128;
+        }
+    }
+}
+
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
+                            const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+                            XXH3_f_accumulate f_acc,
+                            XXH3_f_scrambleAcc f_scramble)
+{
+    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble);
+
+    /* converge into final hash */
+    XXH_STATIC_ASSERT(sizeof(acc) == 64);
+    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+    {   XXH128_hash_t h128;
+        h128.low64  = XXH3_mergeAccs(acc,
+                                     secret + XXH_SECRET_MERGEACCS_START,
+                                     (xxh_u64)len * XXH_PRIME64_1);
+        h128.high64 = XXH3_mergeAccs(acc,
+                                     secret + secretSize
+                                            - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+                                     ~((xxh_u64)len * XXH_PRIME64_2));
+        return h128;
+    }
+}
+
+/*
+ * It's important for performance that XXH3_hashLong() is not inlined.
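+ * (As with the 64-bit variant: an outlined function keeps the short-input fast
+ * path small and instruction-cache friendly.)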
+ */ +XXH_NO_INLINE XXH_PUREF XXH128_hash_t +XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; (void)secret; (void)secretLen; + return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_accumulate, XXH3_scrambleAcc); +} + +/* + * It's important for performance to pass @p secretLen (when it's static) + * to the compiler, so that it can properly optimize the vectorized loop. + * + * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE + * breaks -Og, this is XXH_NO_INLINE. + */ +XXH3_WITH_SECRET_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; + return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen, + XXH3_accumulate, XXH3_scrambleAcc); +} + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble, + XXH3_f_initCustomSecret f_initSec) +{ + if (seed64 == 0) + return XXH3_hashLong_128b_internal(input, len, + XXH3_kSecret, sizeof(XXH3_kSecret), + f_acc, f_scramble); + { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + f_initSec(secret, seed64); + return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret), + f_acc, f_scramble); + } +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. + */ +XXH_NO_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSeed(const void* input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)secret; (void)secretLen; + return XXH3_hashLong_128b_withSeed_internal(input, len, seed64, + XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret); +} + +typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t, + XXH64_hash_t, const void* XXH_RESTRICT, size_t); + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_128bits_internal(const void* input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, + XXH3_hashLong128_f f_hl128) +{ + XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); + /* + * If an action is to be taken if `secret` conditions are not respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash. + */ + if (len <= 16) + return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); + if (len <= 128) + return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + return f_hl128(input, len, seed64, secret, secretLen); +} + + +/* === Public XXH128 API === */ + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len) +{ + return XXH3_128bits_internal(input, len, 0, + XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_hashLong_128b_default); +} + +/*! 
@ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + return XXH3_128bits_internal(input, len, 0, + (const xxh_u8*)secret, secretSize, + XXH3_hashLong_128b_withSecret); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) +{ + return XXH3_128bits_internal(input, len, seed, + XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_hashLong_128b_withSeed); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) +{ + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); + return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) +{ + return XXH3_128bits_withSeed(input, len, seed); +} + + +/* === XXH3 128-bit streaming === */ +#ifndef XXH_NO_STREAM +/* + * All initialization and update functions are identical to 64-bit streaming variant. + * The only difference is the finalization routine. + */ + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr) +{ + return XXH3_64bits_reset(statePtr); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed) +{ + return XXH3_64bits_reset_withSeed(statePtr, seed); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) +{ + return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len) +{ + return XXH3_64bits_update(state, input, len); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state) +{ + const unsigned char* const secret = (state->extSecret == NULL) ? 
state->customSecret : state->extSecret;
+    if (state->totalLen > XXH3_MIDSIZE_MAX) {
+        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
+        XXH3_digest_long(acc, state, secret);
+        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+        {   XXH128_hash_t h128;
+            h128.low64  = XXH3_mergeAccs(acc,
+                                         secret + XXH_SECRET_MERGEACCS_START,
+                                         (xxh_u64)state->totalLen * XXH_PRIME64_1);
+            h128.high64 = XXH3_mergeAccs(acc,
+                                         secret + state->secretLimit + XXH_STRIPE_LEN
+                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
+            return h128;
+        }
+    }
+    /* len <= XXH3_MIDSIZE_MAX : short code */
+    if (state->seed)
+        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
+    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
+                                   secret, state->secretLimit + XXH_STRIPE_LEN);
+}
+#endif /* !XXH_NO_STREAM */
+/* 128-bit utility functions */
+
+/* return : 1 if equal, 0 if different */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
+{
+    /* note : XXH128_hash_t is compact, it has no padding byte */
+    return !(memcmp(&h1, &h2, sizeof(h1)));
+}
+
+/* This prototype is compatible with stdlib's qsort().
+ * @return : >0 if *h128_1  > *h128_2
+ *           <0 if *h128_1  < *h128_2
+ *           =0 if *h128_1 == *h128_2  */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2)
+{
+    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
+    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
+    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
+    /* note : bets that, in most cases, hash values are different */
+    if (hcmp) return hcmp;
+    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
+}
+
+
+/*====== Canonical representation ======*/
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API void
+XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash)
+{
+    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
+    if (XXH_CPU_LITTLE_ENDIAN) {
+        hash.high64 = XXH_swap64(hash.high64);
+        hash.low64  = XXH_swap64(hash.low64);
+    }
+    XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
+    XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src)
+{
+    XXH128_hash_t h;
+    h.high64 = XXH_readBE64(src);
+    h.low64  = XXH_readBE64(src->digest + 8);
+    return h;
+}
+
+
+
+/* ==========================================
+ * Secret generators
+ * ==========================================
+ */
+#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
+
+XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
+{
+    XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
+    XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
+}
+
+/*!
@ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize)
+{
+#if (XXH_DEBUGLEVEL >= 1)
+    XXH_ASSERT(secretBuffer != NULL);
+    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+#else
+    /* production mode, assert() are disabled */
+    if (secretBuffer == NULL) return XXH_ERROR;
+    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+#endif
+
+    if (customSeedSize == 0) {
+        customSeed = XXH3_kSecret;
+        customSeedSize = XXH_SECRET_DEFAULT_SIZE;
+    }
+#if (XXH_DEBUGLEVEL >= 1)
+    XXH_ASSERT(customSeed != NULL);
+#else
+    if (customSeed == NULL) return XXH_ERROR;
+#endif
+
+    /* Fill secretBuffer with a copy of customSeed - repeat as needed */
+    { size_t pos = 0;
+      while (pos < secretSize) {
+          size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
+          memcpy((char*)secretBuffer + pos, customSeed, toCopy);
+          pos += toCopy;
+    } }
+
+    { size_t const nbSeg16 = secretSize / 16;
+      size_t n;
+      XXH128_canonical_t scrambler;
+      XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
+      for (n=0; n<nbSeg16; n++) {
+          XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
+          XXH3_combine16((char*)secretBuffer + n*16, h128);
+      }
+      /* last segment */
+      XXH3_combine16((char*)secretBuffer + secretSize - 16, scrambler);
+    }
+    return XXH_OK;
+}
diff --git a/lib/common/zstd_common.c b/lib/common/zstd_common.c
--- a/lib/common/zstd_common.c
+++ b/lib/common/zstd_common.c
-#include <stdlib.h>      /* malloc, calloc, free */
-#include <string.h>      /* memset */
+#define ZSTD_DEPS_NEED_MALLOC
 #include "error_private.h"
 #include "zstd_internal.h"
@@ -48,36 +47,11 @@ ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
 *   provides error code string from enum */
 const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
 
-
-
-/*=**************************************************************
-*  Custom allocator
-****************************************************************/
-void* ZSTD_malloc(size_t size, ZSTD_customMem customMem)
-{
-    if (customMem.customAlloc)
-        return customMem.customAlloc(customMem.opaque, size);
-    return malloc(size);
-}
-
-void* ZSTD_calloc(size_t size, ZSTD_customMem customMem)
-{
-    if (customMem.customAlloc) {
-        /* calloc implemented as malloc+memset;
-         * not as efficient as calloc, but next best guess for custom malloc */
-        void* const ptr = customMem.customAlloc(customMem.opaque, size);
-        memset(ptr, 0, size);
-        return ptr;
-    }
-    return calloc(1, size);
-}
-
-void ZSTD_free(void* ptr, ZSTD_customMem customMem)
+int ZSTD_isDeterministicBuild(void)
 {
-    if (ptr!=NULL) {
-        if (customMem.customFree)
-            customMem.customFree(customMem.opaque, ptr);
-        else
-            free(ptr);
-    }
+#if ZSTD_IS_DETERMINISTIC_BUILD
+    return 1;
+#else
+    return 0;
+#endif
 }
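Removing the ZSTD_malloc/ZSTD_calloc/ZSTD_free shims moves libc allocation behind the ZSTD_DEPS_NEED_MALLOC macros, but applications keep control of allocation through the public ZSTD_customMem interface. A minimal sketch, assuming the advanced (static-linking-only) API; the counting allocator and its global are illustrative:

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_createCCtx_advanced, ZSTD_customMem */
    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    static size_t g_requested = 0;   /* illustrative counter; not thread-safe */

    static void* countingAlloc(void* opaque, size_t size)
    {
        (void)opaque;
        g_requested += size;
        return malloc(size);
    }

    static void countingFree(void* opaque, void* address)
    {
        (void)opaque;
        free(address);
    }

    int main(void)
    {
        ZSTD_customMem const cmem = { countingAlloc, countingFree, NULL };
        ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cmem);
        if (cctx == NULL) return 1;
        printf("bytes requested so far: %zu\n", g_requested);
        ZSTD_freeCCtx(cctx);
        return 0;
    }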
diff --git a/lib/common/zstd_deps.h b/lib/common/zstd_deps.h
new file mode 100644
index 00000000000..8a9c7cc5313
--- /dev/null
+++ b/lib/common/zstd_deps.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* This file provides common libc dependencies that zstd requires.
+ * The purpose is to allow replacing this file with a custom implementation
+ * to compile zstd without libc support.
+ */
+
+/* Need:
+ * NULL
+ * INT_MAX
+ * UINT_MAX
+ * ZSTD_memcpy()
+ * ZSTD_memset()
+ * ZSTD_memmove()
+ */
+#ifndef ZSTD_DEPS_COMMON
+#define ZSTD_DEPS_COMMON
+
+/* Even though we use qsort_r only for the dictionary builder, the macro
+ * _GNU_SOURCE has to be declared *before* the inclusion of any standard
+ * header and the script 'combine.sh' combines the whole zstd source code
+ * in a single file.
+ */
+#if defined(__linux) || defined(__linux__) || defined(linux) || defined(__gnu_linux__) || \
+    defined(__CYGWIN__) || defined(__MSYS__)
+#if !defined(_GNU_SOURCE) && !defined(__ANDROID__) /* NDK doesn't ship qsort_r(). */
+#define _GNU_SOURCE
+#endif
+#endif
+
+#include <limits.h>
+#include <stddef.h>
+#include <string.h>
+
+#if defined(__GNUC__) && __GNUC__ >= 4
+# define ZSTD_memcpy(d,s,l) __builtin_memcpy((d),(s),(l))
+# define ZSTD_memmove(d,s,l) __builtin_memmove((d),(s),(l))
+# define ZSTD_memset(p,v,l) __builtin_memset((p),(v),(l))
+#else
+# define ZSTD_memcpy(d,s,l) memcpy((d),(s),(l))
+# define ZSTD_memmove(d,s,l) memmove((d),(s),(l))
+# define ZSTD_memset(p,v,l) memset((p),(v),(l))
+#endif
+
+#endif /* ZSTD_DEPS_COMMON */
+
+/* Need:
+ * ZSTD_malloc()
+ * ZSTD_free()
+ * ZSTD_calloc()
+ */
+#ifdef ZSTD_DEPS_NEED_MALLOC
+#ifndef ZSTD_DEPS_MALLOC
+#define ZSTD_DEPS_MALLOC
+
+#include <stdlib.h>
+
+#define ZSTD_malloc(s) malloc(s)
+#define ZSTD_calloc(n,s) calloc((n), (s))
+#define ZSTD_free(p) free((p))
+
+#endif /* ZSTD_DEPS_MALLOC */
+#endif /* ZSTD_DEPS_NEED_MALLOC */
+
+/*
+ * Provides 64-bit math support.
+ * Need:
+ * U64 ZSTD_div64(U64 dividend, U32 divisor)
+ */
+#ifdef ZSTD_DEPS_NEED_MATH64
+#ifndef ZSTD_DEPS_MATH64
+#define ZSTD_DEPS_MATH64
+
+#define ZSTD_div64(dividend, divisor) ((dividend) / (divisor))
+
+#endif /* ZSTD_DEPS_MATH64 */
+#endif /* ZSTD_DEPS_NEED_MATH64 */
+
+/* Need:
+ * assert()
+ */
+#ifdef ZSTD_DEPS_NEED_ASSERT
+#ifndef ZSTD_DEPS_ASSERT
+#define ZSTD_DEPS_ASSERT
+
+#include <assert.h>
+
+#endif /* ZSTD_DEPS_ASSERT */
+#endif /* ZSTD_DEPS_NEED_ASSERT */
+
+/* Need:
+ * ZSTD_DEBUG_PRINT()
+ */
+#ifdef ZSTD_DEPS_NEED_IO
+#ifndef ZSTD_DEPS_IO
+#define ZSTD_DEPS_IO
+
+#include <stdio.h>
+#define ZSTD_DEBUG_PRINT(...) fprintf(stderr, __VA_ARGS__)
+
+#endif /* ZSTD_DEPS_IO */
+#endif /* ZSTD_DEPS_NEED_IO */
+
+/* Only requested when <stdint.h> is known to be present.
+ * Need:
+ * intptr_t
+ */
+#ifdef ZSTD_DEPS_NEED_STDINT
+#ifndef ZSTD_DEPS_STDINT
+#define ZSTD_DEPS_STDINT
+
+#include <stdint.h>
+
+#endif /* ZSTD_DEPS_STDINT */
+#endif /* ZSTD_DEPS_NEED_STDINT */
diff --git a/lib/common/zstd_internal.h b/lib/common/zstd_internal.h
index 31f756ab581..86a0fc5c809 100644
--- a/lib/common/zstd_internal.h
+++ b/lib/common/zstd_internal.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
* * This source code is licensed under both the BSD-style license (found in the @@ -20,23 +20,23 @@ * Dependencies ***************************************/ #include "compiler.h" +#include "cpu.h" #include "mem.h" #include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */ #include "error_private.h" #define ZSTD_STATIC_LINKING_ONLY -#include "zstd.h" +#include "../zstd.h" #define FSE_STATIC_LINKING_ONLY #include "fse.h" -#define HUF_STATIC_LINKING_ONLY #include "huf.h" #ifndef XXH_STATIC_LINKING_ONLY # define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ #endif #include "xxhash.h" /* XXH_reset, update, digest */ - - -#if defined (__cplusplus) -extern "C" { +#ifndef ZSTD_NO_TRACE +# include "zstd_trace.h" +#else +# define ZSTD_TRACE 0 #endif /* ---- static assert (debug) --- */ @@ -53,50 +53,7 @@ extern "C" { #undef MAX #define MIN(a,b) ((a)<(b) ? (a) : (b)) #define MAX(a,b) ((a)>(b) ? (a) : (b)) - -/** - * Return the specified error if the condition evaluates to true. - * - * In debug modes, prints additional information. In order to do that - * (particularly, printing the conditional that failed), this can't just wrap - * RETURN_ERROR(). - */ -#define RETURN_ERROR_IF(cond, err, ...) \ - if (cond) { \ - RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \ - RAWLOG(3, ": " __VA_ARGS__); \ - RAWLOG(3, "\n"); \ - return ERROR(err); \ - } - -/** - * Unconditionally return the specified error. - * - * In debug modes, prints additional information. - */ -#define RETURN_ERROR(err, ...) \ - do { \ - RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \ - RAWLOG(3, ": " __VA_ARGS__); \ - RAWLOG(3, "\n"); \ - return ERROR(err); \ - } while(0); - -/** - * If the provided expression evaluates to an error code, returns that error code. - * - * In debug modes, prints additional information. - */ -#define FORWARD_IF_ERROR(err, ...) 
\ - do { \ - size_t const err_code = (err); \ - if (ERR_isError(err_code)) { \ - RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \ - RAWLOG(3, ": " __VA_ARGS__); \ - RAWLOG(3, "\n"); \ - return err_code; \ - } \ - } while(0); +#define BOUNDED(min,val,max) (MAX(min,MIN(val,max))) /*-************************************* @@ -105,8 +62,7 @@ extern "C" { #define ZSTD_OPT_NUM (1<<12) #define ZSTD_REP_NUM 3 /* number of repcodes */ -#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1) -static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 }; +static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 }; #define KB *(1 <<10) #define MB *(1 <<20) @@ -120,26 +76,29 @@ static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 }; #define BIT0 1 #define ZSTD_WINDOWLOG_ABSOLUTEMIN 10 -static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 }; -static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 }; +static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 }; +static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 }; #define ZSTD_FRAMEIDSIZE 4 /* magic number size */ #define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */ -static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE; +static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE; typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; +#define ZSTD_FRAMECHECKSUMSIZE 4 + #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ -#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ +#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */) /* for a non-null block */ +#define MIN_LITERALS_FOR_4_STREAMS 6 -#define HufLog 12 -typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; +typedef enum { set_basic, set_rle, set_compressed, set_repeat } SymbolEncodingType_e; #define LONGNBSEQ 0x7F00 #define MINMATCH 3 #define Litbits 8 +#define LitHufLog 11 #define MaxLit ((1<= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN); + /* Separate out the first COPY16() call because the copy length is + * almost certain to be short, so the branches have different + * probabilities. Since it is almost certain to be short, only do + * one COPY16() in the first call. Then, do two calls per loop since + * at that point it is more likely to have a high trip count. + */ + ZSTD_copy16(op, ip); + if (16 >= length) return; + op += 16; + ip += 16; + do { + COPY16(op, ip); + COPY16(op, ip); + } + while (op < oend); + } } -MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd) /* should be faster for decoding, but strangely, not verified on all platform */ +MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { - const BYTE* ip = (const BYTE*)src; - BYTE* op = (BYTE*)dst; - BYTE* const oend = (BYTE*)dstEnd; - do - COPY8(op, ip) - while (op < oend); + size_t const length = MIN(dstCapacity, srcSize); + if (length > 0) { + ZSTD_memcpy(dst, src, length); + } + return length; } +/* define "workspace is too large" as this number of times larger than needed */ +#define ZSTD_WORKSPACETOOLARGE_FACTOR 3 + +/* when workspace is continuously too large + * during at least this number of times, + * context's memory usage is considered wasteful, + * because it's sized to handle a worst case scenario which rarely happens. 
+ * In which case, resize it down to free some memory */ +#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128 + +/* Controls whether the input/output buffer is buffered or stable. */ +typedef enum { + ZSTD_bm_buffered = 0, /* Buffer the input/output */ + ZSTD_bm_stable = 1 /* ZSTD_inBuffer/ZSTD_outBuffer is stable */ +} ZSTD_bufferMode_e; + /*-******************************************* * Private declarations *********************************************/ -typedef struct seqDef_s { - U32 offset; - U16 litLength; - U16 matchLength; -} seqDef; - -typedef struct { - seqDef* sequencesStart; - seqDef* sequences; - BYTE* litStart; - BYTE* lit; - BYTE* llCode; - BYTE* mlCode; - BYTE* ofCode; - size_t maxNbSeq; - size_t maxNbLit; - U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */ - U32 longLengthPos; -} seqStore_t; /** * Contains the compressed frame size and an upper-bound for the decompressed frame size. @@ -249,43 +284,11 @@ typedef struct { * `decompressedBound != ZSTD_CONTENTSIZE_ERROR` */ typedef struct { + size_t nbBlocks; size_t compressedSize; unsigned long long decompressedBound; } ZSTD_frameSizeInfo; /* decompress & legacy */ -const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */ -void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ - -/* custom memory allocation functions */ -void* ZSTD_malloc(size_t size, ZSTD_customMem customMem); -void* ZSTD_calloc(size_t size, ZSTD_customMem customMem); -void ZSTD_free(void* ptr, ZSTD_customMem customMem); - - -MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */ -{ - assert(val != 0); - { -# if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - _BitScanReverse(&r, val); - return (unsigned)r; -# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */ - return 31 - __builtin_clz(val); -# else /* Software version */ - static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; - U32 v = val; - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - return DeBruijnClz[(v * 0x07C4ACDDU) >> 27]; -# endif - } -} - - /* ZSTD_invalidateRepCodes() : * ensures next compression will not use repcodes from previous block. * Note : only works with regular variant; @@ -301,19 +304,23 @@ typedef struct { /*! ZSTD_getcBlockSize() : * Provides the size of compressed block from block header `src` */ -/* Used by: decompress, fullbench (does not get its definition from here) */ +/* Used by: decompress, fullbench */ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr); /*! ZSTD_decodeSeqHeaders() : * decode sequence header from src */ -/* Used by: decompress, fullbench (does not get its definition from here) */ +/* Used by: zstd_decompress_block, fullbench */ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, const void* src, size_t srcSize); - -#if defined (__cplusplus) +/** + * @returns true iff the CPU supports dynamic BMI2 dispatch. 
+ */
+MEM_STATIC int ZSTD_cpuSupportsBmi2(void)
+{
+    ZSTD_cpuid_t cpuid = ZSTD_cpuid();
+    return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid);
 }
-#endif
 
 #endif /* ZSTD_CCOMMON_H_MODULE */
diff --git a/lib/common/zstd_trace.h b/lib/common/zstd_trace.h
new file mode 100644
index 00000000000..d8eec100758
--- /dev/null
+++ b/lib/common/zstd_trace.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_TRACE_H
+#define ZSTD_TRACE_H
+
+#include <stddef.h>
+
+/* weak symbol support
+ * For now, enable conservatively:
+ * - Only GNUC
+ * - Only ELF
+ * - Only x86-64, i386, aarch64 and risc-v.
+ * Also, explicitly disable on platforms known not to work so they aren't
+ * forgotten in the future.
+ */
+#if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && \
+    defined(__GNUC__) && defined(__ELF__) && \
+    (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || \
+     defined(_M_IX86) || defined(__aarch64__) || defined(__riscv)) && \
+    !defined(__APPLE__) && !defined(_WIN32) && !defined(__MINGW32__) && \
+    !defined(__CYGWIN__) && !defined(_AIX)
+# define ZSTD_HAVE_WEAK_SYMBOLS 1
+#else
+# define ZSTD_HAVE_WEAK_SYMBOLS 0
+#endif
+#if ZSTD_HAVE_WEAK_SYMBOLS
+# define ZSTD_WEAK_ATTR __attribute__((__weak__))
+#else
+# define ZSTD_WEAK_ATTR
+#endif
+
+/* Only enable tracing when weak symbols are available. */
+#ifndef ZSTD_TRACE
+# define ZSTD_TRACE ZSTD_HAVE_WEAK_SYMBOLS
+#endif
+
+#if ZSTD_TRACE
+
+struct ZSTD_CCtx_s;
+struct ZSTD_DCtx_s;
+struct ZSTD_CCtx_params_s;
+
+typedef struct {
+    /**
+     * ZSTD_VERSION_NUMBER
+     *
+     * This is guaranteed to be the first member of ZSTD_trace.
+     * Otherwise, this struct is not stable between versions. If
+     * the version number does not match your expectation, you
+     * should not interpret the rest of the struct.
+     */
+    unsigned version;
+    /**
+     * Non-zero if streaming (de)compression is used.
+     */
+    int streaming;
+    /**
+     * The dictionary ID.
+     */
+    unsigned dictionaryID;
+    /**
+     * Is the dictionary cold?
+     * Only set on decompression.
+     */
+    int dictionaryIsCold;
+    /**
+     * The dictionary size or zero if no dictionary.
+     */
+    size_t dictionarySize;
+    /**
+     * The uncompressed size of the data.
+     */
+    size_t uncompressedSize;
+    /**
+     * The compressed size of the data.
+     */
+    size_t compressedSize;
+    /**
+     * The fully resolved CCtx parameters (NULL on decompression).
+     */
+    struct ZSTD_CCtx_params_s const* params;
+    /**
+     * The ZSTD_CCtx pointer (NULL on decompression).
+     */
+    struct ZSTD_CCtx_s const* cctx;
+    /**
+     * The ZSTD_DCtx pointer (NULL on compression).
+     */
+    struct ZSTD_DCtx_s const* dctx;
+} ZSTD_Trace;
+
+/**
+ * A tracing context. It must be 0 when tracing is disabled.
+ * Otherwise, any non-zero value returned by a tracing begin()
+ * function is presented to any subsequent calls to end().
+ *
+ * Any non-zero value is treated as tracing is enabled and not
+ * interpreted by the library.
+ *
+ * Two possible uses are:
+ * * A timestamp for when the begin() function was called.
+ * * A unique key identifying the (de)compression, like the
+ *   address of the [dc]ctx pointer if you need to track
+ *   more information than just a timestamp.
+ */
+typedef unsigned long long ZSTD_TraceCtx;
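Because the hook declarations below are weak, a strong definition supplied by the application overrides them at link time, and each hook can be overridden independently. A sketch of a trivial stderr tracer, assuming a platform where ZSTD_HAVE_WEAK_SYMBOLS is 1 and that this internal header is on the include path; the file name trace_hook.c is hypothetical:

    /* trace_hook.c (hypothetical) : link into an application using libzstd */
    #include <stdio.h>
    #include <zstd.h>            /* ZSTD_VERSION_NUMBER */
    #include "zstd_trace.h"

    ZSTD_TraceCtx ZSTD_trace_compress_begin(struct ZSTD_CCtx_s const* cctx)
    {
        (void)cctx;
        return 1;   /* any non-zero value enables tracing for this call */
    }

    void ZSTD_trace_compress_end(ZSTD_TraceCtx ctx, ZSTD_Trace const* trace)
    {
        (void)ctx;
        if (trace->version != ZSTD_VERSION_NUMBER) return;  /* layout not guaranteed */
        fprintf(stderr, "compressed %zu -> %zu bytes (streaming=%d)\n",
                trace->uncompressedSize, trace->compressedSize, trace->streaming);
    }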
+/**
+ * Trace the beginning of a compression call.
+ * @param cctx The cctx pointer for the compression.
+ *             It can be used as a key to map begin() to end().
+ * @returns Non-zero if tracing is enabled. The return value is
+ *          passed to ZSTD_trace_compress_end().
+ */
+ZSTD_WEAK_ATTR ZSTD_TraceCtx ZSTD_trace_compress_begin(
+    struct ZSTD_CCtx_s const* cctx);
+
+/**
+ * Trace the end of a compression call.
+ * @param ctx The return value of ZSTD_trace_compress_begin().
+ * @param trace The zstd tracing info.
+ */
+ZSTD_WEAK_ATTR void ZSTD_trace_compress_end(
+    ZSTD_TraceCtx ctx,
+    ZSTD_Trace const* trace);
+
+/**
+ * Trace the beginning of a decompression call.
+ * @param dctx The dctx pointer for the decompression.
+ *             It can be used as a key to map begin() to end().
+ * @returns Non-zero if tracing is enabled. The return value is
+ *          passed to ZSTD_trace_decompress_end().
+ */
+ZSTD_WEAK_ATTR ZSTD_TraceCtx ZSTD_trace_decompress_begin(
+    struct ZSTD_DCtx_s const* dctx);
+
+/**
+ * Trace the end of a decompression call.
+ * @param ctx The return value of ZSTD_trace_decompress_begin().
+ * @param trace The zstd tracing info.
+ */
+ZSTD_WEAK_ATTR void ZSTD_trace_decompress_end(
+    ZSTD_TraceCtx ctx,
+    ZSTD_Trace const* trace);
+
+#endif /* ZSTD_TRACE */
+
+#endif /* ZSTD_TRACE_H */
diff --git a/lib/compress/clevels.h b/lib/compress/clevels.h
new file mode 100644
index 00000000000..c18da465f32
--- /dev/null
+++ b/lib/compress/clevels.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */ + +#ifndef ZSTD_CLEVELS_H +#define ZSTD_CLEVELS_H + +#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */ +#include "../zstd.h" + +/*-===== Pre-defined compression levels =====-*/ + +#define ZSTD_MAX_CLEVEL 22 + +#ifdef __GNUC__ +__attribute__((__unused__)) +#endif + +static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = { +{ /* "default" - for any srcSize > 256 KB */ + /* W, C, H, S, L, TL, strat */ + { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */ + { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */ + { 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */ + { 21, 16, 17, 1, 5, 0, ZSTD_dfast }, /* level 3 */ + { 21, 18, 18, 1, 5, 0, ZSTD_dfast }, /* level 4 */ + { 21, 18, 19, 3, 5, 2, ZSTD_greedy }, /* level 5 */ + { 21, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6 */ + { 21, 19, 20, 4, 5, 8, ZSTD_lazy }, /* level 7 */ + { 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 8 */ + { 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */ + { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 10 */ + { 22, 21, 22, 6, 5, 16, ZSTD_lazy2 }, /* level 11 */ + { 22, 22, 23, 6, 5, 32, ZSTD_lazy2 }, /* level 12 */ + { 22, 22, 22, 4, 5, 32, ZSTD_btlazy2 }, /* level 13 */ + { 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */ + { 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */ + { 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */ + { 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */ + { 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */ + { 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */ + { 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */ + { 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */ + { 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */ +}, +{ /* for srcSize <= 256 KB */ + /* W, C, H, S, L, T, strat */ + { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ + { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */ + { 18, 14, 14, 1, 5, 0, ZSTD_dfast }, /* level 2 */ + { 18, 16, 16, 1, 4, 0, ZSTD_dfast }, /* level 3 */ + { 18, 16, 17, 3, 5, 2, ZSTD_greedy }, /* level 4.*/ + { 18, 17, 18, 5, 5, 2, ZSTD_greedy }, /* level 5.*/ + { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/ + { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */ + { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ + { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ + { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ + { 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/ + { 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/ + { 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */ + { 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ + { 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/ + { 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/ + { 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/ + { 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/ + { 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ + { 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/ + { 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/ + { 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/ +}, +{ /* for srcSize <= 128 KB */ + /* W, C, H, S, L, T, strat */ + { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ + { 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */ + { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */ + { 17, 15, 16, 2, 5, 0, ZSTD_dfast }, /* level 3 */ + { 17, 17, 17, 2, 4, 0, ZSTD_dfast }, /* level 4 */ + { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */ + { 17, 16, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */ + { 17, 16, 17, 3, 4, 
8, ZSTD_lazy2 }, /* level 7 */ + { 17, 16, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ + { 17, 16, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ + { 17, 16, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ + { 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */ + { 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */ + { 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/ + { 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ + { 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/ + { 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/ + { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/ + { 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/ + { 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/ + { 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/ + { 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ + { 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/ +}, +{ /* for srcSize <= 16 KB */ + /* W, C, H, S, L, T, strat */ + { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ + { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */ + { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */ + { 14, 14, 15, 2, 4, 0, ZSTD_dfast }, /* level 3 */ + { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */ + { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/ + { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */ + { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */ + { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/ + { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/ + { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/ + { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/ + { 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/ + { 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/ + { 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/ + { 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/ + { 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/ + { 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/ + { 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/ + { 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ + { 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/ + { 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ + { 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/ +}, +}; + + + +#endif /* ZSTD_CLEVELS_H */ diff --git a/lib/compress/fse_compress.c b/lib/compress/fse_compress.c index 68b47e10935..1ce3cf16ac1 100644 --- a/lib/compress/fse_compress.c +++ b/lib/compress/fse_compress.c @@ -1,50 +1,32 @@ /* ****************************************************************** - FSE : Finite State Entropy encoder - Copyright (C) 2013-present, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-   - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ * FSE : Finite State Entropy encoder
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * You can contact the author at :
+ * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
 ****************************************************************** */
 
 /* **************************************************************
 *  Includes
 ****************************************************************/
-#include <stdlib.h>     /* malloc, free, qsort */
-#include <string.h>     /* memcpy, memset */
-#include "compiler.h"
-#include "mem.h"        /* U32, U16, etc. */
-#include "debug.h"      /* assert, DEBUGLOG */
+#include "../common/compiler.h"
+#include "../common/mem.h"        /* U32, U16, etc. */
+#include "../common/debug.h"      /* assert, DEBUGLOG */
 #include "hist.h"       /* HIST_count_wksp */
-#include "bitstream.h"
+#include "../common/bitstream.h"
 #define FSE_STATIC_LINKING_ONLY
-#include "fse.h"
-#include "error_private.h"
+#include "../common/fse.h"
+#include "../common/error_private.h"
+#define ZSTD_DEPS_NEED_MALLOC
+#define ZSTD_DEPS_NEED_MATH64
+#include "../common/zstd_deps.h"  /* ZSTD_memset */
+#include "../common/bits.h"       /* ZSTD_highbit32 */
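FSE_buildCTable_wksp(), patched in the next hunk, lays its workspace out as a 2-byte-aligned cumul[] array followed by the symbol spread table, and rejects buffers smaller than FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog). A sketch of a conforming caller, assuming the FSE_STATIC_LINKING_ONLY interface and the companion FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32 sizing macro from fse.h; the sizing constants are illustrative:

    #define FSE_STATIC_LINKING_ONLY
    #include "fse.h"

    /* Build a CTable with a statically sized scratch buffer. The array
     * dimensions must cover the maxSymbolValue/tableLog actually passed in. */
    static size_t build_ctable_example(FSE_CTable* ct,
                                       const short* normalizedCounter,
                                       unsigned maxSymbolValue,  /* <= 255 here */
                                       unsigned tableLog)        /* <= 12 here */
    {
        unsigned wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(255, 12)];
        return FSE_buildCTable_wksp(ct, normalizedCounter,
                                    maxSymbolValue, tableLog,
                                    wksp, sizeof(wksp));
    }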
 /* **************************************************************
@@ -94,41 +76,85 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
     void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
     FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
     U32 const step = FSE_TABLESTEP(tableSize);
-    U32 cumul[FSE_MAX_SYMBOL_VALUE+2];
+    U32 const maxSV1 = maxSymbolValue+1;
+
+    U16* cumul = (U16*)workSpace;   /* size = maxSV1 */
+    FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSV1+1));  /* size = tableSize */
 
-    FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace;
     U32 highThreshold = tableSize-1;
 
+    assert(((size_t)workSpace & 1) == 0);  /* Must be 2 bytes-aligned */
+    if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
     /* CTable header */
-    if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge);
     tableU16[-2] = (U16) tableLog;
     tableU16[-1] = (U16) maxSymbolValue;
     assert(tableLog < 16);   /* required for threshold strategy to work */
 
     /* For explanations on how to distribute symbol values over the table :
-     * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+     * https://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
 
 #ifdef __clang_analyzer__
-    memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize);   /* useless initialization, just to keep scan-build happy */
+    ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize);   /* useless initialization, just to keep scan-build happy */
#endif
 
     /* symbol start positions */
     {   U32 u;
         cumul[0] = 0;
-        for (u=1; u <= maxSymbolValue+1; u++) {
+        for (u=1; u <= maxSV1; u++) {
             if (normalizedCounter[u-1]==-1) {  /* Low proba symbol */
                 cumul[u] = cumul[u-1] + 1;
                 tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
             } else {
-                cumul[u] = cumul[u-1] + normalizedCounter[u-1];
+                assert(normalizedCounter[u-1] >= 0);
+                cumul[u] = cumul[u-1] + (U16)normalizedCounter[u-1];
+                assert(cumul[u] >= cumul[u-1]);  /* no overflow */
         }   }
-        cumul[maxSymbolValue+1] = tableSize+1;
+        cumul[maxSV1] = (U16)(tableSize+1);
     }
 
     /* Spread symbols */
-    {   U32 position = 0;
+    if (highThreshold == tableSize - 1) {
+        /* Case for no low prob count symbols. Lay down 8 bytes at a time
+         * to reduce branch misses since we are operating on a small block
+         */
+        BYTE* const spread = tableSymbol + tableSize; /* size = tableSize + 8 (may write beyond tableSize) */
+        {   U64 const add = 0x0101010101010101ull;
+            size_t pos = 0;
+            U64 sv = 0;
+            U32 s;
+            for (s=0; s<maxSV1; ++s, sv += add) {
+                int i;
+                int const n = normalizedCounter[s];
+                MEM_write64(spread + pos, sv);
+                for (i = 8; i < n; i += 8) {
+                    MEM_write64(spread + pos + i, sv);
+                }
+                assert(n>=0);
+                pos += (size_t)n;
+            }
+        }
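The branch above works because every byte of sv holds the same symbol value (sv grows by 0x0101010101010101 per symbol), so one 64-bit store lays down up to eight entries of a run at once. A freestanding sketch of the same trick, with MEM_write64() approximated by memcpy(); the names are illustrative:

    #include <stdint.h>
    #include <string.h>

    /* Write n copies of symbol at dst[pos] using 8-byte stores; dst must
     * have at least 8 bytes of slack past the last entry, exactly like the
     * spread[] buffer above. Any overshoot is overwritten by the next run. */
    static size_t spread_run(uint8_t* dst, size_t pos, uint8_t symbol, int n)
    {
        uint64_t const sv = (uint64_t)symbol * 0x0101010101010101ull;
        int i;
        memcpy(dst + pos, &sv, 8);
        for (i = 8; i < n; i += 8)
            memcpy(dst + pos + i, &sv, 8);
        return pos + (size_t)n;
    }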
+        /* Spread symbols across the table. Lack of lowprob symbols means that
+         * we don't need a variable sized inner loop, so we can unroll the loop and
+         * reduce branch misses.
+         */
+        {   size_t position = 0;
+            size_t s;
+            size_t const unroll = 2; /* Experimentally determined optimal unroll */
+            assert(tableSize % unroll == 0);  /* FSE_MIN_TABLELOG is 5 */
+            for (s = 0; s < (size_t)tableSize; s += unroll) {
+                size_t u;
+                for (u = 0; u < unroll; ++u) {
+                    size_t const uPosition = (position + (u * step)) & tableMask;
+                    tableSymbol[uPosition] = spread[s + u];
+                }
+                position = (position + (unroll * step)) & tableMask;
+            }
+            assert(position == 0);  /* Must have initialized all positions */
+        }
+    } else {
+        U32 position = 0;
         U32 symbol;
-        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
+        for (symbol=0; symbol<maxSV1; symbol++) {
             int nbOccurrences;
             int const freq = normalizedCounter[symbol];
             for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
                 tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
                 position = (position + step) & tableMask;
                 while (position > highThreshold) position = (position + step) & tableMask;   /* Low proba area */
         }   }
-        assert(position==0);  /* Must have initialized all positions */
     }
 
@@ -161,16 +186,17 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
         case -1:
         case  1:
             symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
-            symbolTT[s].deltaFindState = total - 1;
+            assert(total <= INT_MAX);
+            symbolTT[s].deltaFindState = (int)(total - 1);
             total ++;
             break;
         default :
-            {
-                U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
-                U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
+            assert(normalizedCounter[s] > 1);
+            {   U32 const maxBitsOut = tableLog - ZSTD_highbit32 ((U32)normalizedCounter[s]-1);
+                U32 const minStatePlus = (U32)normalizedCounter[s] << maxBitsOut;
                 symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
-                symbolTT[s].deltaFindState = total - normalizedCounter[s];
-                total += normalizedCounter[s];
+                symbolTT[s].deltaFindState = (int)(total - (unsigned)normalizedCounter[s]);
+                total += (unsigned)normalizedCounter[s];
     }   }   }   }
 
 #if 0  /* debug : symbol costs */
@@ -181,31 +207,26 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
                     symbol, normalizedCounter[symbol],
                     FSE_getMaxNbBits(symbolTT, symbol),
                     (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
-        }
-    }
+    }   }
 #endif
 
     return 0;
 }
 
-size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
-{
-    FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE];   /* memset() is not necessary, even if static analyzer complain about it */
-    return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, tableSymbol, sizeof(tableSymbol));
-}
-
-
 #ifndef FSE_COMMONDEFS_ONLY
 
-
 /*-**************************************************************
 *  FSE NCount encoding
 ****************************************************************/
 size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
 {
-    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
+    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog
+                                   + 4 /* bitCount initialized at 4 */
+                                   + 2 /* first two symbols may use one additional bit each */) / 8)
+                                 + 1 /* round up to whole nb bytes */
+                                 + 2 /* additional two bytes for bitstream flush */;
     return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ? use default */
 }
 
@@ -234,7 +255,7 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize,
 
     /* Init */
     remaining = tableSize+1;   /* +1 for extra accuracy */
     threshold = tableSize;
-    nbBits = tableLog+1;
+    nbBits = (int)tableLog+1;
 
     while ((symbol < alphabetSize) && (remaining>1)) {  /* stops at 1 */
         if (previousIs0) {
@@ -253,7 +274,7 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize,
             }
             while (symbol >= start+3) {
                 start+=3;
-                bitStream += 3 << bitCount;
+                bitStream += 3U << bitCount;
                 bitCount += 2;
             }
             bitStream += (symbol-start) << bitCount;
@@ -273,7 +294,7 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize,
 
         count++;   /* +1 for extra accuracy */
         if (count>=threshold) count += max;   /* [0..max[ [max..threshold[ (...)
[threshold+max 2*threshold[ */ - bitStream += count << bitCount; + bitStream += (U32)count << bitCount; bitCount += nbBits; bitCount -= (count>8); out+= (bitCount+7) /8; - return (out-ostart); + assert(out >= ostart); + return (size_t)(out-ostart); } @@ -322,21 +344,11 @@ size_t FSE_writeNCount (void* buffer, size_t bufferSize, * FSE Compression Code ****************************************************************/ -FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog) -{ - size_t size; - if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; - size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32); - return (FSE_CTable*)malloc(size); -} - -void FSE_freeCTable (FSE_CTable* ct) { free(ct); } - /* provides the minimum logSize to safely represent a distribution */ static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) { - U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1; - U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2; + U32 minBitsSrc = ZSTD_highbit32((U32)(srcSize)) + 1; + U32 minBitsSymbols = ZSTD_highbit32(maxSymbolValue) + 2; U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols; assert(srcSize > 1); /* Not supported, RLE should be used instead */ return minBits; @@ -344,7 +356,7 @@ static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus) { - U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus; + U32 maxBitsSrc = ZSTD_highbit32((U32)(srcSize - 1)) - minus; U32 tableLog = maxTableLog; U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue); assert(srcSize > 1); /* Not supported, RLE should be used instead */ @@ -361,11 +373,10 @@ unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxS return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2); } - /* Secondary normalization method. To be used when primary method fails. 
*/ -static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue) +static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount) { short const NOT_YET_ASSIGNED = -2; U32 s; @@ -382,7 +393,7 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, continue; } if (count[s] <= lowThreshold) { - norm[s] = -1; + norm[s] = lowProbCount; distributed++; total -= count[s]; continue; @@ -434,7 +445,7 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, { U64 const vStepLog = 62 - tableLog; U64 const mid = (1ULL << (vStepLog-1)) - 1; - U64 const rStep = ((((U64)1<> scale); @@ -490,7 +501,7 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog, } } if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) { /* corner case, need another normalization method */ - size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue); + size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount); if (FSE_isError(errorCode)) return errorCode; } else normalizedCounter[largest] += (short)stillToDistribute; @@ -513,40 +524,6 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog, return tableLog; } - -/* fake FSE_CTable, for raw (uncompressed) input */ -size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits) -{ - const unsigned tableSize = 1 << nbBits; - const unsigned tableMask = tableSize - 1; - const unsigned maxSymbolValue = tableMask; - void* const ptr = ct; - U16* const tableU16 = ( (U16*) ptr) + 2; - void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */ - FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); - unsigned s; - - /* Sanity checks */ - if (nbBits < 1) return ERROR(GENERIC); /* min size */ - - /* header */ - tableU16[-2] = (U16) nbBits; - tableU16[-1] = (U16) maxSymbolValue; - - /* Build table */ - for (s=0; s not compressible */ - if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */ - } - - tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue); - CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) ); - - /* Write table description header */ - { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); - op += nc_err; - } - - /* Compress */ - CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) ); - { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) ); - if (cSize == 0) return 0; /* not enough space for compressed data */ - op += cSize; - } - - /* check compressibility */ - if ( (size_t)(op-ostart) >= srcSize-1 ) return 0; - - return op-ostart; -} - -typedef struct { - FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)]; - BYTE scratchBuffer[1 << FSE_MAX_TABLELOG]; -} fseWkspMax_t; - -size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog) -{ - fseWkspMax_t scratchBuffer; - DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */ - if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); - return 
FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer)); -} - -size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize) -{ - return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG); -} - - #endif /* FSE_COMMONDEFS_ONLY */ diff --git a/lib/compress/hist.c b/lib/compress/hist.c index 45b7babc1e2..3692bc250ca 100644 --- a/lib/compress/hist.c +++ b/lib/compress/hist.c @@ -1,44 +1,30 @@ /* ****************************************************************** - hist : Histogram functions - part of Finite State Entropy project - Copyright (C) 2013-present, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - - Public forum : https://groups.google.com/forum/#!forum/lz4c + * hist : Histogram functions + * part of Finite State Entropy project + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * You can contact the author at : + * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy + * - Public forum : https://groups.google.com/forum/#!forum/lz4c + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ /* --- dependencies --- */ -#include "mem.h" /* U32, BYTE, etc. */ -#include "debug.h" /* assert, DEBUGLOG */ -#include "error_private.h" /* ERROR */ +#include "../common/mem.h" /* U32, BYTE, etc. 
*/ +#include "../common/debug.h" /* assert, DEBUGLOG */ +#include "../common/error_private.h" /* ERROR */ #include "hist.h" +#if defined(ZSTD_ARCH_ARM_SVE2) +#define HIST_FAST_THRESHOLD 500 +#else +#define HIST_FAST_THRESHOLD 1500 +#endif + /* --- Error management --- */ unsigned HIST_isError(size_t code) { return ERR_isError(code); } @@ -46,6 +32,16 @@ unsigned HIST_isError(size_t code) { return ERR_isError(code); } /*-************************************************************** * Histogram functions ****************************************************************/ +void HIST_add(unsigned* count, const void* src, size_t srcSize) +{ + const BYTE* ip = (const BYTE*)src; + const BYTE* const end = ip + srcSize; + + while (ip *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall); + *maxSymbolValuePtr = maxSymbolValue; + + /* If the buffer size is not divisible by 16, the last elements of the final + * vector register read will be zeros, and these elements must be subtracted + * from the histogram. + */ + hh0 = svsub_n_u16_m(svptrue_pat_b32(SV_VL1), hh0, -sourceSize & 15); + + svst1_u32(svwhilelt_b32_u64( 0, maxCount), count + 0, svshllb_n_u32(hh0, 0)); + svst1_u32(svwhilelt_b32_u64( 4, maxCount), count + 4, svshllt_n_u32(hh0, 0)); + svst1_u32(svwhilelt_b32_u64( 8, maxCount), count + 8, svshllb_n_u32(hh1, 0)); + svst1_u32(svwhilelt_b32_u64(12, maxCount), count + 12, svshllt_n_u32(hh1, 0)); + svst1_u32(svwhilelt_b32_u64(16, maxCount), count + 16, svshllb_n_u32(hh2, 0)); + svst1_u32(svwhilelt_b32_u64(20, maxCount), count + 20, svshllt_n_u32(hh2, 0)); + svst1_u32(svwhilelt_b32_u64(24, maxCount), count + 24, svshllb_n_u32(hh3, 0)); + svst1_u32(svwhilelt_b32_u64(28, maxCount), count + 28, svshllt_n_u32(hh3, 0)); + svst1_u32(svwhilelt_b32_u64(32, maxCount), count + 32, svshllb_n_u32(hh4, 0)); + svst1_u32(svwhilelt_b32_u64(36, maxCount), count + 36, svshllt_n_u32(hh4, 0)); + svst1_u32(svwhilelt_b32_u64(40, maxCount), count + 40, svshllb_n_u32(hh5, 0)); + svst1_u32(svwhilelt_b32_u64(44, maxCount), count + 44, svshllt_n_u32(hh5, 0)); + svst1_u32(svwhilelt_b32_u64(48, maxCount), count + 48, svshllb_n_u32(hh6, 0)); + svst1_u32(svwhilelt_b32_u64(52, maxCount), count + 52, svshllt_n_u32(hh6, 0)); + svst1_u32(svwhilelt_b32_u64(56, maxCount), count + 56, svshllb_n_u32(hh7, 0)); + svst1_u32(svwhilelt_b32_u64(60, maxCount), count + 60, svshllt_n_u32(hh7, 0)); + + hh0 = svmax_u16_x(vl128, hh0, hh1); + hh2 = svmax_u16_x(vl128, hh2, hh3); + hh4 = svmax_u16_x(vl128, hh4, hh5); + hh6 = svmax_u16_x(vl128, hh6, hh7); + hh0 = svmax_u16_x(vl128, hh0, hh2); + hh4 = svmax_u16_x(vl128, hh4, hh6); + max = svmax_u16_x(vl128, hh0, hh4); + + maxSymbolValue = min_size(maxSymbolValue, maxCount); + if (maxSymbolValue >= 64) { + const svuint8_t c4 = svadd_n_u8_x(vl128, c0, 64); + const svuint8_t c5 = svadd_n_u8_x(vl128, c1, 64); + const svuint8_t c6 = svadd_n_u8_x(vl128, c2, 64); + const svuint8_t c7 = svadd_n_u8_x(vl128, c3, 64); + const svuint8_t c8 = svadd_n_u8_x(vl128, c0, 128); + const svuint8_t c9 = svadd_n_u8_x(vl128, c1, 128); + + max = HIST_count_6_sve2(ip, sourceSize, count + 64, c4, c5, c6, c7, + c8, c9, max, maxCount - 64); + + if (maxSymbolValue >= 160) { + const svuint8_t ca = svadd_n_u8_x(vl128, c2, 128); + const svuint8_t cb = svadd_n_u8_x(vl128, c3, 128); + const svuint8_t cc = svadd_n_u8_x(vl128, c4, 128); + const svuint8_t cd = svadd_n_u8_x(vl128, c5, 128); + const svuint8_t ce = svadd_n_u8_x(vl128, c6, 128); + const svuint8_t cf = svadd_n_u8_x(vl128, c7, 128); + + max = HIST_count_6_sve2(ip, sourceSize, 
count + 160, ca, cb, cc, + cd, ce, cf, max, maxCount - 160); + } else if (maxCount > 160) { + ZSTD_memset(count + 160, 0, (maxCount - 160) * sizeof(*count)); + } + } else if (maxCount > 64) { + ZSTD_memset(count + 64, 0, (maxCount - 64) * sizeof(*count)); + } + + return svmaxv_u16(vl128, max); + } +} +#endif + /* HIST_count_parallel_wksp() : * store histogram into 4 intermediate tables, recombined at the end. * this design makes better use of OoO cpus, * and is noticeably faster when some values are heavily repeated. * But it needs some additional workspace for intermediate tables. - * `workSpace` size must be a table of size >= HIST_WKSP_SIZE_U32. + * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32. * @return : largest histogram frequency, - * or an error code (notably when histogram would be larger than *maxSymbolValuePtr). */ -static size_t HIST_count_parallel_wksp( - unsigned* count, unsigned* maxSymbolValuePtr, + * or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */ +static UNUSED_ATTR +size_t HIST_count_parallel_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize, HIST_checkInput_e check, U32* const workSpace) { const BYTE* ip = (const BYTE*)source; const BYTE* const iend = ip+sourceSize; - unsigned maxSymbolValue = *maxSymbolValuePtr; + size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count); unsigned max=0; U32* const Counting1 = workSpace; U32* const Counting2 = Counting1 + 256; U32* const Counting3 = Counting2 + 256; U32* const Counting4 = Counting3 + 256; - memset(workSpace, 0, 4*256*sizeof(unsigned)); - /* safety checks */ + assert(*maxSymbolValuePtr <= 255); if (!sourceSize) { - memset(count, 0, maxSymbolValue + 1); + ZSTD_memset(count, 0, countSize); *maxSymbolValuePtr = 0; return 0; } - if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */ + ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned)); /* by stripes of 16 bytes */ { U32 cached = MEM_read32(ip); ip += 4; @@ -138,21 +371,18 @@ static size_t HIST_count_parallel_wksp( /* finish last symbols */ while (ipmaxSymbolValue; s--) { - Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s]; - if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall); - } } - { U32 s; - if (maxSymbolValue > 255) maxSymbolValue = 255; - for (s=0; s<=maxSymbolValue; s++) { - count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s]; - if (count[s] > max) max = count[s]; + for (s=0; s<256; s++) { + Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s]; + if (Counting1[s] > max) max = Counting1[s]; } } - while (!count[maxSymbolValue]) maxSymbolValue--; - *maxSymbolValuePtr = maxSymbolValue; + { unsigned maxSymbolValue = 255; + while (!Counting1[maxSymbolValue]) maxSymbolValue--; + if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall); + *maxSymbolValuePtr = maxSymbolValue; + ZSTD_memmove(count, Counting1, countSize); /* in case count & Counting1 are overlapping */ + } return (size_t)max; } @@ -165,19 +395,17 @@ size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize, void* workSpace, size_t workSpaceSize) { - if (sourceSize < 1500) /* heuristic threshold */ + if (sourceSize < HIST_FAST_THRESHOLD) /* heuristic threshold */ return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize); +#if defined(ZSTD_ARCH_ARM_SVE2) + (void)workSpace; + (void)workSpaceSize; + return HIST_count_sve2(count, maxSymbolValuePtr, source, sourceSize, 
trustInput); +#else if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall); return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace); -} - -/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */ -size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr, - const void* source, size_t sourceSize) -{ - unsigned tmpCounters[HIST_WKSP_SIZE_U32]; - return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters)); +#endif } /* HIST_count_wksp() : @@ -187,17 +415,32 @@ size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize, void* workSpace, size_t workSpaceSize) { +#if defined(ZSTD_ARCH_ARM_SVE2) + if (*maxSymbolValuePtr < 255) + return HIST_count_sve2(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue); +#else if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall); if (*maxSymbolValuePtr < 255) return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace); +#endif *maxSymbolValuePtr = 255; return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize); } +#ifndef ZSTD_NO_UNUSED_FUNCTIONS +/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */ +size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize) +{ + unsigned tmpCounters[HIST_WKSP_SIZE_U32]; + return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters)); +} + size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize) { unsigned tmpCounters[HIST_WKSP_SIZE_U32]; return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters)); } +#endif diff --git a/lib/compress/hist.h b/lib/compress/hist.h index 8b389358dc1..e526e9532a7 100644 --- a/lib/compress/hist.h +++ b/lib/compress/hist.h @@ -1,40 +1,20 @@ /* ****************************************************************** - hist : Histogram functions - part of Finite State Entropy project - Copyright (C) 2013-present, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-  You can contact the author at :
-  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-  - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ * hist : Histogram functions
+ * part of Finite State Entropy project
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * You can contact the author at :
+ * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
 ****************************************************************** */
 
 /* --- dependencies --- */
-#include <stddef.h>   /* size_t */
+#include "../common/zstd_deps.h"   /* size_t */
 
 
 /* --- simple histogram functions --- */
@@ -55,7 +35,11 @@ unsigned HIST_isError(size_t code);  /**< tells if a return value is an error code */
 
 /* --- advanced histogram functions --- */
 
+#if defined(__ARM_FEATURE_SVE2)
+#define HIST_WKSP_SIZE_U32 0
+#else
 #define HIST_WKSP_SIZE_U32 1024
+#endif
 #define HIST_WKSP_SIZE (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
 /** HIST_count_wksp() :
  *  Same as HIST_count(), but using an externally provided scratch buffer.
@@ -93,3 +77,10 @@ size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
  */
 unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
                            const void* src, size_t srcSize);
+
+/*! HIST_add() :
+ *  Lowest level: just add nb of occurrences of characters from @src into @count.
+ *  @count is not reset. @count array is presumed large enough (i.e. 1 KB).
+ *  This function does not need any additional stack memory.
+ */
+void HIST_add(unsigned* count, const void* src, size_t srcSize);
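HIST_add() deliberately skips both zeroing and bounds checks, so the caller owns them. A sketch of accumulating one histogram across several input chunks, assuming hist.h is on the include path; the function and buffer names are illustrative:

    #include <string.h>   /* memset */
    #include "hist.h"

    /* Accumulate byte statistics over two chunks, then locate the largest
     * symbol actually present. count[] must hold 256 entries (1 KB). */
    static unsigned histogram_two_chunks(unsigned count[256],
                                         const void* chunk1, size_t size1,
                                         const void* chunk2, size_t size2)
    {
        unsigned maxSymbolValue = 255;
        memset(count, 0, 256 * sizeof(*count));   /* HIST_add() does not reset */
        HIST_add(count, chunk1, size1);
        HIST_add(count, chunk2, size2);
        while (maxSymbolValue && count[maxSymbolValue] == 0) maxSymbolValue--;
        return maxSymbolValue;
    }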
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy - - Public forum : https://groups.google.com/forum/#!forum/lz4c + * Huffman encoder, part of New Generation Entropy library + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * You can contact the author at : + * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + * - Public forum : https://groups.google.com/forum/#!forum/lz4c + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ /* ************************************************************** @@ -43,16 +23,15 @@ /* ************************************************************** * Includes ****************************************************************/ -#include /* memcpy, memset */ -#include /* printf (debug) */ -#include "compiler.h" -#include "bitstream.h" +#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */ +#include "../common/compiler.h" +#include "../common/bitstream.h" #include "hist.h" #define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ -#include "fse.h" /* header compression */ -#define HUF_STATIC_LINKING_ONLY -#include "huf.h" -#include "error_private.h" +#include "../common/fse.h" /* header compression */ +#include "../common/huf.h" +#include "../common/error_private.h" +#include "../common/bits.h" /* ZSTD_highbit32 */ /* ************************************************************** @@ -60,29 +39,114 @@ ****************************************************************/ #define HUF_isError ERR_isError #define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ -#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e -#define CHECK_F(f) { CHECK_V_F(_var_err__, f); } /* ************************************************************** -* Utils +* Required declarations ****************************************************************/ -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) +typedef struct nodeElt_s { + U32 count; + U16 parent; + BYTE byte; + BYTE nbBits; +} nodeElt; + + +/* ************************************************************** +* Debug Traces +****************************************************************/ + +#if DEBUGLEVEL >= 2 + +static size_t showU32(const U32* arr, size_t size) { - return FSE_optimalTableLog_internal(maxTableLog, 
srcSize, maxSymbolValue, 1); + size_t u; + for (u=0; u= add) { + assert(add < align); + assert(((size_t)aligned & mask) == 0); + *workspaceSizePtr -= add; + return aligned; + } else { + *workspaceSizePtr = 0; + return NULL; + } +} + + /* HUF_compressWeights() : * Same as FSE_compress(), but dedicated to huff0's weights compression. * The use case needs much less stack memory. * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX. */ #define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 -static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize) + +typedef struct { + FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)]; + U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)]; + unsigned count[HUF_TABLELOG_MAX+1]; + S16 norm[HUF_TABLELOG_MAX+1]; +} HUF_CompressWeightsWksp; + +static size_t +HUF_compressWeights(void* dst, size_t dstSize, + const void* weightTable, size_t wtSize, + void* workspace, size_t workspaceSize) { BYTE* const ostart = (BYTE*) dst; BYTE* op = ostart; @@ -90,69 +154,125 @@ static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weight unsigned maxSymbolValue = HUF_TABLELOG_MAX; U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; + HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32)); - FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)]; - BYTE scratchBuffer[1<count, &maxSymbolValue, weightTable, wtSize); /* never fails */ if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */ if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */ } tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); - CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) ); + CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) ); /* Write table description header */ - { CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); + { CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) ); op += hSize; } /* Compress */ - CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) ); - { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) ); + CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) ); + { CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) ); if (cSize == 0) return 0; /* not enough space for compressed data */ op += cSize; } - return op-ostart; + return (size_t)(op-ostart); } +static size_t HUF_getNbBits(HUF_CElt elt) +{ + return elt & 0xFF; +} -struct HUF_CElt_s { - U16 val; - BYTE nbBits; -}; /* typedef'd to HUF_CElt within "huf.h" */ +static size_t HUF_getNbBitsFast(HUF_CElt elt) +{ + return elt; +} + +static size_t HUF_getValue(HUF_CElt elt) +{ + return elt & ~(size_t)0xFF; +} + +static size_t HUF_getValueFast(HUF_CElt elt) +{ + return elt; +} -/*! HUF_writeCTable() : - `CTable` : Huffman tree to save, using huf representation. 
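HUF_alignUpWorkspace(), introduced above, lets every internal workspace consumer accept an arbitrarily aligned buffer: it rounds the pointer up to the requested power-of-two alignment and charges the padding against the remaining size. A self-contained sketch of the same idiom (illustrative names, not the zstd symbol):

    #include <assert.h>
    #include <stddef.h>

    /* Round ws up to the next multiple of align (a power of two),
     * shrinking *wsSize by the padding consumed. */
    static void* align_up_workspace(void* ws, size_t* wsSize, size_t align)
    {
        size_t const mask = align - 1;
        size_t const rem  = (size_t)ws & mask;
        size_t const add  = (align - rem) & mask;   /* 0 when already aligned */
        assert((align & mask) == 0);                /* power of two only */
        if (*wsSize < add) { *wsSize = 0; return NULL; }
        *wsSize -= add;
        return (char*)ws + add;
    }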
- @return : size of saved CTable */ -size_t HUF_writeCTable (void* dst, size_t maxDstSize, - const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog) +static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits) { + assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX); + *elt = nbBits; +} + +static void HUF_setValue(HUF_CElt* elt, size_t value) +{ + size_t const nbBits = HUF_getNbBits(*elt); + if (nbBits > 0) { + assert((value >> nbBits) == 0); + *elt |= value << (sizeof(HUF_CElt) * 8 - nbBits); + } +} + +HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable) +{ + HUF_CTableHeader header; + ZSTD_memcpy(&header, ctable, sizeof(header)); + return header; +} + +static void HUF_writeCTableHeader(HUF_CElt* ctable, U32 tableLog, U32 maxSymbolValue) +{ + HUF_CTableHeader header; + HUF_STATIC_ASSERT(sizeof(ctable[0]) == sizeof(header)); + ZSTD_memset(&header, 0, sizeof(header)); + assert(tableLog < 256); + header.tableLog = (BYTE)tableLog; + assert(maxSymbolValue < 256); + header.maxSymbolValue = (BYTE)maxSymbolValue; + ZSTD_memcpy(ctable, &header, sizeof(header)); +} + +typedef struct { + HUF_CompressWeightsWksp wksp; BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */ BYTE huffWeight[HUF_SYMBOLVALUE_MAX]; +} HUF_WriteCTableWksp; + +size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, + const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, + void* workspace, size_t workspaceSize) +{ + HUF_CElt const* const ct = CTable + 1; BYTE* op = (BYTE*)dst; U32 n; + HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32)); + + HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE >= sizeof(HUF_WriteCTableWksp)); - /* check conditions */ + assert(HUF_readCTableHeader(CTable).maxSymbolValue == maxSymbolValue); + assert(HUF_readCTableHeader(CTable).tableLog == huffLog); + + /* check conditions */ + if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC); if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); /* convert to weight */ - bitsToWeight[0] = 0; + wksp->bitsToWeight[0] = 0; for (n=1; nbitsToWeight[n] = (BYTE)(huffLog + 1 - n); for (n=0; nhuffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])]; /* attempt weights compression by FSE */ - { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) ); + if (maxDstSize < 1) return ERROR(dstSize_tooSmall); + { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) ); if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ op[0] = (BYTE)hSize; return hSize+1; @@ -162,45 +282,51 @@ size_t HUF_writeCTable (void* dst, size_t maxDstSize, if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */ if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */ op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1)); - huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ + wksp->huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ for (n=0; nhuffWeight[n] << 4) + wksp->huffWeight[n+1]); return ((maxSymbolValue+1)/2) + 1; } -size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize) +size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t 
srcSize, unsigned* hasZeroWeights) { BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */ U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */ U32 tableLog = 0; U32 nbSymbols = 0; + HUF_CElt* const ct = CTable + 1; /* get symbol weights */ CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize)); + *hasZeroWeights = (rankVal[0] > 0); /* check result */ if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall); + *maxSymbolValuePtr = nbSymbols - 1; + + HUF_writeCTableHeader(CTable, tableLog, *maxSymbolValuePtr); + /* Prepare base value per rank */ { U32 n, nextRankStart = 0; for (n=1; n<=tableLog; n++) { - U32 current = nextRankStart; + U32 curr = nextRankStart; nextRankStart += (rankVal[n] << (n-1)); - rankVal[n] = current; + rankVal[n] = curr; } } /* fill nbBits */ { U32 n; for (n=0; nn=tableLog+1 */ U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; - { U32 n; for (n=0; n>= 1; } } /* assign value within rank, symbol order */ - { U32 n; for (n=0; n HUF_readCTableHeader(CTable).maxSymbolValue) + return 0; + return (U32)HUF_getNbBits(ct[symbolValue]); } -typedef struct nodeElt_s { - U32 count; - U16 parent; - BYTE byte; - BYTE nbBits; -} nodeElt; - -static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits) +/** + * HUF_setMaxHeight(): + * Try to enforce @targetNbBits on the Huffman tree described in @huffNode. + * + * It attempts to convert all nodes with nbBits > @targetNbBits + * to employ @targetNbBits instead. Then it adjusts the tree + * so that it remains a valid canonical Huffman tree. + * + * @pre The sum of the ranks of each symbol == 2^largestBits, + * where largestBits == huffNode[lastNonNull].nbBits. + * @post The sum of the ranks of each symbol == 2^largestBits, + * where largestBits is the return value (expected <= targetNbBits). + * + * @param huffNode The Huffman tree modified in place to enforce targetNbBits. + * It's presumed sorted, from most frequent to rarest symbol. + * @param lastNonNull The symbol with the lowest count in the Huffman tree. + * @param targetNbBits The allowed number of bits, which the Huffman tree + * may not respect. After this function the Huffman tree will + * respect targetNbBits. + * @return The maximum number of bits of the Huffman tree after adjustment. + */ +static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 targetNbBits) { const U32 largestBits = huffNode[lastNonNull].nbBits; - if (largestBits <= maxNbBits) return largestBits; /* early exit : no elt > maxNbBits */ + /* early exit : no elt > targetNbBits, so the tree is already valid. */ + if (largestBits <= targetNbBits) return largestBits; + + DEBUGLOG(5, "HUF_setMaxHeight (targetNbBits = %u)", targetNbBits); /* there are several too large elements (at least >= 2) */ { int totalCost = 0; - const U32 baseCost = 1 << (largestBits - maxNbBits); - U32 n = lastNonNull; - - while (huffNode[n].nbBits > maxNbBits) { + const U32 baseCost = 1 << (largestBits - targetNbBits); + int n = (int)lastNonNull; + + /* Adjust any ranks > targetNbBits to targetNbBits. + * Compute totalCost, which is how far the sum of the ranks is + * we are over 2^largestBits after adjust the offending ranks. 
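HUF_readCTableHeader()/HUF_writeCTableHeader() above stash (tableLog, maxSymbolValue) in the first CTable element, and they do it with ZSTD_memcpy rather than a pointer cast, so the type punning stays well-defined under strict aliasing. A reduced sketch of the round-trip, using an illustrative 64-bit slot in place of HUF_CElt:

    #include <stdint.h>
    #include <string.h>

    typedef struct { uint8_t tableLog; uint8_t maxSymbolValue; uint8_t unused[6]; } Hdr;

    static void write_hdr(uint64_t* slot0, Hdr h) {
        memcpy(slot0, &h, sizeof(h));   /* sizeof(Hdr) == sizeof(*slot0) */
    }
    static Hdr read_hdr(const uint64_t* slot0) {
        Hdr h;
        memcpy(&h, slot0, sizeof(h));
        return h;
    }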
+ */ + while (huffNode[n].nbBits > targetNbBits) { totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); - huffNode[n].nbBits = (BYTE)maxNbBits; - n --; - } /* n stops at huffNode[n].nbBits <= maxNbBits */ - while (huffNode[n].nbBits == maxNbBits) n--; /* n end at index of smallest symbol using < maxNbBits */ + huffNode[n].nbBits = (BYTE)targetNbBits; + n--; + } + /* n stops at huffNode[n].nbBits <= targetNbBits */ + assert(huffNode[n].nbBits <= targetNbBits); + /* n end at index of smallest symbol using < targetNbBits */ + while (huffNode[n].nbBits == targetNbBits) --n; - /* renorm totalCost */ - totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */ + /* renorm totalCost from 2^largestBits to 2^targetNbBits + * note : totalCost is necessarily a multiple of baseCost */ + assert(((U32)totalCost & (baseCost - 1)) == 0); + totalCost >>= (largestBits - targetNbBits); + assert(totalCost > 0); /* repay normalized cost */ { U32 const noSymbol = 0xF0F0F0F0; U32 rankLast[HUF_TABLELOG_MAX+2]; - int pos; - /* Get pos of last (smallest) symbol per rank */ - memset(rankLast, 0xF0, sizeof(rankLast)); - { U32 currentNbBits = maxNbBits; + /* Get pos of last (smallest = lowest cum. count) symbol per rank */ + ZSTD_memset(rankLast, 0xF0, sizeof(rankLast)); + { U32 currentNbBits = targetNbBits; + int pos; for (pos=n ; pos >= 0; pos--) { if (huffNode[pos].nbBits >= currentNbBits) continue; - currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */ - rankLast[maxNbBits-currentNbBits] = pos; + currentNbBits = huffNode[pos].nbBits; /* < targetNbBits */ + rankLast[targetNbBits-currentNbBits] = (U32)pos; } } while (totalCost > 0) { - U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1; + /* Try to reduce the next power of 2 above totalCost because we + * gain back half the rank. + */ + U32 nBitsToDecrease = ZSTD_highbit32((U32)totalCost) + 1; + assert(nBitsToDecrease <= HUF_TABLELOG_MAX+1); for ( ; nBitsToDecrease > 1; nBitsToDecrease--) { - U32 highPos = rankLast[nBitsToDecrease]; - U32 lowPos = rankLast[nBitsToDecrease-1]; + U32 const highPos = rankLast[nBitsToDecrease]; + U32 const lowPos = rankLast[nBitsToDecrease-1]; if (highPos == noSymbol) continue; + /* Decrease highPos if no symbols of lowPos or if it is + * not cheaper to remove 2 lowPos than highPos. + */ if (lowPos == noSymbol) break; { U32 const highTotal = huffNode[highPos].count; U32 const lowTotal = 2 * huffNode[lowPos].count; if (highTotal <= lowTotal) break; } } /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */ + assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1); /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */ while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) - nBitsToDecrease ++; + nBitsToDecrease++; + assert(rankLast[nBitsToDecrease] != noSymbol); + /* Increase the number of bits to gain back half the rank cost. */ totalCost -= 1 << (nBitsToDecrease-1); + huffNode[rankLast[nBitsToDecrease]].nbBits++; + + /* Fix up the new rank. + * If the new rank was empty, this symbol is now its smallest. + * Otherwise, this symbol will be the largest in the new rank so no adjustment. 
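The @pre/@post contract above is the Kraft equality: treating each symbol's "rank" as 2^(largestBits - nbBits), the ranks must always sum to exactly 2^largestBits, and totalCost tracks the deviation while heights are being clamped. A sketch of a checker for that invariant (illustrative, over the symbols actually present in the tree):

    #include <stddef.h>
    #include <stdint.h>

    static int kraft_holds(const uint8_t* nbBits, size_t nbSymbols, unsigned largestBits)
    {
        uint64_t sum = 0;
        size_t s;
        for (s = 0; s < nbSymbols; ++s) {
            if (nbBits[s] == 0 || nbBits[s] > largestBits) return 0;
            sum += (uint64_t)1 << (largestBits - nbBits[s]);
        }
        return sum == ((uint64_t)1 << largestBits);
    }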
+ */ if (rankLast[nBitsToDecrease-1] == noSymbol) - rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */ - huffNode[rankLast[nBitsToDecrease]].nbBits ++; + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; + /* Fix up the old rank. + * If the symbol was at position 0, meaning it was the highest weight symbol in the tree, + * it must be the only symbol in its rank, so the old rank now has no symbols. + * Otherwise, since the Huffman nodes are sorted by count, the previous position is now + * the smallest node in the rank. If the previous position belongs to a different rank, + * then the rank is now empty. + */ if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */ rankLast[nBitsToDecrease] = noSymbol; else { rankLast[nBitsToDecrease]--; - if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease) + if (huffNode[rankLast[nBitsToDecrease]].nbBits != targetNbBits-nBitsToDecrease) rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ - } } /* while (totalCost > 0) */ - + } + } /* while (totalCost > 0) */ + + /* If we've removed too much weight, then we have to add it back. + * To avoid overshooting again, we only adjust the smallest rank. + * We take the largest nodes from the lowest rank 0 and move them + * to rank 1. There's guaranteed to be enough rank 0 symbols because + * TODO. + */ while (totalCost < 0) { /* Sometimes, cost correction overshoot */ - if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - while (huffNode[n].nbBits == maxNbBits) n--; + /* special case : no rank 1 symbol (using targetNbBits-1); + * let's create one from largest rank 0 (using targetNbBits). + */ + if (rankLast[1] == noSymbol) { + while (huffNode[n].nbBits == targetNbBits) n--; huffNode[n+1].nbBits--; - rankLast[1] = n+1; + assert(n >= 0); + rankLast[1] = (U32)(n+1); totalCost++; continue; } huffNode[ rankLast[1] + 1 ].nbBits--; rankLast[1]++; totalCost ++; - } } } /* there are several too large elements (at least >= 2) */ + } + } /* repay normalized cost */ + } /* there are several too large elements (at least >= 2) */ - return maxNbBits; + return targetNbBits; } - typedef struct { - U32 base; - U32 current; + U16 base; + U16 curr; } rankPos; -static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue) -{ - rankPos rank[32]; +typedef nodeElt huffNodeTable[2 * (HUF_SYMBOLVALUE_MAX + 1)]; + +/* Number of buckets available for HUF_sort() */ +#define RANK_POSITION_TABLE_SIZE 192 + +typedef struct { + huffNodeTable huffNodeTbl; + rankPos rankPosition[RANK_POSITION_TABLE_SIZE]; +} HUF_buildCTable_wksp_tables; + +/* RANK_POSITION_DISTINCT_COUNT_CUTOFF == Cutoff point in HUF_sort() buckets for which we use log2 bucketing. + * Strategy is to use as many buckets as possible for representing distinct + * counts while using the remainder to represent all "large" counts. + * + * To satisfy this requirement for 192 buckets, we can do the following: + * Let buckets 0-166 represent distinct counts of [0, 166] + * Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing. 
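The bucketing rule described in the comment above (one bucket per distinct small count, log2 buckets for everything larger) reduces to a single branch. A standalone sketch, with constants mirroring the #defines that follow and a portable stand-in for ZSTD_highbit32:

    #include <stdint.h>

    #define TABLE_SIZE    192
    #define MAX_COUNT_LOG 32
    #define LOG_BEGIN     ((TABLE_SIZE - 1) - MAX_COUNT_LOG - 1)

    static unsigned highbit32(uint32_t v) {   /* index of highest set bit, v > 0 */
        unsigned n = 0; while (v >>= 1) n++; return n;
    }
    static uint32_t bucket_index(uint32_t count) {
        uint32_t const cutoff = LOG_BEGIN + highbit32(LOG_BEGIN);
        return (count < cutoff) ? count : highbit32(count) + LOG_BEGIN;
    }

With this shape, counts below the cutoff land in per-value buckets that never need a follow-up sort; only the few log2 buckets get handed to the quicksort.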
+ */ +#define RANK_POSITION_MAX_COUNT_LOG 32 +#define RANK_POSITION_LOG_BUCKETS_BEGIN ((RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */) +#define RANK_POSITION_DISTINCT_COUNT_CUTOFF (RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */) + +/* Return the appropriate bucket index for a given count. See definition of + * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy. + */ +static U32 HUF_getIndex(U32 const count) { + return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF) + ? count + : ZSTD_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN; +} + +/* Helper swap function for HUF_quickSortPartition() */ +static void HUF_swapNodes(nodeElt* a, nodeElt* b) { + nodeElt tmp = *a; + *a = *b; + *b = tmp; +} + +/* Returns 0 if the huffNode array is not sorted by descending count */ +MEM_STATIC int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1) { + U32 i; + for (i = 1; i < maxSymbolValue1; ++i) { + if (huffNode[i].count > huffNode[i-1].count) { + return 0; + } + } + return 1; +} + +/* Insertion sort by descending order */ +HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) { + int i; + int const size = high-low+1; + huffNode += low; + for (i = 1; i < size; ++i) { + nodeElt const key = huffNode[i]; + int j = i - 1; + while (j >= 0 && huffNode[j].count < key.count) { + huffNode[j + 1] = huffNode[j]; + j--; + } + huffNode[j + 1] = key; + } +} + +/* Pivot helper function for quicksort. */ +static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) { + /* Simply select rightmost element as pivot. "Better" selectors like + * median-of-three don't experimentally appear to have any benefit. + */ + U32 const pivot = arr[high].count; + int i = low - 1; + int j = low; + for ( ; j < high; j++) { + if (arr[j].count > pivot) { + i++; + HUF_swapNodes(&arr[i], &arr[j]); + } + } + HUF_swapNodes(&arr[i + 1], &arr[high]); + return i + 1; +} + +/* Classic quicksort by descending with partially iterative calls + * to reduce worst case callstack size. + */ +static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) { + int const kInsertionSortThreshold = 8; + if (high - low < kInsertionSortThreshold) { + HUF_insertionSort(arr, low, high); + return; + } + while (low < high) { + int const idx = HUF_quickSortPartition(arr, low, high); + if (idx - low < high - idx) { + HUF_simpleQuickSort(arr, low, idx - 1); + low = idx + 1; + } else { + HUF_simpleQuickSort(arr, idx + 1, high); + high = idx - 1; + } + } +} + +/** + * HUF_sort(): + * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order. + * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket. + * + * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled. + * Must have (maxSymbolValue + 1) entries. + * @param[in] count Histogram of the symbols. + * @param[in] maxSymbolValue Maximum symbol value. + * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries. + */ +static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) { U32 n; + U32 const maxSymbolValue1 = maxSymbolValue+1; + + /* Compute base and set curr to base. + * For symbol s let lowerRank = HUF_getIndex(count[n]) and rank = lowerRank + 1. + * See HUF_getIndex to see bucketing strategy. 
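HUF_simpleQuickSort() above bounds its stack by recursing only into the smaller partition and looping on the larger one, with insertion sort below a small threshold; that keeps recursion depth O(log n) even on adversarial input. A minimal sketch of the same pattern on plain ints, descending:

    /* Descending hybrid sort: insertion sort for short ranges,
     * quicksort that recurses into the smaller side otherwise. */
    static void insertion_desc(int* a, int lo, int hi) {
        int i;
        for (i = lo + 1; i <= hi; ++i) {
            int const key = a[i];
            int j = i - 1;
            while (j >= lo && a[j] < key) { a[j + 1] = a[j]; j--; }
            a[j + 1] = key;
        }
    }
    static void quicksort_desc(int* a, int lo, int hi) {
        while (lo < hi) {
            int p, i, j;
            if (hi - lo < 8) { insertion_desc(a, lo, hi); return; }
            /* Lomuto partition around the rightmost element */
            for (i = lo - 1, j = lo; j < hi; ++j) {
                if (a[j] > a[hi]) { int t = a[++i]; a[i] = a[j]; a[j] = t; }
            }
            p = i + 1;
            { int t = a[p]; a[p] = a[hi]; a[hi] = t; }
            if (p - lo < hi - p) { quicksort_desc(a, lo, p - 1); lo = p + 1; }
            else                 { quicksort_desc(a, p + 1, hi); hi = p - 1; }
        }
    }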
+ * We attribute each symbol to lowerRank's base value, because we want to know where + * each rank begins in the output, so for rank R we want to count ranks R+1 and above. + */ + ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE); + for (n = 0; n < maxSymbolValue1; ++n) { + U32 lowerRank = HUF_getIndex(count[n]); + assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1); + rankPosition[lowerRank].base++; + } - memset(rank, 0, sizeof(rank)); - for (n=0; n<=maxSymbolValue; n++) { - U32 r = BIT_highbit32(count[n] + 1); - rank[r].base ++; + assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0); + /* Set up the rankPosition table */ + for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) { + rankPosition[n-1].base += rankPosition[n].base; + rankPosition[n-1].curr = rankPosition[n-1].base; } - for (n=30; n>0; n--) rank[n-1].base += rank[n].base; - for (n=0; n<32; n++) rank[n].current = rank[n].base; - for (n=0; n<=maxSymbolValue; n++) { + + /* Insert each symbol into their appropriate bucket, setting up rankPosition table. */ + for (n = 0; n < maxSymbolValue1; ++n) { U32 const c = count[n]; - U32 const r = BIT_highbit32(c+1) + 1; - U32 pos = rank[r].current++; - while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) { - huffNode[pos] = huffNode[pos-1]; - pos--; - } + U32 const r = HUF_getIndex(c) + 1; + U32 const pos = rankPosition[r].curr++; + assert(pos < maxSymbolValue1); huffNode[pos].count = c; huffNode[pos].byte = (BYTE)n; } + + /* Sort each bucket. */ + for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) { + int const bucketSize = rankPosition[n].curr - rankPosition[n].base; + U32 const bucketStartIdx = rankPosition[n].base; + if (bucketSize > 1) { + assert(bucketStartIdx < maxSymbolValue1); + HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1); + } + } + + assert(HUF_isSorted(huffNode, maxSymbolValue1)); } /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. - * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of HUF_CTABLE_WORKSPACE_SIZE_U32 unsigned. + * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables). */ #define STARTNODE (HUF_SYMBOLVALUE_MAX+1) -typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32]; -size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) + +/* HUF_buildTree(): + * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree. + * + * @param huffNode The array sorted by HUF_sort(). Builds the Huffman tree in this array. + * @param maxSymbolValue The maximum symbol value. + * @return The smallest node in the Huffman tree (by count). 
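The base/curr bookkeeping inside HUF_sort() is a counting sort laid out for descending order: count items per bucket, suffix-sum so each bucket knows where it begins, then emit through a per-bucket cursor. A reduced sketch with small integer keys (illustrative; the real code keys on HUF_getIndex(count)):

    #include <stddef.h>

    #define NB_BUCKETS 8   /* keys are 0 .. NB_BUCKETS-1 */

    static void bucket_place(const unsigned* key, size_t n, unsigned* out)
    {
        unsigned base[NB_BUCKETS + 1] = {0};
        unsigned curr[NB_BUCKETS + 1] = {0};
        size_t i; unsigned b;
        for (i = 0; i < n; ++i) base[key[i]]++;
        for (b = NB_BUCKETS; b > 0; --b) {   /* suffix sums: higher keys come first */
            base[b - 1] += base[b];
            curr[b - 1]  = base[b - 1];
        }
        /* an item with key k lands in the region starting at curr[k+1] */
        for (i = 0; i < n; ++i) out[curr[key[i] + 1]++] = key[i];
    }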
+ */ +static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue) { - nodeElt* const huffNode0 = (nodeElt*)workSpace; - nodeElt* const huffNode = huffNode0+1; - U32 n, nonNullRank; + nodeElt* const huffNode0 = huffNode - 1; + int nonNullRank; int lowS, lowN; - U16 nodeNb = STARTNODE; - U32 nodeRoot; - - /* safety checks */ - if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ - if (wkspSize < sizeof(huffNodeTable)) return ERROR(workSpace_tooSmall); - if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; - if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); - memset(huffNode0, 0, sizeof(huffNodeTable)); - - /* sort, decreasing order */ - HUF_sort(huffNode, count, maxSymbolValue); - + int nodeNb = STARTNODE; + int n, nodeRoot; + DEBUGLOG(5, "HUF_buildTree (alphabet size = %u)", maxSymbolValue + 1); /* init for parents */ - nonNullRank = maxSymbolValue; + nonNullRank = (int)maxSymbolValue; while(huffNode[nonNullRank].count == 0) nonNullRank--; lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb; huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count; - huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb; + huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb; nodeNb++; lowS-=2; for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30); huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */ /* create parents */ while (nodeNb <= nodeRoot) { - U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; - U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; + int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; + int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; - huffNode[n1].parent = huffNode[n2].parent = nodeNb; + huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb; nodeNb++; } @@ -392,126 +713,414 @@ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbo for (n=0; n<=nonNullRank; n++) huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; - /* enforce maxTableLog */ - maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits); - - /* fill result into tree (val, nbBits) */ - { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; - U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; - if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ - for (n=0; n<=nonNullRank; n++) - nbPerRank[huffNode[n].nbBits]++; - /* determine stating value per rank */ - { U16 min = 0; - for (n=maxNbBits; n>0; n--) { - valPerRank[n] = min; /* get starting value within each rank */ - min += nbPerRank[n]; - min >>= 1; - } } - for (n=0; n<=maxSymbolValue; n++) - tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */ - for (n=0; n<=maxSymbolValue; n++) - tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */ - } + DEBUGLOG(6, "Initial distribution of bits completed (%zu sorted symbols)", showHNodeBits(huffNode, maxSymbolValue+1)); - return maxNbBits; + return nonNullRank; } -/** HUF_buildCTable() : - * @return : maxNbBits - * Note : count is used before tree is written, so they can safely overlap +/** + * HUF_buildCTableFromTree(): + * Build the CTable given the Huffman tree in huffNode. + * + * @param[out] CTable The output Huffman CTable. + * @param huffNode The Huffman tree. 
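HUF_buildCTableFromTree(), whose contract begins just above, ends with the textbook canonical-code assignment: count symbols per code length, derive each length's starting value from all longer lengths, then hand out values in symbol order. A reduced sketch of that final step (illustrative types; the real code packs the result into HUF_CElt):

    /* nbBits[s] in 1..maxNbBits for present symbols, 0 otherwise;
     * assumes maxNbBits < 16, which HUF_TABLELOG_MAX guarantees here. */
    static void assign_canonical(const unsigned char* nbBits, unsigned nbSymbols,
                                 unsigned maxNbBits, unsigned short* val)
    {
        unsigned short nbPerRank[16] = {0};
        unsigned short valPerRank[16] = {0};
        unsigned n;
        for (n = 0; n < nbSymbols; ++n) nbPerRank[nbBits[n]]++;
        {   unsigned short min = 0;
            for (n = maxNbBits; n > 0; --n) {
                valPerRank[n] = min;   /* first code of this length */
                min += nbPerRank[n];
                min >>= 1;             /* one bit shorter => half the code space */
            }
        }
        for (n = 0; n < nbSymbols; ++n) val[n] = valPerRank[nbBits[n]]++;
    }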
+ * @param nonNullRank The last and smallest node in the Huffman tree. + * @param maxSymbolValue The maximum symbol value. + * @param maxNbBits The exact maximum number of bits used in the Huffman tree. */ -size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits) +static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits) { - huffNodeTable nodeTable; - return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable)); + HUF_CElt* const ct = CTable + 1; + /* fill result into ctable (val, nbBits) */ + int n; + U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; + U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; + int const alphabetSize = (int)(maxSymbolValue + 1); + for (n=0; n<=nonNullRank; n++) + nbPerRank[huffNode[n].nbBits]++; + /* determine starting value per rank */ + { U16 min = 0; + for (n=(int)maxNbBits; n>0; n--) { + valPerRank[n] = min; /* get starting value within each rank */ + min += nbPerRank[n]; + min >>= 1; + } } + for (n=0; nhuffNodeTbl; + nodeElt* const huffNode = huffNode0+1; + int nonNullRank; + + HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE == sizeof(HUF_buildCTable_wksp_tables)); + + DEBUGLOG(5, "HUF_buildCTable_wksp (alphabet size = %u)", maxSymbolValue+1); + + /* safety checks */ + if (wkspSize < sizeof(HUF_buildCTable_wksp_tables)) + return ERROR(workSpace_tooSmall); + if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; + if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) + return ERROR(maxSymbolValue_tooLarge); + ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable)); + + /* sort, decreasing order */ + HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition); + DEBUGLOG(6, "sorted symbols completed (%zu symbols)", showHNodeSymbols(huffNode, maxSymbolValue+1)); + + /* build tree */ + nonNullRank = HUF_buildTree(huffNode, maxSymbolValue); + + /* determine and enforce maxTableLog */ + maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits); + if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ + + HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits); + + return maxNbBits; +} + +size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) +{ + HUF_CElt const* ct = CTable + 1; size_t nbBits = 0; int s; for (s = 0; s <= (int)maxSymbolValue; ++s) { - nbBits += CTable[s].nbBits * count[s]; + nbBits += HUF_getNbBits(ct[s]) * count[s]; } return nbBits >> 3; } -static int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { - int bad = 0; - int s; - for (s = 0; s <= (int)maxSymbolValue; ++s) { - bad |= (count[s] != 0) & (CTable[s].nbBits == 0); - } - return !bad; +int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { + HUF_CTableHeader header = HUF_readCTableHeader(CTable); + HUF_CElt const* ct = CTable + 1; + int bad = 0; + int s; + + assert(header.tableLog <= HUF_TABLELOG_ABSOLUTEMAX); + + if (header.maxSymbolValue < maxSymbolValue) + return 0; + + for (s = 0; s <= (int)maxSymbolValue; ++s) { + bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0); + } + return !bad; } size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } +/** HUF_CStream_t: + * Huffman uses its own BIT_CStream_t implementation. + * There are three major differences from BIT_CStream_t: + * 1. 
HUF_addBits() takes a HUF_CElt (size_t) which is + * the pair (nbBits, value) in the format: + * format: + * - Bits [0, 4) = nbBits + * - Bits [4, 64 - nbBits) = 0 + * - Bits [64 - nbBits, 64) = value + * 2. The bitContainer is built from the upper bits and + * right shifted. E.g. to add a new value of N bits + * you right shift the bitContainer by N, then or in + * the new value into the N upper bits. + * 3. The bitstream has two bit containers. You can add + * bits to the second container and merge them into + * the first container. + */ + +#define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8) + +typedef struct { + size_t bitContainer[2]; + size_t bitPos[2]; + + BYTE* startPtr; + BYTE* ptr; + BYTE* endPtr; +} HUF_CStream_t; + +/**! HUF_initCStream(): + * Initializes the bitstream. + * @returns 0 or an error code. + */ +static size_t HUF_initCStream(HUF_CStream_t* bitC, + void* startPtr, size_t dstCapacity) +{ + ZSTD_memset(bitC, 0, sizeof(*bitC)); + bitC->startPtr = (BYTE*)startPtr; + bitC->ptr = bitC->startPtr; + bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]); + if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall); + return 0; +} + +/*! HUF_addBits(): + * Adds the symbol stored in HUF_CElt elt to the bitstream. + * + * @param elt The element we're adding. This is a (nbBits, value) pair. + * See the HUF_CStream_t docs for the format. + * @param idx Insert into the bitstream at this idx. + * @param kFast This is a template parameter. If the bitstream is guaranteed + * to have at least 4 unused bits after this call it may be 1, + * otherwise it must be 0. HUF_addBits() is faster when fast is set. + */ +FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast) +{ + assert(idx <= 1); + assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX); + /* This is efficient on x86-64 with BMI2 because shrx + * only reads the low 6 bits of the register. The compiler + * knows this and elides the mask. When fast is set, + * every operation can use the same value loaded from elt. + */ + bitC->bitContainer[idx] >>= HUF_getNbBits(elt); + bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt); + /* We only read the low 8 bits of bitC->bitPos[idx] so it + * doesn't matter that the high bits have noise from the value. + */ + bitC->bitPos[idx] += HUF_getNbBitsFast(elt); + assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER); + /* The last 4-bits of elt are dirty if fast is set, + * so we must not be overwriting bits that have already been + * inserted into the bit container. + */ +#if DEBUGLEVEL >= 1 + { + size_t const nbBits = HUF_getNbBits(elt); + size_t const dirtyBits = nbBits == 0 ? 0 : ZSTD_highbit32((U32)nbBits) + 1; + (void)dirtyBits; + /* Middle bits are 0. */ + assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0); + /* We didn't overwrite any bits in the bit container. */ + assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER); + (void)dirtyBits; + } +#endif +} + +FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC) +{ + bitC->bitContainer[1] = 0; + bitC->bitPos[1] = 0; +} + +/*! HUF_mergeIndex1() : + * Merges the bit container @ index 1 into the bit container @ index 0 + * and zeros the bit container @ index 1. 
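The packed HUF_CElt layout documented above (nbBits in the low bits, the codeword in the top nbBits, zeros in between) is what keeps HUF_addBits() down to a shift and an or. A sketch of the construction and insertion, mirroring HUF_setNbBits/HUF_setValue/HUF_addBits (illustrative names, fixed 64-bit container):

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t CElt;

    static CElt make_elt(unsigned nbBits, uint64_t code) {
        assert(nbBits >= 1 && nbBits < 16 && (code >> nbBits) == 0);
        return (CElt)nbBits | (code << (64 - nbBits));   /* top-aligned code */
    }
    static void add_bits(uint64_t* container, unsigned* bitPos, CElt elt) {
        unsigned const nbBits = (unsigned)(elt & 0xFF);
        *container >>= nbBits;                 /* make room at the top */
        *container |= elt & ~(uint64_t)0xFF;   /* or in the top-aligned code */
        *bitPos += nbBits;                     /* only the low bits are read back */
    }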
+ */ +FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC) +{ + assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER); + bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF); + bitC->bitContainer[0] |= bitC->bitContainer[1]; + bitC->bitPos[0] += bitC->bitPos[1]; + assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER); +} + +/*! HUF_flushBits() : +* Flushes the bits in the bit container @ index 0. +* +* @post bitPos will be < 8. +* @param kFast If kFast is set then we must know a-priori that +* the bit container will not overflow. +*/ +FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast) +{ + /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */ + size_t const nbBits = bitC->bitPos[0] & 0xFF; + size_t const nbBytes = nbBits >> 3; + /* The top nbBits bits of bitContainer are the ones we need. */ + size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits); + /* Mask bitPos to account for the bytes we consumed. */ + bitC->bitPos[0] &= 7; + assert(nbBits > 0); + assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8); + assert(bitC->ptr <= bitC->endPtr); + MEM_writeLEST(bitC->ptr, bitContainer); + bitC->ptr += nbBytes; + assert(!kFast || bitC->ptr <= bitC->endPtr); + if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr; + /* bitContainer doesn't need to be modified because the leftover + * bits are already the top bitPos bits. And we don't care about + * noise in the lower values. + */ +} + +/*! HUF_endMark() + * @returns The Huffman stream end mark: A 1-bit value = 1. + */ +static HUF_CElt HUF_endMark(void) +{ + HUF_CElt endMark; + HUF_setNbBits(&endMark, 1); + HUF_setValue(&endMark, 1); + return endMark; +} + +/*! HUF_closeCStream() : + * @return Size of CStream, in bytes, + * or 0 if it could not fit into dstBuffer */ +static size_t HUF_closeCStream(HUF_CStream_t* bitC) +{ + HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0); + HUF_flushBits(bitC, /* kFast */ 0); + { + size_t const nbBits = bitC->bitPos[0] & 0xFF; + if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ + return (size_t)(bitC->ptr - bitC->startPtr) + (nbBits > 0); + } +} + FORCE_INLINE_TEMPLATE void -HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable) +HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast) { - BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits); + HUF_addBits(bitCPtr, CTable[symbol], idx, fast); } -#define HUF_FLUSHBITS(s) BIT_flushBits(s) +FORCE_INLINE_TEMPLATE void +HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC, + const BYTE* ip, size_t srcSize, + const HUF_CElt* ct, + int kUnroll, int kFastFlush, int kLastFast) +{ + /* Join to kUnroll */ + int n = (int)srcSize; + int rem = n % kUnroll; + if (rem > 0) { + for (; rem > 0; --rem) { + HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0); + } + HUF_flushBits(bitC, kFastFlush); + } + assert(n % kUnroll == 0); + + /* Join to 2 * kUnroll */ + if (n % (2 * kUnroll)) { + int u; + for (u = 1; u < kUnroll; ++u) { + HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1); + } + HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast); + HUF_flushBits(bitC, kFastFlush); + n -= kUnroll; + } + assert(n % (2 * kUnroll) == 0); + + for (; n>0; n-= 2 * kUnroll) { + /* Encode kUnroll symbols into the bitstream @ index 0. 
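The second bit container exists purely for instruction-level parallelism: the encoding loop fills index 1 with no data dependency on index 0, then HUF_mergeIndex1() folds it in with one shift and one or. A sketch of that merge, mirroring the hunk above (assumes bitPos[1] < 64, as the code asserts):

    #include <stdint.h>

    static void merge_index1(uint64_t c[2], unsigned pos[2]) {
        c[0] >>= (pos[1] & 0xFF);   /* make room below container 0's bits */
        c[0] |= c[1];               /* container 1's bits are already top-aligned */
        pos[0] += pos[1];
        c[1] = 0; pos[1] = 0;       /* ready for the next batch */
    }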
*/ + int u; + for (u = 1; u < kUnroll; ++u) { + HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1); + } + HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast); + HUF_flushBits(bitC, kFastFlush); + /* Encode kUnroll symbols into the bitstream @ index 1. + * This allows us to start filling the bit container + * without any data dependencies. + */ + HUF_zeroIndex1(bitC); + for (u = 1; u < kUnroll; ++u) { + HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1); + } + HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast); + /* Merge bitstream @ index 1 into the bitstream @ index 0 */ + HUF_mergeIndex1(bitC); + HUF_flushBits(bitC, kFastFlush); + } + assert(n == 0); + +} -#define HUF_FLUSHBITS_1(stream) \ - if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream) +/** + * Returns a tight upper bound on the output space needed by Huffman + * with 8 bytes buffer to handle over-writes. If the output is at least + * this large we don't need to do bounds checks during Huffman encoding. + */ +static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog) +{ + return ((srcSize * tableLog) >> 3) + 8; +} -#define HUF_FLUSHBITS_2(stream) \ - if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream) FORCE_INLINE_TEMPLATE size_t HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { + U32 const tableLog = HUF_readCTableHeader(CTable).tableLog; + HUF_CElt const* ct = CTable + 1; const BYTE* ip = (const BYTE*) src; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; - BYTE* op = ostart; - size_t n; - BIT_CStream_t bitC; + HUF_CStream_t bitC; /* init */ if (dstSize < 8) return 0; /* not enough space to compress */ - { size_t const initErr = BIT_initCStream(&bitC, op, oend-op); + { BYTE* op = ostart; + size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op)); if (HUF_isError(initErr)) return 0; } - n = srcSize & ~3; /* join to mod 4 */ - switch (srcSize & 3) - { - case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable); - HUF_FLUSHBITS_2(&bitC); - /* fall-through */ - case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable); - HUF_FLUSHBITS_1(&bitC); - /* fall-through */ - case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable); - HUF_FLUSHBITS(&bitC); - /* fall-through */ - case 0 : /* fall-through */ - default: break; - } - - for (; n>0; n-=4) { /* note : n&3==0 at this stage */ - HUF_encodeSymbol(&bitC, ip[n- 1], CTable); - HUF_FLUSHBITS_1(&bitC); - HUF_encodeSymbol(&bitC, ip[n- 2], CTable); - HUF_FLUSHBITS_2(&bitC); - HUF_encodeSymbol(&bitC, ip[n- 3], CTable); - HUF_FLUSHBITS_1(&bitC); - HUF_encodeSymbol(&bitC, ip[n- 4], CTable); - HUF_FLUSHBITS(&bitC); + if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11) + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 
2 : 4, /* kFast */ 0, /* kLastFast */ 0); + else { + if (MEM_32bits()) { + switch (tableLog) { + case 11: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0); + break; + case 10: ZSTD_FALLTHROUGH; + case 9: ZSTD_FALLTHROUGH; + case 8: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1); + break; + case 7: ZSTD_FALLTHROUGH; + default: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1); + break; + } + } else { + switch (tableLog) { + case 11: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0); + break; + case 10: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1); + break; + case 9: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0); + break; + case 8: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0); + break; + case 7: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0); + break; + case 6: ZSTD_FALLTHROUGH; + default: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1); + break; + } + } } + assert(bitC.ptr <= bitC.endPtr); - return BIT_closeCStream(&bitC); + return HUF_closeCStream(&bitC); } #if DYNAMIC_BMI2 -static TARGET_ATTRIBUTE("bmi2") size_t +static BMI2_TARGET_ATTRIBUTE size_t HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) @@ -530,9 +1139,9 @@ HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize, static size_t HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, - const HUF_CElt* CTable, const int bmi2) + const HUF_CElt* CTable, const int flags) { - if (bmi2) { + if (flags & HUF_flags_bmi2) { return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable); } return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable); @@ -543,24 +1152,23 @@ HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, static size_t HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, - const HUF_CElt* CTable, const int bmi2) + const HUF_CElt* CTable, const int flags) { - (void)bmi2; + (void)flags; return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); } #endif -size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags) { - return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); + return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); } - static size_t HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, - const HUF_CElt* CTable, int bmi2) + const HUF_CElt* CTable, int flags) { size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ const BYTE* ip = (const BYTE*) 
src; @@ -573,41 +1181,43 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, if (srcSize < 12) return 0; /* no saving possible : too small input */ op += 6; /* jumpTable */ - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) ); - if (cSize==0) return 0; - assert(cSize <= 65535); + assert(op <= oend); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); + if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart, (U16)cSize); op += cSize; } ip += segmentSize; - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) ); - if (cSize==0) return 0; - assert(cSize <= 65535); + assert(op <= oend); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); + if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart+2, (U16)cSize); op += cSize; } ip += segmentSize; - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) ); - if (cSize==0) return 0; - assert(cSize <= 65535); + assert(op <= oend); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); + if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart+4, (U16)cSize); op += cSize; } ip += segmentSize; - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, iend-ip, CTable, bmi2) ); - if (cSize==0) return 0; + assert(op <= oend); + assert(ip <= iend); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, flags) ); + if (cSize == 0 || cSize > 65535) return 0; op += cSize; } - return op-ostart; + return (size_t)(op-ostart); } -size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags) { - return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); + return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); } typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; @@ -615,44 +1225,129 @@ typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; static size_t HUF_compressCTable_internal( BYTE* const ostart, BYTE* op, BYTE* const oend, const void* src, size_t srcSize, - HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2) + HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int flags) { size_t const cSize = (nbStreams==HUF_singleStream) ? 
- HUF_compress1X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2) : - HUF_compress4X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2); + HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags) : + HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags); if (HUF_isError(cSize)) { return cSize; } if (cSize==0) { return 0; } /* uncompressible */ op += cSize; /* check compressibility */ + assert(op >= ostart); if ((size_t)(op-ostart) >= srcSize-1) { return 0; } - return op-ostart; + return (size_t)(op-ostart); } typedef struct { unsigned count[HUF_SYMBOLVALUE_MAX + 1]; - HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1]; - huffNodeTable nodeTable; + HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)]; + union { + HUF_buildCTable_wksp_tables buildCTable_wksp; + HUF_WriteCTableWksp writeCTable_wksp; + U32 hist_wksp[HIST_WKSP_SIZE_U32]; + } wksps; } HUF_compress_tables_t; +#define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096 +#define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */ + +unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue) +{ + unsigned cardinality = 0; + unsigned i; + + for (i = 0; i < maxSymbolValue + 1; i++) { + if (count[i] != 0) cardinality += 1; + } + + return cardinality; +} + +unsigned HUF_minTableLog(unsigned symbolCardinality) +{ + U32 minBitsSymbols = ZSTD_highbit32(symbolCardinality) + 1; + return minBitsSymbols; +} + +unsigned HUF_optimalTableLog( + unsigned maxTableLog, + size_t srcSize, + unsigned maxSymbolValue, + void* workSpace, size_t wkspSize, + HUF_CElt* table, + const unsigned* count, + int flags) +{ + assert(srcSize > 1); /* Not supported, RLE should be used instead */ + assert(wkspSize >= sizeof(HUF_buildCTable_wksp_tables)); + + if (!(flags & HUF_flags_optimalDepth)) { + /* cheap evaluation, based on FSE */ + return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); + } + + { BYTE* dst = (BYTE*)workSpace + sizeof(HUF_WriteCTableWksp); + size_t dstSize = wkspSize - sizeof(HUF_WriteCTableWksp); + size_t hSize, newSize; + const unsigned symbolCardinality = HUF_cardinality(count, maxSymbolValue); + const unsigned minTableLog = HUF_minTableLog(symbolCardinality); + size_t optSize = ((size_t) ~0) - 1; + unsigned optLog = maxTableLog, optLogGuess; + + DEBUGLOG(6, "HUF_optimalTableLog: probing huf depth (srcSize=%zu)", srcSize); + + /* Search until size increases */ + for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) { + DEBUGLOG(7, "checking for huffLog=%u", optLogGuess); + + { size_t maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize); + if (ERR_isError(maxBits)) continue; + + if (maxBits < optLogGuess && optLogGuess > minTableLog) break; + + hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize); + } + + if (ERR_isError(hSize)) continue; + + newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize; + + if (newSize > optSize + 1) { + break; + } + + if (newSize < optSize) { + optSize = newSize; + optLog = optLogGuess; + } + } + assert(optLog <= HUF_TABLELOG_MAX); + return optLog; + } +} + /* HUF_compress_internal() : - * `workSpace` must a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ + * `workSpace_align4` must be aligned on 4-bytes boundaries, + * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */ static size_t HUF_compress_internal (void* dst, size_t dstSize, const 
void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, HUF_nbStreams_e nbStreams, void* workSpace, size_t wkspSize, - HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat, - const int bmi2) + HUF_CElt* oldHufTable, HUF_repeat* repeat, int flags) { - HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace; + HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t)); BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; + DEBUGLOG(5, "HUF_compress_internal (srcSize=%zu)", srcSize); + HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE); + /* checks & inits */ - if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ - if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall); + if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall); if (!srcSize) return 0; /* Uncompressed */ if (!dstSize) return 0; /* cannot fit anything within dst budget */ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ @@ -662,17 +1357,34 @@ HUF_compress_internal (void* dst, size_t dstSize, if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; /* Heuristic : If old table is valid, use it for small inputs */ - if (preferRepeat && repeat && *repeat == HUF_repeat_valid) { + if ((flags & HUF_flags_preferRepeat) && repeat && *repeat == HUF_repeat_valid) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, oldHufTable, bmi2); + nbStreams, oldHufTable, flags); + } + + /* If uncompressible data is suspected, do a smaller sampling first */ + DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2); + if ((flags & HUF_flags_suspectUncompressible) && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) { + size_t largestTotal = 0; + DEBUGLOG(5, "input suspected incompressible : sampling to check"); + { unsigned maxSymbolValueBegin = maxSymbolValue; + CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) ); + largestTotal += largestBegin; + } + { unsigned maxSymbolValueEnd = maxSymbolValue; + CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) ); + largestTotal += largestEnd; + } + if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0; /* heuristic : probably not compressible enough */ } /* Scan input and build symbol stats */ - { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace, wkspSize) ); + { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) ); if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */ } + DEBUGLOG(6, "histogram detail completed (%zu symbols)", showU32(table->count, maxSymbolValue+1)); /* Check validity of previous table */ if ( repeat @@ -681,26 +1393,25 @@ HUF_compress_internal (void* dst, size_t dstSize, *repeat = HUF_repeat_none; } /* Heuristic : use existing table for small inputs */ - if (preferRepeat && repeat && *repeat != HUF_repeat_none) { + if ((flags & 
HUF_flags_preferRepeat) && repeat && *repeat != HUF_repeat_none) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, oldHufTable, bmi2); + nbStreams, oldHufTable, flags); } /* Build Huffman Tree */ - huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, sizeof(table->wksps), table->CTable, table->count, flags); { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count, maxSymbolValue, huffLog, - table->nodeTable, sizeof(table->nodeTable)); + &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp)); CHECK_F(maxBits); huffLog = (U32)maxBits; - /* Zero unused symbols in CTable, so we can check it for validity */ - memset(table->CTable + (maxSymbolValue + 1), 0, - sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt))); + DEBUGLOG(6, "bit distribution completed (%zu symbols)", showCTableBits(table->CTable + 1, maxSymbolValue+1)); } /* Write table description header */ - { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table->CTable, maxSymbolValue, huffLog) ); + { CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog, + &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) ); /* Check if using previous huffman table is beneficial */ if (repeat && *repeat != HUF_repeat_none) { size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue); @@ -708,7 +1419,7 @@ HUF_compress_internal (void* dst, size_t dstSize, if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, oldHufTable, bmi2); + nbStreams, oldHufTable, flags); } } /* Use the new huffman table */ @@ -716,83 +1427,39 @@ HUF_compress_internal (void* dst, size_t dstSize, op += hSize; if (repeat) { *repeat = HUF_repeat_none; } if (oldHufTable) - memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */ + ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */ } return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, table->CTable, bmi2); -} - - -size_t HUF_compress1X_wksp (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog, - void* workSpace, size_t wkspSize) -{ - return HUF_compress_internal(dst, dstSize, src, srcSize, - maxSymbolValue, huffLog, HUF_singleStream, - workSpace, wkspSize, - NULL, NULL, 0, 0 /*bmi2*/); + nbStreams, table->CTable, flags); } size_t HUF_compress1X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2) + HUF_CElt* hufTable, HUF_repeat* repeat, int flags) { + DEBUGLOG(5, "HUF_compress1X_repeat (srcSize = %zu)", srcSize); return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_singleStream, workSpace, wkspSize, hufTable, - repeat, preferRepeat, bmi2); -} - -size_t HUF_compress1X (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog) -{ - unsigned workSpace[HUF_WORKSPACE_SIZE_U32]; - return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); -} - -/* HUF_compress4X_repeat(): - * compress input using 4 streams. 
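With the separate bmi2/preferRepeat integers folded into one flags bitset, call sites compose behavior instead of threading extra parameters. A sketch of a caller of the 4-stream entry point, using only flag names that appear in this patch (buffer management elided):

    #include "huf.h"   /* HUF_compress4X_repeat, HUF_flags_*, HUF_repeat */

    static size_t compress_block(void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
                                 void* wksp, size_t wkspSize,
                                 HUF_CElt* hufTable, HUF_repeat* repeat,
                                 int hostHasBmi2)
    {
        int flags = 0;
        if (hostHasBmi2) flags |= HUF_flags_bmi2;
        flags |= HUF_flags_preferRepeat;   /* reuse the previous table on small inputs */
        return HUF_compress4X_repeat(dst, dstCapacity, src, srcSize,
                                     255, 0,   /* maxSymbolValue; 0 => HUF_TABLELOG_DEFAULT */
                                     wksp, wkspSize, hufTable, repeat, flags);
    }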
- * provide workspace to generate compression tables */ -size_t HUF_compress4X_wksp (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog, - void* workSpace, size_t wkspSize) -{ - return HUF_compress_internal(dst, dstSize, src, srcSize, - maxSymbolValue, huffLog, HUF_fourStreams, - workSpace, wkspSize, - NULL, NULL, 0, 0 /*bmi2*/); + repeat, flags); } /* HUF_compress4X_repeat(): * compress input using 4 streams. - * re-use an existing huffman compression table */ + * consider skipping quickly + * reuse an existing huffman compression table */ size_t HUF_compress4X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2) + HUF_CElt* hufTable, HUF_repeat* repeat, int flags) { + DEBUGLOG(5, "HUF_compress4X_repeat (srcSize = %zu)", srcSize); return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_fourStreams, workSpace, wkspSize, - hufTable, repeat, preferRepeat, bmi2); -} -size_t HUF_compress2 (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog) -{ - unsigned workSpace[HUF_WORKSPACE_SIZE_U32]; - return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); -} - -size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize) -{ - return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT); + hufTable, repeat, flags); } diff --git a/lib/compress/zstd_compress.c b/lib/compress/zstd_compress.c index 77b8b9b70cf..1d6f0fcae0e 100644 --- a/lib/compress/zstd_compress.c +++ b/lib/compress/zstd_compress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,28 +11,74 @@ /*-************************************* * Dependencies ***************************************/ -#include <limits.h> /* INT_MAX */ -#include <string.h> /* memset */ -#include "cpu.h" -#include "mem.h" +#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */ +#include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */ +#include "../common/mem.h" +#include "../common/error_private.h" #include "hist.h" /* HIST_countFast_wksp */ #define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */ -#include "fse.h" -#define HUF_STATIC_LINKING_ONLY -#include "huf.h" +#include "../common/fse.h" +#include "../common/huf.h" #include "zstd_compress_internal.h" +#include "zstd_compress_sequences.h" +#include "zstd_compress_literals.h" #include "zstd_fast.h" #include "zstd_double_fast.h" #include "zstd_lazy.h" #include "zstd_opt.h" #include "zstd_ldm.h" +#include "zstd_compress_superblock.h" +#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_rotateRight_U64 */ + +/* *************************************************************** +* Tuning parameters +*****************************************************************/ +/*! + * COMPRESS_HEAPMODE : + * Select how the default compression function ZSTD_compress() allocates its context, + * on stack (0, default), or into heap (1). + * Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected. + */ +#ifndef ZSTD_COMPRESS_HEAPMODE +# define ZSTD_COMPRESS_HEAPMODE 0 +#endif + +/*!
+ * ZSTD_HASHLOG3_MAX : + * Maximum size of the hash table dedicated to find 3-bytes matches, + * in log format, aka 17 => 1 << 17 == 128Ki positions. + * This structure is only used in zstd_opt. + * Since allocation is centralized for all strategies, it has to be known here. + * The actual (selected) size of the hash table is then stored in ZSTD_MatchState_t.hashLog3, + * so that zstd_opt.c doesn't need to know about this constant. + */ +#ifndef ZSTD_HASHLOG3_MAX +# define ZSTD_HASHLOG3_MAX 17 +#endif + + +/*-************************************* +* Forward declarations +***************************************/ +size_t convertSequences_noRepcodes(SeqDef* dstSeqs, const ZSTD_Sequence* inSeqs, + size_t nbSequences); /*-************************************* * Helper functions ***************************************/ +/* ZSTD_compressBound() + * Note that the result from this function is only valid for + * the one-pass compression functions. + * When employing the streaming mode, + * if flushes are frequently altering the size of blocks, + * the overhead from block headers can make the compressed data larger + * than the return value of ZSTD_compressBound(). + */ size_t ZSTD_compressBound(size_t srcSize) { - return ZSTD_COMPRESSBOUND(srcSize); + size_t const r = ZSTD_COMPRESSBOUND(srcSize); + if (r==0) return ERROR(srcSize_wrong); + return r; } @@ -40,15 +86,20 @@ size_t ZSTD_compressBound(size_t srcSize) { * Context memory management ***************************************/ struct ZSTD_CDict_s { - void* dictBuffer; const void* dictContent; size_t dictContentSize; - void* workspace; - size_t workspaceSize; - ZSTD_matchState_t matchState; + ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */ + U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */ + ZSTD_cwksp workspace; + ZSTD_MatchState_t matchState; ZSTD_compressedBlockState_t cBlockState; ZSTD_customMem customMem; U32 dictID; + int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */ + ZSTD_ParamSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use + * row-based matchfinder. Unless the cdict is reloaded, we will use + * the same greedy/lazy matchfinder at compression time. 
+ */ }; /* typedef'd to ZSTD_CDict within "zstd.h" */ ZSTD_CCtx* ZSTD_createCCtx(void) @@ -59,9 +110,9 @@ ZSTD_CCtx* ZSTD_createCCtx(void) static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager) { assert(cctx != NULL); - memset(cctx, 0, sizeof(*cctx)); + ZSTD_memset(cctx, 0, sizeof(*cctx)); cctx->customMem = memManager; - cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); + cctx->bmi2 = ZSTD_cpuSupportsBmi2(); { size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters); assert(!ZSTD_isError(err)); (void)err; @@ -72,33 +123,35 @@ ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem) { ZSTD_STATIC_ASSERT(zcss_init==0); ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1)); - if (!customMem.customAlloc ^ !customMem.customFree) return NULL; - { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem); + if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; + { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem); if (!cctx) return NULL; ZSTD_initCCtx(cctx, customMem); return cctx; } } -ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize) +ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize) { - ZSTD_CCtx* const cctx = (ZSTD_CCtx*) workspace; + ZSTD_cwksp ws; + ZSTD_CCtx* cctx; if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */ if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */ - memset(workspace, 0, workspaceSize); /* may be a bit generous, could memset be smaller ? */ + ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc); + + cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx)); + if (cctx == NULL) return NULL; + + ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx)); + ZSTD_cwksp_move(&cctx->workspace, &ws); cctx->staticSize = workspaceSize; - cctx->workSpace = (void*)(cctx+1); - cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx); - - /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */ - if (cctx->workSpaceSize < HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t)) return NULL; - assert(((size_t)cctx->workSpace & (sizeof(void*)-1)) == 0); /* ensure correct alignment */ - cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)cctx->workSpace; - cctx->blockState.nextCBlock = cctx->blockState.prevCBlock + 1; - { - void* const ptr = cctx->blockState.nextCBlock + 1; - cctx->entropyWorkspace = (U32*)ptr; - } + + /* statically sized space. 
tmpWorkspace never moves (but prev/next block swap places) */ + if (!ZSTD_cwksp_check_available(&cctx->workspace, TMP_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL; + cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t)); + cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t)); + cctx->tmpWorkspace = ZSTD_cwksp_reserve_object(&cctx->workspace, TMP_WORKSPACE_SIZE); + cctx->tmpWkspSize = TMP_WORKSPACE_SIZE; cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); return cctx; } @@ -108,10 +161,10 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize) */ static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx) { - ZSTD_free(cctx->localDict.dictBuffer, cctx->customMem); + ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem); ZSTD_freeCDict(cctx->localDict.cdict); - memset(&cctx->localDict, 0, sizeof(cctx->localDict)); - memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); + ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict)); + ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); cctx->cdict = NULL; } @@ -126,20 +179,23 @@ static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx) { assert(cctx != NULL); assert(cctx->staticSize == 0); - ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL; ZSTD_clearAllDicts(cctx); #ifdef ZSTD_MULTITHREAD ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL; #endif + ZSTD_cwksp_free(&cctx->workspace, cctx->customMem); } size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx) { + DEBUGLOG(3, "ZSTD_freeCCtx (address: %p)", (void*)cctx); if (cctx==NULL) return 0; /* support free on NULL */ RETURN_ERROR_IF(cctx->staticSize, memory_allocation, "not compatible with static CCtx"); - ZSTD_freeCCtxContent(cctx); - ZSTD_free(cctx, cctx->customMem); + { int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx); + ZSTD_freeCCtxContent(cctx); + if (!cctxInWorkspace) ZSTD_customFree(cctx, cctx->customMem); + } return 0; } @@ -158,7 +214,9 @@ static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx) size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx) { if (cctx==NULL) return 0; /* support sizeof on NULL */ - return sizeof(*cctx) + cctx->workSpaceSize + /* cctx may be in the workspace */ + return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx)) + + ZSTD_cwksp_sizeof(&cctx->workspace) + ZSTD_sizeof_localDict(cctx->localDict) + ZSTD_sizeof_mtctx(cctx); } @@ -169,17 +227,117 @@ size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs) } /* private API call, for dictBuilder only */ -const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); } +const SeqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); } + +/* Returns true if the strategy supports using a row based matchfinder */ +static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) { + return (strategy >= ZSTD_greedy && strategy <= ZSTD_lazy2); +} + +/* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder + * for this compression. 
+ */ +static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_ParamSwitch_e mode) { + assert(mode != ZSTD_ps_auto); + return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable); +} + +/* Returns row matchfinder usage given an initial mode and cParams */ +static ZSTD_ParamSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_ParamSwitch_e mode, + const ZSTD_compressionParameters* const cParams) { +#ifdef ZSTD_LINUX_KERNEL + /* The Linux Kernel does not use SIMD, and 128KB is a very common size, e.g. in BtrFS. + * The row match finder is slower for this size without SIMD, so disable it. + */ + const unsigned kWindowLogLowerBound = 17; +#else + const unsigned kWindowLogLowerBound = 14; +#endif + if (mode != ZSTD_ps_auto) return mode; /* if requested enabled, but no SIMD, we still will use row matchfinder */ + mode = ZSTD_ps_disable; + if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode; + if (cParams->windowLog > kWindowLogLowerBound) mode = ZSTD_ps_enable; + return mode; +} + +/* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */ +static ZSTD_ParamSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_ParamSwitch_e mode, + const ZSTD_compressionParameters* const cParams) { + if (mode != ZSTD_ps_auto) return mode; + return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? ZSTD_ps_enable : ZSTD_ps_disable; +} + +/* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */ +static int ZSTD_allocateChainTable(const ZSTD_strategy strategy, + const ZSTD_ParamSwitch_e useRowMatchFinder, + const U32 forDDSDict) { + assert(useRowMatchFinder != ZSTD_ps_auto); + /* We always should allocate a chaintable if we are allocating a matchstate for a DDS dictionary matchstate. + * We do not allocate a chaintable if we are using ZSTD_fast, or are using the row-based matchfinder. + */ + return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder)); +} + +/* Returns ZSTD_ps_enable if compression parameters are such that we should + * enable long distance matching (wlog >= 27, strategy >= btopt). + * Returns ZSTD_ps_disable otherwise. + */ +static ZSTD_ParamSwitch_e ZSTD_resolveEnableLdm(ZSTD_ParamSwitch_e mode, + const ZSTD_compressionParameters* const cParams) { + if (mode != ZSTD_ps_auto) return mode; + return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable; +} + +static int ZSTD_resolveExternalSequenceValidation(int mode) { + return mode; +} + +/* Resolves maxBlockSize to the default if no value is present. */ +static size_t ZSTD_resolveMaxBlockSize(size_t maxBlockSize) { + if (maxBlockSize == 0) { + return ZSTD_BLOCKSIZE_MAX; + } else { + return maxBlockSize; + } +} + +static ZSTD_ParamSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_ParamSwitch_e value, int cLevel) { + if (value != ZSTD_ps_auto) return value; + if (cLevel < 10) { + return ZSTD_ps_disable; + } else { + return ZSTD_ps_enable; + } +} + +/* Returns 1 if compression parameters are such that CDict hashtable and chaintable indices are tagged. + * If so, the tags need to be removed in ZSTD_resetCCtx_byCopyingCDict. 
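Each ZSTD_resolve*() helper above follows the same shape: an explicit user setting always wins, and only ZSTD_ps_auto is replaced by a heuristic derived from the compression parameters. A self-contained sketch of the pattern, mirroring ZSTD_resolveEnableLdm() (the local enum is an illustrative stand-in for ZSTD_ParamSwitch_e):

typedef enum { ps_auto = 0, ps_enable = 1, ps_disable = 2 } ParamSwitch;

/* Long-distance matching defaults on only for large windows combined
 * with the stronger strategies (windowLog >= 27, strategy >= btopt). */
static ParamSwitch resolveEnableLdm(ParamSwitch requested,
                                    unsigned windowLog,
                                    int strategyIsBtoptOrStronger)
{
    if (requested != ps_auto) return requested;  /* explicit choice wins */
    return (strategyIsBtoptOrStronger && windowLog >= 27) ? ps_enable : ps_disable;
}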
*/ +static int ZSTD_CDictIndicesAreTagged(const ZSTD_compressionParameters* const cParams) { + return cParams->strategy == ZSTD_fast || cParams->strategy == ZSTD_dfast; +} static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( ZSTD_compressionParameters cParams) { ZSTD_CCtx_params cctxParams; - memset(&cctxParams, 0, sizeof(cctxParams)); + /* should not matter, as all cParams are presumed properly defined */ + ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT); cctxParams.cParams = cParams; - cctxParams.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */ + + /* Adjust advanced params according to cParams */ + cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams.ldmParams.enableLdm, &cParams); + if (cctxParams.ldmParams.enableLdm == ZSTD_ps_enable) { + ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams); + assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog); + assert(cctxParams.ldmParams.hashRateLog < 32); + } + cctxParams.postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.postBlockSplitter, &cParams); + cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); + cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams.validateSequences); + cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize); + cctxParams.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams.searchForExternalRepcodes, + cctxParams.compressionLevel); assert(!ZSTD_checkCParams(cParams)); - cctxParams.fParams.contentSizeFlag = 1; return cctxParams; } @@ -187,13 +345,12 @@ static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced( ZSTD_customMem customMem) { ZSTD_CCtx_params* params; - if (!customMem.customAlloc ^ !customMem.customFree) return NULL; - params = (ZSTD_CCtx_params*)ZSTD_calloc( + if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; + params = (ZSTD_CCtx_params*)ZSTD_customCalloc( sizeof(ZSTD_CCtx_params), customMem); if (!params) { return NULL; } + ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT); params->customMem = customMem; - params->compressionLevel = ZSTD_CLEVEL_DEFAULT; - params->fParams.contentSizeFlag = 1; return params; } @@ -205,7 +362,7 @@ ZSTD_CCtx_params* ZSTD_createCCtxParams(void) size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params) { if (params == NULL) { return 0; } - ZSTD_free(params, params->customMem); + ZSTD_customFree(params, params->customMem); return 0; } @@ -215,36 +372,64 @@ size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params) } size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) { - RETURN_ERROR_IF(!cctxParams, GENERIC); - memset(cctxParams, 0, sizeof(*cctxParams)); + RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!"); + ZSTD_memset(cctxParams, 0, sizeof(*cctxParams)); cctxParams->compressionLevel = compressionLevel; cctxParams->fParams.contentSizeFlag = 1; return 0; } +#define ZSTD_NO_CLEVEL 0 + +/** + * Initializes `cctxParams` from `params` and `compressionLevel`. + * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL. 
+ */ +static void +ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, + const ZSTD_parameters* params, + int compressionLevel) +{ + assert(!ZSTD_checkCParams(params->cParams)); + ZSTD_memset(cctxParams, 0, sizeof(*cctxParams)); + cctxParams->cParams = params->cParams; + cctxParams->fParams = params->fParams; + /* Should not matter, as all cParams are presumed properly defined. + * But, set it for tracing anyway. + */ + cctxParams->compressionLevel = compressionLevel; + cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &params->cParams); + cctxParams->postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->postBlockSplitter, &params->cParams); + cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, &params->cParams); + cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams->validateSequences); + cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize); + cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams->searchForExternalRepcodes, compressionLevel); + DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d", + cctxParams->useRowMatchFinder, cctxParams->postBlockSplitter, cctxParams->ldmParams.enableLdm); +} + size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params) { - RETURN_ERROR_IF(!cctxParams, GENERIC); - FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) ); - memset(cctxParams, 0, sizeof(*cctxParams)); - cctxParams->cParams = params.cParams; - cctxParams->fParams = params.fParams; - cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */ - assert(!ZSTD_checkCParams(params.cParams)); + RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!"); + FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , ""); + ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL); return 0; } -/* ZSTD_assignParamsToCCtxParams() : - * params is presumed valid at this stage */ -static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams( - ZSTD_CCtx_params cctxParams, ZSTD_parameters params) -{ - ZSTD_CCtx_params ret = cctxParams; - ret.cParams = params.cParams; - ret.fParams = params.fParams; - ret.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */ - assert(!ZSTD_checkCParams(params.cParams)); - return ret; +/** + * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone. + * @param params Validated zstd parameters. + */ +static void ZSTD_CCtxParams_setZstdParams( + ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params) +{ + assert(!ZSTD_checkCParams(params->cParams)); + cctxParams->cParams = params->cParams; + cctxParams->fParams = params->fParams; + /* Should not matter, as all cParams are presumed properly defined. + * But, set it for tracing anyway.
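ZSTD_CCtxParams_init_internal() records the originating compression level purely for tracing (ZSTD_NO_CLEVEL when the caller supplied explicit parameters). From the public API, the same split between level-derived and explicitly-set fields looks as follows; a short sketch using only functions that appear in this diff, with error checks elided for brevity:

ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
if (params != NULL) {
    ZSTD_CCtxParams_init(params, 19);                           /* derive everything from level 19 */
    ZSTD_CCtxParams_setParameter(params, ZSTD_c_windowLog, 24); /* then override a single field */
    /* ... ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params) applies them ... */
    ZSTD_freeCCtxParams(params);
}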
+ */ + cctxParams->compressionLevel = ZSTD_NO_CLEVEL; } ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) @@ -327,15 +512,25 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) return bounds; case ZSTD_c_overlapLog: +#ifdef ZSTD_MULTITHREAD bounds.lowerBound = ZSTD_OVERLAPLOG_MIN; bounds.upperBound = ZSTD_OVERLAPLOG_MAX; +#else + bounds.lowerBound = 0; + bounds.upperBound = 0; +#endif return bounds; - case ZSTD_c_enableLongDistanceMatching: + case ZSTD_c_enableDedicatedDictSearch: bounds.lowerBound = 0; bounds.upperBound = 1; return bounds; + case ZSTD_c_enableLongDistanceMatching: + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; + return bounds; + case ZSTD_c_ldmHashLog: bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN; bounds.upperBound = ZSTD_LDM_HASHLOG_MAX; @@ -374,36 +569,89 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) return bounds; case ZSTD_c_forceAttachDict: - ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceCopy); + ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad); bounds.lowerBound = ZSTD_dictDefaultAttach; - bounds.upperBound = ZSTD_dictForceCopy; /* note : how to ensure at compile time that this is the highest value enum ? */ + bounds.upperBound = ZSTD_dictForceLoad; /* note : how to ensure at compile time that this is the highest value enum ? */ return bounds; case ZSTD_c_literalCompressionMode: - ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed); - bounds.lowerBound = ZSTD_lcm_auto; - bounds.upperBound = ZSTD_lcm_uncompressed; + ZSTD_STATIC_ASSERT(ZSTD_ps_auto < ZSTD_ps_enable && ZSTD_ps_enable < ZSTD_ps_disable); + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; + return bounds; + + case ZSTD_c_targetCBlockSize: + bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN; + bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX; + return bounds; + + case ZSTD_c_srcSizeHint: + bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN; + bounds.upperBound = ZSTD_SRCSIZEHINT_MAX; + return bounds; + + case ZSTD_c_stableInBuffer: + case ZSTD_c_stableOutBuffer: + bounds.lowerBound = (int)ZSTD_bm_buffered; + bounds.upperBound = (int)ZSTD_bm_stable; + return bounds; + + case ZSTD_c_blockDelimiters: + bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters; + bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters; + return bounds; + + case ZSTD_c_validateSequences: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + + case ZSTD_c_splitAfterSequences: + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; + return bounds; + + case ZSTD_c_blockSplitterLevel: + bounds.lowerBound = 0; + bounds.upperBound = ZSTD_BLOCKSPLITTER_LEVEL_MAX; + return bounds; + + case ZSTD_c_useRowMatchFinder: + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; + return bounds; + + case ZSTD_c_deterministicRefPrefix: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + + case ZSTD_c_prefetchCDictTables: + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; + return bounds; + + case ZSTD_c_enableSeqProducerFallback: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + + case ZSTD_c_maxBlockSize: + bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN; + bounds.upperBound = ZSTD_BLOCKSIZE_MAX; + return bounds; + + case ZSTD_c_repcodeResolution: + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; return bounds; default: - { 
ZSTD_bounds const boundError = { ERROR(parameter_unsupported), 0, 0 }; - return boundError; - } + bounds.error = ERROR(parameter_unsupported); + return bounds; } } -/* ZSTD_cParam_withinBounds: - * @return 1 if value is within cParam bounds, - * 0 otherwise */ -static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value) -{ - ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); - if (ZSTD_isError(bounds.error)) return 0; - if (value < bounds.lowerBound) return 0; - if (value > bounds.upperBound) return 0; - return 1; -} - /* ZSTD_cParam_clampBounds: * Clamps the value into the bounded range. */ @@ -416,10 +664,11 @@ static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value) return 0; } -#define BOUNDCHECK(cParam, val) { \ - RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \ - parameter_outOfBound); \ -} +#define BOUNDCHECK(cParam, val) \ + do { \ + RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \ + parameter_outOfBound, "Param out of bounds"); \ + } while (0) static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) @@ -433,6 +682,7 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) case ZSTD_c_minMatch: case ZSTD_c_targetLength: case ZSTD_c_strategy: + case ZSTD_c_blockSplitterLevel: return 1; case ZSTD_c_format: @@ -445,6 +695,7 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) case ZSTD_c_jobSize: case ZSTD_c_overlapLog: case ZSTD_c_rsyncable: + case ZSTD_c_enableDedicatedDictSearch: case ZSTD_c_enableLongDistanceMatching: case ZSTD_c_ldmHashLog: case ZSTD_c_ldmMinMatch: @@ -452,6 +703,19 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) case ZSTD_c_ldmHashRateLog: case ZSTD_c_forceAttachDict: case ZSTD_c_literalCompressionMode: + case ZSTD_c_targetCBlockSize: + case ZSTD_c_srcSizeHint: + case ZSTD_c_stableInBuffer: + case ZSTD_c_stableOutBuffer: + case ZSTD_c_blockDelimiters: + case ZSTD_c_validateSequences: + case ZSTD_c_splitAfterSequences: + case ZSTD_c_useRowMatchFinder: + case ZSTD_c_deterministicRefPrefix: + case ZSTD_c_prefetchCDictTables: + case ZSTD_c_enableSeqProducerFallback: + case ZSTD_c_maxBlockSize: + case ZSTD_c_repcodeResolution: default: return 0; } @@ -464,7 +728,7 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value) if (ZSTD_isUpdateAuthorized(param)) { cctx->cParamsChanged = 1; } else { - RETURN_ERROR(stage_wrong); + RETURN_ERROR(stage_wrong, "can only set params in cctx init stage"); } } switch(param) @@ -493,13 +757,28 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value) case ZSTD_c_jobSize: case ZSTD_c_overlapLog: case ZSTD_c_rsyncable: + case ZSTD_c_enableDedicatedDictSearch: case ZSTD_c_enableLongDistanceMatching: case ZSTD_c_ldmHashLog: case ZSTD_c_ldmMinMatch: case ZSTD_c_ldmBucketSizeLog: + case ZSTD_c_targetCBlockSize: + case ZSTD_c_srcSizeHint: + case ZSTD_c_stableInBuffer: + case ZSTD_c_stableOutBuffer: + case ZSTD_c_blockDelimiters: + case ZSTD_c_validateSequences: + case ZSTD_c_splitAfterSequences: + case ZSTD_c_blockSplitterLevel: + case ZSTD_c_useRowMatchFinder: + case ZSTD_c_deterministicRefPrefix: + case ZSTD_c_prefetchCDictTables: + case ZSTD_c_enableSeqProducerFallback: + case ZSTD_c_maxBlockSize: + case ZSTD_c_repcodeResolution: break; - default: RETURN_ERROR(parameter_unsupported); + default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value); } @@ -516,47 +795,48 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* 
CCtxParams, return (size_t)CCtxParams->format; case ZSTD_c_compressionLevel : { - FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value)); - if (value) { /* 0 : does not change current level */ + FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); + if (value == 0) + CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */ + else CCtxParams->compressionLevel = value; - } - if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel; + if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel; return 0; /* return type (size_t) cannot represent negative values */ } case ZSTD_c_windowLog : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_windowLog, value); - CCtxParams->cParams.windowLog = value; + CCtxParams->cParams.windowLog = (U32)value; return CCtxParams->cParams.windowLog; case ZSTD_c_hashLog : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_hashLog, value); - CCtxParams->cParams.hashLog = value; + CCtxParams->cParams.hashLog = (U32)value; return CCtxParams->cParams.hashLog; case ZSTD_c_chainLog : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_chainLog, value); - CCtxParams->cParams.chainLog = value; + CCtxParams->cParams.chainLog = (U32)value; return CCtxParams->cParams.chainLog; case ZSTD_c_searchLog : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_searchLog, value); - CCtxParams->cParams.searchLog = value; - return value; + CCtxParams->cParams.searchLog = (U32)value; + return (size_t)value; case ZSTD_c_minMatch : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_minMatch, value); - CCtxParams->cParams.minMatch = value; + CCtxParams->cParams.minMatch = (U32)value; return CCtxParams->cParams.minMatch; case ZSTD_c_targetLength : BOUNDCHECK(ZSTD_c_targetLength, value); - CCtxParams->cParams.targetLength = value; + CCtxParams->cParams.targetLength = (U32)value; return CCtxParams->cParams.targetLength; case ZSTD_c_strategy : @@ -569,12 +849,12 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, /* Content size written in frame header _when known_ (default:1) */ DEBUGLOG(4, "set content size flag = %u", (value!=0)); CCtxParams->fParams.contentSizeFlag = value != 0; - return CCtxParams->fParams.contentSizeFlag; + return (size_t)CCtxParams->fParams.contentSizeFlag; case ZSTD_c_checksumFlag : /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */ CCtxParams->fParams.checksumFlag = value != 0; - return CCtxParams->fParams.checksumFlag; + return (size_t)CCtxParams->fParams.checksumFlag; case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */ DEBUGLOG(4, "set dictIDFlag = %u", (value!=0)); @@ -583,18 +863,18 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, case ZSTD_c_forceMaxWindow : CCtxParams->forceWindow = (value != 0); - return CCtxParams->forceWindow; + return (size_t)CCtxParams->forceWindow; case ZSTD_c_forceAttachDict : { const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value; - BOUNDCHECK(ZSTD_c_forceAttachDict, pref); + BOUNDCHECK(ZSTD_c_forceAttachDict, (int)pref); CCtxParams->attachDictPref = pref; return CCtxParams->attachDictPref; } case ZSTD_c_literalCompressionMode : { - const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value; - BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm); + const ZSTD_ParamSwitch_e lcm = (ZSTD_ParamSwitch_e)value; + BOUNDCHECK(ZSTD_c_literalCompressionMode, (int)lcm); CCtxParams->literalCompressionMode = 
lcm; return CCtxParams->literalCompressionMode; } @@ -604,9 +884,9 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); return 0; #else - FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value)); + FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); CCtxParams->nbWorkers = value; - return CCtxParams->nbWorkers; + return (size_t)(CCtxParams->nbWorkers); #endif case ZSTD_c_jobSize : @@ -617,9 +897,9 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, /* Adjust to the minimum non-default value. */ if (value != 0 && value < ZSTDMT_JOBSIZE_MIN) value = ZSTDMT_JOBSIZE_MIN; - FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value)); + FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); assert(value >= 0); - CCtxParams->jobSize = value; + CCtxParams->jobSize = (size_t)value; return CCtxParams->jobSize; #endif @@ -628,9 +908,9 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); return 0; #else - FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value)); + FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), ""); CCtxParams->overlapLog = value; - return CCtxParams->overlapLog; + return (size_t)CCtxParams->overlapLog; #endif case ZSTD_c_rsyncable : @@ -638,79 +918,160 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); return 0; #else - FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value)); + FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), ""); CCtxParams->rsyncable = value; - return CCtxParams->rsyncable; + return (size_t)CCtxParams->rsyncable; #endif + case ZSTD_c_enableDedicatedDictSearch : + CCtxParams->enableDedicatedDictSearch = (value!=0); + return (size_t)CCtxParams->enableDedicatedDictSearch; + case ZSTD_c_enableLongDistanceMatching : - CCtxParams->ldmParams.enableLdm = (value!=0); + BOUNDCHECK(ZSTD_c_enableLongDistanceMatching, value); + CCtxParams->ldmParams.enableLdm = (ZSTD_ParamSwitch_e)value; return CCtxParams->ldmParams.enableLdm; case ZSTD_c_ldmHashLog : if (value!=0) /* 0 ==> auto */ BOUNDCHECK(ZSTD_c_ldmHashLog, value); - CCtxParams->ldmParams.hashLog = value; + CCtxParams->ldmParams.hashLog = (U32)value; return CCtxParams->ldmParams.hashLog; case ZSTD_c_ldmMinMatch : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_ldmMinMatch, value); - CCtxParams->ldmParams.minMatchLength = value; + CCtxParams->ldmParams.minMatchLength = (U32)value; return CCtxParams->ldmParams.minMatchLength; case ZSTD_c_ldmBucketSizeLog : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value); - CCtxParams->ldmParams.bucketSizeLog = value; + CCtxParams->ldmParams.bucketSizeLog = (U32)value; return CCtxParams->ldmParams.bucketSizeLog; case ZSTD_c_ldmHashRateLog : - RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN, - parameter_outOfBound); - CCtxParams->ldmParams.hashRateLog = value; + if (value!=0) /* 0 ==> default */ + BOUNDCHECK(ZSTD_c_ldmHashRateLog, value); + CCtxParams->ldmParams.hashRateLog = (U32)value; return CCtxParams->ldmParams.hashRateLog; + case ZSTD_c_targetCBlockSize : + if (value!=0) { /* 0 ==> default */ + value = MAX(value, ZSTD_TARGETCBLOCKSIZE_MIN); + BOUNDCHECK(ZSTD_c_targetCBlockSize, value); + } + CCtxParams->targetCBlockSize = (U32)value; + return 
CCtxParams->targetCBlockSize; + + case ZSTD_c_srcSizeHint : + if (value!=0) /* 0 ==> default */ + BOUNDCHECK(ZSTD_c_srcSizeHint, value); + CCtxParams->srcSizeHint = value; + return (size_t)CCtxParams->srcSizeHint; + + case ZSTD_c_stableInBuffer: + BOUNDCHECK(ZSTD_c_stableInBuffer, value); + CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value; + return CCtxParams->inBufferMode; + + case ZSTD_c_stableOutBuffer: + BOUNDCHECK(ZSTD_c_stableOutBuffer, value); + CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value; + return CCtxParams->outBufferMode; + + case ZSTD_c_blockDelimiters: + BOUNDCHECK(ZSTD_c_blockDelimiters, value); + CCtxParams->blockDelimiters = (ZSTD_SequenceFormat_e)value; + return CCtxParams->blockDelimiters; + + case ZSTD_c_validateSequences: + BOUNDCHECK(ZSTD_c_validateSequences, value); + CCtxParams->validateSequences = value; + return (size_t)CCtxParams->validateSequences; + + case ZSTD_c_splitAfterSequences: + BOUNDCHECK(ZSTD_c_splitAfterSequences, value); + CCtxParams->postBlockSplitter = (ZSTD_ParamSwitch_e)value; + return CCtxParams->postBlockSplitter; + + case ZSTD_c_blockSplitterLevel: + BOUNDCHECK(ZSTD_c_blockSplitterLevel, value); + CCtxParams->preBlockSplitter_level = value; + return (size_t)CCtxParams->preBlockSplitter_level; + + case ZSTD_c_useRowMatchFinder: + BOUNDCHECK(ZSTD_c_useRowMatchFinder, value); + CCtxParams->useRowMatchFinder = (ZSTD_ParamSwitch_e)value; + return CCtxParams->useRowMatchFinder; + + case ZSTD_c_deterministicRefPrefix: + BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value); + CCtxParams->deterministicRefPrefix = !!value; + return (size_t)CCtxParams->deterministicRefPrefix; + + case ZSTD_c_prefetchCDictTables: + BOUNDCHECK(ZSTD_c_prefetchCDictTables, value); + CCtxParams->prefetchCDictTables = (ZSTD_ParamSwitch_e)value; + return CCtxParams->prefetchCDictTables; + + case ZSTD_c_enableSeqProducerFallback: + BOUNDCHECK(ZSTD_c_enableSeqProducerFallback, value); + CCtxParams->enableMatchFinderFallback = value; + return (size_t)CCtxParams->enableMatchFinderFallback; + + case ZSTD_c_maxBlockSize: + if (value!=0) /* 0 ==> default */ + BOUNDCHECK(ZSTD_c_maxBlockSize, value); + assert(value>=0); + CCtxParams->maxBlockSize = (size_t)value; + return CCtxParams->maxBlockSize; + + case ZSTD_c_repcodeResolution: + BOUNDCHECK(ZSTD_c_repcodeResolution, value); + CCtxParams->searchForExternalRepcodes = (ZSTD_ParamSwitch_e)value; + return CCtxParams->searchForExternalRepcodes; + default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } } -size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value) +size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value) { return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value); } size_t ZSTD_CCtxParams_getParameter( - ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int* value) + ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value) { switch(param) { case ZSTD_c_format : - *value = CCtxParams->format; + *value = (int)CCtxParams->format; break; case ZSTD_c_compressionLevel : *value = CCtxParams->compressionLevel; break; case ZSTD_c_windowLog : - *value = CCtxParams->cParams.windowLog; + *value = (int)CCtxParams->cParams.windowLog; break; case ZSTD_c_hashLog : - *value = CCtxParams->cParams.hashLog; + *value = (int)CCtxParams->cParams.hashLog; break; case ZSTD_c_chainLog : - *value = CCtxParams->cParams.chainLog; + *value = (int)CCtxParams->cParams.chainLog; break; case ZSTD_c_searchLog : - *value = CCtxParams->cParams.searchLog; + 
*value = (int)CCtxParams->cParams.searchLog; break; case ZSTD_c_minMatch : - *value = CCtxParams->cParams.minMatch; + *value = (int)CCtxParams->cParams.minMatch; break; case ZSTD_c_targetLength : - *value = CCtxParams->cParams.targetLength; + *value = (int)CCtxParams->cParams.targetLength; break; case ZSTD_c_strategy : - *value = (unsigned)CCtxParams->cParams.strategy; + *value = (int)CCtxParams->cParams.strategy; break; case ZSTD_c_contentSizeFlag : *value = CCtxParams->fParams.contentSizeFlag; @@ -725,10 +1086,10 @@ size_t ZSTD_CCtxParams_getParameter( *value = CCtxParams->forceWindow; break; case ZSTD_c_forceAttachDict : - *value = CCtxParams->attachDictPref; + *value = (int)CCtxParams->attachDictPref; break; case ZSTD_c_literalCompressionMode : - *value = CCtxParams->literalCompressionMode; + *value = (int)CCtxParams->literalCompressionMode; break; case ZSTD_c_nbWorkers : #ifndef ZSTD_MULTITHREAD @@ -758,20 +1119,65 @@ size_t ZSTD_CCtxParams_getParameter( *value = CCtxParams->rsyncable; break; #endif + case ZSTD_c_enableDedicatedDictSearch : + *value = CCtxParams->enableDedicatedDictSearch; + break; case ZSTD_c_enableLongDistanceMatching : - *value = CCtxParams->ldmParams.enableLdm; + *value = (int)CCtxParams->ldmParams.enableLdm; break; case ZSTD_c_ldmHashLog : - *value = CCtxParams->ldmParams.hashLog; + *value = (int)CCtxParams->ldmParams.hashLog; break; case ZSTD_c_ldmMinMatch : - *value = CCtxParams->ldmParams.minMatchLength; + *value = (int)CCtxParams->ldmParams.minMatchLength; break; case ZSTD_c_ldmBucketSizeLog : - *value = CCtxParams->ldmParams.bucketSizeLog; + *value = (int)CCtxParams->ldmParams.bucketSizeLog; break; case ZSTD_c_ldmHashRateLog : - *value = CCtxParams->ldmParams.hashRateLog; + *value = (int)CCtxParams->ldmParams.hashRateLog; + break; + case ZSTD_c_targetCBlockSize : + *value = (int)CCtxParams->targetCBlockSize; + break; + case ZSTD_c_srcSizeHint : + *value = (int)CCtxParams->srcSizeHint; + break; + case ZSTD_c_stableInBuffer : + *value = (int)CCtxParams->inBufferMode; + break; + case ZSTD_c_stableOutBuffer : + *value = (int)CCtxParams->outBufferMode; + break; + case ZSTD_c_blockDelimiters : + *value = (int)CCtxParams->blockDelimiters; + break; + case ZSTD_c_validateSequences : + *value = (int)CCtxParams->validateSequences; + break; + case ZSTD_c_splitAfterSequences : + *value = (int)CCtxParams->postBlockSplitter; + break; + case ZSTD_c_blockSplitterLevel : + *value = CCtxParams->preBlockSplitter_level; + break; + case ZSTD_c_useRowMatchFinder : + *value = (int)CCtxParams->useRowMatchFinder; + break; + case ZSTD_c_deterministicRefPrefix: + *value = (int)CCtxParams->deterministicRefPrefix; + break; + case ZSTD_c_prefetchCDictTables: + *value = (int)CCtxParams->prefetchCDictTables; + break; + case ZSTD_c_enableSeqProducerFallback: + *value = CCtxParams->enableMatchFinderFallback; + break; + case ZSTD_c_maxBlockSize: + *value = (int)CCtxParams->maxBlockSize; + break; + case ZSTD_c_repcodeResolution: + *value = (int)CCtxParams->searchForExternalRepcodes; break; default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } @@ -789,31 +1195,79 @@ size_t ZSTD_CCtx_setParametersUsingCCtxParams( ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params) { DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams"); - RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong); - RETURN_ERROR_IF(cctx->cdict, stage_wrong); + RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, + "The context is in the wrong stage!"); + RETURN_ERROR_IF(cctx->cdict, stage_wrong, + "Can't 
override parameters with cdict attached (some must " + "be inherited from the cdict)."); cctx->requestedParams = *params; return 0; } -ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize) +size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams) +{ + ZSTD_STATIC_ASSERT(sizeof(cparams) == 7 * 4 /* all params are listed below */); + DEBUGLOG(4, "ZSTD_CCtx_setCParams"); + /* only update if all parameters are valid */ + FORWARD_IF_ERROR(ZSTD_checkCParams(cparams), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, (int)cparams.windowLog), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_chainLog, (int)cparams.chainLog), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_hashLog, (int)cparams.hashLog), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchLog, (int)cparams.searchLog), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, (int)cparams.minMatch), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetLength, (int)cparams.targetLength), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, (int)cparams.strategy), ""); + return 0; +} + +size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams) +{ + ZSTD_STATIC_ASSERT(sizeof(fparams) == 3 * 4 /* all params are listed below */); + DEBUGLOG(4, "ZSTD_CCtx_setFParams"); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, fparams.contentSizeFlag != 0), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, fparams.checksumFlag != 0), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_dictIDFlag, fparams.noDictIDFlag == 0), ""); + return 0; +} + +size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params) +{ + DEBUGLOG(4, "ZSTD_CCtx_setParams"); + /* First check cParams, because we want to update all or none. */ + FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), ""); + /* Next set fParams, because this could fail if the cctx isn't in init stage. */ + FORWARD_IF_ERROR(ZSTD_CCtx_setFParams(cctx, params.fParams), ""); + /* Finally set cParams, which should succeed. */ + FORWARD_IF_ERROR(ZSTD_CCtx_setCParams(cctx, params.cParams), ""); + return 0; +} + +size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize) { - DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize); - RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong); + DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %llu bytes", pledgedSrcSize); + RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, + "Can't set pledgedSrcSize when not in init stage."); cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1; return 0; } +static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams( + int const compressionLevel, + size_t const dictSize); +static int ZSTD_dedicatedDictSearch_isSupported( + const ZSTD_compressionParameters* cParams); +static void ZSTD_dedicatedDictSearch_revertCParams( + ZSTD_compressionParameters* cParams); + /** - * Initializes the local dict using the requested parameters. - * NOTE: This does not use the pledged src size, because it may be used for more - * than one compression. + * Initializes the local dictionary using requested parameters. + * NOTE: Initialization does not employ the pledged src size, + * because the dictionary may be used for multiple compressions. 
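ZSTD_CCtx_setParams() is ordered so that a failure leaves the context untouched: cParams are validated first, fParams (which can only fail on stage) are applied next, and the final cParams application is expected to succeed. A hedged usage sketch, using only public API calls:

ZSTD_CCtx* const cctx = ZSTD_createCCtx();
ZSTD_parameters params = ZSTD_getParams(3, 0 /* srcSize unknown */, 0 /* no dictionary */);
params.fParams.checksumFlag = 1;
{   size_t const err = ZSTD_CCtx_setParams(cctx, params);
    if (ZSTD_isError(err)) {
        /* invalid cParams or cctx past init stage; prior settings remain intact */
    }
}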
*/ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx) { ZSTD_localDict* const dl = &cctx->localDict; - ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams( - &cctx->requestedParams, 0, dl->dictSize); if (dl->dict == NULL) { /* No local dictionary. */ assert(dl->dictBuffer == NULL); @@ -822,59 +1276,65 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx) return 0; } if (dl->cdict != NULL) { - assert(cctx->cdict == dl->cdict); /* Local dictionary already initialized. */ + assert(cctx->cdict == dl->cdict); return 0; } assert(dl->dictSize > 0); assert(cctx->cdict == NULL); assert(cctx->prefixDict.dict == NULL); - dl->cdict = ZSTD_createCDict_advanced( + dl->cdict = ZSTD_createCDict_advanced2( dl->dict, dl->dictSize, ZSTD_dlm_byRef, dl->dictContentType, - cParams, + &cctx->requestedParams, cctx->customMem); - RETURN_ERROR_IF(!dl->cdict, memory_allocation); + RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed"); cctx->cdict = dl->cdict; return 0; } size_t ZSTD_CCtx_loadDictionary_advanced( - ZSTD_CCtx* cctx, const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) + ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType) { - RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong); - RETURN_ERROR_IF(cctx->staticSize, memory_allocation, - "no malloc for static CCtx"); DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize); - ZSTD_clearAllDicts(cctx); /* in case one already exists */ - if (dict == NULL || dictSize == 0) /* no dictionary mode */ + RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, + "Can't load a dictionary when cctx is not in init stage."); + ZSTD_clearAllDicts(cctx); /* erase any previously set dictionary */ + if (dict == NULL || dictSize == 0) /* no dictionary */ return 0; if (dictLoadMethod == ZSTD_dlm_byRef) { cctx->localDict.dict = dict; } else { - void* dictBuffer = ZSTD_malloc(dictSize, cctx->customMem); - RETURN_ERROR_IF(!dictBuffer, memory_allocation); - memcpy(dictBuffer, dict, dictSize); - cctx->localDict.dictBuffer = dictBuffer; - cctx->localDict.dict = dictBuffer; + /* copy dictionary content inside CCtx to own its lifetime */ + void* dictBuffer; + RETURN_ERROR_IF(cctx->staticSize, memory_allocation, + "static CCtx can't allocate for an internal copy of dictionary"); + dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem); + RETURN_ERROR_IF(dictBuffer==NULL, memory_allocation, + "allocation failed for dictionary content"); + ZSTD_memcpy(dictBuffer, dict, dictSize); + cctx->localDict.dictBuffer = dictBuffer; /* owned ptr to free */ + cctx->localDict.dict = dictBuffer; /* read-only reference */ } cctx->localDict.dictSize = dictSize; cctx->localDict.dictContentType = dictContentType; return 0; } -ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference( +size_t ZSTD_CCtx_loadDictionary_byReference( ZSTD_CCtx* cctx, const void* dict, size_t dictSize) { return ZSTD_CCtx_loadDictionary_advanced( cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto); } -ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) +size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) { return ZSTD_CCtx_loadDictionary_advanced( cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto); @@ -883,13 +1343,22 @@ ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, s size_t 
ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) { - RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong); + RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, + "Can't ref a dict when ctx not in init stage."); /* Free the existing local cdict (if any) to save memory. */ ZSTD_clearAllDicts(cctx); cctx->cdict = cdict; return 0; } +size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool) +{ + RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, + "Can't ref a pool when ctx not in init stage."); + cctx->pool = pool; + return 0; +} + size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize) { return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent); @@ -898,11 +1367,14 @@ size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSiz size_t ZSTD_CCtx_refPrefix_advanced( ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType) { - RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong); + RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, + "Can't ref a prefix when ctx not in init stage."); ZSTD_clearAllDicts(cctx); - cctx->prefixDict.dict = prefix; - cctx->prefixDict.dictSize = prefixSize; - cctx->prefixDict.dictContentType = dictContentType; + if (prefix != NULL && prefixSize > 0) { + cctx->prefixDict.dict = prefix; + cctx->prefixDict.dictSize = prefixSize; + cctx->prefixDict.dictContentType = dictContentType; + } return 0; } @@ -917,7 +1389,8 @@ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset) } if ( (reset == ZSTD_reset_parameters) || (reset == ZSTD_reset_session_and_parameters) ) { - RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong); + RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, + "Reset parameters is only possible during init stage."); ZSTD_clearAllDicts(cctx); return ZSTD_CCtxParams_reset(&cctx->requestedParams); } @@ -936,7 +1409,7 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog); BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch); BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength); - BOUNDCHECK(ZSTD_c_strategy, cParams.strategy); + BOUNDCHECK(ZSTD_c_strategy, (int)cParams.strategy); return 0; } @@ -946,11 +1419,12 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams) { -# define CLAMP_TYPE(cParam, val, type) { \ - ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \ - if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \ - else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \ - } +# define CLAMP_TYPE(cParam, val, type) \ + do { \ + ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \ + if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \ + else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \ + } while (0) # define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned) CLAMP(ZSTD_c_windowLog, cParams.windowLog); CLAMP(ZSTD_c_chainLog, cParams.chainLog); @@ -964,50 +1438,189 @@ ZSTD_clampCParams(ZSTD_compressionParameters cParams) /** ZSTD_cycleLog() : * condition for correct operation : hashLog > 1 */ -static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat) +U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat) { U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2); return hashLog - btScale; } +/** ZSTD_dictAndWindowLog() : + * Returns an adjusted window log that is large enough to fit the source and the dictionary.
+ * The zstd format says that the entire dictionary is valid if one byte of the dictionary + * is within the window. So the hashLog and chainLog should be large enough to reference both + * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing + * the hashLog and windowLog. + * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN. + */ +static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize) +{ + const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX; + /* No dictionary ==> No change */ + if (dictSize == 0) { + return windowLog; + } + assert(windowLog <= ZSTD_WINDOWLOG_MAX); + assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN); /* Handled in ZSTD_adjustCParams_internal() */ + { + U64 const windowSize = 1ULL << windowLog; + U64 const dictAndWindowSize = dictSize + windowSize; + /* If the window size is already large enough to fit both the source and the dictionary + * then just use the window size. Otherwise adjust so that it fits the dictionary and + * the window. + */ + if (windowSize >= dictSize + srcSize) { + return windowLog; /* Window size large enough already */ + } else if (dictAndWindowSize >= maxWindowSize) { + return ZSTD_WINDOWLOG_MAX; /* Larger than max window log */ + } else { + return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1; + } + } +} + /** ZSTD_adjustCParams_internal() : * optimize `cPar` for a specified input (`srcSize` and `dictSize`). * mostly downsize to reduce memory consumption and initialization latency. * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known. - * note : for the time being, `srcSize==0` means "unknown" too, for compatibility with older convention. + * `mode` is the mode for parameter adjustment. See docs for `ZSTD_CParamMode_e`. + * note : `srcSize==0` means 0! * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */ static ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, - size_t dictSize) + size_t dictSize, + ZSTD_CParamMode_e mode, + ZSTD_ParamSwitch_e useRowMatchFinder) { - static const U64 minSrcSize = 513; /* (1<<9) + 1 */ - static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1); + const U64 minSrcSize = 513; /* (1<<9) + 1 */ + const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1); assert(ZSTD_checkCParams(cPar)==0); - if (dictSize && (srcSize+1<2) /* ZSTD_CONTENTSIZE_UNKNOWN and 0 mean "unknown" */ ) - srcSize = minSrcSize; /* presumed small when there is a dictionary */ - else if (srcSize == 0) - srcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* 0 == unknown : presumed large */ + /* Cascade the selected strategy down to the next-highest one built into + * this binary. 
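Stepping back to ZSTD_dictAndWindowLog() defined just above, a worked example of the adjustment (the helper is file-local, so this is written as if it were callable): with windowLog=20, a 512 KiB dictionary and a 2 MiB source, the 1 MiB window cannot cover dictionary plus source, so the log grows to cover dictSize + windowSize:

/* windowSize = 1 MiB < dictSize + srcSize = 2.5 MiB, so the window must grow;
 * dictAndWindowSize = 1.5 MiB, and ZSTD_highbit32(1.5 MiB - 1) + 1 == 21,
 * the smallest log whose window covers both the dictionary and the window. */
assert(ZSTD_dictAndWindowLog(20, (U64)2 << 20, (U64)512 << 10) == 21);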
*/ +#ifdef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR + if (cPar.strategy == ZSTD_btultra2) { + cPar.strategy = ZSTD_btultra; + } + if (cPar.strategy == ZSTD_btultra) { + cPar.strategy = ZSTD_btopt; + } +#endif +#ifdef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR + if (cPar.strategy == ZSTD_btopt) { + cPar.strategy = ZSTD_btlazy2; + } +#endif +#ifdef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR + if (cPar.strategy == ZSTD_btlazy2) { + cPar.strategy = ZSTD_lazy2; + } +#endif +#ifdef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR + if (cPar.strategy == ZSTD_lazy2) { + cPar.strategy = ZSTD_lazy; + } +#endif +#ifdef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR + if (cPar.strategy == ZSTD_lazy) { + cPar.strategy = ZSTD_greedy; + } +#endif +#ifdef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR + if (cPar.strategy == ZSTD_greedy) { + cPar.strategy = ZSTD_dfast; + } +#endif +#ifdef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR + if (cPar.strategy == ZSTD_dfast) { + cPar.strategy = ZSTD_fast; + cPar.targetLength = 0; + } +#endif + + switch (mode) { + case ZSTD_cpm_unknown: + case ZSTD_cpm_noAttachDict: + /* If we don't know the source size, don't make any + * assumptions about it. We will already have selected + * smaller parameters if a dictionary is in use. + */ + break; + case ZSTD_cpm_createCDict: + /* Assume a small source size when creating a dictionary + * with an unknown source size. + */ + if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN) + srcSize = minSrcSize; + break; + case ZSTD_cpm_attachDict: + /* Dictionary has its own dedicated parameters which have + * already been selected. We are selecting parameters + * for only the source. + */ + dictSize = 0; + break; + default: + assert(0); + break; + } /* resize windowLog if input is small enough, to use less memory */ - if ( (srcSize < maxWindowResize) - && (dictSize < maxWindowResize) ) { + if ( (srcSize <= maxWindowResize) + && (dictSize <= maxWindowResize) ) { U32 const tSize = (U32)(srcSize + dictSize); static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN; U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN : ZSTD_highbit32(tSize-1) + 1; if (cPar.windowLog > srcLog) cPar.windowLog = srcLog; } - if (cPar.hashLog > cPar.windowLog+1) cPar.hashLog = cPar.windowLog+1; - { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); - if (cycleLog > cPar.windowLog) - cPar.chainLog -= (cycleLog - cPar.windowLog); + if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) { + U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize); + U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); + if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1; + if (cycleLog > dictAndWindowLog) + cPar.chainLog -= (cycleLog - dictAndWindowLog); } if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */ + /* We can't use more than 32 bits of hash in total, so that means that we require: + * (hashLog + 8) <= 32 && (chainLog + 8) <= 32 + */ + if (mode == ZSTD_cpm_createCDict && ZSTD_CDictIndicesAreTagged(&cPar)) { + U32 const maxShortCacheHashLog = 32 - ZSTD_SHORT_CACHE_TAG_BITS; + if (cPar.hashLog > maxShortCacheHashLog) { + cPar.hashLog = maxShortCacheHashLog; + } + if (cPar.chainLog > maxShortCacheHashLog) { + cPar.chainLog = maxShortCacheHashLog; + } + } + + + /* At this point, we aren't 100% sure if we are using the row match finder. + * Unless it is explicitly disabled, conservatively assume that it is enabled. 
+ * In this case it will only be disabled for small sources, so shrinking the + * hash log a little bit shouldn't result in any ratio loss. + */ + if (useRowMatchFinder == ZSTD_ps_auto) + useRowMatchFinder = ZSTD_ps_enable; + + /* We can't hash more than 32-bits in total. So that means that we require: + * (hashLog - rowLog + 8) <= 32 + */ + if (ZSTD_rowMatchFinderUsed(cPar.strategy, useRowMatchFinder)) { + /* Switch to 32-entry rows if searchLog is 5 (or more) */ + U32 const rowLog = BOUNDED(4, cPar.searchLog, 6); + U32 const maxRowHashLog = 32 - ZSTD_ROW_HASH_TAG_BITS; + U32 const maxHashLog = maxRowHashLog + rowLog; + assert(cPar.hashLog >= rowLog); + if (cPar.hashLog > maxHashLog) { + cPar.hashLog = maxHashLog; + } + } + return cPar; } @@ -1017,79 +1630,186 @@ ZSTD_adjustCParams(ZSTD_compressionParameters cPar, size_t dictSize) { cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */ - return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize); + if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN; + return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown, ZSTD_ps_auto); +} + +static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode); +static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode); + +static void ZSTD_overrideCParams( + ZSTD_compressionParameters* cParams, + const ZSTD_compressionParameters* overrides) +{ + if (overrides->windowLog) cParams->windowLog = overrides->windowLog; + if (overrides->hashLog) cParams->hashLog = overrides->hashLog; + if (overrides->chainLog) cParams->chainLog = overrides->chainLog; + if (overrides->searchLog) cParams->searchLog = overrides->searchLog; + if (overrides->minMatch) cParams->minMatch = overrides->minMatch; + if (overrides->targetLength) cParams->targetLength = overrides->targetLength; + if (overrides->strategy) cParams->strategy = overrides->strategy; } ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( - const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize) -{ - ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize); - if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG; - if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog; - if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog; - if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog; - if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog; - if (CCtxParams->cParams.minMatch) cParams.minMatch = CCtxParams->cParams.minMatch; - if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength; - if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy; + const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode) +{ + ZSTD_compressionParameters cParams; + if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) { + assert(CCtxParams->srcSizeHint>=0); + srcSizeHint = (U64)CCtxParams->srcSizeHint; + } + cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode); + if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG; + 
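+    /* Each non-zero field of CCtxParams->cParams below acts as an explicit user
+     * override of the table-derived value. Illustrative usage (real public API,
+     * hypothetical values):
+     *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 27);
+     * replaces only windowLog; hashLog, chainLog, searchLog, minMatch,
+     * targetLength and strategy keep their level-derived values. */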
     ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
     assert(!ZSTD_checkCParams(cParams));
-    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize);
+    /* srcSizeHint == 0 means 0 */
+    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode, CCtxParams->useRowMatchFinder);
 }

 static size_t ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
+                                     const ZSTD_ParamSwitch_e useRowMatchFinder,
+                                     const int enableDedicatedDictSearch,
                                      const U32 forCCtx)
 {
-    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
+    /* chain table size should be 0 for fast or row-hash strategies */
+    size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, enableDedicatedDictSearch && !forCCtx)
+                                ? ((size_t)1 << cParams->chainLog)
+                                : 0;
     size_t const hSize = ((size_t)1) << cParams->hashLog;
     U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
-    size_t const h3Size = ((size_t)1) << hashLog3;
-    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
-    size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32)
-                          + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t));
+    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
+    size_t const tableSpace = chainSize * sizeof(U32)
+                            + hSize * sizeof(U32)
+                            + h3Size * sizeof(U32);
+    size_t const optPotentialSpace =
+        ZSTD_cwksp_aligned64_alloc_size((MaxML+1) * sizeof(U32))
+      + ZSTD_cwksp_aligned64_alloc_size((MaxLL+1) * sizeof(U32))
+      + ZSTD_cwksp_aligned64_alloc_size((MaxOff+1) * sizeof(U32))
+      + ZSTD_cwksp_aligned64_alloc_size((1<<Litbits) * sizeof(U32))
+      + ZSTD_cwksp_aligned64_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_match_t))
+      + ZSTD_cwksp_aligned64_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));
+    size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)
+                                            ? ZSTD_cwksp_aligned64_alloc_size(hSize)
+                                            : 0;
     size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
                                 ? optPotentialSpace
                                 : 0;
+    size_t const slackSpace = ZSTD_cwksp_slack_space_required();
+
+    /* tables are guaranteed to be sized in multiples of 64 bytes (or 16 uint32_t) */
+    ZSTD_STATIC_ASSERT(ZSTD_HASHLOG_MIN >= 4 && ZSTD_WINDOWLOG_MIN >= 4 && ZSTD_CHAINLOG_MIN >= 4);
+    assert(useRowMatchFinder != ZSTD_ps_auto);

     DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
                 (U32)chainSize, (U32)hSize, (U32)h3Size);
-    return tableSpace + optSpace;
+    return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;
+}
+
+/* Helper function for calculating memory requirements.
+ * Gives a tighter bound than ZSTD_sequenceBound() by taking minMatch into account. */
+static size_t ZSTD_maxNbSeq(size_t blockSize, unsigned minMatch, int useSequenceProducer) {
+    U32 const divider = (minMatch==3 || useSequenceProducer) ? 3 : 4;
+    return blockSize / divider;
+}
+
+static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+        const ZSTD_compressionParameters* cParams,
+        const ldmParams_t* ldmParams,
+        const int isStatic,
+        const ZSTD_ParamSwitch_e useRowMatchFinder,
+        const size_t buffInSize,
+        const size_t buffOutSize,
+        const U64 pledgedSrcSize,
+        int useSequenceProducer,
+        size_t maxBlockSize)
+{
+    size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize);
+    size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(maxBlockSize), windowSize);
+    size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer);
+    size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
+                            + ZSTD_cwksp_aligned64_alloc_size(maxNbSeq * sizeof(SeqDef))
+                            + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
+    size_t const tmpWorkSpace = ZSTD_cwksp_alloc_size(TMP_WORKSPACE_SIZE);
+    size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
+    size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1);
+
+    size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
+    size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
+    size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ?
+ ZSTD_cwksp_aligned64_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0; + + + size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize) + + ZSTD_cwksp_alloc_size(buffOutSize); + + size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0; + + size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize); + size_t const externalSeqSpace = useSequenceProducer + ? ZSTD_cwksp_aligned64_alloc_size(maxNbExternalSeq * sizeof(ZSTD_Sequence)) + : 0; + + size_t const neededSpace = + cctxSpace + + tmpWorkSpace + + blockStateSpace + + ldmSpace + + ldmSeqSpace + + matchStateSize + + tokenSpace + + bufferSpace + + externalSeqSpace; + + DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace); + return neededSpace; } size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params) { - RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); - { ZSTD_compressionParameters const cParams = - ZSTD_getCParamsFromCCtxParams(params, 0, 0); - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog); - U32 const divider = (cParams.minMatch==3) ? 3 : 4; - size_t const maxNbSeq = blockSize / divider; - size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq; - size_t const entropySpace = HUF_WORKSPACE_SIZE; - size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t); - size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1); - - size_t const ldmSpace = ZSTD_ldm_getTableSize(params->ldmParams); - size_t const ldmSeqSpace = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq); + ZSTD_compressionParameters const cParams = + ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); + ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, + &cParams); - size_t const neededSpace = entropySpace + blockStateSpace + tokenSpace + - matchStateSize + ldmSpace + ldmSeqSpace; - - DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx)); - DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace); - return sizeof(ZSTD_CCtx) + neededSpace; - } + RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); + /* estimateCCtxSize is for one-shot compression. So no buffers should + * be needed. However, we still allocate two 0-sized buffers, which can + * take space under ASAN. 
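+     * For the same reason, the call below passes buffInSize = buffOutSize = 0,
+     * so both reserved buffers are zero-sized.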
*/ + return ZSTD_estimateCCtxSize_usingCCtxParams_internal( + &cParams, ¶ms->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize); } size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) { - ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams); - return ZSTD_estimateCCtxSize_usingCCtxParams(¶ms); + ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams); + if (ZSTD_rowMatchFinderSupported(cParams.strategy)) { + /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */ + size_t noRowCCtxSize; + size_t rowCCtxSize; + initialParams.useRowMatchFinder = ZSTD_ps_disable; + noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams); + initialParams.useRowMatchFinder = ZSTD_ps_enable; + rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams); + return MAX(noRowCCtxSize, rowCCtxSize); + } else { + return ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams); + } } static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel) { - ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0); - return ZSTD_estimateCCtxSize_usingCParams(cParams); + int tier = 0; + size_t largestSize = 0; + static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN}; + for (; tier < 4; ++tier) { + /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */ + ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict); + largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize); + } + return largestSize; } size_t ZSTD_estimateCCtxSize(int compressionLevel) @@ -1097,6 +1817,7 @@ size_t ZSTD_estimateCCtxSize(int compressionLevel) int level; size_t memBudget = 0; for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) { + /* Ensure monotonically increasing memory usage as compression level increases */ size_t const newMB = ZSTD_estimateCCtxSize_internal(level); if (newMB > memBudget) memBudget = newMB; } @@ -1107,26 +1828,42 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params) { RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); { ZSTD_compressionParameters const cParams = - ZSTD_getCParamsFromCCtxParams(params, 0, 0); - size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params); - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog); - size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize; - size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1; - size_t const streamingSize = inBuffSize + outBuffSize; - - return CCtxSize + streamingSize; + ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); + size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(params->maxBlockSize), (size_t)1 << cParams.windowLog); + size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered) + ? ((size_t)1 << cParams.windowLog) + blockSize + : 0; + size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered) + ? 
ZSTD_compressBound(blockSize) + 1 + : 0; + ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, ¶ms->cParams); + + return ZSTD_estimateCCtxSize_usingCCtxParams_internal( + &cParams, ¶ms->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize, + ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize); } } size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams) { - ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams); - return ZSTD_estimateCStreamSize_usingCCtxParams(¶ms); + ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams); + if (ZSTD_rowMatchFinderSupported(cParams.strategy)) { + /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */ + size_t noRowCCtxSize; + size_t rowCCtxSize; + initialParams.useRowMatchFinder = ZSTD_ps_disable; + noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); + initialParams.useRowMatchFinder = ZSTD_ps_enable; + rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); + return MAX(noRowCCtxSize, rowCCtxSize); + } else { + return ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); + } } static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel) { - ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0); + ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); return ZSTD_estimateCStreamSize_usingCParams(cParams); } @@ -1180,17 +1917,6 @@ size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx) return 0; /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */ } - - -static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1, - ZSTD_compressionParameters cParams2) -{ - return (cParams1.hashLog == cParams2.hashLog) - & (cParams1.chainLog == cParams2.chainLog) - & (cParams1.strategy == cParams2.strategy) /* opt parser space */ - & ((cParams1.minMatch==3) == (cParams2.minMatch==3)); /* hashlog3 space */ -} - static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1, ZSTD_compressionParameters cParams2) { @@ -1205,72 +1931,7 @@ static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1, assert(cParams1.strategy == cParams2.strategy); } -/** The parameters are equivalent if ldm is not enabled in both sets or - * all the parameters are equivalent. */ -static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1, - ldmParams_t ldmParams2) -{ - return (!ldmParams1.enableLdm && !ldmParams2.enableLdm) || - (ldmParams1.enableLdm == ldmParams2.enableLdm && - ldmParams1.hashLog == ldmParams2.hashLog && - ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog && - ldmParams1.minMatchLength == ldmParams2.minMatchLength && - ldmParams1.hashRateLog == ldmParams2.hashRateLog); -} - -typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e; - -/* ZSTD_sufficientBuff() : - * check internal buffers exist for streaming if buffPol == ZSTDb_buffered . 
- * Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */ -static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t maxNbSeq1, - size_t maxNbLit1, - ZSTD_buffered_policy_e buffPol2, - ZSTD_compressionParameters cParams2, - U64 pledgedSrcSize) -{ - size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize)); - size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2); - size_t const maxNbSeq2 = blockSize2 / ((cParams2.minMatch == 3) ? 3 : 4); - size_t const maxNbLit2 = blockSize2; - size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? windowSize2 + blockSize2 : 0; - DEBUGLOG(4, "ZSTD_sufficientBuff: is neededBufferSize2=%u <= bufferSize1=%u", - (U32)neededBufferSize2, (U32)bufferSize1); - DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbSeq2=%u <= maxNbSeq1=%u", - (U32)maxNbSeq2, (U32)maxNbSeq1); - DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbLit2=%u <= maxNbLit1=%u", - (U32)maxNbLit2, (U32)maxNbLit1); - return (maxNbLit2 <= maxNbLit1) - & (maxNbSeq2 <= maxNbSeq1) - & (neededBufferSize2 <= bufferSize1); -} - -/** Equivalence for resetCCtx purposes */ -static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1, - ZSTD_CCtx_params params2, - size_t buffSize1, - size_t maxNbSeq1, size_t maxNbLit1, - ZSTD_buffered_policy_e buffPol2, - U64 pledgedSrcSize) -{ - DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize); - if (!ZSTD_equivalentCParams(params1.cParams, params2.cParams)) { - DEBUGLOG(4, "ZSTD_equivalentCParams() == 0"); - return 0; - } - if (!ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams)) { - DEBUGLOG(4, "ZSTD_equivalentLdmParams() == 0"); - return 0; - } - if (!ZSTD_sufficientBuff(buffSize1, maxNbSeq1, maxNbLit1, buffPol2, - params2.cParams, pledgedSrcSize)) { - DEBUGLOG(4, "ZSTD_sufficientBuff() == 0"); - return 0; - } - return 1; -} - -static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs) +void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs) { int i; for (i = 0; i < ZSTD_REP_NUM; ++i) @@ -1285,7 +1946,7 @@ static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs) * Invalidate all the matches in the match finder tables. * Requires nextSrc and base to be set (can be NULL). */ -static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms) +static void ZSTD_invalidateMatchState(ZSTD_MatchState_t* ms) { ZSTD_window_clear(&ms->window); @@ -1295,186 +1956,257 @@ static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms) ms->dictMatchState = NULL; } -/*! ZSTD_continueCCtx() : - * reuse CCtx without reset (note : requires no dictionary) */ -static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pledgedSrcSize) -{ - size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize)); - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); - DEBUGLOG(4, "ZSTD_continueCCtx: re-use context in place"); +/** + * Controls, for this matchState reset, whether the tables need to be cleared / + * prepared for the coming compression (ZSTDcrp_makeClean), or whether the + * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a + * subsequent operation will overwrite the table space anyways (e.g., copying + * the matchState contents in from a CDict). 
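+ * For example, ZSTD_resetCCtx_byCopyingCDict() below resets with
+ * ZSTDcrp_leaveDirty precisely because it immediately overwrites the table
+ * space with the CDict's tables.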
+ */
+typedef enum {
+    ZSTDcrp_makeClean,
+    ZSTDcrp_leaveDirty
+} ZSTD_compResetPolicy_e;

-    cctx->blockSize = blockSize;   /* previous block size could be different even for same windowLog, due to pledgedSrcSize */
-    cctx->appliedParams = params;
-    cctx->blockState.matchState.cParams = params.cParams;
-    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
-    cctx->consumedSrcSize = 0;
-    cctx->producedCSize = 0;
-    if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
-        cctx->appliedParams.fParams.contentSizeFlag = 0;
-    DEBUGLOG(4, "pledged content size : %u ; flag : %u",
-        (U32)pledgedSrcSize, cctx->appliedParams.fParams.contentSizeFlag);
-    cctx->stage = ZSTDcs_init;
-    cctx->dictID = 0;
-    if (params.ldmParams.enableLdm)
-        ZSTD_window_clear(&cctx->ldmState.window);
-    ZSTD_referenceExternalSequences(cctx, NULL, 0);
-    ZSTD_invalidateMatchState(&cctx->blockState.matchState);
-    ZSTD_reset_compressedBlockState(cctx->blockState.prevCBlock);
-    XXH64_reset(&cctx->xxhState, 0);
-    return 0;

+/**
+ * Controls, for this matchState reset, whether indexing can continue where it
+ * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
+ * (ZSTDirp_reset).
+ */
+typedef enum {
+    ZSTDirp_continue,
+    ZSTDirp_reset
+} ZSTD_indexResetPolicy_e;
+
+typedef enum {
+    ZSTD_resetTarget_CDict,
+    ZSTD_resetTarget_CCtx
+} ZSTD_resetTarget_e;
+
+/* Mixes the bits of a 64-bit value, based on XXH3_rrmxmx */
+static U64 ZSTD_bitmix(U64 val, U64 len) {
+    val ^= ZSTD_rotateRight_U64(val, 49) ^ ZSTD_rotateRight_U64(val, 24);
+    val *= 0x9FB21C651E98DF25ULL;
+    val ^= (val >> 35) + len;
+    val *= 0x9FB21C651E98DF25ULL;
+    return val ^ (val >> 28);
+}

-typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e;

+/* Mixes in the hashSalt and hashSaltEntropy to create a new hashSalt */
+static void ZSTD_advanceHashSalt(ZSTD_MatchState_t* ms) {
+    ms->hashSalt = ZSTD_bitmix(ms->hashSalt, 8) ^ ZSTD_bitmix((U64) ms->hashSaltEntropy, 4);
+}

-static void*
-ZSTD_reset_matchState(ZSTD_matchState_t* ms,
-                      void* ptr,
+static size_t
+ZSTD_reset_matchState(ZSTD_MatchState_t* ms,
+                      ZSTD_cwksp* ws,
                       const ZSTD_compressionParameters* cParams,
-                      ZSTD_compResetPolicy_e const crp, U32 const forCCtx)
-{
-    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
+                      const ZSTD_ParamSwitch_e useRowMatchFinder,
+                      const ZSTD_compResetPolicy_e crp,
+                      const ZSTD_indexResetPolicy_e forceResetIndex,
+                      const ZSTD_resetTarget_e forWho)
+{
+    /* disable chain table allocation for fast or row-based strategies */
+    size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder,
+                                                     ms->dedicatedDictSearch && (forWho == ZSTD_resetTarget_CDict))
+                                ? ((size_t)1 << cParams->chainLog)
+                                : 0;
     size_t const hSize = ((size_t)1) << cParams->hashLog;
-    U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
-    size_t const h3Size = ((size_t)1) << hashLog3;
-    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
-
-    assert(((size_t)ptr & 3) == 0);
+    U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
+    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
+
+    DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
+    assert(useRowMatchFinder != ZSTD_ps_auto);
+    if (forceResetIndex == ZSTDirp_reset) {
+        ZSTD_window_init(&ms->window);
+        ZSTD_cwksp_mark_tables_dirty(ws);
+    }

     ms->hashLog3 = hashLog3;
-    memset(&ms->window, 0, sizeof(ms->window));
-    ms->window.dictLimit = 1;    /* start from 1, so that 1st position is valid */
-    ms->window.lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
-    ms->window.nextSrc = ms->window.base + 1;   /* see issue #1241 */
+    ms->lazySkipping = 0;
+
+    ZSTD_invalidateMatchState(ms);
+
+    assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
+
+    ZSTD_cwksp_clear_tables(ws);
+
+    DEBUGLOG(5, "reserving table space");
+    /* table Space */
+    ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
+    ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
+    ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
+    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
+                    "failed a workspace allocation in ZSTD_reset_matchState");
+
+    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
+    if (crp!=ZSTDcrp_leaveDirty) {
+        /* reset tables only */
+        ZSTD_cwksp_clean_tables(ws);
+    }
+
+    if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
+        /* Row match finder needs an additional table of hashes ("tags") */
+        size_t const tagTableSize = hSize;
+        /* We want to generate a new salt in case we reset a Cctx, but we always want to use
+         * 0 when we reset a Cdict */
+        if(forWho == ZSTD_resetTarget_CCtx) {
+            ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned_init_once(ws, tagTableSize);
+            ZSTD_advanceHashSalt(ms);
+        } else {
+            /* When we are not salting we want to always memset the memory */
+            ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned64(ws, tagTableSize);
+            ZSTD_memset(ms->tagTable, 0, tagTableSize);
+            ms->hashSalt = 0;
+        }
+        {   /* Switch to 32-entry rows if searchLog is 5 (or more) */
+            U32 const rowLog = BOUNDED(4, cParams->searchLog, 6);
+            assert(cParams->hashLog >= rowLog);
+            ms->rowHashLog = cParams->hashLog - rowLog;
+        }
+    }
+
     /* opt parser space */
-    if (forCCtx && (cParams->strategy >= ZSTD_btopt)) {
+    if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
         DEBUGLOG(4, "reserving optimal parser space");
-        ms->opt.litFreq = (unsigned*)ptr;
-        ms->opt.litLengthFreq = ms->opt.litFreq + (1<<Litbits);
-        ms->opt.matchLengthFreq = ms->opt.litLengthFreq + (MaxLL+1);
-        ms->opt.offCodeFreq = ms->opt.matchLengthFreq + (MaxML+1);
-        ptr = ms->opt.offCodeFreq + (MaxOff+1);
-        ms->opt.matchTable = (ZSTD_match_t*)ptr;
-        ptr = ms->opt.matchTable + ZSTD_OPT_NUM+1;
-        ms->opt.priceTable = (ZSTD_optimal_t*)ptr;
-        ptr = ms->opt.priceTable + ZSTD_OPT_NUM+1;
+        ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (1<<Litbits) * sizeof(unsigned));
+        ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxLL+1) * sizeof(unsigned));
+        ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxML+1) * sizeof(unsigned));
+        ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxOff+1) * sizeof(unsigned));
+        ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned64(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_match_t));
+        ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned64(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));
     }

-    /* table Space */
-    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_noMemset);
-    assert(((size_t)ptr & 3) == 0);   /* ensure ptr is
properly aligned */ - if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace); /* reset tables only */ - ms->hashTable = (U32*)(ptr); - ms->chainTable = ms->hashTable + hSize; - ms->hashTable3 = ms->chainTable + chainSize; - ptr = ms->hashTable3 + h3Size; - ms->cParams = *cParams; - assert(((size_t)ptr & 3) == 0); - return ptr; + RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation, + "failed a workspace allocation in ZSTD_reset_matchState"); + return 0; +} + +/* ZSTD_indexTooCloseToMax() : + * minor optimization : prefer memset() rather than reduceIndex() + * which is measurably slow in some circumstances (reported for Visual Studio). + * Works when re-using a context for a lot of smallish inputs : + * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN, + * memset() will be triggered before reduceIndex(). + */ +#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB) +static int ZSTD_indexTooCloseToMax(ZSTD_window_t w) +{ + return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN); } -#define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */ -#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128 /* when workspace is continuously too large - * during at least this number of times, - * context's memory usage is considered wasteful, - * because it's sized to handle a worst case scenario which rarely happens. - * In which case, resize it down to free some memory */ +/** ZSTD_dictTooBig(): + * When dictionaries are larger than ZSTD_CHUNKSIZE_MAX they can't be loaded in + * one go generically. So we ensure that in that case we reset the tables to zero, + * so that we can load as much of the dictionary as possible. + */ +static int ZSTD_dictTooBig(size_t const loadedDictSize) +{ + return loadedDictSize > ZSTD_CHUNKSIZE_MAX; +} /*! ZSTD_resetCCtx_internal() : - note : `params` are assumed fully validated at this stage */ + * @param loadedDictSize The size of the dictionary to be loaded + * into the context, if any. If no dictionary is used, or the + * dictionary is being attached / copied, then pass 0. + * note : `params` are assumed fully validated at this stage. 
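+ *            e.g. ZSTD_resetCCtx_byAttachingCDict() and
+ *            ZSTD_resetCCtx_byCopyingCDict() below both pass 0 here, while a
+ *            raw dictionary load would be expected to pass the dictionary's
+ *            full size.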
+ */ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, - ZSTD_CCtx_params params, + ZSTD_CCtx_params const* params, U64 const pledgedSrcSize, + size_t const loadedDictSize, ZSTD_compResetPolicy_e const crp, ZSTD_buffered_policy_e const zbuff) { - DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u", - (U32)pledgedSrcSize, params.cParams.windowLog); - assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); - - if (crp == ZSTDcrp_continue) { - if (ZSTD_equivalentParams(zc->appliedParams, params, - zc->inBuffSize, - zc->seqStore.maxNbSeq, zc->seqStore.maxNbLit, - zbuff, pledgedSrcSize)) { - DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%zu)", - zc->appliedParams.cParams.windowLog, zc->blockSize); - zc->workSpaceOversizedDuration += (zc->workSpaceOversizedDuration > 0); /* if it was too large, it still is */ - if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION) - return ZSTD_continueCCtx(zc, params, pledgedSrcSize); - } } - DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx"); + ZSTD_cwksp* const ws = &zc->workspace; + DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d", + (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->postBlockSplitter); + assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams))); + + zc->isFirstBlock = 1; - if (params.ldmParams.enableLdm) { + /* Set applied params early so we can modify them for LDM, + * and point params at the applied params. + */ + zc->appliedParams = *params; + params = &zc->appliedParams; + + assert(params->useRowMatchFinder != ZSTD_ps_auto); + assert(params->postBlockSplitter != ZSTD_ps_auto); + assert(params->ldmParams.enableLdm != ZSTD_ps_auto); + assert(params->maxBlockSize != 0); + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* Adjust long distance matching parameters */ - ZSTD_ldm_adjustParameters(¶ms.ldmParams, ¶ms.cParams); - assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog); - assert(params.ldmParams.hashRateLog < 32); - zc->ldmState.hashPower = ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength); - } - - { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize)); - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); - U32 const divider = (params.cParams.minMatch==3) ? 3 : 4; - size_t const maxNbSeq = blockSize / divider; - size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq; - size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0; - size_t const buffInSize = (zbuff==ZSTDb_buffered) ? 
windowSize + blockSize : 0; - size_t const matchStateSize = ZSTD_sizeof_matchState(¶ms.cParams, /* forCCtx */ 1); - size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize); - void* ptr; /* used to partition workSpace */ - - /* Check if workSpace is large enough, alloc a new one if needed */ - { size_t const entropySpace = HUF_WORKSPACE_SIZE; - size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t); - size_t const bufferSpace = buffInSize + buffOutSize; - size_t const ldmSpace = ZSTD_ldm_getTableSize(params.ldmParams); - size_t const ldmSeqSpace = maxNbLdmSeq * sizeof(rawSeq); - - size_t const neededSpace = entropySpace + blockStateSpace + ldmSpace + - ldmSeqSpace + matchStateSize + tokenSpace + - bufferSpace; - - int const workSpaceTooSmall = zc->workSpaceSize < neededSpace; - int const workSpaceTooLarge = zc->workSpaceSize > ZSTD_WORKSPACETOOLARGE_FACTOR * neededSpace; - int const workSpaceWasteful = workSpaceTooLarge && (zc->workSpaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION); - zc->workSpaceOversizedDuration = workSpaceTooLarge ? zc->workSpaceOversizedDuration+1 : 0; - - DEBUGLOG(4, "Need %zuKB workspace, including %zuKB for match state, and %zuKB for buffers", - neededSpace>>10, matchStateSize>>10, bufferSpace>>10); + ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, ¶ms->cParams); + assert(params->ldmParams.hashLog >= params->ldmParams.bucketSizeLog); + assert(params->ldmParams.hashRateLog < 32); + } + + { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize)); + size_t const blockSize = MIN(params->maxBlockSize, windowSize); + size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, params->cParams.minMatch, ZSTD_hasExtSeqProd(params)); + size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered) + ? ZSTD_compressBound(blockSize) + 1 + : 0; + size_t const buffInSize = (zbuff == ZSTDb_buffered && params->inBufferMode == ZSTD_bm_buffered) + ? windowSize + blockSize + : 0; + size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize); + + int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window); + int const dictTooBig = ZSTD_dictTooBig(loadedDictSize); + ZSTD_indexResetPolicy_e needsIndexReset = + (indexTooClose || dictTooBig || !zc->initialized) ? 
ZSTDirp_reset : ZSTDirp_continue; + + size_t const neededSpace = + ZSTD_estimateCCtxSize_usingCCtxParams_internal( + ¶ms->cParams, ¶ms->ldmParams, zc->staticSize != 0, params->useRowMatchFinder, + buffInSize, buffOutSize, pledgedSrcSize, ZSTD_hasExtSeqProd(params), params->maxBlockSize); + + FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!"); + + if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0); + + { /* Check if workspace is large enough, alloc a new one if needed */ + int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace; + int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace); + int resizeWorkspace = workspaceTooSmall || workspaceWasteful; + DEBUGLOG(4, "Need %zu B workspace", neededSpace); DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize); - if (workSpaceTooSmall || workSpaceWasteful) { - DEBUGLOG(4, "Need to resize workSpaceSize from %zuKB to %zuKB", - zc->workSpaceSize >> 10, + if (resizeWorkspace) { + DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB", + ZSTD_cwksp_sizeof(ws) >> 10, neededSpace >> 10); RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize"); - zc->workSpaceSize = 0; - ZSTD_free(zc->workSpace, zc->customMem); - zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem); - RETURN_ERROR_IF(zc->workSpace == NULL, memory_allocation); - zc->workSpaceSize = neededSpace; - zc->workSpaceOversizedDuration = 0; + needsIndexReset = ZSTDirp_reset; + + ZSTD_cwksp_free(ws, zc->customMem); + FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), ""); + DEBUGLOG(5, "reserving object space"); /* Statically sized space. - * entropyWorkspace never moves, + * tmpWorkspace never moves, * though prev/next block swap places */ - assert(((size_t)zc->workSpace & 3) == 0); /* ensure correct alignment */ - assert(zc->workSpaceSize >= 2 * sizeof(ZSTD_compressedBlockState_t)); - zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)zc->workSpace; - zc->blockState.nextCBlock = zc->blockState.prevCBlock + 1; - ptr = zc->blockState.nextCBlock + 1; - zc->entropyWorkspace = (U32*)ptr; + assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t))); + zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t)); + RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock"); + zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t)); + RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock"); + zc->tmpWorkspace = ZSTD_cwksp_reserve_object(ws, TMP_WORKSPACE_SIZE); + RETURN_ERROR_IF(zc->tmpWorkspace == NULL, memory_allocation, "couldn't allocate tmpWorkspace"); + zc->tmpWkspSize = TMP_WORKSPACE_SIZE; } } + ZSTD_cwksp_clear(ws); + /* init params */ - zc->appliedParams = params; - zc->blockState.matchState.cParams = params.cParams; + zc->blockState.matchState.cParams = params->cParams; + zc->blockState.matchState.prefetchCDictTables = params->prefetchCDictTables == ZSTD_ps_enable; zc->pledgedSrcSizePlusOne = pledgedSrcSize+1; zc->consumedSrcSize = 0; zc->producedCSize = 0; @@ -1482,65 +2214,82 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, zc->appliedParams.fParams.contentSizeFlag = 0; DEBUGLOG(4, "pledged content size : %u ; flag : %u", (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag); - zc->blockSize = blockSize; + 
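+        /* blockSize was computed above as MIN(params->maxBlockSize, windowSize);
+         * e.g. (illustrative) a windowLog of 10 yields a 1 KB window, capping
+         * blockSizeMax at 1 KB even though ZSTD_BLOCKSIZE_MAX is 128 KB. */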
zc->blockSizeMax = blockSize; XXH64_reset(&zc->xxhState, 0); zc->stage = ZSTDcs_init; zc->dictID = 0; + zc->dictContentSize = 0; ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock); - ptr = zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32; + FORWARD_IF_ERROR(ZSTD_reset_matchState( + &zc->blockState.matchState, + ws, + ¶ms->cParams, + params->useRowMatchFinder, + crp, + needsIndexReset, + ZSTD_resetTarget_CCtx), ""); + + zc->seqStore.sequencesStart = (SeqDef*)ZSTD_cwksp_reserve_aligned64(ws, maxNbSeq * sizeof(SeqDef)); /* ldm hash table */ - /* initialize bucketOffsets table later for pointer alignment */ - if (params.ldmParams.enableLdm) { - size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog; - memset(ptr, 0, ldmHSize * sizeof(ldmEntry_t)); - assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */ - zc->ldmState.hashTable = (ldmEntry_t*)ptr; - ptr = zc->ldmState.hashTable + ldmHSize; - zc->ldmSequences = (rawSeq*)ptr; - ptr = zc->ldmSequences + maxNbLdmSeq; + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { + /* TODO: avoid memset? */ + size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog; + zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned64(ws, ldmHSize * sizeof(ldmEntry_t)); + ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t)); + zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned64(ws, maxNbLdmSeq * sizeof(rawSeq)); zc->maxNbLdmSequences = maxNbLdmSeq; - memset(&zc->ldmState.window, 0, sizeof(zc->ldmState.window)); + ZSTD_window_init(&zc->ldmState.window); + zc->ldmState.loadedDictEnd = 0; } - assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */ - ptr = ZSTD_reset_matchState(&zc->blockState.matchState, ptr, ¶ms.cParams, crp, /* forCCtx */ 1); + /* reserve space for block-level external sequences */ + if (ZSTD_hasExtSeqProd(params)) { + size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize); + zc->extSeqBufCapacity = maxNbExternalSeq; + zc->extSeqBuf = + (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned64(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence)); + } + + /* buffers */ - /* sequences storage */ - zc->seqStore.maxNbSeq = maxNbSeq; - zc->seqStore.sequencesStart = (seqDef*)ptr; - ptr = zc->seqStore.sequencesStart + maxNbSeq; - zc->seqStore.llCode = (BYTE*) ptr; - zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq; - zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq; - zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq; /* ZSTD_wildcopy() is used to copy into the literals buffer, * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes. */ + zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH); zc->seqStore.maxNbLit = blockSize; - ptr = zc->seqStore.litStart + blockSize + WILDCOPY_OVERLENGTH; + + zc->bufferedPolicy = zbuff; + zc->inBuffSize = buffInSize; + zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize); + zc->outBuffSize = buffOutSize; + zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize); /* ldm bucketOffsets table */ - if (params.ldmParams.enableLdm) { - size_t const ldmBucketSize = - ((size_t)1) << (params.ldmParams.hashLog - - params.ldmParams.bucketSizeLog); - memset(ptr, 0, ldmBucketSize); - zc->ldmState.bucketOffsets = (BYTE*)ptr; - ptr = zc->ldmState.bucketOffsets + ldmBucketSize; - ZSTD_window_clear(&zc->ldmState.window); + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { + /* TODO: avoid memset? 
*/ + size_t const numBuckets = + ((size_t)1) << (params->ldmParams.hashLog - + params->ldmParams.bucketSizeLog); + zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets); + ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets); } + + /* sequences storage */ ZSTD_referenceExternalSequences(zc, NULL, 0); + zc->seqStore.maxNbSeq = maxNbSeq; + zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); + zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); + zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); - /* buffers */ - zc->inBuffSize = buffInSize; - zc->inBuff = (char*)ptr; - zc->outBuffSize = buffOutSize; - zc->outBuff = zc->inBuff + buffInSize; + DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws)); + assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace)); + + zc->initialized = 1; return 0; } @@ -1574,16 +2323,18 @@ static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = { }; static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict, - ZSTD_CCtx_params params, + const ZSTD_CCtx_params* params, U64 pledgedSrcSize) { size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy]; - return ( pledgedSrcSize <= cutoff - || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN - || params.attachDictPref == ZSTD_dictForceAttach ) - && params.attachDictPref != ZSTD_dictForceCopy - && !params.forceWindow; /* dictMatchState isn't correctly - * handled in _enforceMaxDist */ + int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch; + return dedicatedDictSearch + || ( ( pledgedSrcSize <= cutoff + || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN + || params->attachDictPref == ZSTD_dictForceAttach ) + && params->attachDictPref != ZSTD_dictForceCopy + && !params->forceWindow ); /* dictMatchState isn't correctly + * handled in _enforceMaxDist */ } static size_t @@ -1593,16 +2344,29 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { - { const ZSTD_compressionParameters* const cdict_cParams = &cdict->matchState.cParams; + DEBUGLOG(4, "ZSTD_resetCCtx_byAttachingCDict() pledgedSrcSize=%llu", + (unsigned long long)pledgedSrcSize); + { + ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams; unsigned const windowLog = params.cParams.windowLog; assert(windowLog != 0); /* Resize working context table params for input only, since the dict * has its own tables. */ - params.cParams = ZSTD_adjustCParams_internal(*cdict_cParams, pledgedSrcSize, 0); + /* pledgedSrcSize == 0 means 0! 
*/ + + if (cdict->matchState.dedicatedDictSearch) { + ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams); + } + + params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize, + cdict->dictContentSize, ZSTD_cpm_attachDict, + params.useRowMatchFinder); params.cParams.windowLog = windowLog; - ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize, - ZSTDcrp_continue, zbuff); - assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy); + params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */ + FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, ¶ms, pledgedSrcSize, + /* loadedDictSize */ 0, + ZSTDcrp_makeClean, zbuff), ""); + assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy); } { const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc @@ -1627,13 +2391,30 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx, } } cctx->dictID = cdict->dictID; + cctx->dictContentSize = cdict->dictContentSize; /* copy block state */ - memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState)); + ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState)); return 0; } +static void ZSTD_copyCDictTableIntoCCtx(U32* dst, U32 const* src, size_t tableSize, + ZSTD_compressionParameters const* cParams) { + if (ZSTD_CDictIndicesAreTagged(cParams)){ + /* Remove tags from the CDict table if they are present. + * See docs on "short cache" in zstd_compress_internal.h for context. */ + size_t i; + for (i = 0; i < tableSize; i++) { + U32 const taggedIndex = src[i]; + U32 const index = taggedIndex >> ZSTD_SHORT_CACHE_TAG_BITS; + dst[i] = index; + } + } else { + ZSTD_memcpy(dst, src, tableSize * sizeof(U32)); + } +} + static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, @@ -1642,49 +2423,76 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, { const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams; - DEBUGLOG(4, "copying dictionary into context"); + assert(!cdict->matchState.dedicatedDictSearch); + DEBUGLOG(4, "ZSTD_resetCCtx_byCopyingCDict() pledgedSrcSize=%llu", + (unsigned long long)pledgedSrcSize); { unsigned const windowLog = params.cParams.windowLog; assert(windowLog != 0); /* Copy only compression parameters related to tables. */ params.cParams = *cdict_cParams; params.cParams.windowLog = windowLog; - ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize, - ZSTDcrp_noMemset, zbuff); + params.useRowMatchFinder = cdict->useRowMatchFinder; + FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, ¶ms, pledgedSrcSize, + /* loadedDictSize */ 0, + ZSTDcrp_leaveDirty, zbuff), ""); assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy); assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog); assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog); } + ZSTD_cwksp_mark_tables_dirty(&cctx->workspace); + assert(params.useRowMatchFinder != ZSTD_ps_auto); + /* copy tables */ - { size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog); + { size_t const chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0 /* DDS guaranteed disabled */) + ? 
((size_t)1 << cdict_cParams->chainLog) + : 0; size_t const hSize = (size_t)1 << cdict_cParams->hashLog; - size_t const tableSpace = (chainSize + hSize) * sizeof(U32); - assert((U32*)cctx->blockState.matchState.chainTable == (U32*)cctx->blockState.matchState.hashTable + hSize); /* chainTable must follow hashTable */ - assert((U32*)cctx->blockState.matchState.hashTable3 == (U32*)cctx->blockState.matchState.chainTable + chainSize); - assert((U32*)cdict->matchState.chainTable == (U32*)cdict->matchState.hashTable + hSize); /* chainTable must follow hashTable */ - assert((U32*)cdict->matchState.hashTable3 == (U32*)cdict->matchState.chainTable + chainSize); - memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, tableSpace); /* presumes all tables follow each other */ + + ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.hashTable, + cdict->matchState.hashTable, + hSize, cdict_cParams); + + /* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */ + if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) { + ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.chainTable, + cdict->matchState.chainTable, + chainSize, cdict_cParams); + } + /* copy tag table */ + if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) { + size_t const tagTableSize = hSize; + ZSTD_memcpy(cctx->blockState.matchState.tagTable, + cdict->matchState.tagTable, + tagTableSize); + cctx->blockState.matchState.hashSalt = cdict->matchState.hashSalt; + } } /* Zero the hashTable3, since the cdict never fills it */ - { size_t const h3Size = (size_t)1 << cctx->blockState.matchState.hashLog3; + assert(cctx->blockState.matchState.hashLog3 <= 31); + { U32 const h3log = cctx->blockState.matchState.hashLog3; + size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0; assert(cdict->matchState.hashLog3 == 0); - memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32)); + ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32)); } + ZSTD_cwksp_mark_tables_clean(&cctx->workspace); + /* copy dictionary offsets */ - { ZSTD_matchState_t const* srcMatchState = &cdict->matchState; - ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState; + { ZSTD_MatchState_t const* srcMatchState = &cdict->matchState; + ZSTD_MatchState_t* dstMatchState = &cctx->blockState.matchState; dstMatchState->window = srcMatchState->window; dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; } cctx->dictID = cdict->dictID; + cctx->dictContentSize = cdict->dictContentSize; /* copy block state */ - memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState)); + ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState)); return 0; } @@ -1694,7 +2502,7 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, * in-place. We decide here which strategy to use. 
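 * For example, ZSTD_shouldAttachDict() above chooses to attach when the pledged
 * source size is small (at most the strategy-dependent attachDictSizeCutoffs
 * entry) or unknown, since attaching references the CDict's tables in place
 * instead of paying for a full table copy up front.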
*/ static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, - ZSTD_CCtx_params params, + const ZSTD_CCtx_params* params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { @@ -1704,10 +2512,10 @@ static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx, if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) { return ZSTD_resetCCtx_byAttachingCDict( - cctx, cdict, params, pledgedSrcSize, zbuff); + cctx, cdict, *params, pledgedSrcSize, zbuff); } else { return ZSTD_resetCCtx_byCopyingCDict( - cctx, cdict, params, pledgedSrcSize, zbuff); + cctx, cdict, *params, pledgedSrcSize, zbuff); } } @@ -1724,16 +2532,24 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { + RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong, + "Can't copy a ctx that's not in init stage."); DEBUGLOG(5, "ZSTD_copyCCtx_internal"); - RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong); - - memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem)); + ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem)); { ZSTD_CCtx_params params = dstCCtx->requestedParams; /* Copy only compression parameters related to tables. */ params.cParams = srcCCtx->appliedParams.cParams; + assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto); + assert(srcCCtx->appliedParams.postBlockSplitter != ZSTD_ps_auto); + assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto); + params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder; + params.postBlockSplitter = srcCCtx->appliedParams.postBlockSplitter; + params.ldmParams = srcCCtx->appliedParams.ldmParams; params.fParams = fParams; - ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize, - ZSTDcrp_noMemset, zbuff); + params.maxBlockSize = srcCCtx->appliedParams.maxBlockSize; + ZSTD_resetCCtx_internal(dstCCtx, ¶ms, pledgedSrcSize, + /* loadedDictSize */ 0, + ZSTDcrp_leaveDirty, zbuff); assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog); assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy); assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog); @@ -1741,28 +2557,44 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3); } + ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace); + /* copy tables */ - { size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog); + { size_t const chainSize = ZSTD_allocateChainTable(srcCCtx->appliedParams.cParams.strategy, + srcCCtx->appliedParams.useRowMatchFinder, + 0 /* forDDSDict */) + ? 
((size_t)1 << srcCCtx->appliedParams.cParams.chainLog) + : 0; size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog; - size_t const h3Size = (size_t)1 << srcCCtx->blockState.matchState.hashLog3; - size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); - assert((U32*)dstCCtx->blockState.matchState.chainTable == (U32*)dstCCtx->blockState.matchState.hashTable + hSize); /* chainTable must follow hashTable */ - assert((U32*)dstCCtx->blockState.matchState.hashTable3 == (U32*)dstCCtx->blockState.matchState.chainTable + chainSize); - memcpy(dstCCtx->blockState.matchState.hashTable, srcCCtx->blockState.matchState.hashTable, tableSpace); /* presumes all tables follow each other */ + U32 const h3log = srcCCtx->blockState.matchState.hashLog3; + size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0; + + ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable, + srcCCtx->blockState.matchState.hashTable, + hSize * sizeof(U32)); + ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable, + srcCCtx->blockState.matchState.chainTable, + chainSize * sizeof(U32)); + ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3, + srcCCtx->blockState.matchState.hashTable3, + h3Size * sizeof(U32)); } + ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace); + /* copy dictionary offsets */ { - const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState; - ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState; + const ZSTD_MatchState_t* srcMatchState = &srcCCtx->blockState.matchState; + ZSTD_MatchState_t* dstMatchState = &dstCCtx->blockState.matchState; dstMatchState->window = srcMatchState->window; dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; } dstCCtx->dictID = srcCCtx->dictID; + dstCCtx->dictContentSize = srcCCtx->dictContentSize; /* copy block state */ - memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock)); + ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock)); return 0; } @@ -1775,7 +2607,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize) { ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; - ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0); + ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy; ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1); if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN); @@ -1799,17 +2631,38 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa int const nbRows = (int)size / ZSTD_ROWSIZE; int cellNb = 0; int rowNb; + /* Protect special index values < ZSTD_WINDOW_START_INDEX. */ + U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX; assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */ - assert(size < (1U<<31)); /* can be casted to int */ + assert(size < (1U<<31)); /* can be cast to int */ + +#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) + /* To validate that the table reuse logic is sound, and that we don't + * access table space that we haven't cleaned, we re-"poison" the table + * space every time we mark it dirty. 
+ *
+ * This function however is intended to operate on those dirty tables and
+ * re-clean them. So when this function is used correctly, we can unpoison
+ * the memory it operated on. This introduces a blind spot though, since
+ * if we now try to operate on __actually__ poisoned memory, we will not
+ * detect that. */
+    __msan_unpoison(table, size * sizeof(U32));
+#endif
+
     for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
         int column;
         for (column=0; column<ZSTD_ROWSIZE; column++) {
-            if (preserveMark) {
-                U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
-                table[cellNb] += adder;
-            }
-            if (table[cellNb] < reducerValue) table[cellNb] = 0;
-            else table[cellNb] -= reducerValue;
-            cellNb++;
+            U32 newVal;
+            if (preserveMark && table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) {
+                /* This write is pointless, but is required(?) for the compiler
+                 * to auto-vectorize the loop. */
+                newVal = ZSTD_DUBT_UNSORTED_MARK;
+            } else if (table[cellNb] < reducerThreshold) {
+                newVal = 0;
+            } else {
+                newVal = table[cellNb] - reducerValue;
+            }
+            table[cellNb] = newVal;
+            cellNb++;
     }   }
 }

 static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
 {
     ZSTD_reduceTable_internal(table, size, reducerValue, 0);
 }

 static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
 {
     ZSTD_reduceTable_internal(table, size, reducerValue, 1);
 }

 /*! ZSTD_reduceIndex() :
 *   rescale all indexes to avoid future overflow (indexes are U32) */
-static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
+static void ZSTD_reduceIndex (ZSTD_MatchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
 {
-    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
-    { U32 const hSize = (U32)1 << zc->appliedParams.cParams.hashLog;
+    { U32 const hSize = (U32)1 << params->cParams.hashLog;
         ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
     }

-    if (zc->appliedParams.cParams.strategy != ZSTD_fast) {
-        U32 const chainSize = (U32)1 << zc->appliedParams.cParams.chainLog;
-        if (zc->appliedParams.cParams.strategy == ZSTD_btlazy2)
+    if (ZSTD_allocateChainTable(params->cParams.strategy, params->useRowMatchFinder, (U32)ms->dedicatedDictSearch)) {
+        U32 const chainSize = (U32)1 << params->cParams.chainLog;
+        if (params->cParams.strategy == ZSTD_btlazy2)
             ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
         else
             ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
@@ -1854,757 +2706,297 @@ static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)

 /* See doc/zstd_compression_format.md for detailed format description */

-static size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
-{
-    U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
-    RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
-                    dstSize_tooSmall);
-    MEM_writeLE24(dst, cBlockHeader24);
-    memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
-    return ZSTD_blockHeaderSize + srcSize;
-}
-
-static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    BYTE* const ostart = (BYTE* const)dst;
-    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
-
-    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall);
-
-    switch(flSize)
-    {
-        case 1: /* 2 - 1 - 5 */
-            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
-            break;
-        case 2: /* 2 - 2 - 12 */
-            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
-            break;
-        case 3: /* 2 - 2 - 20 */
-            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
-            break;
-        default:   /* not necessary : flSize is {1,2,3} */
-            assert(0);
-    }
-
-    memcpy(ostart + flSize, src, srcSize);
-    return srcSize + flSize;
-}
-
-static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    BYTE* const ostart = (BYTE* const)dst;
-    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
-
-    (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */
-
-    switch(flSize)
-    {
-        case 1: /* 2 - 1 - 5 */
-            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
-            break;
-        case 2: /* 2 - 2 - 12 */
-            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
-            break;
-        case 3: /* 2 - 2 - 20 */
-            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
-            break;
-        default:   /* not necessary : flSize is {1,2,3} */
-            assert(0);
-    }
-
-    ostart[flSize] = *(const BYTE*)src;
-    return flSize+1;
-}
-
-
-/* ZSTD_minGain() :
- * minimum compression required
- * to generate a compress block or a compressed literals section.
- * note : use same formula for both situations */ -static size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat) -{ - U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6; - ZSTD_STATIC_ASSERT(ZSTD_btultra == 8); - assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat)); - return (srcSize >> minlog) + 2; -} - -static size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, - ZSTD_hufCTables_t* nextHuf, - ZSTD_strategy strategy, int disableLiteralCompression, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - void* workspace, size_t wkspSize, - const int bmi2) -{ - size_t const minGain = ZSTD_minGain(srcSize, strategy); - size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); - BYTE* const ostart = (BYTE*)dst; - U32 singleStream = srcSize < 256; - symbolEncodingType_e hType = set_compressed; - size_t cLitSize; - - DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i)", - disableLiteralCompression); - - /* Prepare nextEntropy assuming reusing the existing table */ - memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - - if (disableLiteralCompression) - return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - - /* small ? don't even attempt compression (speed opt) */ -# define COMPRESS_LITERALS_SIZE_MIN 63 - { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN; - if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - } - - RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression"); - { HUF_repeat repeat = prevHuf->repeatMode; - int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0; - if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1; - cLitSize = singleStream ? 
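/* Editorial sketch, not part of the patch: the small-input gate at the top of
 * ZSTD_compressLiterals() above. Below COMPRESS_LITERALS_SIZE_MIN (63) bytes,
 * building a fresh Huffman table costs more than it saves; the bar drops to
 * 6 bytes when the previous table is still valid and can be reused for free. */
#include <stddef.h>

static int sketch_worthCompressingLiterals(size_t srcSize, int prevTableValid)
{
    size_t const minLitSize = prevTableValid ? 6 : 63;
    return srcSize > minLitSize;   /* otherwise store literals raw */
}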
HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, - workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) - : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, - workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2); - if (repeat != HUF_repeat_none) { - /* reused the existing table */ - hType = set_repeat; - } - } - - if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) { - memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - } - if (cLitSize==1) { - memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); - } - - if (hType == set_compressed) { - /* using a newly constructed table */ - nextHuf->repeatMode = HUF_repeat_check; - } - - /* Build header */ - switch(lhSize) - { - case 3: /* 2 - 2 - 10 - 10 */ - { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); - MEM_writeLE24(ostart, lhc); - break; - } - case 4: /* 2 - 2 - 14 - 14 */ - { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); - MEM_writeLE32(ostart, lhc); - break; - } - case 5: /* 2 - 2 - 18 - 18 */ - { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); - MEM_writeLE32(ostart, lhc); - ostart[4] = (BYTE)(cLitSize >> 10); - break; - } - default: /* not possible : lhSize is {3,4,5} */ - assert(0); - } - return lhSize+cLitSize; -} - - -void ZSTD_seqToCodes(const seqStore_t* seqStorePtr) +int ZSTD_seqToCodes(const SeqStore_t* seqStorePtr) { - const seqDef* const sequences = seqStorePtr->sequencesStart; + const SeqDef* const sequences = seqStorePtr->sequencesStart; BYTE* const llCodeTable = seqStorePtr->llCode; BYTE* const ofCodeTable = seqStorePtr->ofCode; BYTE* const mlCodeTable = seqStorePtr->mlCode; U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); U32 u; + int longOffsets = 0; assert(nbSeq <= seqStorePtr->maxNbSeq); for (u=0; u= STREAM_ACCUMULATOR_MIN)); + if (MEM_32bits() && ofCode >= STREAM_ACCUMULATOR_MIN) + longOffsets = 1; } - if (seqStorePtr->longLengthID==1) + if (seqStorePtr->longLengthType==ZSTD_llt_literalLength) llCodeTable[seqStorePtr->longLengthPos] = MaxLL; - if (seqStorePtr->longLengthID==2) + if (seqStorePtr->longLengthType==ZSTD_llt_matchLength) mlCodeTable[seqStorePtr->longLengthPos] = MaxML; + return longOffsets; } - -/** - * -log2(x / 256) lookup table for x in [0, 256). 
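/* Editorial sketch, not part of the patch: packing the 3-byte compressed
 * literals header built in the lhSize==3 case above: 2 bits type, 2 bits
 * size format, 10 bits regenerated size, 10 bits compressed size. */
#include <stdint.h>

static uint32_t sketch_litHeader3(uint32_t hType /* set_basic..set_repeat */,
                                  int fourStreams,
                                  uint32_t srcSize, uint32_t cLitSize)
{
    /* Only valid while both sizes fit in 10 bits (i.e. < 1024). */
    return hType + ((uint32_t)fourStreams << 2)
         + (srcSize << 4) + (cLitSize << 14);
}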
- * If x == 0: Return 0 - * Else: Return floor(-log2(x / 256) * 256) - */ -static unsigned const kInverseProbabilityLog256[256] = { - 0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162, - 1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889, - 874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734, - 724, 714, 704, 694, 685, 676, 667, 658, 650, 642, 633, 626, - 618, 610, 603, 595, 588, 581, 574, 567, 561, 554, 548, 542, - 535, 529, 523, 517, 512, 506, 500, 495, 489, 484, 478, 473, - 468, 463, 458, 453, 448, 443, 438, 434, 429, 424, 420, 415, - 411, 407, 402, 398, 394, 390, 386, 382, 377, 373, 370, 366, - 362, 358, 354, 350, 347, 343, 339, 336, 332, 329, 325, 322, - 318, 315, 311, 308, 305, 302, 298, 295, 292, 289, 286, 282, - 279, 276, 273, 270, 267, 264, 261, 258, 256, 253, 250, 247, - 244, 241, 239, 236, 233, 230, 228, 225, 222, 220, 217, 215, - 212, 209, 207, 204, 202, 199, 197, 194, 192, 190, 187, 185, - 182, 180, 178, 175, 173, 171, 168, 166, 164, 162, 159, 157, - 155, 153, 151, 149, 146, 144, 142, 140, 138, 136, 134, 132, - 130, 128, 126, 123, 121, 119, 117, 115, 114, 112, 110, 108, - 106, 104, 102, 100, 98, 96, 94, 93, 91, 89, 87, 85, - 83, 82, 80, 78, 76, 74, 73, 71, 69, 67, 66, 64, - 62, 61, 59, 57, 55, 54, 52, 50, 49, 47, 46, 44, - 42, 41, 39, 37, 36, 34, 33, 31, 30, 28, 26, 25, - 23, 22, 20, 19, 17, 16, 14, 13, 11, 10, 8, 7, - 5, 4, 2, 1, -}; - - -/** - * Returns the cost in bits of encoding the distribution described by count - * using the entropy bound. - */ -static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total) +/* ZSTD_useTargetCBlockSize(): + * Returns if target compressed block size param is being used. + * If used, compression will do best effort to make a compressed block size to be around targetCBlockSize. + * Returns 1 if true, 0 otherwise. */ +static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams) { - unsigned cost = 0; - unsigned s; - for (s = 0; s <= max; ++s) { - unsigned norm = (unsigned)((256 * count[s]) / total); - if (count[s] != 0 && norm == 0) - norm = 1; - assert(count[s] < total); - cost += count[s] * kInverseProbabilityLog256[norm]; - } - return cost >> 8; + DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize); + return (cctxParams->targetCBlockSize != 0); } - -/** - * Returns the cost in bits of encoding the distribution in count using the - * table described by norm. The max symbol support by norm is assumed >= max. - * norm must be valid for every symbol with non-zero probability in count. - */ -static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog, - unsigned const* count, unsigned const max) +/* ZSTD_blockSplitterEnabled(): + * Returns if block splitting param is being used + * If used, compression will do best effort to split a block in order to improve compression ratio. + * At the time this function is called, the parameter must be finalized. + * Returns 1 if true, 0 otherwise. */ +static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams) { - unsigned const shift = 8 - accuracyLog; - size_t cost = 0; - unsigned s; - assert(accuracyLog <= 8); - for (s = 0; s <= max; ++s) { - unsigned const normAcc = norm[s] != -1 ? 
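/* Editorial sketch, not part of the patch: the table being removed above is a
 * fixed-point bit-cost table, entry x = floor(-log2(x/256) * 256), so one
 * entry unit is 1/256th of a bit. A quick self-check on its first entries: */
#include <assert.h>

static const unsigned sketch_invLog[17] = {
    0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
    1130, 1100, 1073, 1047, 1024 };

int main(void)
{
    assert(sketch_invLog[16] == 1024); /* P = 16/256: 4.00 bits */
    assert(sketch_invLog[8]  == 1280); /* P =  8/256: 5.00 bits */
    assert(sketch_invLog[1]  == 2048); /* P =  1/256: 8.00 bits */
    return 0;
}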
norm[s] : 1; - unsigned const norm256 = normAcc << shift; - assert(norm256 > 0); - assert(norm256 < 256); - cost += count[s] * kInverseProbabilityLog256[norm256]; - } - return cost >> 8; + DEBUGLOG(5, "ZSTD_blockSplitterEnabled (postBlockSplitter=%d)", cctxParams->postBlockSplitter); + assert(cctxParams->postBlockSplitter != ZSTD_ps_auto); + return (cctxParams->postBlockSplitter == ZSTD_ps_enable); } - -static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) { - void const* ptr = ctable; - U16 const* u16ptr = (U16 const*)ptr; - U32 const maxSymbolValue = MEM_read16(u16ptr + 1); - return maxSymbolValue; -} - - -/** - * Returns the cost in bits of encoding the distribution in count using ctable. - * Returns an error if ctable cannot represent all the symbols in count. +/* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types + * and size of the sequences statistics */ -static size_t ZSTD_fseBitCost( - FSE_CTable const* ctable, - unsigned const* count, - unsigned const max) -{ - unsigned const kAccuracyLog = 8; - size_t cost = 0; - unsigned s; - FSE_CState_t cstate; - FSE_initCState(&cstate, ctable); - RETURN_ERROR_IF(ZSTD_getFSEMaxSymbolValue(ctable) < max, GENERIC, - "Repeat FSE_CTable has maxSymbolValue %u < %u", - ZSTD_getFSEMaxSymbolValue(ctable), max); - for (s = 0; s <= max; ++s) { - unsigned const tableLog = cstate.stateLog; - unsigned const badCost = (tableLog + 1) << kAccuracyLog; - unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog); - if (count[s] == 0) - continue; - RETURN_ERROR_IF(bitCost >= badCost, GENERIC, - "Repeat FSE_CTable has Prob[%u] == 0", s); - cost += count[s] * bitCost; - } - return cost >> kAccuracyLog; -} - -/** - * Returns the cost in bytes of encoding the normalized count header. - * Returns an error if any of the helper functions return an error. +typedef struct { + U32 LLtype; + U32 Offtype; + U32 MLtype; + size_t size; + size_t lastCountSize; /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */ + int longOffsets; +} ZSTD_symbolEncodingTypeStats_t; + +/* ZSTD_buildSequencesStatistics(): + * Returns a ZSTD_symbolEncodingTypeStats_t, or a zstd error code in the `size` field. + * Modifies `nextEntropy` to have the appropriate values as a side effect. + * nbSeq must be greater than 0. 
+ * + * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32) */ -static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max, - size_t const nbSeq, unsigned const FSELog) -{ - BYTE wksp[FSE_NCOUNTBOUND]; - S16 norm[MaxSeq + 1]; - const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); - FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max)); - return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog); -} - +static ZSTD_symbolEncodingTypeStats_t +ZSTD_buildSequencesStatistics( + const SeqStore_t* seqStorePtr, size_t nbSeq, + const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy, + BYTE* dst, const BYTE* const dstEnd, + ZSTD_strategy strategy, unsigned* countWorkspace, + void* entropyWorkspace, size_t entropyWkspSize) +{ + BYTE* const ostart = dst; + const BYTE* const oend = dstEnd; + BYTE* op = ostart; + FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable; + FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable; + FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable; + const BYTE* const ofCodeTable = seqStorePtr->ofCode; + const BYTE* const llCodeTable = seqStorePtr->llCode; + const BYTE* const mlCodeTable = seqStorePtr->mlCode; + ZSTD_symbolEncodingTypeStats_t stats; -typedef enum { - ZSTD_defaultDisallowed = 0, - ZSTD_defaultAllowed = 1 -} ZSTD_defaultPolicy_e; - -MEM_STATIC symbolEncodingType_e -ZSTD_selectEncodingType( - FSE_repeat* repeatMode, unsigned const* count, unsigned const max, - size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, - FSE_CTable const* prevCTable, - short const* defaultNorm, U32 defaultNormLog, - ZSTD_defaultPolicy_e const isDefaultAllowed, - ZSTD_strategy const strategy) -{ - ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0); - if (mostFrequent == nbSeq) { - *repeatMode = FSE_repeat_none; - if (isDefaultAllowed && nbSeq <= 2) { - /* Prefer set_basic over set_rle when there are 2 or less symbols, - * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol. - * If basic encoding isn't possible, always choose RLE. 
- */ - DEBUGLOG(5, "Selected set_basic"); - return set_basic; - } - DEBUGLOG(5, "Selected set_rle"); - return set_rle; - } - if (strategy < ZSTD_lazy) { - if (isDefaultAllowed) { - size_t const staticFse_nbSeq_max = 1000; - size_t const mult = 10 - strategy; - size_t const baseLog = 3; - size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog; /* 28-36 for offset, 56-72 for lengths */ - assert(defaultNormLog >= 5 && defaultNormLog <= 6); /* xx_DEFAULTNORMLOG */ - assert(mult <= 9 && mult >= 7); - if ( (*repeatMode == FSE_repeat_valid) - && (nbSeq < staticFse_nbSeq_max) ) { - DEBUGLOG(5, "Selected set_repeat"); - return set_repeat; + stats.lastCountSize = 0; + /* convert length/distances into codes */ + stats.longOffsets = ZSTD_seqToCodes(seqStorePtr); + assert(op <= oend); + assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */ + /* build CTable for Literal Lengths */ + { unsigned max = MaxLL; + size_t const mostFrequent = HIST_countFast_wksp(countWorkspace, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ + DEBUGLOG(5, "Building LL table"); + nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode; + stats.LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode, + countWorkspace, max, mostFrequent, nbSeq, + LLFSELog, prevEntropy->litlengthCTable, + LL_defaultNorm, LL_defaultNormLog, + ZSTD_defaultAllowed, strategy); + assert(set_basic < set_compressed && set_rle < set_compressed); + assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ + { size_t const countSize = ZSTD_buildCTable( + op, (size_t)(oend - op), + CTable_LitLength, LLFSELog, (SymbolEncodingType_e)stats.LLtype, + countWorkspace, max, llCodeTable, nbSeq, + LL_defaultNorm, LL_defaultNormLog, MaxLL, + prevEntropy->litlengthCTable, + sizeof(prevEntropy->litlengthCTable), + entropyWorkspace, entropyWkspSize); + if (ZSTD_isError(countSize)) { + DEBUGLOG(3, "ZSTD_buildCTable for LitLens failed"); + stats.size = countSize; + return stats; } - if ( (nbSeq < dynamicFse_nbSeq_min) - || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) { - DEBUGLOG(5, "Selected set_basic"); - /* The format allows default tables to be repeated, but it isn't useful. - * When using simple heuristics to select encoding type, we don't want - * to confuse these tables with dictionaries. When running more careful - * analysis, we don't need to waste time checking both repeating tables - * and default tables. - */ - *repeatMode = FSE_repeat_none; - return set_basic; + if (stats.LLtype == set_compressed) + stats.lastCountSize = countSize; + op += countSize; + assert(op <= oend); + } } + /* build CTable for Offsets */ + { unsigned max = MaxOff; + size_t const mostFrequent = HIST_countFast_wksp( + countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ + /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ + ZSTD_DefaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? 
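/* Editorial sketch, not part of the patch: the removed heuristic's threshold
 * for "enough sequences to justify a dedicated FSE table", with the ranges
 * quoted in the comment above worked out. */
#include <stddef.h>

static size_t sketch_dynamicFseNbSeqMin(unsigned defaultNormLog, int strategy)
{
    size_t const mult = (size_t)(10 - strategy);        /* 7..9 here */
    return (((size_t)1 << defaultNormLog) * mult) >> 3;
}
/* Offsets (defaultNormLog == 5): 4 * mult -> 28..36 sequences.
 * Lengths (defaultNormLog == 6): 8 * mult -> 56..72 sequences.
 * Below the threshold, the predefined table is preferred. */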
ZSTD_defaultAllowed : ZSTD_defaultDisallowed; + DEBUGLOG(5, "Building OF table"); + nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode; + stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode, + countWorkspace, max, mostFrequent, nbSeq, + OffFSELog, prevEntropy->offcodeCTable, + OF_defaultNorm, OF_defaultNormLog, + defaultPolicy, strategy); + assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */ + { size_t const countSize = ZSTD_buildCTable( + op, (size_t)(oend - op), + CTable_OffsetBits, OffFSELog, (SymbolEncodingType_e)stats.Offtype, + countWorkspace, max, ofCodeTable, nbSeq, + OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, + prevEntropy->offcodeCTable, + sizeof(prevEntropy->offcodeCTable), + entropyWorkspace, entropyWkspSize); + if (ZSTD_isError(countSize)) { + DEBUGLOG(3, "ZSTD_buildCTable for Offsets failed"); + stats.size = countSize; + return stats; } - } - } else { - size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC); - size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC); - size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog); - size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq); - - if (isDefaultAllowed) { - assert(!ZSTD_isError(basicCost)); - assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost))); - } - assert(!ZSTD_isError(NCountCost)); - assert(compressedCost < ERROR(maxCode)); - DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u", - (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost); - if (basicCost <= repeatCost && basicCost <= compressedCost) { - DEBUGLOG(5, "Selected set_basic"); - assert(isDefaultAllowed); - *repeatMode = FSE_repeat_none; - return set_basic; - } - if (repeatCost <= compressedCost) { - DEBUGLOG(5, "Selected set_repeat"); - assert(!ZSTD_isError(repeatCost)); - return set_repeat; - } - assert(compressedCost < basicCost && compressedCost < repeatCost); - } - DEBUGLOG(5, "Selected set_compressed"); - *repeatMode = FSE_repeat_check; - return set_compressed; -} - -MEM_STATIC size_t -ZSTD_buildCTable(void* dst, size_t dstCapacity, - FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type, - unsigned* count, U32 max, - const BYTE* codeTable, size_t nbSeq, - const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax, - const FSE_CTable* prevCTable, size_t prevCTableSize, - void* workspace, size_t workspaceSize) -{ - BYTE* op = (BYTE*)dst; - const BYTE* const oend = op + dstCapacity; - DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity); - - switch (type) { - case set_rle: - FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max)); - RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall); - *op = codeTable[0]; - return 1; - case set_repeat: - memcpy(nextCTable, prevCTable, prevCTableSize); - return 0; - case set_basic: - FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize)); /* note : could be pre-calculated */ - return 0; - case set_compressed: { - S16 norm[MaxSeq + 1]; - size_t nbSeq_1 = nbSeq; - const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); - if (count[codeTable[nbSeq-1]] > 1) { - count[codeTable[nbSeq-1]]--; - nbSeq_1--; - } - assert(nbSeq_1 > 1); - FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, 
max)); - { size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */ - FORWARD_IF_ERROR(NCountSize); - FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize)); - return NCountSize; - } - } - default: assert(0); RETURN_ERROR(GENERIC); - } -} - -FORCE_INLINE_TEMPLATE size_t -ZSTD_encodeSequences_body( - void* dst, size_t dstCapacity, - FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, - FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, - FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, - seqDef const* sequences, size_t nbSeq, int longOffsets) -{ - BIT_CStream_t blockStream; - FSE_CState_t stateMatchLength; - FSE_CState_t stateOffsetBits; - FSE_CState_t stateLitLength; - - RETURN_ERROR_IF( - ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)), - dstSize_tooSmall, "not enough space remaining"); - DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)", - (int)(blockStream.endPtr - blockStream.startPtr), - (unsigned)dstCapacity); - - /* first symbols */ - FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]); - FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]); - FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]); - BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]); - if (MEM_32bits()) BIT_flushBits(&blockStream); - BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]); - if (MEM_32bits()) BIT_flushBits(&blockStream); - if (longOffsets) { - U32 const ofBits = ofCodeTable[nbSeq-1]; - int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); - if (extraBits) { - BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits); - BIT_flushBits(&blockStream); - } - BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits, - ofBits - extraBits); - } else { - BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]); - } - BIT_flushBits(&blockStream); - - { size_t n; - for (n=nbSeq-2 ; n= 64-7-(LLFSELog+MLFSELog+OffFSELog))) - BIT_flushBits(&blockStream); /* (7)*/ - BIT_addBits(&blockStream, sequences[n].litLength, llBits); - if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream); - BIT_addBits(&blockStream, sequences[n].matchLength, mlBits); - if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream); - if (longOffsets) { - int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); - if (extraBits) { - BIT_addBits(&blockStream, sequences[n].offset, extraBits); - BIT_flushBits(&blockStream); /* (7)*/ - } - BIT_addBits(&blockStream, sequences[n].offset >> extraBits, - ofBits - extraBits); /* 31 */ - } else { - BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */ + if (stats.Offtype == set_compressed) + stats.lastCountSize = countSize; + op += countSize; + assert(op <= oend); + } } + /* build CTable for MatchLengths */ + { unsigned max = MaxML; + size_t const mostFrequent = HIST_countFast_wksp( + countWorkspace, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ + DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op)); + nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode; + stats.MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode, + countWorkspace, max, mostFrequent, nbSeq, + MLFSELog, prevEntropy->matchlengthCTable, + ML_defaultNorm, 
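/* Editorial sketch, not part of the patch: the long-offset split used by the
 * removed ZSTD_encodeSequences_body(). Assuming a 32-bit target, where
 * STREAM_ACCUMULATOR_MIN is 25, at most 24 offset bits fit per flush, so a
 * wider offset is emitted as extraBits low bits, a flush, then the rest. */
#include <stdint.h>

#define SKETCH_ACC_MIN 25u

static void sketch_splitOffset(uint32_t offset, unsigned ofBits,
                               uint32_t* low,  unsigned* lowBits,
                               uint32_t* high, unsigned* highBits)
{
    unsigned const extraBits =
        ofBits > (SKETCH_ACC_MIN - 1) ? ofBits - (SKETCH_ACC_MIN - 1) : 0;
    *lowBits  = extraBits;                        /* written, then flushed */
    *low      = offset & ((1u << extraBits) - 1);
    *highBits = ofBits - extraBits;               /* at most 24 bits */
    *high     = offset >> extraBits;
}
/* ofBits == 30 gives extraBits == 6: 6 bits, flush, then the remaining 24. */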
ML_defaultNormLog, + ZSTD_defaultAllowed, strategy); + assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ + { size_t const countSize = ZSTD_buildCTable( + op, (size_t)(oend - op), + CTable_MatchLength, MLFSELog, (SymbolEncodingType_e)stats.MLtype, + countWorkspace, max, mlCodeTable, nbSeq, + ML_defaultNorm, ML_defaultNormLog, MaxML, + prevEntropy->matchlengthCTable, + sizeof(prevEntropy->matchlengthCTable), + entropyWorkspace, entropyWkspSize); + if (ZSTD_isError(countSize)) { + DEBUGLOG(3, "ZSTD_buildCTable for MatchLengths failed"); + stats.size = countSize; + return stats; } - BIT_flushBits(&blockStream); /* (7)*/ - DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr)); + if (stats.MLtype == set_compressed) + stats.lastCountSize = countSize; + op += countSize; + assert(op <= oend); } } - - DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog); - FSE_flushCState(&blockStream, &stateMatchLength); - DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog); - FSE_flushCState(&blockStream, &stateOffsetBits); - DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog); - FSE_flushCState(&blockStream, &stateLitLength); - - { size_t const streamSize = BIT_closeCStream(&blockStream); - RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space"); - return streamSize; - } -} - -static size_t -ZSTD_encodeSequences_default( - void* dst, size_t dstCapacity, - FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, - FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, - FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, - seqDef const* sequences, size_t nbSeq, int longOffsets) -{ - return ZSTD_encodeSequences_body(dst, dstCapacity, - CTable_MatchLength, mlCodeTable, - CTable_OffsetBits, ofCodeTable, - CTable_LitLength, llCodeTable, - sequences, nbSeq, longOffsets); -} - - -#if DYNAMIC_BMI2 - -static TARGET_ATTRIBUTE("bmi2") size_t -ZSTD_encodeSequences_bmi2( - void* dst, size_t dstCapacity, - FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, - FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, - FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, - seqDef const* sequences, size_t nbSeq, int longOffsets) -{ - return ZSTD_encodeSequences_body(dst, dstCapacity, - CTable_MatchLength, mlCodeTable, - CTable_OffsetBits, ofCodeTable, - CTable_LitLength, llCodeTable, - sequences, nbSeq, longOffsets); -} - -#endif - -static size_t ZSTD_encodeSequences( - void* dst, size_t dstCapacity, - FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, - FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, - FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, - seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2) -{ - DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity); -#if DYNAMIC_BMI2 - if (bmi2) { - return ZSTD_encodeSequences_bmi2(dst, dstCapacity, - CTable_MatchLength, mlCodeTable, - CTable_OffsetBits, ofCodeTable, - CTable_LitLength, llCodeTable, - sequences, nbSeq, longOffsets); - } -#endif - (void)bmi2; - return ZSTD_encodeSequences_default(dst, dstCapacity, - CTable_MatchLength, mlCodeTable, - CTable_OffsetBits, ofCodeTable, - CTable_LitLength, llCodeTable, - sequences, nbSeq, longOffsets); -} - -static int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* 
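/* Editorial sketch, not part of the patch: the DYNAMIC_BMI2 idiom visible in
 * the removed code above, reduced to its shape. One force-inlined body is
 * instantiated twice; the bmi2 clone only adds a target attribute, and a
 * runtime flag picks a variant. All names here are hypothetical. */
#include <stddef.h>

static size_t sketch_body(size_t x) { return x * 2; }   /* shared body */

static size_t sketch_default(size_t x) { return sketch_body(x); }

#if defined(__GNUC__) && defined(__x86_64__)
__attribute__((target("bmi2")))
static size_t sketch_bmi2(size_t x) { return sketch_body(x); }
#endif

static size_t sketch_dispatch(size_t x, int bmi2)
{
#if defined(__GNUC__) && defined(__x86_64__)
    if (bmi2) return sketch_bmi2(x);
#endif
    (void)bmi2;
    return sketch_default(x);
}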
cctxParams) -{ - switch (cctxParams->literalCompressionMode) { - case ZSTD_lcm_huffman: - return 0; - case ZSTD_lcm_uncompressed: - return 1; - default: - assert(0 /* impossible: pre-validated */); - /* fall-through */ - case ZSTD_lcm_auto: - return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0); - } + stats.size = (size_t)(op-ostart); + return stats; } -/* ZSTD_compressSequences_internal(): - * actually compresses both literals and sequences */ +/* ZSTD_entropyCompressSeqStore_internal(): + * compresses both literals and sequences + * Returns compressed size of block, or a zstd error. + */ +#define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20 MEM_STATIC size_t -ZSTD_compressSequences_internal(seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - void* dst, size_t dstCapacity, - void* workspace, size_t wkspSize, - const int bmi2) +ZSTD_entropyCompressSeqStore_internal( + void* dst, size_t dstCapacity, + const void* literals, size_t litSize, + const SeqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + void* entropyWorkspace, size_t entropyWkspSize, + const int bmi2) { - const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN; ZSTD_strategy const strategy = cctxParams->cParams.strategy; - unsigned count[MaxSeq+1]; + unsigned* count = (unsigned*)entropyWorkspace; FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable; FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable; FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable; - U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */ - const seqDef* const sequences = seqStorePtr->sequencesStart; + const SeqDef* const sequences = seqStorePtr->sequencesStart; + const size_t nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); const BYTE* const ofCodeTable = seqStorePtr->ofCode; const BYTE* const llCodeTable = seqStorePtr->llCode; const BYTE* const mlCodeTable = seqStorePtr->mlCode; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart; - size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; - BYTE* seqHead; - BYTE* lastNCount = NULL; + size_t lastCountSize; + int longOffsets = 0; + + entropyWorkspace = count + (MaxSeq + 1); + entropyWkspSize -= (MaxSeq + 1) * sizeof(*count); + DEBUGLOG(5, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu, dstCapacity=%zu)", nbSeq, dstCapacity); ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<= HUF_WORKSPACE_SIZE); /* Compress literals */ - { const BYTE* const literals = seqStorePtr->litStart; - size_t const litSize = seqStorePtr->lit - literals; + { size_t const numSequences = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + /* Base suspicion of uncompressibility on ratio of literals to sequences */ + int const suspectUncompressible = (numSequences == 0) || (litSize / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO); + size_t const cSize = ZSTD_compressLiterals( - &prevEntropy->huf, &nextEntropy->huf, - cctxParams->cParams.strategy, - ZSTD_disableLiteralsCompression(cctxParams), op, dstCapacity, literals, litSize, - workspace, wkspSize, - bmi2); - FORWARD_IF_ERROR(cSize); - assert(cSize <= dstCapacity); + entropyWorkspace, entropyWkspSize, + &prevEntropy->huf, &nextEntropy->huf, + cctxParams->cParams.strategy, + 
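/* Editorial sketch, not part of the patch: the ZSTD_lcm_auto rule from the
 * removed ZSTD_disableLiteralsCompression() above, in isolation. Under
 * "auto", literals stay uncompressed only for ZSTD_fast with a non-zero
 * targetLength, i.e. the negative compression levels. */
typedef enum { sk_lcm_auto, sk_lcm_huffman, sk_lcm_uncompressed } sk_lcm_e;

static int sketch_literalsDisabled(sk_lcm_e mode,
                                   int strategy /* ZSTD_fast == 1 */,
                                   unsigned targetLength)
{
    if (mode == sk_lcm_huffman)      return 0;
    if (mode == sk_lcm_uncompressed) return 1;
    return (strategy == 1) && (targetLength > 0);   /* sk_lcm_auto */
}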
ZSTD_literalsCompressionIsDisabled(cctxParams), + suspectUncompressible, bmi2); + FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed"); + assert(cSize <= dstCapacity); op += cSize; } /* Sequences Header */ RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/, - dstSize_tooSmall); - if (nbSeq < 0x7F) + dstSize_tooSmall, "Can't fit seq hdr in output buf!"); + if (nbSeq < 128) { *op++ = (BYTE)nbSeq; - else if (nbSeq < LONGNBSEQ) - op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2; - else - op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3; + } else if (nbSeq < LONGNBSEQ) { + op[0] = (BYTE)((nbSeq>>8) + 0x80); + op[1] = (BYTE)nbSeq; + op+=2; + } else { + op[0]=0xFF; + MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)); + op+=3; + } + assert(op <= oend); if (nbSeq==0) { /* Copy the old tables over as if we repeated them */ - memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse)); - return op - ostart; + ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse)); + return (size_t)(op - ostart); + } + { BYTE* const seqHead = op++; + /* build stats for sequences */ + const ZSTD_symbolEncodingTypeStats_t stats = + ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq, + &prevEntropy->fse, &nextEntropy->fse, + op, oend, + strategy, count, + entropyWorkspace, entropyWkspSize); + FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!"); + *seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2)); + lastCountSize = stats.lastCountSize; + op += stats.size; + longOffsets = stats.longOffsets; } - - /* seqHead : flags for FSE encoding type */ - seqHead = op++; - - /* convert length/distances into codes */ - ZSTD_seqToCodes(seqStorePtr); - /* build CTable for Literal Lengths */ - { unsigned max = MaxLL; - size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ - DEBUGLOG(5, "Building LL table"); - nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode; - LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode, - count, max, mostFrequent, nbSeq, - LLFSELog, prevEntropy->fse.litlengthCTable, - LL_defaultNorm, LL_defaultNormLog, - ZSTD_defaultAllowed, strategy); - assert(set_basic < set_compressed && set_rle < set_compressed); - assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ - { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype, - count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL, - prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable), - workspace, wkspSize); - FORWARD_IF_ERROR(countSize); - if (LLtype == set_compressed) - lastNCount = op; - op += countSize; - } } - /* build CTable for Offsets */ - { unsigned max = MaxOff; - size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ - /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ - ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? 
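/* Editorial sketch, not part of the patch: the sequences-count header written
 * just above, with LONGNBSEQ == 0x7F00 as in the zstd sources. */
#include <stdint.h>
#include <stddef.h>

#define SKETCH_LONGNBSEQ 0x7F00

static size_t sketch_writeNbSeq(uint8_t* op, size_t nbSeq)
{
    if (nbSeq < 128) {                  /* 1 byte */
        op[0] = (uint8_t)nbSeq;
        return 1;
    }
    if (nbSeq < SKETCH_LONGNBSEQ) {     /* 2 bytes, high byte flagged 0x80 */
        op[0] = (uint8_t)((nbSeq >> 8) + 0x80);
        op[1] = (uint8_t)nbSeq;
        return 2;
    }
    op[0] = 0xFF;                       /* 3 bytes: 0xFF + LE16(nbSeq-0x7F00) */
    op[1] = (uint8_t)(nbSeq - SKETCH_LONGNBSEQ);
    op[2] = (uint8_t)((nbSeq - SKETCH_LONGNBSEQ) >> 8);
    return 3;
}
/* nbSeq == 300 encodes as {0x81, 0x2C}: (300>>8)+0x80 and 300&0xFF. */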
ZSTD_defaultAllowed : ZSTD_defaultDisallowed; - DEBUGLOG(5, "Building OF table"); - nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode; - Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode, - count, max, mostFrequent, nbSeq, - OffFSELog, prevEntropy->fse.offcodeCTable, - OF_defaultNorm, OF_defaultNormLog, - defaultPolicy, strategy); - assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */ - { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype, - count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, - prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable), - workspace, wkspSize); - FORWARD_IF_ERROR(countSize); - if (Offtype == set_compressed) - lastNCount = op; - op += countSize; - } } - /* build CTable for MatchLengths */ - { unsigned max = MaxML; - size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ - DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op)); - nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode; - MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode, - count, max, mostFrequent, nbSeq, - MLFSELog, prevEntropy->fse.matchlengthCTable, - ML_defaultNorm, ML_defaultNormLog, - ZSTD_defaultAllowed, strategy); - assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ - { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype, - count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML, - prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable), - workspace, wkspSize); - FORWARD_IF_ERROR(countSize); - if (MLtype == set_compressed) - lastNCount = op; - op += countSize; - } } - - *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); { size_t const bitstreamSize = ZSTD_encodeSequences( - op, oend - op, + op, (size_t)(oend - op), CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets, bmi2); - FORWARD_IF_ERROR(bitstreamSize); + FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed"); op += bitstreamSize; + assert(op <= oend); /* zstd versions <= 1.3.4 mistakenly report corruption when * FSE_readNCount() receives a buffer < 4 bytes. * Fixed by https://github.com/facebook/zstd/pull/1146. @@ -2613,9 +3005,9 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr, * In this exceedingly rare case, we will simply emit an uncompressed * block, since it isn't worth optimizing. 
*/ - if (lastNCount && (op - lastNCount) < 4) { - /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */ - assert(op - lastNCount == 3); + if (lastCountSize && (lastCountSize + bitstreamSize) < 4) { + /* lastCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */ + assert(lastCountSize + bitstreamSize == 3); DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by " "emitting an uncompressed block."); return 0; @@ -2623,123 +3015,290 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr, } DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart)); - return op - ostart; + return (size_t)(op - ostart); } -MEM_STATIC size_t -ZSTD_compressSequences(seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - void* dst, size_t dstCapacity, - size_t srcSize, - void* workspace, size_t wkspSize, - int bmi2) -{ - size_t const cSize = ZSTD_compressSequences_internal( - seqStorePtr, prevEntropy, nextEntropy, cctxParams, +static size_t +ZSTD_entropyCompressSeqStore_wExtLitBuffer( + void* dst, size_t dstCapacity, + const void* literals, size_t litSize, + size_t blockSize, + const SeqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + void* entropyWorkspace, size_t entropyWkspSize, + int bmi2) +{ + size_t const cSize = ZSTD_entropyCompressSeqStore_internal( dst, dstCapacity, - workspace, wkspSize, bmi2); + literals, litSize, + seqStorePtr, prevEntropy, nextEntropy, cctxParams, + entropyWorkspace, entropyWkspSize, bmi2); if (cSize == 0) return 0; /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block. * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block. */ - if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) + if ((cSize == ERROR(dstSize_tooSmall)) & (blockSize <= dstCapacity)) { + DEBUGLOG(4, "not enough dstCapacity (%zu) for ZSTD_entropyCompressSeqStore_internal()=> do not compress block", dstCapacity); return 0; /* block not compressed */ - FORWARD_IF_ERROR(cSize); + } + FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed"); /* Check compressibility */ - { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy); + { size_t const maxCSize = blockSize - ZSTD_minGain(blockSize, cctxParams->cParams.strategy); if (cSize >= maxCSize) return 0; /* block not compressed */ } - + DEBUGLOG(5, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize); + /* libzstd decoder before > v1.5.4 is not compatible with compressed blocks of size ZSTD_BLOCKSIZE_MAX exactly. + * This restriction is indirectly already fulfilled by respecting ZSTD_minGain() condition above. 
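/* Editorial sketch, not part of the patch: the compressibility gate applied
 * above. A block is emitted compressed only if it undercuts the raw size by
 * at least ZSTD_minGain(); otherwise the caller falls back to a raw block,
 * which also keeps cSize strictly below ZSTD_BLOCKSIZE_MAX. */
#include <stddef.h>

static size_t sketch_minGain(size_t srcSize, int strat)
{
    unsigned const minlog =
        (strat >= 8 /* ZSTD_btultra */) ? (unsigned)strat - 1 : 6;
    return (srcSize >> minlog) + 2;
}

static int sketch_keepCompressed(size_t blockSize, size_t cSize, int strat)
{
    size_t const maxCSize = blockSize - sketch_minGain(blockSize, strat);
    return cSize < maxCSize;
}
/* Example: a 100000-byte block at strategy 9 must save at least
 * (100000 >> 8) + 2 == 392 bytes to stay compressed. */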
+ */ + assert(cSize < ZSTD_BLOCKSIZE_MAX); return cSize; } +static size_t +ZSTD_entropyCompressSeqStore( + const SeqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + void* dst, size_t dstCapacity, + size_t srcSize, + void* entropyWorkspace, size_t entropyWkspSize, + int bmi2) +{ + return ZSTD_entropyCompressSeqStore_wExtLitBuffer( + dst, dstCapacity, + seqStorePtr->litStart, (size_t)(seqStorePtr->lit - seqStorePtr->litStart), + srcSize, + seqStorePtr, + prevEntropy, nextEntropy, + cctxParams, + entropyWorkspace, entropyWkspSize, + bmi2); +} + /* ZSTD_selectBlockCompressor() : * Not static, but internal use only (used by long distance matcher) * assumption : strat is a valid strategy */ -ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode) +ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_ParamSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode) { - static const ZSTD_blockCompressor blockCompressor[3][ZSTD_STRATEGY_MAX+1] = { + static const ZSTD_BlockCompressor_f blockCompressor[4][ZSTD_STRATEGY_MAX+1] = { { ZSTD_compressBlock_fast /* default for 0 */, ZSTD_compressBlock_fast, - ZSTD_compressBlock_doubleFast, - ZSTD_compressBlock_greedy, - ZSTD_compressBlock_lazy, - ZSTD_compressBlock_lazy2, - ZSTD_compressBlock_btlazy2, - ZSTD_compressBlock_btopt, - ZSTD_compressBlock_btultra, - ZSTD_compressBlock_btultra2 }, + ZSTD_COMPRESSBLOCK_DOUBLEFAST, + ZSTD_COMPRESSBLOCK_GREEDY, + ZSTD_COMPRESSBLOCK_LAZY, + ZSTD_COMPRESSBLOCK_LAZY2, + ZSTD_COMPRESSBLOCK_BTLAZY2, + ZSTD_COMPRESSBLOCK_BTOPT, + ZSTD_COMPRESSBLOCK_BTULTRA, + ZSTD_COMPRESSBLOCK_BTULTRA2 + }, { ZSTD_compressBlock_fast_extDict /* default for 0 */, ZSTD_compressBlock_fast_extDict, - ZSTD_compressBlock_doubleFast_extDict, - ZSTD_compressBlock_greedy_extDict, - ZSTD_compressBlock_lazy_extDict, - ZSTD_compressBlock_lazy2_extDict, - ZSTD_compressBlock_btlazy2_extDict, - ZSTD_compressBlock_btopt_extDict, - ZSTD_compressBlock_btultra_extDict, - ZSTD_compressBlock_btultra_extDict }, + ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT, + ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT, + ZSTD_COMPRESSBLOCK_LAZY_EXTDICT, + ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT, + ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT, + ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT, + ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT, + ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT + }, { ZSTD_compressBlock_fast_dictMatchState /* default for 0 */, ZSTD_compressBlock_fast_dictMatchState, - ZSTD_compressBlock_doubleFast_dictMatchState, - ZSTD_compressBlock_greedy_dictMatchState, - ZSTD_compressBlock_lazy_dictMatchState, - ZSTD_compressBlock_lazy2_dictMatchState, - ZSTD_compressBlock_btlazy2_dictMatchState, - ZSTD_compressBlock_btopt_dictMatchState, - ZSTD_compressBlock_btultra_dictMatchState, - ZSTD_compressBlock_btultra_dictMatchState } + ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE, + ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE, + ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE, + ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE, + ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE, + ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE, + ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE, + ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE + }, + { NULL /* default for 0 */, + NULL, + NULL, + ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH, + ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH, + ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH, + NULL, + NULL, + NULL, + NULL } }; - ZSTD_blockCompressor selectedCompressor; + ZSTD_BlockCompressor_f 
selectedCompressor; ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1); - assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat)); - selectedCompressor = blockCompressor[(int)dictMode][(int)strat]; + assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat)); + DEBUGLOG(5, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder); + if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) { + static const ZSTD_BlockCompressor_f rowBasedBlockCompressors[4][3] = { + { + ZSTD_COMPRESSBLOCK_GREEDY_ROW, + ZSTD_COMPRESSBLOCK_LAZY_ROW, + ZSTD_COMPRESSBLOCK_LAZY2_ROW + }, + { + ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW, + ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW, + ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW + }, + { + ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW, + ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW, + ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW + }, + { + ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW, + ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW, + ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW + } + }; + DEBUGLOG(5, "Selecting a row-based matchfinder"); + assert(useRowMatchFinder != ZSTD_ps_auto); + selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy]; + } else { + selectedCompressor = blockCompressor[(int)dictMode][(int)strat]; + } assert(selectedCompressor != NULL); return selectedCompressor; } -static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr, +static void ZSTD_storeLastLiterals(SeqStore_t* seqStorePtr, const BYTE* anchor, size_t lastLLSize) { - memcpy(seqStorePtr->lit, anchor, lastLLSize); + ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize); seqStorePtr->lit += lastLLSize; } -void ZSTD_resetSeqStore(seqStore_t* ssPtr) +void ZSTD_resetSeqStore(SeqStore_t* ssPtr) { ssPtr->lit = ssPtr->litStart; ssPtr->sequences = ssPtr->sequencesStart; - ssPtr->longLengthID = 0; + ssPtr->longLengthType = ZSTD_llt_none; +} + +/* ZSTD_postProcessSequenceProducerResult() : + * Validates and post-processes sequences obtained through the external matchfinder API: + * - Checks whether nbExternalSeqs represents an error condition. + * - Appends a block delimiter to outSeqs if one is not already present. + * See zstd.h for context regarding block delimiters. + * Returns the number of sequences after post-processing, or an error code. */ +static size_t ZSTD_postProcessSequenceProducerResult( + ZSTD_Sequence* outSeqs, size_t nbExternalSeqs, size_t outSeqsCapacity, size_t srcSize +) { + RETURN_ERROR_IF( + nbExternalSeqs > outSeqsCapacity, + sequenceProducer_failed, + "External sequence producer returned error code %lu", + (unsigned long)nbExternalSeqs + ); + + RETURN_ERROR_IF( + nbExternalSeqs == 0 && srcSize > 0, + sequenceProducer_failed, + "Got zero sequences from external sequence producer for a non-empty src buffer!" + ); + + if (srcSize == 0) { + ZSTD_memset(&outSeqs[0], 0, sizeof(ZSTD_Sequence)); + return 1; + } + + { + ZSTD_Sequence const lastSeq = outSeqs[nbExternalSeqs - 1]; + + /* We can return early if lastSeq is already a block delimiter. */ + if (lastSeq.offset == 0 && lastSeq.matchLength == 0) { + return nbExternalSeqs; + } + + /* This error condition is only possible if the external matchfinder + * produced an invalid parse, by definition of ZSTD_sequenceBound(). */ + RETURN_ERROR_IF( + nbExternalSeqs == outSeqsCapacity, + sequenceProducer_failed, + "nbExternalSeqs == outSeqsCapacity but lastSeq is not a block delimiter!" + ); + + /* lastSeq is not a block delimiter, so we need to append one. 
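/* Editorial sketch, not part of the patch: what the delimiter checked for in
 * ZSTD_postProcessSequenceProducerResult() above looks like. A sequence with
 * offset == 0 and matchLength == 0 terminates a block; litLength then covers
 * any trailing literals. The local struct mirrors zstd.h's ZSTD_Sequence. */
typedef struct { unsigned offset, litLength, matchLength, rep; } sketch_Seq;

static sketch_Seq sketch_blockDelimiter(unsigned trailingLiterals)
{
    sketch_Seq s;
    s.offset      = 0;   /* delimiter marker */
    s.matchLength = 0;   /* delimiter marker */
    s.litLength   = trailingLiterals;
    s.rep         = 0;
    return s;
}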
*/ + ZSTD_memset(&outSeqs[nbExternalSeqs], 0, sizeof(ZSTD_Sequence)); + return nbExternalSeqs + 1; + } } -static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) +/* ZSTD_fastSequenceLengthSum() : + * Returns sum(litLen) + sum(matchLen) + lastLits for *seqBuf*. + * Similar to another function in zstd_compress.c (determine_blockSize), + * except it doesn't check for a block delimiter to end summation. + * Removing the early exit allows the compiler to auto-vectorize (https://godbolt.org/z/cY1cajz9P). + * This function can be deleted and replaced by determine_blockSize after we resolve issue #3456. */ +static size_t ZSTD_fastSequenceLengthSum(ZSTD_Sequence const* seqBuf, size_t seqBufSize) { + size_t matchLenSum, litLenSum, i; + matchLenSum = 0; + litLenSum = 0; + for (i = 0; i < seqBufSize; i++) { + litLenSum += seqBuf[i].litLength; + matchLenSum += seqBuf[i].matchLength; + } + return litLenSum + matchLenSum; +} + +/** + * Function to validate sequences produced by a block compressor. + */ +static void ZSTD_validateSeqStore(const SeqStore_t* seqStore, const ZSTD_compressionParameters* cParams) +{ +#if DEBUGLEVEL >= 1 + const SeqDef* seq = seqStore->sequencesStart; + const SeqDef* const seqEnd = seqStore->sequences; + size_t const matchLenLowerBound = cParams->minMatch == 3 ? 3 : 4; + for (; seq < seqEnd; ++seq) { + const ZSTD_SequenceLength seqLength = ZSTD_getSequenceLength(seqStore, seq); + assert(seqLength.matchLength >= matchLenLowerBound); + (void)seqLength; + (void)matchLenLowerBound; + } +#else + (void)seqStore; + (void)cParams; +#endif +} + +static size_t +ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx* cctx, + ZSTD_SequencePosition* seqPos, + const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, + const void* src, size_t blockSize, + ZSTD_ParamSwitch_e externalRepSearch); + +typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_BuildSeqStore_e; + +static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) { - ZSTD_matchState_t* const ms = &zc->blockState.matchState; - size_t cSize; - DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", - (unsigned)dstCapacity, (unsigned)ms->window.dictLimit, (unsigned)ms->nextToUpdate); + ZSTD_MatchState_t* const ms = &zc->blockState.matchState; + DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize); assert(srcSize <= ZSTD_BLOCKSIZE_MAX); - /* Assert that we have correctly flushed the ctx params into the ms's copy */ ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams); - - if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) { - ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch); - cSize = 0; - goto out; /* don't even attempt compression below a certain srcSize */ + /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding + * additional 1. 
We need to revisit and change this logic to be more consistent */ + if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) { + if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) { + ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize); + } else { + ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch); + } + return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */ } ZSTD_resetSeqStore(&(zc->seqStore)); /* required for optimal parser to read stats from dictionary */ ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy; /* tell the optimal parser how we expect to compress literals */ ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode; - /* a gap between an attached dict and the current window is not safe, * they must remain adjacent, * and when that stops being the case, the dict must be unset */ @@ -2748,10 +3307,10 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, /* limited update after a very long match */ { const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; - const U32 current = (U32)(istart-base); + const U32 curr = (U32)(istart-base); if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */ - if (current > ms->nextToUpdate + 384) - ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384)); + if (curr > ms->nextToUpdate + 384) + ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384)); } /* select and store sequences */ @@ -2762,1506 +3321,4864 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i]; } if (zc->externSeqStore.pos < zc->externSeqStore.size) { - assert(!zc->appliedParams.ldmParams.enableLdm); + assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable); + + /* External matchfinder + LDM is technically possible, just not implemented yet. + * We need to revisit soon and implement it. */ + RETURN_ERROR_IF( + ZSTD_hasExtSeqProd(&zc->appliedParams), + parameter_combination_unsupported, + "Long-distance matching with external sequence producer enabled is not currently supported." + ); + /* Updates ldmSeqStore.pos */ lastLLSize = ZSTD_ldm_blockCompress(&zc->externSeqStore, ms, &zc->seqStore, zc->blockState.nextCBlock->rep, + zc->appliedParams.useRowMatchFinder, src, srcSize); assert(zc->externSeqStore.pos <= zc->externSeqStore.size); - } else if (zc->appliedParams.ldmParams.enableLdm) { - rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0}; + } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) { + RawSeqStore_t ldmSeqStore = kNullRawSeqStore; + + /* External matchfinder + LDM is technically possible, just not implemented yet. + * We need to revisit soon and implement it. */ + RETURN_ERROR_IF( + ZSTD_hasExtSeqProd(&zc->appliedParams), + parameter_combination_unsupported, + "Long-distance matching with external sequence producer enabled is not currently supported." 
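/* Editorial sketch, not part of the patch: the "limited update after a very
 * long match" clamp above. If the match finder fell more than 384 positions
 * behind the current position, it jumps forward so that at most 192
 * positions remain to be inserted; the skipped positions never enter the
 * hash/chain tables. */
typedef unsigned sketch_U32;

static sketch_U32 sketch_clampNextToUpdate(sketch_U32 curr,
                                           sketch_U32 nextToUpdate)
{
    if (curr > nextToUpdate + 384) {
        sketch_U32 const behind    = curr - nextToUpdate - 384;
        sketch_U32 const remaining = behind < 192 ? behind : 192;
        nextToUpdate = curr - remaining;
    }
    return nextToUpdate;
}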
+ ); ldmSeqStore.seq = zc->ldmSequences; ldmSeqStore.capacity = zc->maxNbLdmSequences; /* Updates ldmSeqStore.size */ FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore, &zc->appliedParams.ldmParams, - src, srcSize)); + src, srcSize), ""); /* Updates ldmSeqStore.pos */ lastLLSize = ZSTD_ldm_blockCompress(&ldmSeqStore, ms, &zc->seqStore, zc->blockState.nextCBlock->rep, + zc->appliedParams.useRowMatchFinder, src, srcSize); assert(ldmSeqStore.pos == ldmSeqStore.size); - } else { /* not long range mode */ - ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode); + } else if (ZSTD_hasExtSeqProd(&zc->appliedParams)) { + assert( + zc->extSeqBufCapacity >= ZSTD_sequenceBound(srcSize) + ); + assert(zc->appliedParams.extSeqProdFunc != NULL); + + { U32 const windowSize = (U32)1 << zc->appliedParams.cParams.windowLog; + + size_t const nbExternalSeqs = (zc->appliedParams.extSeqProdFunc)( + zc->appliedParams.extSeqProdState, + zc->extSeqBuf, + zc->extSeqBufCapacity, + src, srcSize, + NULL, 0, /* dict and dictSize, currently not supported */ + zc->appliedParams.compressionLevel, + windowSize + ); + + size_t const nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult( + zc->extSeqBuf, + nbExternalSeqs, + zc->extSeqBufCapacity, + srcSize + ); + + /* Return early if there is no error, since we don't need to worry about last literals */ + if (!ZSTD_isError(nbPostProcessedSeqs)) { + ZSTD_SequencePosition seqPos = {0,0,0}; + size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->extSeqBuf, nbPostProcessedSeqs); + RETURN_ERROR_IF(seqLenSum > srcSize, externalSequences_invalid, "External sequences imply too large a block!"); + FORWARD_IF_ERROR( + ZSTD_transferSequences_wBlockDelim( + zc, &seqPos, + zc->extSeqBuf, nbPostProcessedSeqs, + src, srcSize, + zc->appliedParams.searchForExternalRepcodes + ), + "Failed to copy external sequences to seqStore!" + ); + ms->ldmSeqStore = NULL; + DEBUGLOG(5, "Copied %lu sequences from external sequence producer to internal seqStore.", (unsigned long)nbExternalSeqs); + return ZSTDbss_compress; + } + + /* Propagate the error if fallback is disabled */ + if (!zc->appliedParams.enableMatchFinderFallback) { + return nbPostProcessedSeqs; + } + + /* Fallback to software matchfinder */ + { ZSTD_BlockCompressor_f const blockCompressor = + ZSTD_selectBlockCompressor( + zc->appliedParams.cParams.strategy, + zc->appliedParams.useRowMatchFinder, + dictMode); + ms->ldmSeqStore = NULL; + DEBUGLOG( + 5, + "External sequence producer returned error code %lu. 
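/* Editorial sketch, not part of the patch: the caller-side shape of the
 * external sequence producer hook exercised above. Assumes zstd >= 1.5.5
 * built with ZSTD_STATIC_LINKING_ONLY; sketch_producer is hypothetical, and
 * returning ZSTD_SEQUENCE_PRODUCER_ERROR exercises the fallback path. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t sketch_producer(void* state,
                              ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
                              const void* src, size_t srcSize,
                              const void* dict, size_t dictSize,
                              int compressionLevel, size_t windowSize)
{
    (void)state; (void)outSeqs; (void)outSeqsCapacity; (void)src; (void)srcSize;
    (void)dict; (void)dictSize; (void)compressionLevel; (void)windowSize;
    return ZSTD_SEQUENCE_PRODUCER_ERROR;   /* "no parse produced" */
}

/* Usage:
 *   ZSTD_registerSequenceProducer(cctx, NULL, sketch_producer);
 *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableSeqProducerFallback, 1);
 */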
Falling back to internal parser.", + (unsigned long)nbExternalSeqs + ); + lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); + } } + } else { /* not long range mode and no external matchfinder */ + ZSTD_BlockCompressor_f const blockCompressor = ZSTD_selectBlockCompressor( + zc->appliedParams.cParams.strategy, + zc->appliedParams.useRowMatchFinder, + dictMode); + ms->ldmSeqStore = NULL; lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); } { const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize; ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize); } } - - /* encode sequences and literals */ - cSize = ZSTD_compressSequences(&zc->seqStore, - &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, - &zc->appliedParams, - dst, dstCapacity, - srcSize, - zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */, - zc->bmi2); - -out: - if (!ZSTD_isError(cSize) && cSize != 0) { - /* confirm repcodes and entropy tables when emitting a compressed block */ - ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock; - zc->blockState.prevCBlock = zc->blockState.nextCBlock; - zc->blockState.nextCBlock = tmp; - } - /* We check that dictionaries have offset codes available for the first - * block. After the first block, the offcode table might not have large - * enough codes to represent the offsets in the data. - */ - if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; - - return cSize; + ZSTD_validateSeqStore(&zc->seqStore, &zc->appliedParams.cParams); + return ZSTDbss_compress; } - -/*! ZSTD_compress_frameChunk() : -* Compress a chunk of data into one or multiple blocks. -* All blocks will be terminated, all input will be consumed. -* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. -* Frame is supposed already started (header already produced) -* @return : compressed size, or an error code -*/ -static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - U32 lastFrameChunk) +static size_t ZSTD_copyBlockSequences(SeqCollector* seqCollector, const SeqStore_t* seqStore, const U32 prevRepcodes[ZSTD_REP_NUM]) { - size_t blockSize = cctx->blockSize; - size_t remaining = srcSize; - const BYTE* ip = (const BYTE*)src; - BYTE* const ostart = (BYTE*)dst; - BYTE* op = ostart; - U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog; - assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX); - - DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize); - if (cctx->appliedParams.fParams.checksumFlag && srcSize) - XXH64_update(&cctx->xxhState, src, srcSize); + const SeqDef* inSeqs = seqStore->sequencesStart; + const size_t nbInSequences = (size_t)(seqStore->sequences - inSeqs); + const size_t nbInLiterals = (size_t)(seqStore->lit - seqStore->litStart); - while (remaining) { - ZSTD_matchState_t* const ms = &cctx->blockState.matchState; - U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); + ZSTD_Sequence* outSeqs = seqCollector->seqIndex == 0 ? 
seqCollector->seqStart : seqCollector->seqStart + seqCollector->seqIndex; + const size_t nbOutSequences = nbInSequences + 1; + size_t nbOutLiterals = 0; + Repcodes_t repcodes; + size_t i; - RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE, - dstSize_tooSmall, - "not enough space to store compressed block"); - if (remaining < blockSize) blockSize = remaining; - - if (ZSTD_window_needOverflowCorrection(ms->window, ip + blockSize)) { - U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy); - U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip); - ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30); - ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30); - ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31); - ZSTD_reduceIndex(cctx, correction); - if (ms->nextToUpdate < correction) ms->nextToUpdate = 0; - else ms->nextToUpdate -= correction; - /* invalidate dictionaries on overflow correction */ - ms->loadedDictEnd = 0; - ms->dictMatchState = NULL; + /* Bounds check that we have enough space for every input sequence + * and the block delimiter + */ + assert(seqCollector->seqIndex <= seqCollector->maxSequences); + RETURN_ERROR_IF( + nbOutSequences > (size_t)(seqCollector->maxSequences - seqCollector->seqIndex), + dstSize_tooSmall, + "Not enough space to copy sequences"); + + ZSTD_memcpy(&repcodes, prevRepcodes, sizeof(repcodes)); + for (i = 0; i < nbInSequences; ++i) { + U32 rawOffset; + outSeqs[i].litLength = inSeqs[i].litLength; + outSeqs[i].matchLength = inSeqs[i].mlBase + MINMATCH; + outSeqs[i].rep = 0; + + /* Handle the possible single length >= 64K + * There can only be one because we add MINMATCH to every match length, + * and blocks are at most 128K. + */ + if (i == seqStore->longLengthPos) { + if (seqStore->longLengthType == ZSTD_llt_literalLength) { + outSeqs[i].litLength += 0x10000; + } else if (seqStore->longLengthType == ZSTD_llt_matchLength) { + outSeqs[i].matchLength += 0x10000; + } } - ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); - - /* Ensure hash/chain table insertion resumes no sooner than lowlimit */ - if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit; - - { size_t cSize = ZSTD_compressBlock_internal(cctx, - op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize, - ip, blockSize); - FORWARD_IF_ERROR(cSize); - - if (cSize == 0) { /* block is not compressible */ - cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); - FORWARD_IF_ERROR(cSize); + /* Determine the raw offset given the offBase, which may be a repcode. 
*/ + if (OFFBASE_IS_REPCODE(inSeqs[i].offBase)) { + const U32 repcode = OFFBASE_TO_REPCODE(inSeqs[i].offBase); + assert(repcode > 0); + outSeqs[i].rep = repcode; + if (outSeqs[i].litLength != 0) { + rawOffset = repcodes.rep[repcode - 1]; } else { - U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); - MEM_writeLE24(op, cBlockHeader24); - cSize += ZSTD_blockHeaderSize; + if (repcode == 3) { + assert(repcodes.rep[0] > 1); + rawOffset = repcodes.rep[0] - 1; + } else { + rawOffset = repcodes.rep[repcode]; + } } + } else { + rawOffset = OFFBASE_TO_OFFSET(inSeqs[i].offBase); + } + outSeqs[i].offset = rawOffset; - ip += blockSize; - assert(remaining >= blockSize); - remaining -= blockSize; - op += cSize; - assert(dstCapacity >= cSize); - dstCapacity -= cSize; - DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u", - (unsigned)cSize); - } } - - if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending; - return (size_t)(op-ostart); -} + /* Update repcode history for the sequence */ + ZSTD_updateRep(repcodes.rep, + inSeqs[i].offBase, + inSeqs[i].litLength == 0); + nbOutLiterals += outSeqs[i].litLength; + } + /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0. + * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker + * for the block boundary, according to the API. + */ + assert(nbInLiterals >= nbOutLiterals); + { + const size_t lastLLSize = nbInLiterals - nbOutLiterals; + outSeqs[nbInSequences].litLength = (U32)lastLLSize; + outSeqs[nbInSequences].matchLength = 0; + outSeqs[nbInSequences].offset = 0; + assert(nbOutSequences == nbInSequences + 1); + } + seqCollector->seqIndex += nbOutSequences; + assert(seqCollector->seqIndex <= seqCollector->maxSequences); -static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, - ZSTD_CCtx_params params, U64 pledgedSrcSize, U32 dictID) -{ BYTE* const op = (BYTE*)dst; - U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */ - U32 const dictIDSizeCode = params.fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */ - U32 const checksumFlag = params.fParams.checksumFlag>0; - U32 const windowSize = (U32)1 << params.cParams.windowLog; - U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize); - BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3); - U32 const fcsCode = params.fParams.contentSizeFlag ? 
- (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */ - BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) ); - size_t pos=0; + return 0; +} - assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)); - RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall); - DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u", - !params.fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode); +size_t ZSTD_sequenceBound(size_t srcSize) { + const size_t maxNbSeq = (srcSize / ZSTD_MINMATCH_MIN) + 1; + const size_t maxNbDelims = (srcSize / ZSTD_BLOCKSIZE_MAX_MIN) + 1; + return maxNbSeq + maxNbDelims; +} - if (params.format == ZSTD_f_zstd1) { - MEM_writeLE32(dst, ZSTD_MAGICNUMBER); - pos = 4; +size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, + size_t outSeqsSize, const void* src, size_t srcSize) +{ + const size_t dstCapacity = ZSTD_compressBound(srcSize); + void* dst; /* Make C90 happy. */ + SeqCollector seqCollector; + { + int targetCBlockSize; + FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_targetCBlockSize, &targetCBlockSize), ""); + RETURN_ERROR_IF(targetCBlockSize != 0, parameter_unsupported, "targetCBlockSize != 0"); } - op[pos++] = frameHeaderDescriptionByte; - if (!singleSegment) op[pos++] = windowLogByte; - switch(dictIDSizeCode) { - default: assert(0); /* impossible */ - case 0 : break; - case 1 : op[pos] = (BYTE)(dictID); pos++; break; - case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break; - case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break; + int nbWorkers; + FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_nbWorkers, &nbWorkers), ""); + RETURN_ERROR_IF(nbWorkers != 0, parameter_unsupported, "nbWorkers != 0"); } - switch(fcsCode) + + dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem); + RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!"); + + seqCollector.collectSequences = 1; + seqCollector.seqStart = outSeqs; + seqCollector.seqIndex = 0; + seqCollector.maxSequences = outSeqsSize; + zc->seqCollector = seqCollector; + { - default: assert(0); /* impossible */ - case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break; - case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break; - case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break; - case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break; + const size_t ret = ZSTD_compress2(zc, dst, dstCapacity, src, srcSize); + ZSTD_customFree(dst, ZSTD_defaultCMem); + FORWARD_IF_ERROR(ret, "ZSTD_compress2 failed"); } - return pos; + assert(zc->seqCollector.seqIndex <= ZSTD_sequenceBound(srcSize)); + return zc->seqCollector.seqIndex; } -/* ZSTD_writeLastEmptyBlock() : - * output an empty Block with end-of-frame mark to complete a frame - * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h)) - * or an error code if `dstCapacity` is too small (stage != ZSTDcs_init, stage_wrong); - RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm, - parameter_unsupported); - cctx->externSeqStore.seq = seq; - cctx->externSeqStore.size = nbSeq; - cctx->externSeqStore.capacity = nbSeq; - cctx->externSeqStore.pos = 0; - return 0; -} + size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); + size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart); + return nbSeqs < 4 && nbLits < 10; 
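/* nb: a genuine single-byte run compresses into almost no sequences and almost no
 * literals, so "fewer than 4 sequences and fewer than 10 literals" is a cheap,
 * pessimistic pre-screen for RLE blocks; callers still confirm with a byte-level
 * ZSTD_isRLE() scan before actually emitting an RLE block (see the
 * ZSTD_maybeRLE()/ZSTD_isRLE() pairing further down in this patch). */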
+} -static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - U32 frame, U32 lastFrameChunk) +static void +ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs) { - ZSTD_matchState_t* const ms = &cctx->blockState.matchState; - size_t fhSize = 0; + ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock; + bs->prevCBlock = bs->nextCBlock; + bs->nextCBlock = tmp; +} - DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u", - cctx->stage, (unsigned)srcSize); - RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong, - "missing init (ZSTD_compressBegin)"); +/* Writes the block header */ +static void +writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock) +{ + U32 const cBlockHeader = cSize == 1 ? + lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) : + lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); + MEM_writeLE24(op, cBlockHeader); + DEBUGLOG(5, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock); +} - if (frame && (cctx->stage==ZSTDcs_init)) { - fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, - cctx->pledgedSrcSizePlusOne-1, cctx->dictID); - FORWARD_IF_ERROR(fhSize); - assert(fhSize <= dstCapacity); - dstCapacity -= fhSize; - dst = (char*)dst + fhSize; - cctx->stage = ZSTDcs_ongoing; - } +/** ZSTD_buildBlockEntropyStats_literals() : + * Builds entropy for the literals. + * Stores literals block type (raw, rle, compressed, repeat) and + * huffman description table to hufMetadata. + * Requires ENTROPY_WORKSPACE_SIZE workspace + * @return : size of huffman description table, or an error code + */ +static size_t +ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize, + const ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_hufCTablesMetadata_t* hufMetadata, + const int literalsCompressionIsDisabled, + void* workspace, size_t wkspSize, + int hufFlags) +{ + BYTE* const wkspStart = (BYTE*)workspace; + BYTE* const wkspEnd = wkspStart + wkspSize; + BYTE* const countWkspStart = wkspStart; + unsigned* const countWksp = (unsigned*)workspace; + const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned); + BYTE* const nodeWksp = countWkspStart + countWkspSize; + const size_t nodeWkspSize = (size_t)(wkspEnd - nodeWksp); + unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX; + unsigned huffLog = LitHufLog; + HUF_repeat repeat = prevHuf->repeatMode; + DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize); - if (!srcSize) return fhSize; /* do not generate an empty block if no input */ + /* Prepare nextEntropy assuming reusing the existing table */ + ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - if (!ZSTD_window_update(&ms->window, src, srcSize)) { - ms->nextToUpdate = ms->window.dictLimit; - } - if (cctx->appliedParams.ldmParams.enableLdm) { - ZSTD_window_update(&cctx->ldmState.window, src, srcSize); + if (literalsCompressionIsDisabled) { + DEBUGLOG(5, "set_basic - disabled"); + hufMetadata->hType = set_basic; + return 0; } - if (!frame) { - /* overflow check and correction for block mode */ - if (ZSTD_window_needOverflowCorrection(ms->window, (const char*)src + srcSize)) { - U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy); - U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, 1 << cctx->appliedParams.cParams.windowLog, src); - 
ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30); - ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30); - ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31); - ZSTD_reduceIndex(cctx, correction); - if (ms->nextToUpdate < correction) ms->nextToUpdate = 0; - else ms->nextToUpdate -= correction; - ms->loadedDictEnd = 0; - ms->dictMatchState = NULL; + /* small ? don't even attempt compression (speed opt) */ +#ifndef COMPRESS_LITERALS_SIZE_MIN +# define COMPRESS_LITERALS_SIZE_MIN 63 /* heuristic */ +#endif + { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN; + if (srcSize <= minLitSize) { + DEBUGLOG(5, "set_basic - too small"); + hufMetadata->hType = set_basic; + return 0; + } } + + /* Scan input and build symbol stats */ + { size_t const largest = + HIST_count_wksp (countWksp, &maxSymbolValue, + (const BYTE*)src, srcSize, + workspace, wkspSize); + FORWARD_IF_ERROR(largest, "HIST_count_wksp failed"); + if (largest == srcSize) { + /* only one literal symbol */ + DEBUGLOG(5, "set_rle"); + hufMetadata->hType = set_rle; + return 0; } + if (largest <= (srcSize >> 7)+4) { + /* heuristic: likely not compressible */ + DEBUGLOG(5, "set_basic - no gain"); + hufMetadata->hType = set_basic; + return 0; + } } + + /* Validate the previous Huffman table */ + if (repeat == HUF_repeat_check + && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) { + repeat = HUF_repeat_none; } - DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize); - { size_t const cSize = frame ? - ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) : - ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize); - FORWARD_IF_ERROR(cSize); - cctx->consumedSrcSize += srcSize; - cctx->producedCSize += (cSize + fhSize); - assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0)); - if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */ - ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1); - RETURN_ERROR_IF( - cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne, - srcSize_wrong, - "error : pledgedSrcSize = %u, while realSrcSize >= %u", - (unsigned)cctx->pledgedSrcSizePlusOne-1, - (unsigned)cctx->consumedSrcSize); + /* Build Huffman Tree */ + ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable)); + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, nodeWksp, nodeWkspSize, nextHuf->CTable, countWksp, hufFlags); + assert(huffLog <= LitHufLog); + { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp, + maxSymbolValue, huffLog, + nodeWksp, nodeWkspSize); + FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp"); + huffLog = (U32)maxBits; + } + { /* Build and write the CTable */ + size_t const newCSize = HUF_estimateCompressedSize( + (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue); + size_t const hSize = HUF_writeCTable_wksp( + hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer), + (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog, + nodeWksp, nodeWkspSize); + /* Check against repeating the previous CTable */ + if (repeat != HUF_repeat_none) { + size_t const oldCSize = HUF_estimateCompressedSize( + (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue); + if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) { + DEBUGLOG(5, "set_repeat - smaller"); + ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + hufMetadata->hType = set_repeat; + return 0; + } } + if 
(newCSize + hSize >= srcSize) { + DEBUGLOG(5, "set_basic - no gains"); + ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + hufMetadata->hType = set_basic; + return 0; } - return cSize + fhSize; + DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize); + hufMetadata->hType = set_compressed; + nextHuf->repeatMode = HUF_repeat_check; + return hSize; } } -size_t ZSTD_compressContinue (ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) + +/* ZSTD_buildDummySequencesStatistics(): + * Returns a ZSTD_symbolEncodingTypeStats_t with all encoding types as set_basic, + * and updates nextEntropy to the appropriate repeatMode. + */ +static ZSTD_symbolEncodingTypeStats_t +ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) { - DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize); - return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */); + ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0, 0}; + nextEntropy->litlength_repeatMode = FSE_repeat_none; + nextEntropy->offcode_repeatMode = FSE_repeat_none; + nextEntropy->matchlength_repeatMode = FSE_repeat_none; + return stats; } - -size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) +/** ZSTD_buildBlockEntropyStats_sequences() : + * Builds entropy for the sequences. + * Stores symbol compression modes and fse table to fseMetadata. + * Requires ENTROPY_WORKSPACE_SIZE wksp. + * @return : size of fse tables or error code */ +static size_t +ZSTD_buildBlockEntropyStats_sequences( + const SeqStore_t* seqStorePtr, + const ZSTD_fseCTables_t* prevEntropy, + ZSTD_fseCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + ZSTD_fseCTablesMetadata_t* fseMetadata, + void* workspace, size_t wkspSize) { - ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams; - assert(!ZSTD_checkCParams(cParams)); - return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog); + ZSTD_strategy const strategy = cctxParams->cParams.strategy; + size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + BYTE* const ostart = fseMetadata->fseTablesBuffer; + BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer); + BYTE* op = ostart; + unsigned* countWorkspace = (unsigned*)workspace; + unsigned* entropyWorkspace = countWorkspace + (MaxSeq + 1); + size_t entropyWorkspaceSize = wkspSize - (MaxSeq + 1) * sizeof(*countWorkspace); + ZSTD_symbolEncodingTypeStats_t stats; + + DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_sequences (nbSeq=%zu)", nbSeq); + stats = nbSeq != 0 ? ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq, + prevEntropy, nextEntropy, op, oend, + strategy, countWorkspace, + entropyWorkspace, entropyWorkspaceSize) + : ZSTD_buildDummySequencesStatistics(nextEntropy); + FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!"); + fseMetadata->llType = (SymbolEncodingType_e) stats.LLtype; + fseMetadata->ofType = (SymbolEncodingType_e) stats.Offtype; + fseMetadata->mlType = (SymbolEncodingType_e) stats.MLtype; + fseMetadata->lastCountSize = stats.lastCountSize; + return stats.size; +} + + +/** ZSTD_buildBlockEntropyStats() : + * Builds entropy for the block. 
+ * Requires workspace size ENTROPY_WORKSPACE_SIZE + * @return : 0 on success, or an error code + * Note : also employed in superblock + */ +size_t ZSTD_buildBlockEntropyStats( + const SeqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, size_t wkspSize) +{ + size_t const litSize = (size_t)(seqStorePtr->lit - seqStorePtr->litStart); + int const huf_useOptDepth = (cctxParams->cParams.strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD); + int const hufFlags = huf_useOptDepth ? HUF_flags_optimalDepth : 0; + + entropyMetadata->hufMetadata.hufDesSize = + ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize, + &prevEntropy->huf, &nextEntropy->huf, + &entropyMetadata->hufMetadata, + ZSTD_literalsCompressionIsDisabled(cctxParams), + workspace, wkspSize, hufFlags); + + FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed"); + entropyMetadata->fseMetadata.fseTablesSize = + ZSTD_buildBlockEntropyStats_sequences(seqStorePtr, + &prevEntropy->fse, &nextEntropy->fse, + cctxParams, + &entropyMetadata->fseMetadata, + workspace, wkspSize); + FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildBlockEntropyStats_sequences failed"); + return 0; } -size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) -{ - size_t const blockSizeMax = ZSTD_getBlockSize(cctx); - RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong); +/* Returns the size estimate for the literals section (header + content) of a block */ +static size_t +ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize, + const ZSTD_hufCTables_t* huf, + const ZSTD_hufCTablesMetadata_t* hufMetadata, + void* workspace, size_t wkspSize, + int writeEntropy) +{ + unsigned* const countWksp = (unsigned*)workspace; + unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX; + size_t literalSectionHeaderSize = 3 + (litSize >= 1 KB) + (litSize >= 16 KB); + U32 singleStream = litSize < 256; + + if (hufMetadata->hType == set_basic) return litSize; + else if (hufMetadata->hType == set_rle) return 1; + else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) { + size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize); + if (ZSTD_isError(largest)) return litSize; + { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue); + if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize; + if (!singleStream) cLitSizeEstimate += 6; /* multi-stream huffman uses 6-byte jump table */ + return cLitSizeEstimate + literalSectionHeaderSize; + } } + assert(0); /* impossible */ + return 0; +} - return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */); +/* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */ +static size_t +ZSTD_estimateBlockSize_symbolType(SymbolEncodingType_e type, + const BYTE* codeTable, size_t nbSeq, unsigned maxCode, + const FSE_CTable* fseCTable, + const U8* additionalBits, + short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, + void* workspace, size_t wkspSize) +{ + unsigned* const countWksp = (unsigned*)workspace; + const BYTE* ctp = codeTable; + const BYTE* const ctStart = ctp; + const BYTE* const ctEnd = ctStart + nbSeq; + size_t 
cSymbolTypeSizeEstimateInBits = 0; + unsigned max = maxCode; + + HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */ + if (type == set_basic) { + /* We selected this encoding type, so it must be valid. */ + assert(max <= defaultMax); + (void)defaultMax; + cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max); + } else if (type == set_rle) { + cSymbolTypeSizeEstimateInBits = 0; + } else if (type == set_compressed || type == set_repeat) { + cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max); + } + if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) { + return nbSeq * 10; + } + while (ctp < ctEnd) { + if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp]; + else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */ + ctp++; + } + return cSymbolTypeSizeEstimateInBits >> 3; } -/*! ZSTD_loadDictionaryContent() : - * @return : 0, or an error code +/* Returns the size estimate for the sequences section (header + content) of a block */ +static size_t +ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable, + const BYTE* llCodeTable, + const BYTE* mlCodeTable, + size_t nbSeq, + const ZSTD_fseCTables_t* fseTables, + const ZSTD_fseCTablesMetadata_t* fseMetadata, + void* workspace, size_t wkspSize, + int writeEntropy) +{ + size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min seqSize size */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ); + size_t cSeqSizeEstimate = 0; + cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff, + fseTables->offcodeCTable, NULL, + OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, + workspace, wkspSize); + cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL, + fseTables->litlengthCTable, LL_bits, + LL_defaultNorm, LL_defaultNormLog, MaxLL, + workspace, wkspSize); + cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML, + fseTables->matchlengthCTable, ML_bits, + ML_defaultNorm, ML_defaultNormLog, MaxML, + workspace, wkspSize); + if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize; + return cSeqSizeEstimate + sequencesSectionHeaderSize; +} + +/* Returns the size estimate for a given stream of literals, of, ll, ml */ +static size_t +ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize, + const BYTE* ofCodeTable, + const BYTE* llCodeTable, + const BYTE* mlCodeTable, + size_t nbSeq, + const ZSTD_entropyCTables_t* entropy, + const ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, size_t wkspSize, + int writeLitEntropy, int writeSeqEntropy) +{ + size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize, + &entropy->huf, &entropyMetadata->hufMetadata, + workspace, wkspSize, writeLitEntropy); + size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, + nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, + workspace, wkspSize, writeSeqEntropy); + return seqSize + literalsSize + ZSTD_blockHeaderSize; +} + +/* Builds entropy statistics and uses them for blocksize estimation. + * + * @return: estimated compressed size of the seqStore, or a zstd error. 
*/ -static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, - ZSTD_CCtx_params const* params, - const void* src, size_t srcSize, - ZSTD_dictTableLoadMethod_e dtlm) -{ - const BYTE* const ip = (const BYTE*) src; - const BYTE* const iend = ip + srcSize; - - ZSTD_window_update(&ms->window, src, srcSize); - ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base); - - /* Assert that we the ms params match the params we're being given */ - ZSTD_assertEqualCParams(params->cParams, ms->cParams); - - if (srcSize <= HASH_READ_SIZE) return 0; +static size_t +ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(SeqStore_t* seqStore, ZSTD_CCtx* zc) +{ + ZSTD_entropyCTablesMetadata_t* const entropyMetadata = &zc->blockSplitCtx.entropyMetadata; + DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()"); + FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore, + &zc->blockState.prevCBlock->entropy, + &zc->blockState.nextCBlock->entropy, + &zc->appliedParams, + entropyMetadata, + zc->tmpWorkspace, zc->tmpWkspSize), ""); + return ZSTD_estimateBlockSize( + seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart), + seqStore->ofCode, seqStore->llCode, seqStore->mlCode, + (size_t)(seqStore->sequences - seqStore->sequencesStart), + &zc->blockState.nextCBlock->entropy, + entropyMetadata, + zc->tmpWorkspace, zc->tmpWkspSize, + (int)(entropyMetadata->hufMetadata.hType == set_compressed), 1); +} + +/* Returns literals bytes represented in a seqStore */ +static size_t ZSTD_countSeqStoreLiteralsBytes(const SeqStore_t* const seqStore) +{ + size_t literalsBytes = 0; + size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); + size_t i; + for (i = 0; i < nbSeqs; ++i) { + SeqDef const seq = seqStore->sequencesStart[i]; + literalsBytes += seq.litLength; + if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) { + literalsBytes += 0x10000; + } } + return literalsBytes; +} - switch(params->cParams.strategy) - { - case ZSTD_fast: - ZSTD_fillHashTable(ms, iend, dtlm); - break; - case ZSTD_dfast: - ZSTD_fillDoubleHashTable(ms, iend, dtlm); - break; +/* Returns match bytes represented in a seqStore */ +static size_t ZSTD_countSeqStoreMatchBytes(const SeqStore_t* const seqStore) +{ + size_t matchBytes = 0; + size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); + size_t i; + for (i = 0; i < nbSeqs; ++i) { + SeqDef seq = seqStore->sequencesStart[i]; + matchBytes += seq.mlBase + MINMATCH; + if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) { + matchBytes += 0x10000; + } } + return matchBytes; +} - case ZSTD_greedy: - case ZSTD_lazy: - case ZSTD_lazy2: - if (srcSize >= HASH_READ_SIZE) - ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE); - break; +/* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx). + * Stores the result in resultSeqStore. 
+ */ +static void ZSTD_deriveSeqStoreChunk(SeqStore_t* resultSeqStore, + const SeqStore_t* originalSeqStore, + size_t startIdx, size_t endIdx) +{ + *resultSeqStore = *originalSeqStore; + if (startIdx > 0) { + resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx; + resultSeqStore->litStart += ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); + } - case ZSTD_btlazy2: /* we want the dictionary table fully sorted */ - case ZSTD_btopt: - case ZSTD_btultra: - case ZSTD_btultra2: - if (srcSize >= HASH_READ_SIZE) - ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend); - break; + /* Move longLengthPos into the correct position if necessary */ + if (originalSeqStore->longLengthType != ZSTD_llt_none) { + if (originalSeqStore->longLengthPos < startIdx || originalSeqStore->longLengthPos > endIdx) { + resultSeqStore->longLengthType = ZSTD_llt_none; + } else { + resultSeqStore->longLengthPos -= (U32)startIdx; + } + } + resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx; + resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx; + if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) { + /* This accounts for possible last literals if the derived chunk reaches the end of the block */ + assert(resultSeqStore->lit == originalSeqStore->lit); + } else { + size_t const literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); + resultSeqStore->lit = resultSeqStore->litStart + literalsBytes; + } + resultSeqStore->llCode += startIdx; + resultSeqStore->mlCode += startIdx; + resultSeqStore->ofCode += startIdx; +} - default: - assert(0); /* not possible : not a valid strategy id */ +/** + * Returns the raw offset represented by the combination of offBase, ll0, and repcode history. + * offBase must represent a repcode in the numeric representation of ZSTD_storeSeq(). + */ +static U32 +ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offBase, const U32 ll0) +{ + U32 const adjustedRepCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0; /* [ 0 - 3 ] */ + assert(OFFBASE_IS_REPCODE(offBase)); + if (adjustedRepCode == ZSTD_REP_NUM) { + assert(ll0); + /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 + * This is only valid if it results in a valid offset value, aka > 0. + * Note : it may happen that `rep[0]==1` in exceptional circumstances. + * In which case this function will return 0, which is an invalid offset. + * It's not an issue though, since this value will be + * compared and discarded within ZSTD_seqStore_resolveOffCodes(). + */ + return rep[0] - 1; } + return rep[adjustedRepCode]; +} - ms->nextToUpdate = (U32)(iend - ms->window.base); - return 0; +/** + * ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise + * due to emission of RLE/raw blocks that disturb the offset history, + * and replaces any repcodes within the seqStore that may be invalid. + * + * dRepcodes are updated as would be on the decompression side. + * cRepcodes are updated exactly in accordance with the seqStore. + * + * Note : this function assumes seq->offBase respects the following numbering scheme : + * 0 : invalid + * 1-3 : repcode 1-3 + * 4+ : real_offset+3 + */ +static void +ZSTD_seqStore_resolveOffCodes(Repcodes_t* const dRepcodes, Repcodes_t* const cRepcodes, + const SeqStore_t* const seqStore, U32 const nbSeq) +{ + U32 idx = 0; + U32 const longLitLenIdx = seqStore->longLengthType == ZSTD_llt_literalLength ? 
seqStore->longLengthPos : nbSeq; + for (; idx < nbSeq; ++idx) { + SeqDef* const seq = seqStore->sequencesStart + idx; + U32 const ll0 = (seq->litLength == 0) && (idx != longLitLenIdx); + U32 const offBase = seq->offBase; + assert(offBase > 0); + if (OFFBASE_IS_REPCODE(offBase)) { + U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offBase, ll0); + U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offBase, ll0); + /* Adjust simulated decompression repcode history if we come across a mismatch. Replace + * the repcode with the offset it actually references, determined by the compression + * repcode history. + */ + if (dRawOffset != cRawOffset) { + seq->offBase = OFFSET_TO_OFFBASE(cRawOffset); + } + } + /* Compression repcode history is always updated with values directly from the unmodified seqStore. + * Decompression repcode history may use modified seq->offset value taken from compression repcode history. + */ + ZSTD_updateRep(dRepcodes->rep, seq->offBase, ll0); + ZSTD_updateRep(cRepcodes->rep, offBase, ll0); + } } +/* ZSTD_compressSeqStore_singleBlock(): + * Compresses a seqStore into a block with a block header, into the buffer dst. + * + * Returns the total size of that block (including header) or a ZSTD error code. + */ +static size_t +ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, + const SeqStore_t* const seqStore, + Repcodes_t* const dRep, Repcodes_t* const cRep, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + U32 lastBlock, U32 isPartition) +{ + const U32 rleMaxLength = 25; + BYTE* op = (BYTE*)dst; + const BYTE* ip = (const BYTE*)src; + size_t cSize; + size_t cSeqsSize; + + /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */ + Repcodes_t const dRepOriginal = *dRep; + DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock"); + if (isPartition) + ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart)); + + RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "Block header doesn't fit"); + cSeqsSize = ZSTD_entropyCompressSeqStore(seqStore, + &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, + &zc->appliedParams, + op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, + srcSize, + zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */, + zc->bmi2); + FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!"); + + if (!zc->isFirstBlock && + cSeqsSize < rleMaxLength && + ZSTD_isRLE((BYTE const*)src, srcSize)) { + /* We don't want to emit our first block as a RLE even if it qualifies because + * doing so will cause the decoder (cli only) to throw a "should consume all input error." + * This is only an issue for zstd <= v1.4.3 + */ + cSeqsSize = 1; + } -/* Dictionaries that assign zero probability to symbols that show up causes problems - when FSE encoding. Refuse dictionaries that assign zero probability to symbols - that we may encounter during compression. - NOTE: This behavior is not standard and could be improved in the future. 
*/ -static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) { - U32 s; - RETURN_ERROR_IF(dictMaxSymbolValue < maxSymbolValue, dictionary_corrupted); - for (s = 0; s <= maxSymbolValue; ++s) { - RETURN_ERROR_IF(normalizedCounter[s] == 0, dictionary_corrupted); + /* Sequence collection not supported when block splitting */ + if (zc->seqCollector.collectSequences) { + FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, seqStore, dRepOriginal.rep), "copyBlockSequences failed"); + ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); + return 0; } - return 0; -} + if (cSeqsSize == 0) { + cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock); + FORWARD_IF_ERROR(cSize, "Nocompress block failed"); + DEBUGLOG(5, "Writing out nocompress block, size: %zu", cSize); + *dRep = dRepOriginal; /* reset simulated decompression repcode history */ + } else if (cSeqsSize == 1) { + cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock); + FORWARD_IF_ERROR(cSize, "RLE compress block failed"); + DEBUGLOG(5, "Writing out RLE block, size: %zu", cSize); + *dRep = dRepOriginal; /* reset simulated decompression repcode history */ + } else { + ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); + writeBlockHeader(op, cSeqsSize, srcSize, lastBlock); + cSize = ZSTD_blockHeaderSize + cSeqsSize; + DEBUGLOG(5, "Writing out compressed block, size: %zu", cSize); + } -/* Dictionary format : - * See : - * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format - */ -/*! ZSTD_loadZstdDictionary() : - * @return : dictID, or an error code - * assumptions : magic number supposed already checked - * dictSize supposed > 8 - */ -static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, - ZSTD_matchState_t* ms, - ZSTD_CCtx_params const* params, - const void* dict, size_t dictSize, - ZSTD_dictTableLoadMethod_e dtlm, - void* workspace) -{ - const BYTE* dictPtr = (const BYTE*)dict; - const BYTE* const dictEnd = dictPtr + dictSize; - short offcodeNCount[MaxOff+1]; - unsigned offcodeMaxValue = MaxOff; - size_t dictID; + if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; - ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1< 8); - assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY); + return cSize; +} - dictPtr += 4; /* skip magic number */ - dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr); - dictPtr += 4; +/* Struct to keep track of where we are in our recursive calls. 
*/ +typedef struct { + U32* splitLocations; /* Array of split indices */ + size_t idx; /* The current index within splitLocations being worked on */ +} seqStoreSplits; - { unsigned maxSymbolValue = 255; - size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr); - RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted); - RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted); - dictPtr += hufHeaderSize; - } +#define MIN_SEQUENCES_BLOCK_SPLITTING 300 - { unsigned offcodeLog; - size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); - RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted); - RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted); - /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */ - /* fill all offset symbols to avoid garbage at end of table */ - RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( - bs->entropy.fse.offcodeCTable, - offcodeNCount, MaxOff, offcodeLog, - workspace, HUF_WORKSPACE_SIZE)), - dictionary_corrupted); - dictPtr += offcodeHeaderSize; +/* Helper function to perform the recursive search for block splits. + * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half. + * If advantageous to split, then we recurse down the two sub-blocks. + * If not, or if an error occurred in estimation, then we do not recurse. + * + * Note: The recursion depth is capped by a heuristic minimum number of sequences, + * defined by MIN_SEQUENCES_BLOCK_SPLITTING. + * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING). + * In practice, recursion depth usually doesn't go beyond 4. + * + * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. + * At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize + * maximum of 128 KB, this value is actually impossible to reach. 
+ */ +static void +ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx, + ZSTD_CCtx* zc, const SeqStore_t* origSeqStore) +{ + SeqStore_t* const fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; + SeqStore_t* const firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; + SeqStore_t* const secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; + size_t estimatedOriginalSize; + size_t estimatedFirstHalfSize; + size_t estimatedSecondHalfSize; + size_t midIdx = (startIdx + endIdx)/2; + + DEBUGLOG(5, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx); + assert(endIdx >= startIdx); + if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) { + DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences (%zu)", endIdx - startIdx); + return; } - - { short matchlengthNCount[MaxML+1]; - unsigned matchlengthMaxValue = MaxML, matchlengthLog; - size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); - RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted); - RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted); - /* Every match length code must have non-zero probability */ - FORWARD_IF_ERROR( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML)); - RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( - bs->entropy.fse.matchlengthCTable, - matchlengthNCount, matchlengthMaxValue, matchlengthLog, - workspace, HUF_WORKSPACE_SIZE)), - dictionary_corrupted); - dictPtr += matchlengthHeaderSize; + ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx); + ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx); + ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx); + estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc); + estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc); + estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc); + DEBUGLOG(5, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu", + estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize); + if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) { + return; } + if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) { + DEBUGLOG(5, "split decided at seqNb:%zu", midIdx); + ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore); + splits->splitLocations[splits->idx] = (U32)midIdx; + splits->idx++; + ZSTD_deriveBlockSplitsHelper(splits, midIdx, endIdx, zc, origSeqStore); + } +} - { short litlengthNCount[MaxLL+1]; - unsigned litlengthMaxValue = MaxLL, litlengthLog; - size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); - RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted); - RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted); - /* Every literal length code must have non-zero probability */ - FORWARD_IF_ERROR( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL)); - RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( - bs->entropy.fse.litlengthCTable, - litlengthNCount, litlengthMaxValue, litlengthLog, - workspace, HUF_WORKSPACE_SIZE)), - 
dictionary_corrupted); - dictPtr += litlengthHeaderSize; +/* Base recursive function. + * Populates a table with intra-block partition indices that can improve compression ratio. + * + * @return: number of splits made (which equals the size of the partition table - 1). + */ +static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) +{ + seqStoreSplits splits; + splits.splitLocations = partitions; + splits.idx = 0; + if (nbSeq <= 4) { + DEBUGLOG(5, "ZSTD_deriveBlockSplits: Too few sequences to split (%u <= 4)", nbSeq); + /* Refuse to try and split anything with less than 4 sequences */ + return 0; } + ZSTD_deriveBlockSplitsHelper(&splits, 0, nbSeq, zc, &zc->seqStore); + splits.splitLocations[splits.idx] = nbSeq; + DEBUGLOG(5, "ZSTD_deriveBlockSplits: final nb partitions: %zu", splits.idx+1); + return splits.idx; +} - RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted); - bs->rep[0] = MEM_readLE32(dictPtr+0); - bs->rep[1] = MEM_readLE32(dictPtr+4); - bs->rep[2] = MEM_readLE32(dictPtr+8); - dictPtr += 12; +/* ZSTD_compressBlock_splitBlock(): + * Attempts to split a given block into multiple blocks to improve compression ratio. + * + * Returns combined size of all blocks (which includes headers), or a ZSTD error code. + */ +static size_t +ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + const void* src, size_t blockSize, + U32 lastBlock, U32 nbSeq) +{ + size_t cSize = 0; + const BYTE* ip = (const BYTE*)src; + BYTE* op = (BYTE*)dst; + size_t i = 0; + size_t srcBytesTotal = 0; + U32* const partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */ + SeqStore_t* const nextSeqStore = &zc->blockSplitCtx.nextSeqStore; + SeqStore_t* const currSeqStore = &zc->blockSplitCtx.currSeqStore; + size_t const numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq); + + /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history + * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two + * separate repcode histories that simulate repcode history on compression and decompression side, + * and use the histories to determine whether we must replace a particular repcode with its raw offset. + * + * 1) cRep gets updated for each partition, regardless of whether the block was emitted as uncompressed + * or RLE. This allows us to retrieve the offset value that an invalid repcode references within + * a nocompress/RLE block. + * 2) dRep gets updated only for compressed partitions, and when a repcode gets replaced, will use + * the replacement offset value rather than the original repcode to update the repcode history. + * dRep also will be the final repcode history sent to the next block. + * + * See ZSTD_seqStore_resolveOffCodes() for more details. 
+ */ + Repcodes_t dRep; + Repcodes_t cRep; + ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t)); + ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t)); + ZSTD_memset(nextSeqStore, 0, sizeof(SeqStore_t)); + + DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", + (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, + (unsigned)zc->blockState.matchState.nextToUpdate); + + if (numSplits == 0) { + size_t cSizeSingleBlock = + ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore, + &dRep, &cRep, + op, dstCapacity, + ip, blockSize, + lastBlock, 0 /* isPartition */); + FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!"); + DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits"); + assert(zc->blockSizeMax <= ZSTD_BLOCKSIZE_MAX); + assert(cSizeSingleBlock <= zc->blockSizeMax + ZSTD_blockHeaderSize); + return cSizeSingleBlock; + } - { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); - U32 offcodeMax = MaxOff; - if (dictContentSize <= ((U32)-1) - 128 KB) { - U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ - offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */ + ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]); + for (i = 0; i <= numSplits; ++i) { + size_t cSizeChunk; + U32 const lastPartition = (i == numSplits); + U32 lastBlockEntireSrc = 0; + + size_t srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore); + srcBytesTotal += srcBytes; + if (lastPartition) { + /* This is the final partition, need to account for possible last literals */ + srcBytes += blockSize - srcBytesTotal; + lastBlockEntireSrc = lastBlock; + } else { + ZSTD_deriveSeqStoreChunk(nextSeqStore, &zc->seqStore, partitions[i], partitions[i+1]); } - /* All offset values <= dictContentSize + 128 KB must be representable */ - FORWARD_IF_ERROR(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff))); - /* All repCodes must be <= dictContentSize and != 0*/ - { U32 u; - for (u=0; u<3; u++) { - RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted); - RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted); - } } - bs->entropy.huf.repeatMode = HUF_repeat_valid; - bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid; - bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid; - bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid; - FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm)); - return dictID; + cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, currSeqStore, + &dRep, &cRep, + op, dstCapacity, + ip, srcBytes, + lastBlockEntireSrc, 1 /* isPartition */); + DEBUGLOG(5, "Estimated size: %zu vs %zu : actual size", + ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk); + FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!"); + + ip += srcBytes; + op += cSizeChunk; + dstCapacity -= cSizeChunk; + cSize += cSizeChunk; + *currSeqStore = *nextSeqStore; + assert(cSizeChunk <= zc->blockSizeMax + ZSTD_blockHeaderSize); } + /* cRep and dRep may have diverged during the compression. + * If so, we use the dRep repcodes for the next block. 
+ */ + ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(Repcodes_t)); + return cSize; } -/** ZSTD_compress_insertDictionary() : -* @return : dictID, or an error code */ static size_t -ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, - ZSTD_matchState_t* ms, - const ZSTD_CCtx_params* params, - const void* dict, size_t dictSize, - ZSTD_dictContentType_e dictContentType, - ZSTD_dictTableLoadMethod_e dtlm, - void* workspace) +ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, U32 lastBlock) { - DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize); - if ((dict==NULL) || (dictSize<=8)) return 0; - - ZSTD_reset_compressedBlockState(bs); - - /* dict restricted modes */ - if (dictContentType == ZSTD_dct_rawContent) - return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm); - - if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) { - if (dictContentType == ZSTD_dct_auto) { - DEBUGLOG(4, "raw content dictionary detected"); - return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm); + U32 nbSeq; + size_t cSize; + DEBUGLOG(5, "ZSTD_compressBlock_splitBlock"); + assert(zc->appliedParams.postBlockSplitter == ZSTD_ps_enable); + + { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); + FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); + if (bss == ZSTDbss_noCompress) { + if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; + RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block"); + cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); + FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); + DEBUGLOG(5, "ZSTD_compressBlock_splitBlock: Nocompress block"); + return cSize; } - RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong); - assert(0); /* impossible */ + nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart); } - /* dict as full zstd dictionary */ - return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, dtlm, workspace); + cSize = ZSTD_compressBlock_splitBlock_internal(zc, dst, dstCapacity, src, srcSize, lastBlock, nbSeq); + FORWARD_IF_ERROR(cSize, "Splitting blocks failed!"); + return cSize; } -/*! ZSTD_compressBegin_internal() : - * @return : 0, or an error code */ -static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, - const void* dict, size_t dictSize, - ZSTD_dictContentType_e dictContentType, - ZSTD_dictTableLoadMethod_e dtlm, - const ZSTD_CDict* cdict, - ZSTD_CCtx_params params, U64 pledgedSrcSize, - ZSTD_buffered_policy_e zbuff) +static size_t +ZSTD_compressBlock_internal(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, U32 frame) { - DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params.cParams.windowLog); - /* params are supposed to be fully validated at this point */ - assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); - assert(!((dict) && (cdict))); /* either dict or cdict, not both */ - - if (cdict && cdict->dictContentSize>0) { - return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff); + /* This is an estimated upper bound for the length of an rle block. + * This isn't the actual upper bound. + * Finding the real threshold needs further investigation. 
+ */ + const U32 rleMaxLength = 25; + size_t cSize; + const BYTE* ip = (const BYTE*)src; + BYTE* op = (BYTE*)dst; + DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", + (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, + (unsigned)zc->blockState.matchState.nextToUpdate); + + { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); + FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); + if (bss == ZSTDbss_noCompress) { + RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block"); + cSize = 0; + goto out; + } } - FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize, - ZSTDcrp_continue, zbuff) ); - { size_t const dictID = ZSTD_compress_insertDictionary( - cctx->blockState.prevCBlock, &cctx->blockState.matchState, - ¶ms, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace); - FORWARD_IF_ERROR(dictID); - assert(dictID <= UINT_MAX); - cctx->dictID = (U32)dictID; + if (zc->seqCollector.collectSequences) { + FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, ZSTD_getSeqStore(zc), zc->blockState.prevCBlock->rep), "copyBlockSequences failed"); + ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); + return 0; } - return 0; -} -size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx, - const void* dict, size_t dictSize, - ZSTD_dictContentType_e dictContentType, - ZSTD_dictTableLoadMethod_e dtlm, - const ZSTD_CDict* cdict, - ZSTD_CCtx_params params, - unsigned long long pledgedSrcSize) -{ - DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog); - /* compression parameters verification and optimization */ - FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) ); - return ZSTD_compressBegin_internal(cctx, - dict, dictSize, dictContentType, dtlm, - cdict, - params, pledgedSrcSize, - ZSTDb_not_buffered); + /* encode sequences and literals */ + cSize = ZSTD_entropyCompressSeqStore(&zc->seqStore, + &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, + &zc->appliedParams, + dst, dstCapacity, + srcSize, + zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */, + zc->bmi2); + + if (frame && + /* We don't want to emit our first block as a RLE even if it qualifies because + * doing so will cause the decoder (cli only) to throw a "should consume all input error." + * This is only an issue for zstd <= v1.4.3 + */ + !zc->isFirstBlock && + cSize < rleMaxLength && + ZSTD_isRLE(ip, srcSize)) + { + cSize = 1; + op[0] = ip[0]; + } + +out: + if (!ZSTD_isError(cSize) && cSize > 1) { + ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); + } + /* We check that dictionaries have offset codes available for the first + * block. After the first block, the offcode table might not have large + * enough codes to represent the offsets in the data. + */ + if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; + + return cSize; } -/*! 
ZSTD_compressBegin_advanced() : -* @return : 0, or an error code */ -size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, - const void* dict, size_t dictSize, - ZSTD_parameters params, unsigned long long pledgedSrcSize) -{ - ZSTD_CCtx_params const cctxParams = - ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params); - return ZSTD_compressBegin_advanced_internal(cctx, - dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, - NULL /*cdict*/, - cctxParams, pledgedSrcSize); +static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const size_t bss, U32 lastBlock) +{ + DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()"); + if (bss == ZSTDbss_compress) { + if (/* We don't want to emit our first block as a RLE even if it qualifies because + * doing so will cause the decoder (cli only) to throw a "should consume all input error." + * This is only an issue for zstd <= v1.4.3 + */ + !zc->isFirstBlock && + ZSTD_maybeRLE(&zc->seqStore) && + ZSTD_isRLE((BYTE const*)src, srcSize)) + { + return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock); + } + /* Attempt superblock compression. + * + * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the + * standard ZSTD_compressBound(). This is a problem, because even if we have + * space now, taking an extra byte now could cause us to run out of space later + * and violate ZSTD_compressBound(). + * + * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize. + * + * In order to respect ZSTD_compressBound() we must attempt to emit a raw + * uncompressed block in these cases: + * * cSize == 0: Return code for an uncompressed block. + * * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize). + * ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of + * output space. + * * cSize >= blockBound(srcSize): We have expanded the block too much so + * emit an uncompressed block. + */ + { size_t const cSize = + ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock); + if (cSize != ERROR(dstSize_tooSmall)) { + size_t const maxCSize = + srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); + FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed"); + if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) { + ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); + return cSize; + } + } + } + } /* if (bss == ZSTDbss_compress)*/ + + DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()"); + /* Superblock compression failed, attempt to emit a single no compress block. + * The decoder will be able to stream this block since it is uncompressed. 
+ */ + return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); } -size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) +static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + U32 lastBlock) { - ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize); - ZSTD_CCtx_params const cctxParams = - ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params); - DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize); - return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL, - cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered); + size_t cSize = 0; + const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); + DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)", + (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize); + FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); + + cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock); + FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed"); + + if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; + + return cSize; } -size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel) -{ - return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); +static void ZSTD_overflowCorrectIfNeeded(ZSTD_MatchState_t* ms, + ZSTD_cwksp* ws, + ZSTD_CCtx_params const* params, + void const* ip, + void const* iend) +{ + U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy); + U32 const maxDist = (U32)1 << params->cParams.windowLog; + if (ZSTD_window_needOverflowCorrection(ms->window, cycleLog, maxDist, ms->loadedDictEnd, ip, iend)) { + U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip); + ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30); + ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30); + ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31); + ZSTD_cwksp_mark_tables_dirty(ws); + ZSTD_reduceIndex(ms, params, correction); + ZSTD_cwksp_mark_tables_clean(ws); + if (ms->nextToUpdate < correction) ms->nextToUpdate = 0; + else ms->nextToUpdate -= correction; + /* invalidate dictionaries on overflow correction */ + ms->loadedDictEnd = 0; + ms->dictMatchState = NULL; + } } +#include "zstd_preSplit.h" -/*! ZSTD_writeEpilogue() : -* Ends a frame. -* @return : nb of bytes written into dst (or an error code) */ -static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) +static size_t ZSTD_optimalBlockSize(ZSTD_CCtx* cctx, const void* src, size_t srcSize, size_t blockSizeMax, int splitLevel, ZSTD_strategy strat, S64 savings) +{ + /* split level based on compression strategy, from `fast` to `btultra2` */ + static const int splitLevels[] = { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 }; + /* note: conservatively only split full blocks (128 KB) currently. + * While it's possible to go lower, let's keep it simple for a first implementation. + * Besides, benefits of splitting are reduced when blocks are already small. 
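+ * For example, a trailing chunk of 100 KB is returned whole, while a 256 KB + * remainder stays eligible for splitting, 128 KB at a time, as implemented just below.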
+ */ + if (srcSize < 128 KB || blockSizeMax < 128 KB) + return MIN(srcSize, blockSizeMax); + /* do not split incompressible data though: + * require verified savings to allow pre-splitting. + * Note: as a consequence, the first full block is not split. + */ + if (savings < 3) { + DEBUGLOG(6, "don't attempt splitting: savings (%i) too low", (int)savings); + return 128 KB; + } + /* apply @splitLevel, or use default value (which depends on @strat). + * note that splitting heuristic is still conditioned by @savings >= 3, + * so the first block will not reach this code path */ + if (splitLevel == 1) return 128 KB; + if (splitLevel == 0) { + assert(ZSTD_fast <= strat && strat <= ZSTD_btultra2); + splitLevel = splitLevels[strat]; + } else { + assert(2 <= splitLevel && splitLevel <= 6); + splitLevel -= 2; + } + return ZSTD_splitBlock(src, blockSizeMax, splitLevel, cctx->tmpWorkspace, cctx->tmpWkspSize); +} + +/*! ZSTD_compress_frameChunk() : +* Compress a chunk of data into one or multiple blocks. +* All blocks will be terminated, all input will be consumed. +* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. +* Frame is supposed already started (header already produced) +* @return : compressed size, or an error code +*/ +static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + U32 lastFrameChunk) { + size_t blockSizeMax = cctx->blockSizeMax; + size_t remaining = srcSize; + const BYTE* ip = (const BYTE*)src; BYTE* const ostart = (BYTE*)dst; BYTE* op = ostart; - size_t fhSize = 0; + U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog; + S64 savings = (S64)cctx->consumedSrcSize - (S64)cctx->producedCSize; - DEBUGLOG(4, "ZSTD_writeEpilogue"); - RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing"); + assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX); - /* special case : empty frame */ - if (cctx->stage == ZSTDcs_init) { - fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0); - FORWARD_IF_ERROR(fhSize); - dstCapacity -= fhSize; - op += fhSize; - cctx->stage = ZSTDcs_ongoing; - } + DEBUGLOG(5, "ZSTD_compress_frameChunk (srcSize=%u, blockSizeMax=%u)", (unsigned)srcSize, (unsigned)blockSizeMax); + if (cctx->appliedParams.fParams.checksumFlag && srcSize) + XXH64_update(&cctx->xxhState, src, srcSize); - if (cctx->stage != ZSTDcs_ending) { - /* write one last empty block, make it the "last" block */ - U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0; - RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall); - MEM_writeLE32(op, cBlockHeader24); - op += ZSTD_blockHeaderSize; - dstCapacity -= ZSTD_blockHeaderSize; - } + while (remaining) { + ZSTD_MatchState_t* const ms = &cctx->blockState.matchState; + size_t const blockSize = ZSTD_optimalBlockSize(cctx, + ip, remaining, + blockSizeMax, + cctx->appliedParams.preBlockSplitter_level, + cctx->appliedParams.cParams.strategy, + savings); + U32 const lastBlock = lastFrameChunk & (blockSize == remaining); + assert(blockSize <= remaining); + + /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2, so to compensate we add + * an additional 1. 
We need to revisit and change this logic to be more consistent */ + RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE + 1, + dstSize_tooSmall, + "not enough space to store compressed block"); - if (cctx->appliedParams.fParams.checksumFlag) { - U32 const checksum = (U32) XXH64_digest(&cctx->xxhState); - RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall); - DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum); - MEM_writeLE32(op, checksum); - op += 4; - } + ZSTD_overflowCorrectIfNeeded( + ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize); + ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); + ZSTD_window_enforceMaxDist(&ms->window, ip, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); - cctx->stage = ZSTDcs_created; /* return to "created but no init" status */ - return op-ostart; + /* Ensure hash/chain table insertion resumes no sooner than lowlimit */ + if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit; + + { size_t cSize; + if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) { + cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock); + FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed"); + assert(cSize > 0); + assert(cSize <= blockSize + ZSTD_blockHeaderSize); + } else if (ZSTD_blockSplitterEnabled(&cctx->appliedParams)) { + cSize = ZSTD_compressBlock_splitBlock(cctx, op, dstCapacity, ip, blockSize, lastBlock); + FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_splitBlock failed"); + assert(cSize > 0 || cctx->seqCollector.collectSequences == 1); + } else { + cSize = ZSTD_compressBlock_internal(cctx, + op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize, + ip, blockSize, 1 /* frame */); + FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed"); + + if (cSize == 0) { /* block is not compressible */ + cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); + FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); + } else { + U32 const cBlockHeader = cSize == 1 ? + lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) : + lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); + MEM_writeLE24(op, cBlockHeader); + cSize += ZSTD_blockHeaderSize; + } + } /* if (ZSTD_useTargetCBlockSize(&cctx->appliedParams))*/ + + /* @savings is employed to ensure that splitting doesn't worsen expansion of incompressible data. + * Without splitting, the maximum expansion is 3 bytes per full block. + * An adversarial input could attempt to fudge the split detector, + * and make it split incompressible data, resulting in more block headers. + * Note that, since ZSTD_COMPRESSBOUND() assumes a worst case scenario of 1KB per block, + * and the splitter never creates blocks that small (current lower limit is 8 KB), + * there is already no risk to expand beyond ZSTD_COMPRESSBOUND() limit. + * But if the goal is to not expand by more than 3-bytes per 128 KB full block, + * then yes, it becomes possible to make the block splitter oversplit incompressible data. + * Using @savings, we enforce an even more conservative condition, + * requiring the presence of enough savings (at least 3 bytes) to authorize splitting, + * otherwise only full blocks are used. 
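+ * For example, once a first full block has compressed from 128 KB to 100 KB, + * @savings exceeds the 3-byte threshold by a wide margin and later blocks + * become eligible for splitting ; on incompressible input, raw blocks keep + * @savings at or below zero, so splitting stays disabled.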
+ * But being conservative is fine, + * since splitting barely compressible blocks is not fruitful anyway */ + savings += (S64)blockSize - (S64)cSize; + + ip += blockSize; + assert(remaining >= blockSize); + remaining -= blockSize; + op += cSize; + assert(dstCapacity >= cSize); + dstCapacity -= cSize; + cctx->isFirstBlock = 0; + DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u", + (unsigned)cSize); + } } + + if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending; + return (size_t)(op-ostart); } -size_t ZSTD_compressEnd (ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) + +static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, + const ZSTD_CCtx_params* params, + U64 pledgedSrcSize, U32 dictID) { - size_t endResult; - size_t const cSize = ZSTD_compressContinue_internal(cctx, - dst, dstCapacity, src, srcSize, - 1 /* frame mode */, 1 /* last chunk */); - FORWARD_IF_ERROR(cSize); - endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize); - FORWARD_IF_ERROR(endResult); - assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0)); - if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */ - ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1); - DEBUGLOG(4, "end of frame : controlling src size"); - RETURN_ERROR_IF( - cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1, - srcSize_wrong, - "error : pledgedSrcSize = %u, while realSrcSize = %u", - (unsigned)cctx->pledgedSrcSizePlusOne-1, - (unsigned)cctx->consumedSrcSize); + BYTE* const op = (BYTE*)dst; + U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */ + U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */ + U32 const checksumFlag = params->fParams.checksumFlag>0; + U32 const windowSize = (U32)1 << params->cParams.windowLog; + U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize); + BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3); + U32 const fcsCode = params->fParams.contentSizeFlag ? 
+ (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */ + BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) ); + size_t pos=0; + + assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)); + RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall, + "dst buf is too small to fit worst-case frame header size."); + DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u", + !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode); + if (params->format == ZSTD_f_zstd1) { + MEM_writeLE32(dst, ZSTD_MAGICNUMBER); + pos = 4; } - return cSize + endResult; + op[pos++] = frameHeaderDescriptionByte; + if (!singleSegment) op[pos++] = windowLogByte; + switch(dictIDSizeCode) + { + default: + assert(0); /* impossible */ + ZSTD_FALLTHROUGH; + case 0 : break; + case 1 : op[pos] = (BYTE)(dictID); pos++; break; + case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break; + case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break; + } + switch(fcsCode) + { + default: + assert(0); /* impossible */ + ZSTD_FALLTHROUGH; + case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break; + case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break; + case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break; + case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break; + } + return pos; } +/* ZSTD_writeSkippableFrame_advanced() : + * Writes out a skippable frame with the specified magic number variant (16 are supported), + * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data. + * + * Returns the total number of bytes written, or a ZSTD error code. 
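+ * + * Wire layout (all fields little-endian) : + * [ZSTD_MAGIC_SKIPPABLE_START + magicVariant : 4 bytes] [srcSize : 4 bytes] [src : srcSize bytes], + * hence the fixed overhead of ZSTD_SKIPPABLEHEADERSIZE (8 bytes) checked below.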
+ */ +size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity, + const void* src, size_t srcSize, unsigned magicVariant) { + BYTE* op = (BYTE*)dst; + RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */, + dstSize_tooSmall, "Not enough room for skippable frame"); + RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame"); + RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported"); -static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict,size_t dictSize, - ZSTD_parameters params) -{ - ZSTD_CCtx_params const cctxParams = - ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params); - DEBUGLOG(4, "ZSTD_compress_internal"); - return ZSTD_compress_advanced_internal(cctx, - dst, dstCapacity, - src, srcSize, - dict, dictSize, - cctxParams); + MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant)); + MEM_writeLE32(op+4, (U32)srcSize); + ZSTD_memcpy(op+8, src, srcSize); + return srcSize + ZSTD_SKIPPABLEHEADERSIZE; } -size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict,size_t dictSize, - ZSTD_parameters params) +/* ZSTD_writeLastEmptyBlock() : + * output an empty Block with end-of-frame mark to complete a frame + * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h)) + * or an error code if `dstCapacity` is too small (< ZSTD_blockHeaderSize) + */ +size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity) +{ + RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, + "dst buf is too small to write frame trailer empty block."); + { U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1); /* 0 size */ + MEM_writeLE24(dst, cBlockHeader24); + return ZSTD_blockHeaderSize; + } +} + +void ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq) +{ + assert(cctx->stage == ZSTDcs_init); + assert(nbSeq == 0 || cctx->appliedParams.ldmParams.enableLdm != ZSTD_ps_enable); + cctx->externSeqStore.seq = seq; + cctx->externSeqStore.size = nbSeq; + cctx->externSeqStore.capacity = nbSeq; + cctx->externSeqStore.pos = 0; + cctx->externSeqStore.posInSequence = 0; } -size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict, size_t dictSize, - int compressionLevel) -{ - ZSTD_parameters const params = ZSTD_getParams(compressionLevel, srcSize + (!srcSize), dict ? 
dictSize : 0); - ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params); - assert(params.fParams.contentSizeFlag == 1); - return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, cctxParams); -} -size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - int compressionLevel) +static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + U32 frame, U32 lastFrameChunk) { - DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize); - assert(cctx != NULL); - return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel); -} + ZSTD_MatchState_t* const ms = &cctx->blockState.matchState; + size_t fhSize = 0; + + DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u", + cctx->stage, (unsigned)srcSize); + RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong, + "missing init (ZSTD_compressBegin)"); + + if (frame && (cctx->stage==ZSTDcs_init)) { + fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, + cctx->pledgedSrcSizePlusOne-1, cctx->dictID); + FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed"); + assert(fhSize <= dstCapacity); + dstCapacity -= fhSize; + dst = (char*)dst + fhSize; + cctx->stage = ZSTDcs_ongoing; + } + + if (!srcSize) return fhSize; /* do not generate an empty block if no input */ + + if (!ZSTD_window_update(&ms->window, src, srcSize, ms->forceNonContiguous)) { + ms->forceNonContiguous = 0; + ms->nextToUpdate = ms->window.dictLimit; + } + if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) { + ZSTD_window_update(&cctx->ldmState.window, src, srcSize, /* forceNonContiguous */ 0); + } + + if (!frame) { + /* overflow check and correction for block mode */ + ZSTD_overflowCorrectIfNeeded( + ms, &cctx->workspace, &cctx->appliedParams, + src, (BYTE const*)src + srcSize); + } + + DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSizeMax); + { size_t const cSize = frame ? + ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) : + ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */); + FORWARD_IF_ERROR(cSize, "%s", frame ? 
"ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed"); + cctx->consumedSrcSize += srcSize; + cctx->producedCSize += (cSize + fhSize); + assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0)); + if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */ + ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1); + RETURN_ERROR_IF( + cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne, + srcSize_wrong, + "error : pledgedSrcSize = %u, while realSrcSize >= %u", + (unsigned)cctx->pledgedSrcSizePlusOne-1, + (unsigned)cctx->consumedSrcSize); + } + return cSize + fhSize; + } +} + +size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize); + return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */); +} + +/* NOTE: Must just wrap ZSTD_compressContinue_public() */ +size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + return ZSTD_compressContinue_public(cctx, dst, dstCapacity, src, srcSize); +} + +static size_t ZSTD_getBlockSize_deprecated(const ZSTD_CCtx* cctx) +{ + ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams; + assert(!ZSTD_checkCParams(cParams)); + return MIN(cctx->appliedParams.maxBlockSize, (size_t)1 << cParams.windowLog); +} + +/* NOTE: Must just wrap ZSTD_getBlockSize_deprecated() */ +size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) +{ + return ZSTD_getBlockSize_deprecated(cctx); +} + +/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ +size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize); + { size_t const blockSizeMax = ZSTD_getBlockSize_deprecated(cctx); + RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); } + + return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */); +} + +/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ +size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_deprecated(cctx, dst, dstCapacity, src, srcSize); +} + +/*! ZSTD_loadDictionaryContent() : + * @return : 0, or an error code + */ +static size_t +ZSTD_loadDictionaryContent(ZSTD_MatchState_t* ms, + ldmState_t* ls, + ZSTD_cwksp* ws, + ZSTD_CCtx_params const* params, + const void* src, size_t srcSize, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp) +{ + const BYTE* ip = (const BYTE*) src; + const BYTE* const iend = ip + srcSize; + int const loadLdmDict = params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL; + + /* Assert that the ms params match the params we're being given */ + ZSTD_assertEqualCParams(params->cParams, ms->cParams); + + { /* Ensure large dictionaries can't cause index overflow */ + + /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX. + * Dictionaries right at the edge will immediately trigger overflow + * correction, but I don't want to insert extra constraints here. 
+ */ + U32 maxDictSize = ZSTD_CURRENT_MAX - ZSTD_WINDOW_START_INDEX; + + int const CDictTaggedIndices = ZSTD_CDictIndicesAreTagged(¶ms->cParams); + if (CDictTaggedIndices && tfp == ZSTD_tfp_forCDict) { + /* Some dictionary matchfinders in zstd use "short cache", + * which treats the lower ZSTD_SHORT_CACHE_TAG_BITS of each + * CDict hashtable entry as a tag rather than as part of an index. + * When short cache is used, we need to truncate the dictionary + * so that its indices don't overlap with the tag. */ + U32 const shortCacheMaxDictSize = (1u << (32 - ZSTD_SHORT_CACHE_TAG_BITS)) - ZSTD_WINDOW_START_INDEX; + maxDictSize = MIN(maxDictSize, shortCacheMaxDictSize); + assert(!loadLdmDict); + } + + /* If the dictionary is too large, only load the suffix of the dictionary. */ + if (srcSize > maxDictSize) { + ip = iend - maxDictSize; + src = ip; + srcSize = maxDictSize; + } + } + + if (srcSize > ZSTD_CHUNKSIZE_MAX) { + /* We must have cleared our windows when our source is this large. */ + assert(ZSTD_window_isEmpty(ms->window)); + if (loadLdmDict) assert(ZSTD_window_isEmpty(ls->window)); + } + ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0); + + DEBUGLOG(4, "ZSTD_loadDictionaryContent: useRowMatchFinder=%d", (int)params->useRowMatchFinder); + + if (loadLdmDict) { /* Load the entire dict into LDM matchfinders. */ + DEBUGLOG(4, "ZSTD_loadDictionaryContent: Trigger loadLdmDict"); + ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0); + ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base); + ZSTD_ldm_fillHashTable(ls, ip, iend, ¶ms->ldmParams); + DEBUGLOG(4, "ZSTD_loadDictionaryContent: ZSTD_ldm_fillHashTable completes"); + } + + /* If the dict is larger than we can reasonably index in our tables, only load the suffix. */ + { U32 maxDictSize = 1U << MIN(MAX(params->cParams.hashLog + 3, params->cParams.chainLog + 1), 31); + if (srcSize > maxDictSize) { + ip = iend - maxDictSize; + src = ip; + srcSize = maxDictSize; + } + } + + ms->nextToUpdate = (U32)(ip - ms->window.base); + ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base); + ms->forceNonContiguous = params->deterministicRefPrefix; + + if (srcSize <= HASH_READ_SIZE) return 0; + + ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend); + + switch(params->cParams.strategy) + { + case ZSTD_fast: + ZSTD_fillHashTable(ms, iend, dtlm, tfp); + break; + case ZSTD_dfast: +#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR + ZSTD_fillDoubleHashTable(ms, iend, dtlm, tfp); +#else + assert(0); /* shouldn't be called: cparams should've been adjusted. 
*/ +#endif + break; + + case ZSTD_greedy: + case ZSTD_lazy: + case ZSTD_lazy2: +#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) + assert(srcSize >= HASH_READ_SIZE); + if (ms->dedicatedDictSearch) { + assert(ms->chainTable != NULL); + ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend-HASH_READ_SIZE); + } else { + assert(params->useRowMatchFinder != ZSTD_ps_auto); + if (params->useRowMatchFinder == ZSTD_ps_enable) { + size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog); + ZSTD_memset(ms->tagTable, 0, tagTableSize); + ZSTD_row_update(ms, iend-HASH_READ_SIZE); + DEBUGLOG(4, "Using row-based hash table for lazy dict"); + } else { + ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE); + DEBUGLOG(4, "Using chain-based hash table for lazy dict"); + } + } +#else + assert(0); /* shouldn't be called: cparams should've been adjusted. */ +#endif + break; + + case ZSTD_btlazy2: /* we want the dictionary table fully sorted */ + case ZSTD_btopt: + case ZSTD_btultra: + case ZSTD_btultra2: +#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR) + assert(srcSize >= HASH_READ_SIZE); + DEBUGLOG(4, "Fill %u bytes into the Binary Tree", (unsigned)srcSize); + ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend); +#else + assert(0); /* shouldn't be called: cparams should've been adjusted. */ +#endif + break; + + default: + assert(0); /* not possible : not a valid strategy id */ + } + + ms->nextToUpdate = (U32)(iend - ms->window.base); + return 0; +} + + +/* Dictionaries that assign zero probability to symbols that show up cause problems + * during FSE encoding. Mark dictionaries with zero-probability symbols as FSE_repeat_check; + * only dictionaries with 100% valid symbols can be assumed valid. + */ +static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) +{ + U32 s; + if (dictMaxSymbolValue < maxSymbolValue) { + return FSE_repeat_check; + } + for (s = 0; s <= maxSymbolValue; ++s) { + if (normalizedCounter[s] == 0) { + return FSE_repeat_check; + } + } + return FSE_repeat_valid; +} + +size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, + const void* const dict, size_t dictSize) +{ + short offcodeNCount[MaxOff+1]; + unsigned offcodeMaxValue = MaxOff; + const BYTE* dictPtr = (const BYTE*)dict; /* skip magic num and dict ID */ + const BYTE* const dictEnd = dictPtr + dictSize; + dictPtr += 8; + bs->entropy.huf.repeatMode = HUF_repeat_check; + + { unsigned maxSymbolValue = 255; + unsigned hasZeroWeights = 1; + size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, + (size_t)(dictEnd-dictPtr), &hasZeroWeights); + + /* We only set the loaded table as valid if it contains all non-zero + * weights. 
Otherwise, we set it to check */ + if (!hasZeroWeights && maxSymbolValue == 255) + bs->entropy.huf.repeatMode = HUF_repeat_valid; + + RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, ""); + dictPtr += hufHeaderSize; + } + + { unsigned offcodeLog; + size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr)); + RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, ""); + RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, ""); + /* fill all offset symbols to avoid garbage at end of table */ + RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( + bs->entropy.fse.offcodeCTable, + offcodeNCount, MaxOff, offcodeLog, + workspace, HUF_WORKSPACE_SIZE)), + dictionary_corrupted, ""); + /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */ + dictPtr += offcodeHeaderSize; + } + + { short matchlengthNCount[MaxML+1]; + unsigned matchlengthMaxValue = MaxML, matchlengthLog; + size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr)); + RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, ""); + RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, ""); + RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( + bs->entropy.fse.matchlengthCTable, + matchlengthNCount, matchlengthMaxValue, matchlengthLog, + workspace, HUF_WORKSPACE_SIZE)), + dictionary_corrupted, ""); + bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML); + dictPtr += matchlengthHeaderSize; + } + + { short litlengthNCount[MaxLL+1]; + unsigned litlengthMaxValue = MaxLL, litlengthLog; + size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr)); + RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, ""); + RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, ""); + RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( + bs->entropy.fse.litlengthCTable, + litlengthNCount, litlengthMaxValue, litlengthLog, + workspace, HUF_WORKSPACE_SIZE)), + dictionary_corrupted, ""); + bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL); + dictPtr += litlengthHeaderSize; + } + + RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, ""); + bs->rep[0] = MEM_readLE32(dictPtr+0); + bs->rep[1] = MEM_readLE32(dictPtr+4); + bs->rep[2] = MEM_readLE32(dictPtr+8); + dictPtr += 12; + + { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); + U32 offcodeMax = MaxOff; + if (dictContentSize <= ((U32)-1) - 128 KB) { + U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ + offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */ + } + /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */ + bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)); + + /* All repCodes must be <= dictContentSize and != 0 */ + { U32 u; + for (u=0; u<3; u++) { + RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, ""); + RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, ""); + } } } + + return (size_t)(dictPtr - (const BYTE*)dict); +} + +/* Dictionary format : + * See : + * 
https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format + */ +/*! ZSTD_loadZstdDictionary() : + * @return : dictID, or an error code + * assumptions : magic number supposed already checked + * dictSize supposed >= 8 + */ +static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, + ZSTD_MatchState_t* ms, + ZSTD_cwksp* ws, + ZSTD_CCtx_params const* params, + const void* dict, size_t dictSize, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp, + void* workspace) +{ + const BYTE* dictPtr = (const BYTE*)dict; + const BYTE* const dictEnd = dictPtr + dictSize; + size_t dictID; + size_t eSize; + ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog))); + assert(dictSize >= 8); + assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY); + + dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr + 4 /* skip magic number */ ); + eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize); + FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed"); + dictPtr += eSize; + + { + size_t const dictContentSize = (size_t)(dictEnd - dictPtr); + FORWARD_IF_ERROR(ZSTD_loadDictionaryContent( + ms, NULL, ws, params, dictPtr, dictContentSize, dtlm, tfp), ""); + } + return dictID; +} + +/** ZSTD_compress_insertDictionary() : +* @return : dictID, or an error code */ +static size_t +ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, + ZSTD_MatchState_t* ms, + ldmState_t* ls, + ZSTD_cwksp* ws, + const ZSTD_CCtx_params* params, + const void* dict, size_t dictSize, + ZSTD_dictContentType_e dictContentType, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp, + void* workspace) +{ + DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize); + if ((dict==NULL) || (dictSize<8)) { + RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, ""); + return 0; + } + + ZSTD_reset_compressedBlockState(bs); + + /* dict restricted modes */ + if (dictContentType == ZSTD_dct_rawContent) + return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm, tfp); + + if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) { + if (dictContentType == ZSTD_dct_auto) { + DEBUGLOG(4, "raw content dictionary detected"); + return ZSTD_loadDictionaryContent( + ms, ls, ws, params, dict, dictSize, dtlm, tfp); + } + RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, ""); + assert(0); /* impossible */ + } + + /* dict as full zstd dictionary */ + return ZSTD_loadZstdDictionary( + bs, ms, ws, params, dict, dictSize, dtlm, tfp, workspace); +} + +#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB) +#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL) + +/*! ZSTD_compressBegin_internal() : + * Assumption : either @dict OR @cdict (or none) is non-NULL, never both + * @return : 0, or an error code */ +static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_dictContentType_e dictContentType, + ZSTD_dictTableLoadMethod_e dtlm, + const ZSTD_CDict* cdict, + const ZSTD_CCtx_params* params, U64 pledgedSrcSize, + ZSTD_buffered_policy_e zbuff) +{ + size_t const dictContentSize = cdict ? cdict->dictContentSize : dictSize; +#if ZSTD_TRACE + cctx->traceCtx = (ZSTD_trace_compress_begin != NULL) ? 
ZSTD_trace_compress_begin(cctx) : 0; +#endif + DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog); + /* params are supposed to be fully validated at this point */ + assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams))); + assert(!((dict) && (cdict))); /* either dict or cdict, not both */ + if ( (cdict) + && (cdict->dictContentSize > 0) + && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF + || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER + || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN + || cdict->compressionLevel == 0) + && (params->attachDictPref != ZSTD_dictForceLoad) ) { + return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff); + } + + FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize, + dictContentSize, + ZSTDcrp_makeClean, zbuff) , ""); + { size_t const dictID = cdict ? + ZSTD_compress_insertDictionary( + cctx->blockState.prevCBlock, &cctx->blockState.matchState, + &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent, + cdict->dictContentSize, cdict->dictContentType, dtlm, + ZSTD_tfp_forCCtx, cctx->tmpWorkspace) + : ZSTD_compress_insertDictionary( + cctx->blockState.prevCBlock, &cctx->blockState.matchState, + &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize, + dictContentType, dtlm, ZSTD_tfp_forCCtx, cctx->tmpWorkspace); + FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed"); + assert(dictID <= UINT_MAX); + cctx->dictID = (U32)dictID; + cctx->dictContentSize = dictContentSize; + } + return 0; +} + +size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_dictContentType_e dictContentType, + ZSTD_dictTableLoadMethod_e dtlm, + const ZSTD_CDict* cdict, + const ZSTD_CCtx_params* params, + unsigned long long pledgedSrcSize) +{ + DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog); + /* compression parameters verification and optimization */ + FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , ""); + return ZSTD_compressBegin_internal(cctx, + dict, dictSize, dictContentType, dtlm, + cdict, + params, pledgedSrcSize, + ZSTDb_not_buffered); +} + +/*! ZSTD_compressBegin_advanced() : +* @return : 0, or an error code */ +size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_parameters params, unsigned long long pledgedSrcSize) +{ + ZSTD_CCtx_params cctxParams; + ZSTD_CCtxParams_init_internal(&cctxParams, ¶ms, ZSTD_NO_CLEVEL); + return ZSTD_compressBegin_advanced_internal(cctx, + dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, + NULL /*cdict*/, + &cctxParams, pledgedSrcSize); +} + +static size_t +ZSTD_compressBegin_usingDict_deprecated(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) +{ + ZSTD_CCtx_params cctxParams; + { ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict); + ZSTD_CCtxParams_init_internal(&cctxParams, ¶ms, (compressionLevel == 0) ? 
ZSTD_CLEVEL_DEFAULT : compressionLevel); + } + DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize); + return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL, + &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered); +} + +size_t +ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) +{ + return ZSTD_compressBegin_usingDict_deprecated(cctx, dict, dictSize, compressionLevel); +} + +size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel) +{ + return ZSTD_compressBegin_usingDict_deprecated(cctx, NULL, 0, compressionLevel); +} + + +/*! ZSTD_writeEpilogue() : +* Ends a frame. +* @return : nb of bytes written into dst (or an error code) */ +static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) +{ + BYTE* const ostart = (BYTE*)dst; + BYTE* op = ostart; + + DEBUGLOG(4, "ZSTD_writeEpilogue"); + RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing"); + + /* special case : empty frame */ + if (cctx->stage == ZSTDcs_init) { + size_t fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0); + FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed"); + dstCapacity -= fhSize; + op += fhSize; + cctx->stage = ZSTDcs_ongoing; + } + + if (cctx->stage != ZSTDcs_ending) { + /* write one last empty block, make it the "last" block */ + U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0; + ZSTD_STATIC_ASSERT(ZSTD_BLOCKHEADERSIZE == 3); + RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "no room for epilogue"); + MEM_writeLE24(op, cBlockHeader24); + op += ZSTD_blockHeaderSize; + dstCapacity -= ZSTD_blockHeaderSize; + } + + if (cctx->appliedParams.fParams.checksumFlag) { + U32 const checksum = (U32) XXH64_digest(&cctx->xxhState); + RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum"); + DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum); + MEM_writeLE32(op, checksum); + op += 4; + } + + cctx->stage = ZSTDcs_created; /* return to "created but no init" status */ + return (size_t)(op-ostart); +} + +void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize) +{ +#if ZSTD_TRACE + if (cctx->traceCtx && ZSTD_trace_compress_end != NULL) { + int const streaming = cctx->inBuffSize > 0 || cctx->outBuffSize > 0 || cctx->appliedParams.nbWorkers > 0; + ZSTD_Trace trace; + ZSTD_memset(&trace, 0, sizeof(trace)); + trace.version = ZSTD_VERSION_NUMBER; + trace.streaming = streaming; + trace.dictionaryID = cctx->dictID; + trace.dictionarySize = cctx->dictContentSize; + trace.uncompressedSize = cctx->consumedSrcSize; + trace.compressedSize = cctx->producedCSize + extraCSize; + trace.params = &cctx->appliedParams; + trace.cctx = cctx; + ZSTD_trace_compress_end(cctx->traceCtx, &trace); + } + cctx->traceCtx = 0; +#else + (void)cctx; + (void)extraCSize; +#endif +} + +size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + size_t endResult; + size_t const cSize = ZSTD_compressContinue_internal(cctx, + dst, dstCapacity, src, srcSize, + 1 /* frame mode */, 1 /* last chunk */); + FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed"); + endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize); + FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed"); + assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0)); + if (cctx->pledgedSrcSizePlusOne != 0) { /* control 
src size */ + ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1); + DEBUGLOG(4, "end of frame : controlling src size"); + RETURN_ERROR_IF( + cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1, + srcSize_wrong, + "error : pledgedSrcSize = %u, while realSrcSize = %u", + (unsigned)cctx->pledgedSrcSizePlusOne-1, + (unsigned)cctx->consumedSrcSize); + } + ZSTD_CCtx_trace(cctx, endResult); + return cSize + endResult; +} + +/* NOTE: Must just wrap ZSTD_compressEnd_public() */ +size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); +} + +size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + ZSTD_parameters params) +{ + DEBUGLOG(4, "ZSTD_compress_advanced"); + FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), ""); + ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, ¶ms, ZSTD_NO_CLEVEL); + return ZSTD_compress_advanced_internal(cctx, + dst, dstCapacity, + src, srcSize, + dict, dictSize, + &cctx->simpleApiParams); +} + +/* Internal */ +size_t ZSTD_compress_advanced_internal( + ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + const ZSTD_CCtx_params* params) +{ + DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize); + FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx, + dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL, + params, srcSize, ZSTDb_not_buffered) , ""); + return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); +} + +size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + int compressionLevel) +{ + { + ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict); + assert(params.fParams.contentSizeFlag == 1); + ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, ¶ms, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel); + } + DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize); + return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams); +} + +size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + int compressionLevel) +{ + DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize); + assert(cctx != NULL); + return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel); +} + +size_t ZSTD_compress(void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + int compressionLevel) +{ + size_t result; +#if ZSTD_COMPRESS_HEAPMODE + ZSTD_CCtx* cctx = ZSTD_createCCtx(); + RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed"); + result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel); + ZSTD_freeCCtx(cctx); +#else + ZSTD_CCtx ctxBody; + ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem); + result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel); + ZSTD_freeCCtxContent(&ctxBody); /* can't free ctxBody itself, as it's on stack; free only heap content */ +#endif + return result; +} + + +/* ===== Dictionary API ===== */ + +/*! 
ZSTD_estimateCDictSize_advanced() : + * Estimate amount of memory that will be needed to create a dictionary with following arguments */ +size_t ZSTD_estimateCDictSize_advanced( + size_t dictSize, ZSTD_compressionParameters cParams, + ZSTD_dictLoadMethod_e dictLoadMethod) +{ + DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict)); + return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) + + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) + /* enableDedicatedDictSearch == 1 ensures that CDict estimation will not be too small + * in case we are using DDS with row-hash. */ + + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams), + /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0) + + (dictLoadMethod == ZSTD_dlm_byRef ? 0 + : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *)))); +} + +size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel) +{ + ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict); + return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy); +} + +size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict) +{ + if (cdict==NULL) return 0; /* support sizeof on NULL */ + DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict)); + /* cdict may be in the workspace */ + return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict)) + + ZSTD_cwksp_sizeof(&cdict->workspace); +} + +static size_t ZSTD_initCDict_internal( + ZSTD_CDict* cdict, + const void* dictBuffer, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_CCtx_params params) +{ + DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType); + assert(!ZSTD_checkCParams(params.cParams)); + cdict->matchState.cParams = params.cParams; + cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch; + if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) { + cdict->dictContent = dictBuffer; + } else { + void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*))); + RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!"); + cdict->dictContent = internalBuffer; + ZSTD_memcpy(internalBuffer, dictBuffer, dictSize); + } + cdict->dictContentSize = dictSize; + cdict->dictContentType = dictContentType; + + cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE); + + + /* Reset the state to no dictionary */ + ZSTD_reset_compressedBlockState(&cdict->cBlockState); + FORWARD_IF_ERROR(ZSTD_reset_matchState( + &cdict->matchState, + &cdict->workspace, + ¶ms.cParams, + params.useRowMatchFinder, + ZSTDcrp_makeClean, + ZSTDirp_reset, + ZSTD_resetTarget_CDict), ""); + /* (Maybe) load the dictionary + * Skips loading the dictionary if it is < 8 bytes. 
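+ * (8 bytes is the smallest possible valid header : 4-byte magic + 4-byte dictID ; + * ZSTD_compress_insertDictionary() treats anything smaller as "no dictionary", + * or errors out when ZSTD_dct_fullDict is required.)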
+ */ + { params.compressionLevel = ZSTD_CLEVEL_DEFAULT; + params.fParams.contentSizeFlag = 1; + { size_t const dictID = ZSTD_compress_insertDictionary( + &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace, + ¶ms, cdict->dictContent, cdict->dictContentSize, + dictContentType, ZSTD_dtlm_full, ZSTD_tfp_forCDict, cdict->entropyWorkspace); + FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed"); + assert(dictID <= (size_t)(U32)-1); + cdict->dictID = (U32)dictID; + } + } + + return 0; +} + +static ZSTD_CDict* +ZSTD_createCDict_advanced_internal(size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_compressionParameters cParams, + ZSTD_ParamSwitch_e useRowMatchFinder, + int enableDedicatedDictSearch, + ZSTD_customMem customMem) +{ + if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; + DEBUGLOG(3, "ZSTD_createCDict_advanced_internal (dictSize=%u)", (unsigned)dictSize); + + { size_t const workspaceSize = + ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) + + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) + + ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, enableDedicatedDictSearch, /* forCCtx */ 0) + + (dictLoadMethod == ZSTD_dlm_byRef ? 0 + : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*)))); + void* const workspace = ZSTD_customMalloc(workspaceSize, customMem); + ZSTD_cwksp ws; + ZSTD_CDict* cdict; + + if (!workspace) { + ZSTD_customFree(workspace, customMem); + return NULL; + } + + ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc); + + cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict)); + assert(cdict != NULL); + ZSTD_cwksp_move(&cdict->workspace, &ws); + cdict->customMem = customMem; + cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */ + cdict->useRowMatchFinder = useRowMatchFinder; + return cdict; + } +} + +ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_compressionParameters cParams, + ZSTD_customMem customMem) +{ + ZSTD_CCtx_params cctxParams; + ZSTD_memset(&cctxParams, 0, sizeof(cctxParams)); + DEBUGLOG(3, "ZSTD_createCDict_advanced, dictSize=%u, mode=%u", (unsigned)dictSize, (unsigned)dictContentType); + ZSTD_CCtxParams_init(&cctxParams, 0); + cctxParams.cParams = cParams; + cctxParams.customMem = customMem; + return ZSTD_createCDict_advanced2( + dictBuffer, dictSize, + dictLoadMethod, dictContentType, + &cctxParams, customMem); +} + +ZSTD_CDict* ZSTD_createCDict_advanced2( + const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + const ZSTD_CCtx_params* originalCctxParams, + ZSTD_customMem customMem) +{ + ZSTD_CCtx_params cctxParams = *originalCctxParams; + ZSTD_compressionParameters cParams; + ZSTD_CDict* cdict; + + DEBUGLOG(3, "ZSTD_createCDict_advanced2, dictSize=%u, mode=%u", (unsigned)dictSize, (unsigned)dictContentType); + if (!customMem.customAlloc ^ !customMem.customFree) return NULL; + + if (cctxParams.enableDedicatedDictSearch) { + cParams = ZSTD_dedicatedDictSearch_getCParams( + cctxParams.compressionLevel, dictSize); + ZSTD_overrideCParams(&cParams, &cctxParams.cParams); + } else { + cParams = ZSTD_getCParamsFromCCtxParams( + &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict); + } + + if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) { + /* Fall back to non-DDSS params */ + cctxParams.enableDedicatedDictSearch = 0; + cParams = 
ZSTD_getCParamsFromCCtxParams( + &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict); + } + + DEBUGLOG(3, "ZSTD_createCDict_advanced2: DedicatedDictSearch=%u", cctxParams.enableDedicatedDictSearch); + cctxParams.cParams = cParams; + cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); + + cdict = ZSTD_createCDict_advanced_internal(dictSize, + dictLoadMethod, cctxParams.cParams, + cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch, + customMem); + + if (!cdict || ZSTD_isError( ZSTD_initCDict_internal(cdict, + dict, dictSize, + dictLoadMethod, dictContentType, + cctxParams) )) { + ZSTD_freeCDict(cdict); + return NULL; + } + + return cdict; +} + +ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel) +{ + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict); + ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize, + ZSTD_dlm_byCopy, ZSTD_dct_auto, + cParams, ZSTD_defaultCMem); + if (cdict) + cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel; + return cdict; +} + +ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel) +{ + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict); + ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize, + ZSTD_dlm_byRef, ZSTD_dct_auto, + cParams, ZSTD_defaultCMem); + if (cdict) + cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel; + return cdict; +} + +size_t ZSTD_freeCDict(ZSTD_CDict* cdict) +{ + if (cdict==NULL) return 0; /* support free on NULL */ + { ZSTD_customMem const cMem = cdict->customMem; + int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict); + ZSTD_cwksp_free(&cdict->workspace, cMem); + if (!cdictInWorkspace) { + ZSTD_customFree(cdict, cMem); + } + return 0; + } +} + +/*! ZSTD_initStaticCDict_advanced() : + * Generate a digested dictionary in provided memory area. + * workspace: The memory area to emplace the dictionary into. + * Provided pointer must 8-bytes aligned. + * It must outlive dictionary usage. + * workspaceSize: Use ZSTD_estimateCDictSize() + * to determine how large workspace must be. + * cParams : use ZSTD_getCParams() to transform a compression level + * into its relevant cParams. + * @return : pointer to ZSTD_CDict*, or NULL if error (size too small) + * Note : there is no corresponding "free" function. + * Since workspace was allocated externally, it must be freed externally. + */ +const ZSTD_CDict* ZSTD_initStaticCDict( + void* workspace, size_t workspaceSize, + const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_compressionParameters cParams) +{ + ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams); + /* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */ + size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0); + size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) + + (dictLoadMethod == ZSTD_dlm_byRef ? 
0 + : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*)))) + + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) + + matchStateSize; + ZSTD_CDict* cdict; + ZSTD_CCtx_params params; + + DEBUGLOG(4, "ZSTD_initStaticCDict (dictSize==%u)", (unsigned)dictSize); + if ((size_t)workspace & 7) return NULL; /* 8-aligned */ + + { + ZSTD_cwksp ws; + ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc); + cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict)); + if (cdict == NULL) return NULL; + ZSTD_cwksp_move(&cdict->workspace, &ws); + } + + if (workspaceSize < neededSize) return NULL; + + ZSTD_CCtxParams_init(¶ms, 0); + params.cParams = cParams; + params.useRowMatchFinder = useRowMatchFinder; + cdict->useRowMatchFinder = useRowMatchFinder; + cdict->compressionLevel = ZSTD_NO_CLEVEL; + + if (ZSTD_isError( ZSTD_initCDict_internal(cdict, + dict, dictSize, + dictLoadMethod, dictContentType, + params) )) + return NULL; + + return cdict; +} + +ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict) +{ + assert(cdict != NULL); + return cdict->matchState.cParams; +} + +/*! ZSTD_getDictID_fromCDict() : + * Provides the dictID of the dictionary loaded into `cdict`. + * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. + * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ +unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict) +{ + if (cdict==NULL) return 0; + return cdict->dictID; +} + +/* ZSTD_compressBegin_usingCDict_internal() : + * Implementation of various ZSTD_compressBegin_usingCDict* functions. + */ +static size_t ZSTD_compressBegin_usingCDict_internal( + ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, + ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize) +{ + ZSTD_CCtx_params cctxParams; + DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_internal"); + RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!"); + /* Initialize the cctxParams from the cdict */ + { + ZSTD_parameters params; + params.fParams = fParams; + params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF + || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER + || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN + || cdict->compressionLevel == 0 ) ? + ZSTD_getCParamsFromCDict(cdict) + : ZSTD_getCParams(cdict->compressionLevel, + pledgedSrcSize, + cdict->dictContentSize); + ZSTD_CCtxParams_init_internal(&cctxParams, ¶ms, cdict->compressionLevel); + } + /* Increase window log to fit the entire dictionary and source if the + * source size is known. Limit the increase to 19, which is the + * window log for compression level 1 with the largest source size. + */ + if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) { + U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19); + U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1; + cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog); + } + return ZSTD_compressBegin_internal(cctx, + NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, + cdict, + &cctxParams, pledgedSrcSize, + ZSTDb_not_buffered); +} + + +/* ZSTD_compressBegin_usingCDict_advanced() : + * This function is DEPRECATED. 
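+ * New code should rather combine ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only), + * ZSTD_CCtx_refCDict(cctx, cdict), ZSTD_CCtx_setPledgedSrcSize(cctx, pledgedSrcSize), + * and ZSTD_CCtx_setParameter() for frame parameters + * (sketch : each call returns an error code to be checked with ZSTD_isError()).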
+ * cdict must be != NULL */ +size_t ZSTD_compressBegin_usingCDict_advanced( + ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, + ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize) +{ + return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize); +} + +/* ZSTD_compressBegin_usingCDict() : + * cdict must be != NULL */ +size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) +{ + ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; + return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN); +} + +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) +{ + return ZSTD_compressBegin_usingCDict_deprecated(cctx, cdict); +} + +/*! ZSTD_compress_usingCDict_internal(): + * Implementation of various ZSTD_compress_usingCDict* functions. + */ +static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_CDict* cdict, ZSTD_frameParameters fParams) +{ + FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */ + return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); +} + +/*! ZSTD_compress_usingCDict_advanced(): + * This function is DEPRECATED. + */ +size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_CDict* cdict, ZSTD_frameParameters fParams) +{ + return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams); +} + +/*! ZSTD_compress_usingCDict() : + * Compression using a digested Dictionary. + * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. 
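+ * Editor's illustration (not part of the original sources): a minimal sketch,
+ * assuming dictBuf/dictSize and the dst/src buffers with their sizes already exist:
+ *     ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
+ *     ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);
+ *     size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
+ *   Check ZSTD_isError(cSize); the same cdict can then serve many frames.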
+ * Note that compression parameters are decided at CDict creation time
+ * while frame parameters are hardcoded */
+size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+                                void* dst, size_t dstCapacity,
+                                const void* src, size_t srcSize,
+                                const ZSTD_CDict* cdict)
+{
+    ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+    return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
+}
+
+
+
+/* ******************************************************************
+* Streaming
+********************************************************************/
+
+ZSTD_CStream* ZSTD_createCStream(void)
+{
+    DEBUGLOG(3, "ZSTD_createCStream");
+    return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
+}
+
+ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
+{
+    return ZSTD_initStaticCCtx(workspace, workspaceSize);
+}
+
+ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
+{   /* CStream and CCtx are now same object */
+    return ZSTD_createCCtx_advanced(customMem);
+}
+
+size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
+{
+    return ZSTD_freeCCtx(zcs);   /* same object */
+}
+
+
+
+/*====== Initialization ======*/
+
+size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; }
+
+size_t ZSTD_CStreamOutSize(void)
+{
+    return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bit hash */ ;
+}
+
+static ZSTD_CParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
+{
+    if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
+        return ZSTD_cpm_attachDict;
+    else
+        return ZSTD_cpm_noAttachDict;
+}
+
+/* ZSTD_resetCStream():
+ * pledgedSrcSize == 0 means "unknown" */
+size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
+{
+    /* temporary : 0 interpreted as "unknown" during transition period.
+     * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
+     * 0 will be interpreted as "empty" in the future.
+     */
+    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+    DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
+    return 0;
+}
+
+/*! ZSTD_initCStream_internal() :
+ *  Note : for lib/compress only. Used by zstdmt_compress.c. 
+ * Assumption 1 : params are valid + * Assumption 2 : either dict, or cdict, is defined, not both */ +size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, + const void* dict, size_t dictSize, const ZSTD_CDict* cdict, + const ZSTD_CCtx_params* params, + unsigned long long pledgedSrcSize) +{ + DEBUGLOG(4, "ZSTD_initCStream_internal"); + FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); + assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams))); + zcs->requestedParams = *params; + assert(!((dict) && (cdict))); /* either dict or cdict, not both */ + if (dict) { + FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , ""); + } else { + /* Dictionary is cleared if !cdict */ + FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , ""); + } + return 0; +} + +/* ZSTD_initCStream_usingCDict_advanced() : + * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */ +size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, + const ZSTD_CDict* cdict, + ZSTD_frameParameters fParams, + unsigned long long pledgedSrcSize) +{ + DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced"); + FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); + zcs->requestedParams.fParams = fParams; + FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , ""); + return 0; +} + +/* note : cdict must outlive compression session */ +size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict) +{ + DEBUGLOG(4, "ZSTD_initCStream_usingCDict"); + FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , ""); + return 0; +} + + +/* ZSTD_initCStream_advanced() : + * pledgedSrcSize must be exact. + * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. + * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */ +size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, + const void* dict, size_t dictSize, + ZSTD_parameters params, unsigned long long pss) +{ + /* for compatibility with older programs relying on this behavior. + * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. + * This line will be removed in the future. + */ + U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; + DEBUGLOG(4, "ZSTD_initCStream_advanced"); + FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); + FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , ""); + ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, ¶ms); + FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , ""); + return 0; +} + +size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel) +{ + DEBUGLOG(4, "ZSTD_initCStream_usingDict"); + FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , ""); + return 0; +} + +size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss) +{ + /* temporary : 0 interpreted as "unknown" during transition period. + * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. 
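+ * For example (editor's note; the level value is illustrative):
+ *     ZSTD_initCStream_srcSize(zcs, 3, ZSTD_CONTENTSIZE_UNKNOWN);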
+ * 0 will be interpreted as "empty" in the future. + */ + U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; + DEBUGLOG(4, "ZSTD_initCStream_srcSize"); + FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); + return 0; +} + +size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel) +{ + DEBUGLOG(4, "ZSTD_initCStream"); + FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); + return 0; +} + +/*====== Compression ======*/ + +static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx) +{ + if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { + return cctx->blockSizeMax - cctx->stableIn_notConsumed; + } + assert(cctx->appliedParams.inBufferMode == ZSTD_bm_buffered); + { size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos; + if (hintInSize==0) hintInSize = cctx->blockSizeMax; + return hintInSize; + } +} + +/** ZSTD_compressStream_generic(): + * internal function for all *compressStream*() variants + * @return : hint size for next input to complete ongoing block */ +static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, + ZSTD_outBuffer* output, + ZSTD_inBuffer* input, + ZSTD_EndDirective const flushMode) +{ + const char* const istart = (assert(input != NULL), (const char*)input->src); + const char* const iend = (istart != NULL) ? istart + input->size : istart; + const char* ip = (istart != NULL) ? istart + input->pos : istart; + char* const ostart = (assert(output != NULL), (char*)output->dst); + char* const oend = (ostart != NULL) ? ostart + output->size : ostart; + char* op = (ostart != NULL) ? 
ostart + output->pos : ostart; + U32 someMoreWork = 1; + + /* check expectations */ + DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%i, srcSize = %zu", (int)flushMode, input->size - input->pos); + assert(zcs != NULL); + if (zcs->appliedParams.inBufferMode == ZSTD_bm_stable) { + assert(input->pos >= zcs->stableIn_notConsumed); + input->pos -= zcs->stableIn_notConsumed; + if (ip) ip -= zcs->stableIn_notConsumed; + zcs->stableIn_notConsumed = 0; + } + if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) { + assert(zcs->inBuff != NULL); + assert(zcs->inBuffSize > 0); + } + if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) { + assert(zcs->outBuff != NULL); + assert(zcs->outBuffSize > 0); + } + if (input->src == NULL) assert(input->size == 0); + assert(input->pos <= input->size); + if (output->dst == NULL) assert(output->size == 0); + assert(output->pos <= output->size); + assert((U32)flushMode <= (U32)ZSTD_e_end); + + while (someMoreWork) { + switch(zcs->streamStage) + { + case zcss_init: + RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!"); + + case zcss_load: + if ( (flushMode == ZSTD_e_end) + && ( (size_t)(oend-op) >= ZSTD_compressBound((size_t)(iend-ip)) /* Enough output space */ + || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */ + && (zcs->inBuffPos == 0) ) { + /* shortcut to compression pass directly into output buffer */ + size_t const cSize = ZSTD_compressEnd_public(zcs, + op, (size_t)(oend-op), + ip, (size_t)(iend-ip)); + DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize); + FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed"); + ip = iend; + op += cSize; + zcs->frameEnded = 1; + ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); + someMoreWork = 0; break; + } + /* complete loading into inBuffer in buffered mode */ + if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) { + size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; + size_t const loaded = ZSTD_limitCopy( + zcs->inBuff + zcs->inBuffPos, toLoad, + ip, (size_t)(iend-ip)); + zcs->inBuffPos += loaded; + if (ip) ip += loaded; + if ( (flushMode == ZSTD_e_continue) + && (zcs->inBuffPos < zcs->inBuffTarget) ) { + /* not enough input to fill full block : stop here */ + someMoreWork = 0; break; + } + if ( (flushMode == ZSTD_e_flush) + && (zcs->inBuffPos == zcs->inToCompress) ) { + /* empty */ + someMoreWork = 0; break; + } + } else { + assert(zcs->appliedParams.inBufferMode == ZSTD_bm_stable); + if ( (flushMode == ZSTD_e_continue) + && ( (size_t)(iend - ip) < zcs->blockSizeMax) ) { + /* can't compress a full block : stop here */ + zcs->stableIn_notConsumed = (size_t)(iend - ip); + ip = iend; /* pretend to have consumed input */ + someMoreWork = 0; break; + } + if ( (flushMode == ZSTD_e_flush) + && (ip == iend) ) { + /* empty */ + someMoreWork = 0; break; + } + } + /* compress current block (note : this stage cannot be stopped in the middle) */ + DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode); + { int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered); + void* cDst; + size_t cSize; + size_t oSize = (size_t)(oend-op); + size_t const iSize = inputBuffered ? 
zcs->inBuffPos - zcs->inToCompress + : MIN((size_t)(iend - ip), zcs->blockSizeMax); + if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) + cDst = op; /* compress into output buffer, to skip flush stage */ + else + cDst = zcs->outBuff, oSize = zcs->outBuffSize; + if (inputBuffered) { + unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend); + cSize = lastBlock ? + ZSTD_compressEnd_public(zcs, cDst, oSize, + zcs->inBuff + zcs->inToCompress, iSize) : + ZSTD_compressContinue_public(zcs, cDst, oSize, + zcs->inBuff + zcs->inToCompress, iSize); + FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed"); + zcs->frameEnded = lastBlock; + /* prepare next block */ + zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSizeMax; + if (zcs->inBuffTarget > zcs->inBuffSize) + zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSizeMax; + DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u", + (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize); + if (!lastBlock) + assert(zcs->inBuffTarget <= zcs->inBuffSize); + zcs->inToCompress = zcs->inBuffPos; + } else { /* !inputBuffered, hence ZSTD_bm_stable */ + unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip + iSize == iend); + cSize = lastBlock ? + ZSTD_compressEnd_public(zcs, cDst, oSize, ip, iSize) : + ZSTD_compressContinue_public(zcs, cDst, oSize, ip, iSize); + /* Consume the input prior to error checking to mirror buffered mode. */ + if (ip) ip += iSize; + FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed"); + zcs->frameEnded = lastBlock; + if (lastBlock) assert(ip == iend); + } + if (cDst == op) { /* no need to flush */ + op += cSize; + if (zcs->frameEnded) { + DEBUGLOG(5, "Frame completed directly in outBuffer"); + someMoreWork = 0; + ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); + } + break; + } + zcs->outBuffContentSize = cSize; + zcs->outBuffFlushedSize = 0; + zcs->streamStage = zcss_flush; /* pass-through to flush stage */ + } + ZSTD_FALLTHROUGH; + case zcss_flush: + DEBUGLOG(5, "flush stage"); + assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered); + { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; + size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op), + zcs->outBuff + zcs->outBuffFlushedSize, toFlush); + DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u", + (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed); + if (flushed) + op += flushed; + zcs->outBuffFlushedSize += flushed; + if (toFlush!=flushed) { + /* flush not fully completed, presumably because dst is too small */ + assert(op==oend); + someMoreWork = 0; + break; + } + zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; + if (zcs->frameEnded) { + DEBUGLOG(5, "Frame completed on flush"); + someMoreWork = 0; + ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); + break; + } + zcs->streamStage = zcss_load; + break; + } + + default: /* impossible */ + assert(0); + } + } -size_t ZSTD_compress(void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - int compressionLevel) -{ - size_t result; - ZSTD_CCtx ctxBody; - ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem); - result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel); - ZSTD_freeCCtxContent(&ctxBody); /* can't free ctxBody itself, as it's on stack; free only heap content */ - return result; + input->pos = (size_t)(ip - istart); + output->pos = (size_t)(op - ostart); + if (zcs->frameEnded) return 0; + return 
ZSTD_nextInputSizeHint(zcs); } +static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx) +{ +#ifdef ZSTD_MULTITHREAD + if (cctx->appliedParams.nbWorkers >= 1) { + assert(cctx->mtctx != NULL); + return ZSTDMT_nextInputSizeHint(cctx->mtctx); + } +#endif + return ZSTD_nextInputSizeHint(cctx); -/* ===== Dictionary API ===== */ +} -/*! ZSTD_estimateCDictSize_advanced() : - * Estimate amount of memory that will be needed to create a dictionary with following arguments */ -size_t ZSTD_estimateCDictSize_advanced( - size_t dictSize, ZSTD_compressionParameters cParams, - ZSTD_dictLoadMethod_e dictLoadMethod) +size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { - DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict)); - return sizeof(ZSTD_CDict) + HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) - + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize); + FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , ""); + return ZSTD_nextInputSizeHint_MTorST(zcs); } -size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel) +/* After a compression call set the expected input/output buffer. + * This is validated at the start of the next compression call. + */ +static void +ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, const ZSTD_outBuffer* output, const ZSTD_inBuffer* input) { - ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, dictSize); - return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy); + DEBUGLOG(5, "ZSTD_setBufferExpectations (for advanced stable in/out modes)"); + if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { + cctx->expectedInBuffer = *input; + } + if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) { + cctx->expectedOutBufferSize = output->size - output->pos; + } } -size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict) -{ - if (cdict==NULL) return 0; /* support sizeof on NULL */ - DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict)); - return cdict->workspaceSize + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict); +/* Validate that the input/output buffers match the expectations set by + * ZSTD_setBufferExpectations. 
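+ * Editor's note: concretely, a caller that opts in with, e.g.,
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableInBuffer, 1);
+ * must present the same src pointer and pos on every call; the checks below
+ * fail with stabilityCondition_notRespected otherwise.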
+ */ +static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx, + ZSTD_outBuffer const* output, + ZSTD_inBuffer const* input, + ZSTD_EndDirective endOp) +{ + if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { + ZSTD_inBuffer const expect = cctx->expectedInBuffer; + if (expect.src != input->src || expect.pos != input->pos) + RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableInBuffer enabled but input differs!"); + } + (void)endOp; + if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) { + size_t const outBufferSize = output->size - output->pos; + if (cctx->expectedOutBufferSize != outBufferSize) + RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableOutBuffer enabled but output size differs!"); + } + return 0; } -static size_t ZSTD_initCDict_internal( - ZSTD_CDict* cdict, - const void* dictBuffer, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_compressionParameters cParams) -{ - DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType); - assert(!ZSTD_checkCParams(cParams)); - cdict->matchState.cParams = cParams; - if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) { - cdict->dictBuffer = NULL; - cdict->dictContent = dictBuffer; - } else { - void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem); - cdict->dictBuffer = internalBuffer; - cdict->dictContent = internalBuffer; - RETURN_ERROR_IF(!internalBuffer, memory_allocation); - memcpy(internalBuffer, dictBuffer, dictSize); +/* + * If @endOp == ZSTD_e_end, @inSize becomes pledgedSrcSize. + * Otherwise, it's ignored. + * @return: 0 on success, or a ZSTD_error code otherwise. + */ +static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, + ZSTD_EndDirective endOp, + size_t inSize) +{ + ZSTD_CCtx_params params = cctx->requestedParams; + ZSTD_prefixDict const prefixDict = cctx->prefixDict; + FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */ + ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */ + assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */ + if (cctx->cdict && !cctx->localDict.cdict) { + /* Let the cdict's compression level take priority over the requested params. + * But do not take the cdict's compression level if the "cdict" is actually a localDict + * generated from ZSTD_initLocalDict(). + */ + params.compressionLevel = cctx->cdict->compressionLevel; } - cdict->dictContentSize = dictSize; + DEBUGLOG(4, "ZSTD_CCtx_init_compressStream2 : transparent init stage"); + if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-determine pledgedSrcSize */ - /* Reset the state to no dictionary */ - ZSTD_reset_compressedBlockState(&cdict->cBlockState); - { void* const end = ZSTD_reset_matchState( - &cdict->matchState, - (U32*)cdict->workspace + HUF_WORKSPACE_SIZE_U32, - &cParams, ZSTDcrp_continue, /* forCCtx */ 0); - assert(end == (char*)cdict->workspace + cdict->workspaceSize); - (void)end; + { size_t const dictSize = prefixDict.dict + ? prefixDict.dictSize + : (cctx->cdict ? cctx->cdict->dictContentSize : 0); + ZSTD_CParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, ¶ms, cctx->pledgedSrcSizePlusOne - 1); + params.cParams = ZSTD_getCParamsFromCCtxParams( + ¶ms, cctx->pledgedSrcSizePlusOne-1, + dictSize, mode); } - /* (Maybe) load the dictionary - * Skips loading the dictionary if it is <= 8 bytes. 
- */ - { ZSTD_CCtx_params params; - memset(¶ms, 0, sizeof(params)); - params.compressionLevel = ZSTD_CLEVEL_DEFAULT; - params.fParams.contentSizeFlag = 1; - params.cParams = cParams; - { size_t const dictID = ZSTD_compress_insertDictionary( - &cdict->cBlockState, &cdict->matchState, ¶ms, - cdict->dictContent, cdict->dictContentSize, - dictContentType, ZSTD_dtlm_full, cdict->workspace); - FORWARD_IF_ERROR(dictID); - assert(dictID <= (size_t)(U32)-1); - cdict->dictID = (U32)dictID; + + params.postBlockSplitter = ZSTD_resolveBlockSplitterMode(params.postBlockSplitter, ¶ms.cParams); + params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, ¶ms.cParams); + params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, ¶ms.cParams); + params.validateSequences = ZSTD_resolveExternalSequenceValidation(params.validateSequences); + params.maxBlockSize = ZSTD_resolveMaxBlockSize(params.maxBlockSize); + params.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(params.searchForExternalRepcodes, params.compressionLevel); + +#ifdef ZSTD_MULTITHREAD + /* If external matchfinder is enabled, make sure to fail before checking job size (for consistency) */ + RETURN_ERROR_IF( + ZSTD_hasExtSeqProd(¶ms) && params.nbWorkers >= 1, + parameter_combination_unsupported, + "External sequence producer isn't supported with nbWorkers >= 1" + ); + + if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) { + params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */ + } + if (params.nbWorkers > 0) { +# if ZSTD_TRACE + cctx->traceCtx = (ZSTD_trace_compress_begin != NULL) ? ZSTD_trace_compress_begin(cctx) : 0; +# endif + /* mt context creation */ + if (cctx->mtctx == NULL) { + DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u", + params.nbWorkers); + cctx->mtctx = ZSTDMT_createCCtx_advanced((U32)params.nbWorkers, cctx->customMem, cctx->pool); + RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation, "NULL pointer!"); + } + /* mt compression */ + DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers); + FORWARD_IF_ERROR( ZSTDMT_initCStream_internal( + cctx->mtctx, + prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, + cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) , ""); + cctx->dictID = cctx->cdict ? cctx->cdict->dictID : 0; + cctx->dictContentSize = cctx->cdict ? 
cctx->cdict->dictContentSize : prefixDict.dictSize; + cctx->consumedSrcSize = 0; + cctx->producedCSize = 0; + cctx->streamStage = zcss_load; + cctx->appliedParams = params; + } else +#endif /* ZSTD_MULTITHREAD */ + { U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1; + assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); + FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx, + prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast, + cctx->cdict, + ¶ms, pledgedSrcSize, + ZSTDb_buffered) , ""); + assert(cctx->appliedParams.nbWorkers == 0); + cctx->inToCompress = 0; + cctx->inBuffPos = 0; + if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) { + /* for small input: avoid automatic flush on reaching end of block, since + * it would require to add a 3-bytes null block to end frame + */ + cctx->inBuffTarget = cctx->blockSizeMax + (cctx->blockSizeMax == pledgedSrcSize); + } else { + cctx->inBuffTarget = 0; } + cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0; + cctx->streamStage = zcss_load; + cctx->frameEnded = 0; } - return 0; } -ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_compressionParameters cParams, ZSTD_customMem customMem) +/* @return provides a minimum amount of data remaining to be flushed from internal buffers + */ +size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, + ZSTD_outBuffer* output, + ZSTD_inBuffer* input, + ZSTD_EndDirective endOp) { - DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (unsigned)dictContentType); - if (!customMem.customAlloc ^ !customMem.customFree) return NULL; + DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp); + /* check conditions */ + RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer"); + RETURN_ERROR_IF(input->pos > input->size, srcSize_wrong, "invalid input buffer"); + RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective"); + assert(cctx != NULL); - { ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem); - size_t const workspaceSize = HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0); - void* const workspace = ZSTD_malloc(workspaceSize, customMem); + /* transparent initialization stage */ + if (cctx->streamStage == zcss_init) { + size_t const inputSize = input->size - input->pos; /* no obligation to start from pos==0 */ + size_t const totalInputSize = inputSize + cctx->stableIn_notConsumed; + if ( (cctx->requestedParams.inBufferMode == ZSTD_bm_stable) /* input is presumed stable, across invocations */ + && (endOp == ZSTD_e_continue) /* no flush requested, more input to come */ + && (totalInputSize < ZSTD_BLOCKSIZE_MAX) ) { /* not even reached one block yet */ + if (cctx->stableIn_notConsumed) { /* not the first time */ + /* check stable source guarantees */ + RETURN_ERROR_IF(input->src != cctx->expectedInBuffer.src, stabilityCondition_notRespected, "stableInBuffer condition not respected: wrong src pointer"); + RETURN_ERROR_IF(input->pos != cctx->expectedInBuffer.size, stabilityCondition_notRespected, "stableInBuffer condition not respected: externally modified pos"); + } + /* pretend input was consumed, to give a sense forward progress */ + input->pos = input->size; + /* save stable inBuffer, for later control, and flush/end */ + cctx->expectedInBuffer = *input; + /* but actually input wasn't consumed, so keep track of position from where 
compression shall resume */ + cctx->stableIn_notConsumed += inputSize; + /* don't initialize yet, wait for the first block of flush() order, for better parameters adaptation */ + return ZSTD_FRAMEHEADERSIZE_MIN(cctx->requestedParams.format); /* at least some header to produce */ + } + FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, totalInputSize), "compressStream2 initialization failed"); + ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */ + } + /* end of transparent initialization stage */ - if (!cdict || !workspace) { - ZSTD_free(cdict, customMem); - ZSTD_free(workspace, customMem); - return NULL; + FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers"); + /* compression stage */ +#ifdef ZSTD_MULTITHREAD + if (cctx->appliedParams.nbWorkers > 0) { + size_t flushMin; + if (cctx->cParamsChanged) { + ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams); + cctx->cParamsChanged = 0; } - cdict->customMem = customMem; - cdict->workspace = workspace; - cdict->workspaceSize = workspaceSize; - if (ZSTD_isError( ZSTD_initCDict_internal(cdict, - dictBuffer, dictSize, - dictLoadMethod, dictContentType, - cParams) )) { - ZSTD_freeCDict(cdict); - return NULL; + if (cctx->stableIn_notConsumed) { + assert(cctx->appliedParams.inBufferMode == ZSTD_bm_stable); + /* some early data was skipped - make it available for consumption */ + assert(input->pos >= cctx->stableIn_notConsumed); + input->pos -= cctx->stableIn_notConsumed; + cctx->stableIn_notConsumed = 0; } + for (;;) { + size_t const ipos = input->pos; + size_t const opos = output->pos; + flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp); + cctx->consumedSrcSize += (U64)(input->pos - ipos); + cctx->producedCSize += (U64)(output->pos - opos); + if ( ZSTD_isError(flushMin) + || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */ + if (flushMin == 0) + ZSTD_CCtx_trace(cctx, 0); + ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only); + } + FORWARD_IF_ERROR(flushMin, "ZSTDMT_compressStream_generic failed"); - return cdict; + if (endOp == ZSTD_e_continue) { + /* We only require some progress with ZSTD_e_continue, not maximal progress. + * We're done if we've consumed or produced any bytes, or either buffer is + * full. + */ + if (input->pos != ipos || output->pos != opos || input->pos == input->size || output->pos == output->size) + break; + } else { + assert(endOp == ZSTD_e_flush || endOp == ZSTD_e_end); + /* We require maximal progress. We're done when the flush is complete or the + * output buffer is full. + */ + if (flushMin == 0 || output->pos == output->size) + break; + } + } + DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic"); + /* Either we don't require maximum forward progress, we've finished the + * flush, or we are out of output space. 
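+ * Editor's note: seen from the caller, this is why the canonical drain loop
+ * (a sketch, not from the original sources) keeps calling
+ *     ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end)
+ * until it returns 0, providing a fresh or flushed out buffer each round.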
+ */ + assert(endOp == ZSTD_e_continue || flushMin == 0 || output->pos == output->size); + ZSTD_setBufferExpectations(cctx, output, input); + return flushMin; } +#endif /* ZSTD_MULTITHREAD */ + FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , ""); + DEBUGLOG(5, "completed ZSTD_compressStream2"); + ZSTD_setBufferExpectations(cctx, output, input); + return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */ } -ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel) +size_t ZSTD_compressStream2_simpleArgs ( + ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, size_t* dstPos, + const void* src, size_t srcSize, size_t* srcPos, + ZSTD_EndDirective endOp) { - ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize); - return ZSTD_createCDict_advanced(dict, dictSize, - ZSTD_dlm_byCopy, ZSTD_dct_auto, - cParams, ZSTD_defaultCMem); + ZSTD_outBuffer output; + ZSTD_inBuffer input; + output.dst = dst; + output.size = dstCapacity; + output.pos = *dstPos; + input.src = src; + input.size = srcSize; + input.pos = *srcPos; + /* ZSTD_compressStream2() will check validity of dstPos and srcPos */ + { size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); + *dstPos = output.pos; + *srcPos = input.pos; + return cErr; + } } -ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel) +size_t ZSTD_compress2(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) { - ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize); - return ZSTD_createCDict_advanced(dict, dictSize, - ZSTD_dlm_byRef, ZSTD_dct_auto, - cParams, ZSTD_defaultCMem); -} + ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode; + ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode; + DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize); + ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only); + /* Enable stable input/output buffers. */ + cctx->requestedParams.inBufferMode = ZSTD_bm_stable; + cctx->requestedParams.outBufferMode = ZSTD_bm_stable; + { size_t oPos = 0; + size_t iPos = 0; + size_t const result = ZSTD_compressStream2_simpleArgs(cctx, + dst, dstCapacity, &oPos, + src, srcSize, &iPos, + ZSTD_e_end); + /* Reset to the original values. */ + cctx->requestedParams.inBufferMode = originalInBufferMode; + cctx->requestedParams.outBufferMode = originalOutBufferMode; -size_t ZSTD_freeCDict(ZSTD_CDict* cdict) -{ - if (cdict==NULL) return 0; /* support free on NULL */ - { ZSTD_customMem const cMem = cdict->customMem; - ZSTD_free(cdict->workspace, cMem); - ZSTD_free(cdict->dictBuffer, cMem); - ZSTD_free(cdict, cMem); - return 0; + FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed"); + if (result != 0) { /* compression not completed, due to lack of output space */ + assert(oPos == dstCapacity); + RETURN_ERROR(dstSize_tooSmall, ""); + } + assert(iPos == srcSize); /* all input is expected consumed */ + return oPos; } } -/*! ZSTD_initStaticCDict_advanced() : - * Generate a digested dictionary in provided memory area. - * workspace: The memory area to emplace the dictionary into. - * Provided pointer must 8-bytes aligned. - * It must outlive dictionary usage. - * workspaceSize: Use ZSTD_estimateCDictSize() - * to determine how large workspace must be. - * cParams : use ZSTD_getCParams() to transform a compression level - * into its relevants cParams. 
- * @return : pointer to ZSTD_CDict*, or NULL if error (size too small) - * Note : there is no corresponding "free" function. - * Since workspace was allocated externally, it must be freed externally. +/* ZSTD_validateSequence() : + * @offBase : must use the format required by ZSTD_storeSeq() + * @returns a ZSTD error code if sequence is not valid */ -const ZSTD_CDict* ZSTD_initStaticCDict( - void* workspace, size_t workspaceSize, - const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_compressionParameters cParams) +static size_t +ZSTD_validateSequence(U32 offBase, U32 matchLength, U32 minMatch, + size_t posInSrc, U32 windowLog, size_t dictSize, int useSequenceProducer) +{ + U32 const windowSize = 1u << windowLog; + /* posInSrc represents the amount of data the decoder would decode up to this point. + * As long as the amount of data decoded is less than or equal to window size, offsets may be + * larger than the total length of output decoded in order to reference the dict, even larger than + * window size. After output surpasses windowSize, we're limited to windowSize offsets again. + */ + size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize; + size_t const matchLenLowerBound = (minMatch == 3 || useSequenceProducer) ? 3 : 4; + RETURN_ERROR_IF(offBase > OFFSET_TO_OFFBASE(offsetBound), externalSequences_invalid, "Offset too large!"); + /* Validate maxNbSeq is large enough for the given matchLength and minMatch */ + RETURN_ERROR_IF(matchLength < matchLenLowerBound, externalSequences_invalid, "Matchlength too small for the minMatch"); + return 0; +} + +/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */ +static U32 ZSTD_finalizeOffBase(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) { - size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0); - size_t const neededSize = sizeof(ZSTD_CDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize) - + HUF_WORKSPACE_SIZE + matchStateSize; - ZSTD_CDict* const cdict = (ZSTD_CDict*) workspace; - void* ptr; - if ((size_t)workspace & 7) return NULL; /* 8-aligned */ - DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u", - (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize)); - if (workspaceSize < neededSize) return NULL; + U32 offBase = OFFSET_TO_OFFBASE(rawOffset); - if (dictLoadMethod == ZSTD_dlm_byCopy) { - memcpy(cdict+1, dict, dictSize); - dict = cdict+1; - ptr = (char*)workspace + sizeof(ZSTD_CDict) + dictSize; - } else { - ptr = cdict+1; + if (!ll0 && rawOffset == rep[0]) { + offBase = REPCODE1_TO_OFFBASE; + } else if (rawOffset == rep[1]) { + offBase = REPCODE_TO_OFFBASE(2 - ll0); + } else if (rawOffset == rep[2]) { + offBase = REPCODE_TO_OFFBASE(3 - ll0); + } else if (ll0 && rawOffset == rep[0] - 1) { + offBase = REPCODE3_TO_OFFBASE; } - cdict->workspace = ptr; - cdict->workspaceSize = HUF_WORKSPACE_SIZE + matchStateSize; - - if (ZSTD_isError( ZSTD_initCDict_internal(cdict, - dict, dictSize, - ZSTD_dlm_byRef, dictContentType, - cParams) )) - return NULL; - - return cdict; + return offBase; } -ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict) -{ - assert(cdict != NULL); - return cdict->matchState.cParams; -} +/* This function scans through an array of ZSTD_Sequence, + * storing the sequences it reads, until it reaches a block delimiter. 
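+ * A delimiter is an entry whose offset and matchLength are both 0, e.g.
+ * (editor's illustration) { .offset = 0, .litLength = 7, .matchLength = 0 }.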
+ * Note that the block delimiter includes the last literals of the block. + * @blockSize must be == sum(sequence_lengths). + * @returns @blockSize on success, and a ZSTD_error otherwise. + */ +static size_t +ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx* cctx, + ZSTD_SequencePosition* seqPos, + const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, + const void* src, size_t blockSize, + ZSTD_ParamSwitch_e externalRepSearch) +{ + U32 idx = seqPos->idx; + U32 const startIdx = idx; + BYTE const* ip = (BYTE const*)(src); + const BYTE* const iend = ip + blockSize; + Repcodes_t updatedRepcodes; + U32 dictSize; + + DEBUGLOG(5, "ZSTD_transferSequences_wBlockDelim (blockSize = %zu)", blockSize); + + if (cctx->cdict) { + dictSize = (U32)cctx->cdict->dictContentSize; + } else if (cctx->prefixDict.dict) { + dictSize = (U32)cctx->prefixDict.dictSize; + } else { + dictSize = 0; + } + ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t)); + for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) { + U32 const litLength = inSeqs[idx].litLength; + U32 const matchLength = inSeqs[idx].matchLength; + U32 offBase; + + if (externalRepSearch == ZSTD_ps_disable) { + offBase = OFFSET_TO_OFFBASE(inSeqs[idx].offset); + } else { + U32 const ll0 = (litLength == 0); + offBase = ZSTD_finalizeOffBase(inSeqs[idx].offset, updatedRepcodes.rep, ll0); + ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); + } -/* ZSTD_compressBegin_usingCDict_advanced() : - * cdict must be != NULL */ -size_t ZSTD_compressBegin_usingCDict_advanced( - ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, - ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize) -{ - DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced"); - RETURN_ERROR_IF(cdict==NULL, dictionary_wrong); - { ZSTD_CCtx_params params = cctx->requestedParams; - params.cParams = ZSTD_getCParamsFromCDict(cdict); - /* Increase window log to fit the entire dictionary and source if the - * source size is known. Limit the increase to 19, which is the - * window log for compression level 1 with the largest source size. - */ - if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) { - U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19); - U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1; - params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog); + DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); + if (cctx->appliedParams.validateSequences) { + seqPos->posInSrc += litLength + matchLength; + FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, + seqPos->posInSrc, + cctx->appliedParams.cParams.windowLog, dictSize, + ZSTD_hasExtSeqProd(&cctx->appliedParams)), + "Sequence validation failed"); + } + RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid, + "Not enough memory allocated. 
Try adjusting ZSTD_c_minMatch."); + ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); + ip += matchLength + litLength; + } + RETURN_ERROR_IF(idx == inSeqsSize, externalSequences_invalid, "Block delimiter not found."); + + /* If we skipped repcode search while parsing, we need to update repcodes now */ + assert(externalRepSearch != ZSTD_ps_auto); + assert(idx >= startIdx); + if (externalRepSearch == ZSTD_ps_disable && idx != startIdx) { + U32* const rep = updatedRepcodes.rep; + U32 lastSeqIdx = idx - 1; /* index of last non-block-delimiter sequence */ + + if (lastSeqIdx >= startIdx + 2) { + rep[2] = inSeqs[lastSeqIdx - 2].offset; + rep[1] = inSeqs[lastSeqIdx - 1].offset; + rep[0] = inSeqs[lastSeqIdx].offset; + } else if (lastSeqIdx == startIdx + 1) { + rep[2] = rep[0]; + rep[1] = inSeqs[lastSeqIdx - 1].offset; + rep[0] = inSeqs[lastSeqIdx].offset; + } else { + assert(lastSeqIdx == startIdx); + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = inSeqs[lastSeqIdx].offset; } - params.fParams = fParams; - return ZSTD_compressBegin_internal(cctx, - NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, - cdict, - params, pledgedSrcSize, - ZSTDb_not_buffered); } -} -/* ZSTD_compressBegin_usingCDict() : - * pledgedSrcSize=0 means "unknown" - * if pledgedSrcSize>0, it will enable contentSizeFlag */ -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) -{ - ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; - DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag); - return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN); -} + ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t)); -size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const ZSTD_CDict* cdict, ZSTD_frameParameters fParams) -{ - FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize)); /* will check if cdict != NULL */ - return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); + if (inSeqs[idx].litLength) { + DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength); + ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength); + ip += inSeqs[idx].litLength; + seqPos->posInSrc += inSeqs[idx].litLength; + } + RETURN_ERROR_IF(ip != iend, externalSequences_invalid, "Blocksize doesn't agree with block delimiter!"); + seqPos->idx = idx+1; + return blockSize; } -/*! ZSTD_compress_usingCDict() : - * Compression using a digested Dictionary. - * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. - * Note that compression parameters are decided at CDict creation time - * while frame parameters are hardcoded */ -size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const ZSTD_CDict* cdict) -{ - ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; - return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams); -} +/* + * This function attempts to scan through @blockSize bytes in @src + * represented by the sequences in @inSeqs, + * storing any (partial) sequences. + * + * Occasionally, we may want to reduce the actual number of bytes consumed from @src + * to avoid splitting a match, notably if it would produce a match smaller than MINMATCH. 
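+ * Editor's worked example: with minMatch == 4, a split that would leave a
+ * 3-byte match tail for the next block is moved back by 1 byte (bytesAdjustment)
+ * so the tail reaches minMatch; when splitting is avoidable, the scan simply
+ * stops before the match instead.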
+ * + * @returns the number of bytes consumed from @src, necessarily <= @blockSize. + * Otherwise, it may return a ZSTD error if something went wrong. + */ +static size_t +ZSTD_transferSequences_noDelim(ZSTD_CCtx* cctx, + ZSTD_SequencePosition* seqPos, + const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, + const void* src, size_t blockSize, + ZSTD_ParamSwitch_e externalRepSearch) +{ + U32 idx = seqPos->idx; + U32 startPosInSequence = seqPos->posInSequence; + U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize; + size_t dictSize; + const BYTE* const istart = (const BYTE*)(src); + const BYTE* ip = istart; + const BYTE* iend = istart + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */ + Repcodes_t updatedRepcodes; + U32 bytesAdjustment = 0; + U32 finalMatchSplit = 0; + + /* TODO(embg) support fast parsing mode in noBlockDelim mode */ + (void)externalRepSearch; + + if (cctx->cdict) { + dictSize = cctx->cdict->dictContentSize; + } else if (cctx->prefixDict.dict) { + dictSize = cctx->prefixDict.dictSize; + } else { + dictSize = 0; + } + DEBUGLOG(5, "ZSTD_transferSequences_noDelim: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize); + DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); + ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t)); + while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) { + const ZSTD_Sequence currSeq = inSeqs[idx]; + U32 litLength = currSeq.litLength; + U32 matchLength = currSeq.matchLength; + U32 const rawOffset = currSeq.offset; + U32 offBase; + + /* Modify the sequence depending on where endPosInSequence lies */ + if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) { + if (startPosInSequence >= litLength) { + startPosInSequence -= litLength; + litLength = 0; + matchLength -= startPosInSequence; + } else { + litLength -= startPosInSequence; + } + /* Move to the next sequence */ + endPosInSequence -= currSeq.litLength + currSeq.matchLength; + startPosInSequence = 0; + } else { + /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence + does not reach the end of the match. So, we have to split the sequence */ + DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u", + currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence); + if (endPosInSequence > litLength) { + U32 firstHalfMatchLength; + litLength = startPosInSequence >= litLength ? 
0 : litLength - startPosInSequence; + firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength; + if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) { + /* Only ever split the match if it is larger than the block size */ + U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence; + if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) { + /* Move the endPosInSequence backward so that it creates match of minMatch length */ + endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength; + bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength; + firstHalfMatchLength -= bytesAdjustment; + } + matchLength = firstHalfMatchLength; + /* Flag that we split the last match - after storing the sequence, exit the loop, + but keep the value of endPosInSequence */ + finalMatchSplit = 1; + } else { + /* Move the position in sequence backwards so that we don't split match, and break to store + * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence + * should go. We prefer to do this whenever it is not necessary to split the match, or if doing so + * would cause the first half of the match to be too small + */ + bytesAdjustment = endPosInSequence - currSeq.litLength; + endPosInSequence = currSeq.litLength; + break; + } + } else { + /* This sequence ends inside the literals, break to store the last literals */ + break; + } + } + /* Check if this offset can be represented with a repcode */ + { U32 const ll0 = (litLength == 0); + offBase = ZSTD_finalizeOffBase(rawOffset, updatedRepcodes.rep, ll0); + ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); + } + if (cctx->appliedParams.validateSequences) { + seqPos->posInSrc += litLength + matchLength; + FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc, + cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams)), + "Sequence validation failed"); + } + DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); + RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid, + "Not enough memory allocated. 
Try adjusting ZSTD_c_minMatch."); + ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); + ip += matchLength + litLength; + if (!finalMatchSplit) + idx++; /* Next Sequence */ + } + DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); + assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength); + seqPos->idx = idx; + seqPos->posInSequence = endPosInSequence; + ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t)); + + iend -= bytesAdjustment; + if (ip != iend) { + /* Store any last literals */ + U32 const lastLLSize = (U32)(iend - ip); + assert(ip <= iend); + DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize); + ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize); + seqPos->posInSrc += lastLLSize; + } + return (size_t)(iend-istart); +} -/* ****************************************************************** -* Streaming -********************************************************************/ +/* @seqPos represents a position within @inSeqs, + * it is read and updated by this function, + * once the goal to produce a block of size @blockSize is reached. + * @return: nb of bytes consumed from @src, necessarily <= @blockSize. + */ +typedef size_t (*ZSTD_SequenceCopier_f)(ZSTD_CCtx* cctx, + ZSTD_SequencePosition* seqPos, + const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, + const void* src, size_t blockSize, + ZSTD_ParamSwitch_e externalRepSearch); -ZSTD_CStream* ZSTD_createCStream(void) +static ZSTD_SequenceCopier_f ZSTD_selectSequenceCopier(ZSTD_SequenceFormat_e mode) { - DEBUGLOG(3, "ZSTD_createCStream"); - return ZSTD_createCStream_advanced(ZSTD_defaultCMem); + assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, (int)mode)); + if (mode == ZSTD_sf_explicitBlockDelimiters) { + return ZSTD_transferSequences_wBlockDelim; + } + assert(mode == ZSTD_sf_noBlockDelimiters); + return ZSTD_transferSequences_noDelim; } -ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize) -{ - return ZSTD_initStaticCCtx(workspace, workspaceSize); +/* Discover the size of next block by searching for the delimiter. + * Note that a block delimiter **must** exist in this mode, + * otherwise it's an input error. 
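+ * Editor's worked example: inSeqs = { {of=100, ll=5, ml=10}, {of=0, ll=3, ml=0} }
+ * yields blockSize = (5+10) + (3+0) = 18, the delimiter's literals included.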
+ * The block size retrieved will be later compared to ensure it remains within bounds */ +static size_t +blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_SequencePosition seqPos) +{ + int end = 0; + size_t blockSize = 0; + size_t spos = seqPos.idx; + DEBUGLOG(6, "blockSize_explicitDelimiter : seq %zu / %zu", spos, inSeqsSize); + assert(spos <= inSeqsSize); + while (spos < inSeqsSize) { + end = (inSeqs[spos].offset == 0); + blockSize += inSeqs[spos].litLength + inSeqs[spos].matchLength; + if (end) { + if (inSeqs[spos].matchLength != 0) + RETURN_ERROR(externalSequences_invalid, "delimiter format error : both matchlength and offset must be == 0"); + break; + } + spos++; + } + if (!end) + RETURN_ERROR(externalSequences_invalid, "Reached end of sequences without finding a block delimiter"); + return blockSize; } -ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem) -{ /* CStream and CCtx are now same object */ - return ZSTD_createCCtx_advanced(customMem); +static size_t determine_blockSize(ZSTD_SequenceFormat_e mode, + size_t blockSize, size_t remaining, + const ZSTD_Sequence* inSeqs, size_t inSeqsSize, + ZSTD_SequencePosition seqPos) +{ + DEBUGLOG(6, "determine_blockSize : remainingSize = %zu", remaining); + if (mode == ZSTD_sf_noBlockDelimiters) { + /* Note: more a "target" block size */ + return MIN(remaining, blockSize); + } + assert(mode == ZSTD_sf_explicitBlockDelimiters); + { size_t const explicitBlockSize = blockSize_explicitDelimiter(inSeqs, inSeqsSize, seqPos); + FORWARD_IF_ERROR(explicitBlockSize, "Error while determining block size with explicit delimiters"); + if (explicitBlockSize > blockSize) + RETURN_ERROR(externalSequences_invalid, "sequences incorrectly define a too large block"); + if (explicitBlockSize > remaining) + RETURN_ERROR(externalSequences_invalid, "sequences define a frame longer than source"); + return explicitBlockSize; + } } -size_t ZSTD_freeCStream(ZSTD_CStream* zcs) +/* Compress all provided sequences, block-by-block. + * + * Returns the cumulative size of all compressed blocks (including their headers), + * otherwise a ZSTD error. 
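+ * Editor's note: the frame header and optional checksum are emitted by the
+ * public wrapper ZSTD_compressSequences() below; a typical caller (sketch,
+ * parameter values are illustrative) first selects the delimiter mode:
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
+ *     size_t const cSize = ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize);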
+ */ +static size_t +ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const ZSTD_Sequence* inSeqs, size_t inSeqsSize, + const void* src, size_t srcSize) { - return ZSTD_freeCCtx(zcs); /* same object */ -} + size_t cSize = 0; + size_t remaining = srcSize; + ZSTD_SequencePosition seqPos = {0, 0, 0}; + const BYTE* ip = (BYTE const*)src; + BYTE* op = (BYTE*)dst; + ZSTD_SequenceCopier_f const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters); + DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize); + /* Special case: empty frame */ + if (remaining == 0) { + U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1); + RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header"); + MEM_writeLE32(op, cBlockHeader24); + op += ZSTD_blockHeaderSize; + dstCapacity -= ZSTD_blockHeaderSize; + cSize += ZSTD_blockHeaderSize; + } -/*====== Initialization ======*/ + while (remaining) { + size_t compressedSeqsSize; + size_t cBlockSize; + size_t blockSize = determine_blockSize(cctx->appliedParams.blockDelimiters, + cctx->blockSizeMax, remaining, + inSeqs, inSeqsSize, seqPos); + U32 const lastBlock = (blockSize == remaining); + FORWARD_IF_ERROR(blockSize, "Error while trying to determine block size"); + assert(blockSize <= remaining); + ZSTD_resetSeqStore(&cctx->seqStore); + + blockSize = sequenceCopier(cctx, + &seqPos, inSeqs, inSeqsSize, + ip, blockSize, + cctx->appliedParams.searchForExternalRepcodes); + FORWARD_IF_ERROR(blockSize, "Bad sequence copy"); + + /* If blocks are too small, emit as a nocompress block */ + /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding + * additional 1. We need to revisit and change this logic to be more consistent */ + if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) { + cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); + FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed"); + DEBUGLOG(5, "Block too small (%zu): data remains uncompressed: cSize=%zu", blockSize, cBlockSize); + cSize += cBlockSize; + ip += blockSize; + op += cBlockSize; + remaining -= blockSize; + dstCapacity -= cBlockSize; + continue; + } -size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; } + RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block"); + compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore, + &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy, + &cctx->appliedParams, + op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize, + blockSize, + cctx->tmpWorkspace, cctx->tmpWkspSize /* statically allocated in resetCCtx */, + cctx->bmi2); + FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed"); + DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize); + + if (!cctx->isFirstBlock && + ZSTD_maybeRLE(&cctx->seqStore) && + ZSTD_isRLE(ip, blockSize)) { + /* Note: don't emit the first block as RLE even if it qualifies because + * doing so will cause the decoder (cli <= v1.4.3 only) to throw an (invalid) error + * "should consume all input error." 
+ */ + compressedSeqsSize = 1; + } -size_t ZSTD_CStreamOutSize(void) -{ - return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; -} + if (compressedSeqsSize == 0) { + /* ZSTD_noCompressBlock writes the block header as well */ + cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); + FORWARD_IF_ERROR(cBlockSize, "ZSTD_noCompressBlock failed"); + DEBUGLOG(5, "Writing out nocompress block, size: %zu", cBlockSize); + } else if (compressedSeqsSize == 1) { + cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock); + FORWARD_IF_ERROR(cBlockSize, "ZSTD_rleCompressBlock failed"); + DEBUGLOG(5, "Writing out RLE block, size: %zu", cBlockSize); + } else { + U32 cBlockHeader; + /* Error checking and repcodes update */ + ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState); + if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) + cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; + + /* Write block header into beginning of block*/ + cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3); + MEM_writeLE24(op, cBlockHeader); + cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; + DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize); + } -static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx, - const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType, - const ZSTD_CDict* const cdict, - ZSTD_CCtx_params params, unsigned long long const pledgedSrcSize) -{ - DEBUGLOG(4, "ZSTD_resetCStream_internal"); - /* Finalize the compression parameters */ - params.cParams = ZSTD_getCParamsFromCCtxParams(¶ms, pledgedSrcSize, dictSize); - /* params are supposed to be fully validated at this point */ - assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); - assert(!((dict) && (cdict))); /* either dict or cdict, not both */ + cSize += cBlockSize; - FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx, - dict, dictSize, dictContentType, ZSTD_dtlm_fast, - cdict, - params, pledgedSrcSize, - ZSTDb_buffered) ); + if (lastBlock) { + break; + } else { + ip += blockSize; + op += cBlockSize; + remaining -= blockSize; + dstCapacity -= cBlockSize; + cctx->isFirstBlock = 0; + } + DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity); + } - cctx->inToCompress = 0; - cctx->inBuffPos = 0; - cctx->inBuffTarget = cctx->blockSize - + (cctx->blockSize == pledgedSrcSize); /* for small input: avoid automatic flush on reaching end of block, since it would require to add a 3-bytes null block to end frame */ - cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0; - cctx->streamStage = zcss_load; - cctx->frameEnded = 0; - return 0; /* ready to go */ + DEBUGLOG(4, "cSize final total: %zu", cSize); + return cSize; } -/* ZSTD_resetCStream(): - * pledgedSrcSize == 0 means "unknown" */ -size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss) +size_t ZSTD_compressSequences(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const ZSTD_Sequence* inSeqs, size_t inSeqsSize, + const void* src, size_t srcSize) { - /* temporary : 0 interpreted as "unknown" during transition period. - * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. - * 0 will be interpreted as "empty" in the future. - */ - U64 const pledgedSrcSize = (pss==0) ? 
ZSTD_CONTENTSIZE_UNKNOWN : pss; - DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize); - FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) ); - FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) ); - return 0; -} + BYTE* op = (BYTE*)dst; + size_t cSize = 0; -/*! ZSTD_initCStream_internal() : - * Note : for lib/compress only. Used by zstdmt_compress.c. - * Assumption 1 : params are valid - * Assumption 2 : either dict, or cdict, is defined, not both */ -size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, - const void* dict, size_t dictSize, const ZSTD_CDict* cdict, - ZSTD_CCtx_params params, unsigned long long pledgedSrcSize) -{ - DEBUGLOG(4, "ZSTD_initCStream_internal"); - FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) ); - FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) ); - assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); - zcs->requestedParams = params; - assert(!((dict) && (cdict))); /* either dict or cdict, not both */ - if (dict) { - FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) ); - } else { - /* Dictionary is cleared if !cdict */ - FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) ); + /* Transparent initialization stage, same as compressStream2() */ + DEBUGLOG(4, "ZSTD_compressSequences (nbSeqs=%zu,dstCapacity=%zu)", inSeqsSize, dstCapacity); + assert(cctx != NULL); + FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed"); + + /* Begin writing output, starting with frame header */ + { size_t const frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, + &cctx->appliedParams, srcSize, cctx->dictID); + op += frameHeaderSize; + assert(frameHeaderSize <= dstCapacity); + dstCapacity -= frameHeaderSize; + cSize += frameHeaderSize; + } + if (cctx->appliedParams.fParams.checksumFlag && srcSize) { + XXH64_update(&cctx->xxhState, src, srcSize); } - return 0; -} -/* ZSTD_initCStream_usingCDict_advanced() : - * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */ -size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, - const ZSTD_CDict* cdict, - ZSTD_frameParameters fParams, - unsigned long long pledgedSrcSize) -{ - DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced"); - FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) ); - FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) ); - zcs->requestedParams.fParams = fParams; - FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) ); - return 0; -} + /* Now generate compressed blocks */ + { size_t const cBlocksSize = ZSTD_compressSequences_internal(cctx, + op, dstCapacity, + inSeqs, inSeqsSize, + src, srcSize); + FORWARD_IF_ERROR(cBlocksSize, "Compressing blocks failed!"); + cSize += cBlocksSize; + assert(cBlocksSize <= dstCapacity); + dstCapacity -= cBlocksSize; + } -/* note : cdict must outlive compression session */ -size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict) -{ - DEBUGLOG(4, "ZSTD_initCStream_usingCDict"); - FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) ); - FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) ); - return 0; + /* Complete with frame checksum, if needed */ + if (cctx->appliedParams.fParams.checksumFlag) { + U32 const checksum = (U32) XXH64_digest(&cctx->xxhState); + RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum"); + DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum); + MEM_writeLE32((char*)dst + cSize, checksum); + cSize += 
4; + } + + DEBUGLOG(4, "Final compressed size: %zu", cSize); + return cSize; } -/* ZSTD_initCStream_advanced() : - * pledgedSrcSize must be exact. - * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. - * dict is loaded with default parameters ZSTD_dm_auto and ZSTD_dlm_byCopy. */ -size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, - const void* dict, size_t dictSize, - ZSTD_parameters params, unsigned long long pss) -{ - /* for compatibility with older programs relying on this behavior. - * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. - * This line will be removed in the future. - */ - U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; - DEBUGLOG(4, "ZSTD_initCStream_advanced"); - FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) ); - FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) ); - FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) ); - zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params); - FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) ); - return 0; -} +#if defined(ZSTD_ARCH_X86_AVX2) -size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel) -{ - DEBUGLOG(4, "ZSTD_initCStream_usingDict"); - FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) ); - FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) ); - FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) ); - return 0; -} +#include <immintrin.h> /* AVX2 intrinsics */ -size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss) -{ - /* temporary : 0 interpreted as "unknown" during transition period. - * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. - * 0 will be interpreted as "empty" in the future. +/* + * Convert 2 sequences per iteration, using AVX2 intrinsics: + * - offset -> offBase = offset + 2 + * - litLength -> (U16) litLength + * - matchLength -> (U16)(matchLength - 3) + * - rep is ignored + * Store only 8 bytes per SeqDef (offBase[4], litLength[2], mlBase[2]). + * + * At the end, instead of extracting two __m128i, + * we use _mm256_permute4x64_epi64(..., 0xE8) to move lane2 into lane1, + * then store the lower 16 bytes in one go. + * + * @returns 0 on success, with no long length detected + * @returns > 0 if there is one long length (> 65535), + * indicating the position, and type. + */ +size_t convertSequences_noRepcodes( + SeqDef* dstSeqs, + const ZSTD_Sequence* inSeqs, + size_t nbSequences) +{ + /* + * addition: + * For each 128-bit half: (offset+2, litLength+0, matchLength-3, rep+0) */ - U64 const pledgedSrcSize = (pss==0) ?
ZSTD_CONTENTSIZE_UNKNOWN : pss; - DEBUGLOG(4, "ZSTD_initCStream_srcSize"); - FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) ); - FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) ); - FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) ); - FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) ); - return 0; -} - -size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel) -{ - DEBUGLOG(4, "ZSTD_initCStream"); - FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) ); - FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) ); - FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) ); - return 0; -} + const __m256i addition = _mm256_setr_epi32( + ZSTD_REP_NUM, 0, -MINMATCH, 0, /* for sequence i */ + ZSTD_REP_NUM, 0, -MINMATCH, 0 /* for sequence i+1 */ + ); + + /* limit: check if there is a long length */ + const __m256i limit = _mm256_set1_epi32(65535); + + /* + * shuffle mask for byte-level rearrangement in each 128-bit half: + * + * Input layout (after addition) per 128-bit half: + * [ offset+2 (4 bytes) | litLength (4 bytes) | matchLength (4 bytes) | rep (4 bytes) ] + * We only need: + * offBase (4 bytes) = offset+2 + * litLength (2 bytes) = low 2 bytes of litLength + * mlBase (2 bytes) = low 2 bytes of (matchLength) + * => Bytes [0..3, 4..5, 8..9], zero the rest. + */ + const __m256i mask = _mm256_setr_epi8( + /* For the lower 128 bits => sequence i */ + 0, 1, 2, 3, /* offset+2 */ + 4, 5, /* litLength (16 bits) */ + 8, 9, /* matchLength (16 bits) */ + (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, + (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, + + /* For the upper 128 bits => sequence i+1 */ + 16,17,18,19, /* offset+2 */ + 20,21, /* litLength */ + 24,25, /* matchLength */ + (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, + (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80 + ); + + /* + * Next, we'll use _mm256_permute4x64_epi64(vshf, 0xE8). + * Explanation of 0xE8 = 11101000b => [lane0, lane2, lane2, lane3]. + * So the lower 128 bits become [lane0, lane2] => combining seq0 and seq1. + */ +#define PERM_LANE_0X_E8 0xE8 /* [0,2,2,3] in lane indices */ + + size_t longLen = 0, i = 0; + + /* AVX permutation depends on the specific definition of target structures */ + ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, offset) == 0); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) == 4); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8); + ZSTD_STATIC_ASSERT(sizeof(SeqDef) == 8); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, offBase) == 0); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, litLength) == 4); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, mlBase) == 6); + + /* Process 2 sequences per loop iteration */ + for (; i + 1 < nbSequences; i += 2) { + /* Load 2 ZSTD_Sequence (32 bytes) */ + __m256i vin = _mm256_loadu_si256((const __m256i*)(const void*)&inSeqs[i]); + + /* Add {2, 0, -3, 0} in each 128-bit half */ + __m256i vadd = _mm256_add_epi32(vin, addition); + + /* Check for long length */ + __m256i ll_cmp = _mm256_cmpgt_epi32(vadd, limit); /* 0xFFFFFFFF for element > 65535 */ + int ll_res = _mm256_movemask_epi8(ll_cmp); + + /* Shuffle bytes so each half gives us the 8 bytes we need */ + __m256i vshf = _mm256_shuffle_epi8(vadd, mask); + /* + * Now: + * Lane0 = seq0's 8 bytes + * Lane1 = 0 + * Lane2 = seq1's 8 bytes + * Lane3 = 0 + */ -/*====== Compression ======*/ + /* Permute 64-bit lanes => move Lane2 down into Lane1. 
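+ * 64-bit lane view, before: [ seq0 | 0 | seq1 | 0 ] + * 64-bit lane view, after : [ seq0 | seq1 | seq1 | 0 ]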
*/ + __m256i vperm = _mm256_permute4x64_epi64(vshf, PERM_LANE_0X_E8); + /* + * Now the lower 16 bytes (Lane0+Lane1) = [seq0, seq1]. + * The upper 16 bytes are [Lane2, Lane3] = [seq1, 0], but we won't use them. + */ -static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx) -{ - size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos; - if (hintInSize==0) hintInSize = cctx->blockSize; - return hintInSize; -} + /* Store only the lower 16 bytes => 2 SeqDef (8 bytes each) */ + _mm_storeu_si128((__m128i *)(void*)&dstSeqs[i], _mm256_castsi256_si128(vperm)); + /* + * This writes out 16 bytes total: + * - offset 0..7 => seq0 (offBase, litLength, mlBase) + * - offset 8..15 => seq1 (offBase, litLength, mlBase) + */ -static size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, - const void* src, size_t srcSize) -{ - size_t const length = MIN(dstCapacity, srcSize); - if (length) memcpy(dst, src, length); - return length; -} + /* check (unlikely) long lengths > 65535 + * indices for lengths correspond to bits [4..7], [8..11], [20..23], [24..27] + * => combined mask = 0x0FF00FF0 + */ + if (UNLIKELY((ll_res & 0x0FF00FF0) != 0)) { + /* long length detected: let's figure out which one */ + if (inSeqs[i].matchLength > 65535+MINMATCH) { + assert(longLen == 0); + longLen = i + 1; + } + if (inSeqs[i].litLength > 65535) { + assert(longLen == 0); + longLen = i + nbSequences + 1; + } + if (inSeqs[i+1].matchLength > 65535+MINMATCH) { + assert(longLen == 0); + longLen = i + 1 + 1; + } + if (inSeqs[i+1].litLength > 65535) { + assert(longLen == 0); + longLen = i + 1 + nbSequences + 1; + } + } + } -/** ZSTD_compressStream_generic(): - * internal function for all *compressStream*() variants - * non-static, because can be called from zstdmt_compress.c - * @return : hint size for next input */ -static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, - ZSTD_outBuffer* output, - ZSTD_inBuffer* input, - ZSTD_EndDirective const flushMode) -{ - const char* const istart = (const char*)input->src; - const char* const iend = istart + input->size; - const char* ip = istart + input->pos; - char* const ostart = (char*)output->dst; - char* const oend = ostart + output->size; - char* op = ostart + output->pos; - U32 someMoreWork = 1; + /* Handle leftover if @nbSequences is odd */ + if (i < nbSequences) { + /* process last sequence */ + assert(i == nbSequences - 1); + dstSeqs[i].offBase = OFFSET_TO_OFFBASE(inSeqs[i].offset); + dstSeqs[i].litLength = (U16)inSeqs[i].litLength; + dstSeqs[i].mlBase = (U16)(inSeqs[i].matchLength - MINMATCH); + /* check (unlikely) long lengths > 65535 */ + if (UNLIKELY(inSeqs[i].matchLength > 65535+MINMATCH)) { + assert(longLen == 0); + longLen = i + 1; + } + if (UNLIKELY(inSeqs[i].litLength > 65535)) { + assert(longLen == 0); + longLen = i + nbSequences + 1; + } + } - /* check expectations */ - DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode); - assert(zcs->inBuff != NULL); - assert(zcs->inBuffSize > 0); - assert(zcs->outBuff != NULL); - assert(zcs->outBuffSize > 0); - assert(output->pos <= output->size); - assert(input->pos <= input->size); + return longLen; +} - while (someMoreWork) { - switch(zcs->streamStage) +#elif defined (ZSTD_ARCH_RISCV_RVV) +#include <riscv_vector.h> +/* + * Convert `vl` sequences per iteration, using RVV intrinsics: + * - offset -> offBase = offset + 2 + * - litLength -> (U16) litLength + * - matchLength -> (U16)(matchLength - 3) + * - rep is ignored + * Store only 8 bytes per SeqDef (offBase[4], litLength[2], mlBase[2]).
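+ * In other words, each SeqDef packs { U32 offBase; U16 litLength; U16 mlBase } + * into 8 bytes, which the static asserts below double-check.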
+ * + * @returns 0 on success, with no long length detected + * @returns > 0 if there is one long length (> 65535), + * indicating the position, and type. + */ +size_t convertSequences_noRepcodes(SeqDef* dstSeqs, const ZSTD_Sequence* inSeqs, size_t nbSequences) { + size_t longLen = 0; + size_t vl = 0; + typedef uint32_t __attribute__((may_alias)) aliased_u32; + /* RVV depends on the specific definition of target structures */ + ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, offset) == 0); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) == 4); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8); + ZSTD_STATIC_ASSERT(sizeof(SeqDef) == 8); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, offBase) == 0); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, litLength) == 4); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, mlBase) == 6); + + for (size_t i = 0; i < nbSequences; i += vl) { + + vl = __riscv_vsetvl_e32m2(nbSequences-i); { - case zcss_init: - RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!"); - - case zcss_load: - if ( (flushMode == ZSTD_e_end) - && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip)) /* enough dstCapacity */ - && (zcs->inBuffPos == 0) ) { - /* shortcut to compression pass directly into output buffer */ - size_t const cSize = ZSTD_compressEnd(zcs, - op, oend-op, ip, iend-ip); - DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize); - FORWARD_IF_ERROR(cSize); - ip = iend; - op += cSize; - zcs->frameEnded = 1; - ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - someMoreWork = 0; break; - } - /* complete loading into inBuffer */ - { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; - size_t const loaded = ZSTD_limitCopy( - zcs->inBuff + zcs->inBuffPos, toLoad, - ip, iend-ip); - zcs->inBuffPos += loaded; - ip += loaded; - if ( (flushMode == ZSTD_e_continue) - && (zcs->inBuffPos < zcs->inBuffTarget) ) { - /* not enough input to fill full block : stop here */ - someMoreWork = 0; break; - } - if ( (flushMode == ZSTD_e_flush) - && (zcs->inBuffPos == zcs->inToCompress) ) { - /* empty */ - someMoreWork = 0; break; - } - } - /* compress current block (note : this stage cannot be stopped in the middle) */ - DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode); - { void* cDst; - size_t cSize; - size_t const iSize = zcs->inBuffPos - zcs->inToCompress; - size_t oSize = oend-op; - unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend); - if (oSize >= ZSTD_compressBound(iSize)) - cDst = op; /* compress into output buffer, to skip flush stage */ - else - cDst = zcs->outBuff, oSize = zcs->outBuffSize; - cSize = lastBlock ?
- ZSTD_compressEnd(zcs, cDst, oSize, - zcs->inBuff + zcs->inToCompress, iSize) : - ZSTD_compressContinue(zcs, cDst, oSize, - zcs->inBuff + zcs->inToCompress, iSize); - FORWARD_IF_ERROR(cSize); - zcs->frameEnded = lastBlock; - /* prepare next block */ - zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize; - if (zcs->inBuffTarget > zcs->inBuffSize) - zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; - DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u", - (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize); - if (!lastBlock) - assert(zcs->inBuffTarget <= zcs->inBuffSize); - zcs->inToCompress = zcs->inBuffPos; - if (cDst == op) { /* no need to flush */ - op += cSize; - if (zcs->frameEnded) { - DEBUGLOG(5, "Frame completed directly in outBuffer"); - someMoreWork = 0; - ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - } - break; - } - zcs->outBuffContentSize = cSize; - zcs->outBuffFlushedSize = 0; - zcs->streamStage = zcss_flush; /* pass-through to flush stage */ + // Loading structure member variables + vuint32m2x4_t v_tuple = __riscv_vlseg4e32_v_u32m2x4( + (const aliased_u32*)((const void*)&inSeqs[i]), + vl + ); + vuint32m2_t v_offset = __riscv_vget_v_u32m2x4_u32m2(v_tuple, 0); + vuint32m2_t v_lit = __riscv_vget_v_u32m2x4_u32m2(v_tuple, 1); + vuint32m2_t v_match = __riscv_vget_v_u32m2x4_u32m2(v_tuple, 2); + // offset + ZSTD_REP_NUM + vuint32m2_t v_offBase = __riscv_vadd_vx_u32m2(v_offset, ZSTD_REP_NUM, vl); + // Check for integer overflow + // Cast to a 16-bit variable + vbool16_t lit_overflow = __riscv_vmsgtu_vx_u32m2_b16(v_lit, 65535, vl); + vuint16m1_t v_lit_clamped = __riscv_vncvt_x_x_w_u16m1(v_lit, vl); + + vbool16_t ml_overflow = __riscv_vmsgtu_vx_u32m2_b16(v_match, 65535+MINMATCH, vl); + vuint16m1_t v_ml_clamped = __riscv_vncvt_x_x_w_u16m1(__riscv_vsub_vx_u32m2(v_match, MINMATCH, vl), vl); + + // Pack two 16-bit fields into a 32-bit value (little-endian) + // The lower 16 bits contain litLength, and the upper 16 bits contain mlBase + vuint32m2_t v_lit_ml_combined = __riscv_vsll_vx_u32m2( + __riscv_vwcvtu_x_x_v_u32m2(v_ml_clamped, vl), // Convert matchLength to 32-bit + 16, + vl + ); + v_lit_ml_combined = __riscv_vor_vv_u32m2( + v_lit_ml_combined, + __riscv_vwcvtu_x_x_v_u32m2(v_lit_clamped, vl), + vl + ); + { + // Create a vector of SeqDef structures + // Store the offBase, litLength, and mlBase in a vector of SeqDef + vuint32m2x2_t store_data = __riscv_vcreate_v_u32m2x2( + v_offBase, + v_lit_ml_combined + ); + __riscv_vsseg2e32_v_u32m2x2( + (aliased_u32*)((void*)&dstSeqs[i]), + store_data, + vl + ); } - /* fall-through */ - case zcss_flush: - DEBUGLOG(5, "flush stage"); - { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; - size_t const flushed = ZSTD_limitCopy(op, oend-op, - zcs->outBuff + zcs->outBuffFlushedSize, toFlush); - DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u", - (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed); - op += flushed; - zcs->outBuffFlushedSize += flushed; - if (toFlush!=flushed) { - /* flush not fully completed, presumably because dst is too small */ - assert(op==oend); - someMoreWork = 0; - break; + { + // Find the first index where an overflow occurs + int first_ml = __riscv_vfirst_m_b16(ml_overflow, vl); + int first_lit = __riscv_vfirst_m_b16(lit_overflow, vl); + + if (UNLIKELY(first_ml != -1)) { + assert(longLen == 0); + longLen = i + first_ml + 1; } - zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; - if (zcs->frameEnded) { - DEBUGLOG(5, "Frame completed on flush"); - someMoreWork = 0; - ZSTD_CCtx_reset(zcs, 
ZSTD_reset_session_only); - break; + if (UNLIKELY(first_lit != -1)) { + assert(longLen == 0); + longLen = i + first_lit + 1 + nbSequences; } - zcs->streamStage = zcss_load; - break; } + } + } + return longLen; +} - default: /* impossible */ - assert(0); +/* the vector implementation could also be ported to SSSE3, + * but since this implementation is targeting modern systems (>= Sapphire Rapids), + * it's not useful to develop and maintain code for older pre-AVX2 platforms */ + +#elif defined(ZSTD_ARCH_ARM_SVE2) + +/* + * Checks if any active element in a signed 8-bit integer vector is greater + * than zero. + * + * @param g Governing predicate selecting active lanes. + * @param a Input vector of signed 8-bit integers. + * + * @return True if any active element in `a` is > 0, false otherwise. + */ +FORCE_INLINE_TEMPLATE int cmpgtz_any_s8(svbool_t g, svint8_t a) +{ + svbool_t ptest = svcmpgt_n_s8(g, a, 0); + return svptest_any(ptest, ptest); +} + +size_t convertSequences_noRepcodes( + SeqDef* dstSeqs, + const ZSTD_Sequence* inSeqs, + size_t nbSequences) +{ + /* Process the input with `8 * VL / element` lanes. */ + const size_t lanes = 8 * svcntb() / sizeof(ZSTD_Sequence); + size_t longLen = 0; + size_t n = 0; + + /* SVE permutation depends on the specific definition of target structures. */ + ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, offset) == 0); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) == 4); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8); + ZSTD_STATIC_ASSERT(sizeof(SeqDef) == 8); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, offBase) == 0); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, litLength) == 4); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, mlBase) == 6); + + if (nbSequences >= lanes) { + const svbool_t ptrue = svptrue_b8(); + /* 16-bit units of {ZSTD_REP_NUM, 0, -MINMATCH, 0}, extended to 32-bit lanes. */ + const svuint32_t vaddition = svreinterpret_u32( + svunpklo_s32(svreinterpret_s16(svdup_n_u64(ZSTD_REP_NUM | (((U64)(U16)-MINMATCH) << 32))))); + /* For permutation of 16-bit units: 0, 1, 2, 4, 8, 9, 10, 12, ... */ + const svuint16_t vmask = svreinterpret_u16( + svindex_u64(0x0004000200010000, 0x0008000800080008)); + /* Upper bytes of `litLength` and `matchLength` will be packed into the + * middle of the overflow check vector. */ + const svbool_t pmid = svcmpne_n_u8( + ptrue, svreinterpret_u8(svdup_n_u64(0x0000FFFFFFFF0000)), 0); + + do { + /* Load `lanes` number of `ZSTD_Sequence` into 8 vectors. */ + const svuint32_t vin0 = svld1_vnum_u32(ptrue, &inSeqs[n].offset, 0); + const svuint32_t vin1 = svld1_vnum_u32(ptrue, &inSeqs[n].offset, 1); + const svuint32_t vin2 = svld1_vnum_u32(ptrue, &inSeqs[n].offset, 2); + const svuint32_t vin3 = svld1_vnum_u32(ptrue, &inSeqs[n].offset, 3); + const svuint32_t vin4 = svld1_vnum_u32(ptrue, &inSeqs[n].offset, 4); + const svuint32_t vin5 = svld1_vnum_u32(ptrue, &inSeqs[n].offset, 5); + const svuint32_t vin6 = svld1_vnum_u32(ptrue, &inSeqs[n].offset, 6); + const svuint32_t vin7 = svld1_vnum_u32(ptrue, &inSeqs[n].offset, 7); + + /* Add {ZSTD_REP_NUM, 0, -MINMATCH, 0} to each structure.
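+ * With ZSTD_REP_NUM == 3 and MINMATCH == 3, this adds {+3, 0, -3, 0} : + * offset becomes offBase and matchLength becomes mlBase, while the + * litLength and rep lanes pass through unchanged.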
*/ + const svuint16x2_t vadd01 = svcreate2_u16( + svreinterpret_u16(svadd_u32_x(ptrue, vin0, vaddition)), + svreinterpret_u16(svadd_u32_x(ptrue, vin1, vaddition))); + const svuint16x2_t vadd23 = svcreate2_u16( + svreinterpret_u16(svadd_u32_x(ptrue, vin2, vaddition)), + svreinterpret_u16(svadd_u32_x(ptrue, vin3, vaddition))); + const svuint16x2_t vadd45 = svcreate2_u16( + svreinterpret_u16(svadd_u32_x(ptrue, vin4, vaddition)), + svreinterpret_u16(svadd_u32_x(ptrue, vin5, vaddition))); + const svuint16x2_t vadd67 = svcreate2_u16( + svreinterpret_u16(svadd_u32_x(ptrue, vin6, vaddition)), + svreinterpret_u16(svadd_u32_x(ptrue, vin7, vaddition))); + + /* Shuffle and pack bytes so each vector contains SeqDef structures. */ + const svuint16_t vout01 = svtbl2_u16(vadd01, vmask); + const svuint16_t vout23 = svtbl2_u16(vadd23, vmask); + const svuint16_t vout45 = svtbl2_u16(vadd45, vmask); + const svuint16_t vout67 = svtbl2_u16(vadd67, vmask); + + /* Pack the upper 16-bits of 32-bit lanes for overflow check. */ + const svuint16_t voverflow01 = svuzp2_u16(svget2_u16(vadd01, 0), + svget2_u16(vadd01, 1)); + const svuint16_t voverflow23 = svuzp2_u16(svget2_u16(vadd23, 0), + svget2_u16(vadd23, 1)); + const svuint16_t voverflow45 = svuzp2_u16(svget2_u16(vadd45, 0), + svget2_u16(vadd45, 1)); + const svuint16_t voverflow67 = svuzp2_u16(svget2_u16(vadd67, 0), + svget2_u16(vadd67, 1)); + + /* We don't need the whole 16 bits of the overflow part. Only 1 bit + * is needed, so we pack tightly and merge multiple vectors to be + * able to use a single comparison to handle the overflow case. + * However, we also need to handle the possible negative values of + * matchLength parts, so we use signed comparison later. */ + const svint8_t voverflow = + svmax_s8_x(pmid, + svtrn1_s8(svreinterpret_s8(voverflow01), + svreinterpret_s8(voverflow23)), + svtrn1_s8(svreinterpret_s8(voverflow45), + svreinterpret_s8(voverflow67))); + + /* Store `lanes` number of `SeqDef` structures from 4 vectors. */ + svst1_vnum_u32(ptrue, &dstSeqs[n].offBase, 0, svreinterpret_u32(vout01)); + svst1_vnum_u32(ptrue, &dstSeqs[n].offBase, 1, svreinterpret_u32(vout23)); + svst1_vnum_u32(ptrue, &dstSeqs[n].offBase, 2, svreinterpret_u32(vout45)); + svst1_vnum_u32(ptrue, &dstSeqs[n].offBase, 3, svreinterpret_u32(vout67)); + + /* Check if any enabled lane of the overflow vector is larger than + * zero; at most one such lane may occur. */ + if (UNLIKELY(cmpgtz_any_s8(pmid, voverflow))) { + /* Scalar search for long match is needed because we merged + * multiple overflow bytes with `max`. */ + size_t i; + for (i = n; i < n + lanes; i++) { + if (inSeqs[i].matchLength > 65535 + MINMATCH) { + assert(longLen == 0); + longLen = i + 1; + } + if (inSeqs[i].litLength > 65535) { + assert(longLen == 0); + longLen = i + nbSequences + 1; + } + } + } + + n += lanes; + } while(n <= nbSequences - lanes); + } + + /* Handle remaining elements. */ + for (; n < nbSequences; n++) { + dstSeqs[n].offBase = OFFSET_TO_OFFBASE(inSeqs[n].offset); + dstSeqs[n].litLength = (U16)inSeqs[n].litLength; + dstSeqs[n].mlBase = (U16)(inSeqs[n].matchLength - MINMATCH); + /* Check for long length > 65535.
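+ * Return-value encoding reminder: a long matchLength at index i yields + * i + 1, a long litLength yields i + nbSequences + 1, and 0 means none; + * e.g. nbSequences == 10 with a long litLength at index 4 returns 15.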
*/ + if (UNLIKELY(inSeqs[n].matchLength > 65535 + MINMATCH)) { + assert(longLen == 0); + longLen = n + 1; + } + if (UNLIKELY(inSeqs[n].litLength > 65535)) { + assert(longLen == 0); + longLen = n + nbSequences + 1; } } + return longLen; +} + +#elif defined(ZSTD_ARCH_ARM_NEON) && (defined(__aarch64__) || defined(_M_ARM64)) + +size_t convertSequences_noRepcodes( + SeqDef* dstSeqs, + const ZSTD_Sequence* inSeqs, + size_t nbSequences) +{ + size_t longLen = 0; + size_t n = 0; + + /* Neon permutation depends on the specific definition of target structures. */ + ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, offset) == 0); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) == 4); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8); + ZSTD_STATIC_ASSERT(sizeof(SeqDef) == 8); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, offBase) == 0); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, litLength) == 4); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, mlBase) == 6); + + if (nbSequences > 3) { + static const ZSTD_ALIGNED(16) U32 constAddition[4] = { + ZSTD_REP_NUM, 0, -MINMATCH, 0 + }; + static const ZSTD_ALIGNED(16) U8 constMask[16] = { + 0, 1, 2, 3, 4, 5, 8, 9, 16, 17, 18, 19, 20, 21, 24, 25 + }; + static const ZSTD_ALIGNED(16) U16 constCounter[8] = { + 1, 1, 1, 1, 2, 2, 2, 2 + }; + + const uint32x4_t vaddition = vld1q_u32(constAddition); + const uint8x16_t vmask = vld1q_u8(constMask); + uint16x8_t vcounter = vld1q_u16(constCounter); + uint16x8_t vindex01 = vdupq_n_u16(0); + uint16x8_t vindex23 = vdupq_n_u16(0); - input->pos = ip - istart; - output->pos = op - ostart; - if (zcs->frameEnded) return 0; - return ZSTD_nextInputSizeHint(zcs); -} + do { + /* Load 4 ZSTD_Sequence (64 bytes). */ + const uint32x4_t vin0 = vld1q_u32(&inSeqs[n + 0].offset); + const uint32x4_t vin1 = vld1q_u32(&inSeqs[n + 1].offset); + const uint32x4_t vin2 = vld1q_u32(&inSeqs[n + 2].offset); + const uint32x4_t vin3 = vld1q_u32(&inSeqs[n + 3].offset); + + /* Add {ZSTD_REP_NUM, 0, -MINMATCH, 0} to each vector. */ + const uint8x16x2_t vadd01 = { { + vreinterpretq_u8_u32(vaddq_u32(vin0, vaddition)), + vreinterpretq_u8_u32(vaddq_u32(vin1, vaddition)), + } }; + const uint8x16x2_t vadd23 = { { + vreinterpretq_u8_u32(vaddq_u32(vin2, vaddition)), + vreinterpretq_u8_u32(vaddq_u32(vin3, vaddition)), + } }; + + /* Shuffle and pack bytes so each vector contains 2 SeqDef structures. */ + const uint8x16_t vout01 = vqtbl2q_u8(vadd01, vmask); + const uint8x16_t vout23 = vqtbl2q_u8(vadd23, vmask); + + /* Pack the upper 16-bits of 32-bit lanes for overflow check. */ + uint16x8_t voverflow01 = vuzp2q_u16(vreinterpretq_u16_u8(vadd01.val[0]), + vreinterpretq_u16_u8(vadd01.val[1])); + uint16x8_t voverflow23 = vuzp2q_u16(vreinterpretq_u16_u8(vadd23.val[0]), + vreinterpretq_u16_u8(vadd23.val[1])); + + /* Store 4 SeqDef structures. */ + vst1q_u32(&dstSeqs[n + 0].offBase, vreinterpretq_u32_u8(vout01)); + vst1q_u32(&dstSeqs[n + 2].offBase, vreinterpretq_u32_u8(vout23)); + + /* Create masks in case of overflow. */ + voverflow01 = vcgtzq_s16(vreinterpretq_s16_u16(voverflow01)); + voverflow23 = vcgtzq_s16(vreinterpretq_s16_u16(voverflow23)); + + /* Update overflow indices. */ + vindex01 = vbslq_u16(voverflow01, vcounter, vindex01); + vindex23 = vbslq_u16(voverflow23, vcounter, vindex23); + + /* Update counter for overflow check. 
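+ * vcounter starts at {1,1,1,1,2,2,2,2} and grows by 4 per iteration, so a + * selected lane records the 1-based index of the overflowing sequence + * (after the +2 fixup applied to the second vector once the loop exits).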
*/ + vcounter = vaddq_u16(vcounter, vdupq_n_u16(4)); + + n += 4; + } while(n < nbSequences - 3); + + /* Fix up indices in the second vector: to save an additional counter + in the loop, the second vector shares vcounter, so we need to add 2 + here when its indices are not 0. */ + { uint16x8_t nonzero = vtstq_u16(vindex23, vindex23); + vindex23 = vsubq_u16(vindex23, nonzero); + vindex23 = vsubq_u16(vindex23, nonzero); + } -static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx) -{ -#ifdef ZSTD_MULTITHREAD - if (cctx->appliedParams.nbWorkers >= 1) { - assert(cctx->mtctx != NULL); - return ZSTDMT_nextInputSizeHint(cctx->mtctx); + /* Merge indices in the vectors, maximums are needed. */ + vindex01 = vmaxq_u16(vindex01, vindex23); + vindex01 = vmaxq_u16(vindex01, vextq_u16(vindex01, vindex01, 4)); + + /* Compute `longLen` from the maximum matchLength and litLength indices, + with a preference for litLength. */ + { U64 maxLitMatchIndices = vgetq_lane_u64(vreinterpretq_u64_u16(vindex01), 0); + size_t maxLitIndex = (maxLitMatchIndices >> 16) & 0xFFFF; + size_t maxMatchIndex = (maxLitMatchIndices >> 32) & 0xFFFF; + longLen = maxLitIndex > maxMatchIndex ? maxLitIndex + nbSequences + : maxMatchIndex; + } + } + + /* Handle remaining elements. */ + for (; n < nbSequences; n++) { + dstSeqs[n].offBase = OFFSET_TO_OFFBASE(inSeqs[n].offset); + dstSeqs[n].litLength = (U16)inSeqs[n].litLength; + dstSeqs[n].mlBase = (U16)(inSeqs[n].matchLength - MINMATCH); + /* Check for long length > 65535. */ + if (UNLIKELY(inSeqs[n].matchLength > 65535 + MINMATCH)) { + assert(longLen == 0); + longLen = n + 1; + } + if (UNLIKELY(inSeqs[n].litLength > 65535)) { + assert(longLen == 0); + longLen = n + nbSequences + 1; + } + } + return longLen; +} + +#else /* No vectorization. */ + +size_t convertSequences_noRepcodes( + SeqDef* dstSeqs, + const ZSTD_Sequence* inSeqs, + size_t nbSequences) +{ + size_t longLen = 0; + size_t n; + for (n=0; n<nbSequences; n++) { + dstSeqs[n].offBase = OFFSET_TO_OFFBASE(inSeqs[n].offset); + dstSeqs[n].litLength = (U16)inSeqs[n].litLength; + dstSeqs[n].mlBase = (U16)(inSeqs[n].matchLength - MINMATCH); + /* Check for long length > 65535. */ + if (UNLIKELY(inSeqs[n].matchLength > 65535+MINMATCH)) { + assert(longLen == 0); + longLen = n + 1; + } + if (UNLIKELY(inSeqs[n].litLength > 65535)) { + assert(longLen == 0); + longLen = n + nbSequences + 1; + } } + return longLen; +} + #endif - return ZSTD_nextInputSizeHint(cctx); +/* + * Precondition: Sequences must end on an explicit Block Delimiter + * @return: 0 on success, or an error code. + * Note: Sequence validation functionality has been disabled (removed). + * This is helpful to generate a lean main pipeline, improving performance. + * It may be re-inserted later. + */ +size_t ZSTD_convertBlockSequences(ZSTD_CCtx* cctx, + const ZSTD_Sequence* const inSeqs, size_t nbSequences, + int repcodeResolution) +{ + Repcodes_t updatedRepcodes; + size_t seqNb = 0; + + DEBUGLOG(5, "ZSTD_convertBlockSequences (nbSequences = %zu)", nbSequences); + + RETURN_ERROR_IF(nbSequences >= cctx->seqStore.maxNbSeq, externalSequences_invalid, + "Not enough memory allocated.
Try adjusting ZSTD_c_minMatch."); + + ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t)); + + /* check end condition */ + assert(nbSequences >= 1); + assert(inSeqs[nbSequences-1].matchLength == 0); + assert(inSeqs[nbSequences-1].offset == 0); + + /* Convert Sequences from public format to internal format */ + if (!repcodeResolution) { + size_t const longl = convertSequences_noRepcodes(cctx->seqStore.sequencesStart, inSeqs, nbSequences-1); + cctx->seqStore.sequences = cctx->seqStore.sequencesStart + nbSequences-1; + if (longl) { + DEBUGLOG(5, "long length"); + assert(cctx->seqStore.longLengthType == ZSTD_llt_none); + if (longl <= nbSequences-1) { + DEBUGLOG(5, "long match length detected at pos %zu", longl-1); + cctx->seqStore.longLengthType = ZSTD_llt_matchLength; + cctx->seqStore.longLengthPos = (U32)(longl-1); + } else { + DEBUGLOG(5, "long literals length detected at pos %zu", longl-nbSequences); + assert(longl <= 2* (nbSequences-1)); + cctx->seqStore.longLengthType = ZSTD_llt_literalLength; + cctx->seqStore.longLengthPos = (U32)(longl-(nbSequences-1)-1); + } + } + } else { + for (seqNb = 0; seqNb < nbSequences - 1 ; seqNb++) { + U32 const litLength = inSeqs[seqNb].litLength; + U32 const matchLength = inSeqs[seqNb].matchLength; + U32 const ll0 = (litLength == 0); + U32 const offBase = ZSTD_finalizeOffBase(inSeqs[seqNb].offset, updatedRepcodes.rep, ll0); + + DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); + ZSTD_storeSeqOnly(&cctx->seqStore, litLength, offBase, matchLength); + ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); + } + } + + /* If we skipped repcode search while parsing, we need to update repcodes now */ + if (!repcodeResolution && nbSequences > 1) { + U32* const rep = updatedRepcodes.rep; + + if (nbSequences >= 4) { + U32 lastSeqIdx = (U32)nbSequences - 2; /* index of last full sequence */ + rep[2] = inSeqs[lastSeqIdx - 2].offset; + rep[1] = inSeqs[lastSeqIdx - 1].offset; + rep[0] = inSeqs[lastSeqIdx].offset; + } else if (nbSequences == 3) { + rep[2] = rep[0]; + rep[1] = inSeqs[0].offset; + rep[0] = inSeqs[1].offset; + } else { + assert(nbSequences == 2); + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = inSeqs[0].offset; + } + } + + ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t)); + + return 0; } -size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) -{ - FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) ); - return ZSTD_nextInputSizeHint_MTorST(zcs); +#if defined(ZSTD_ARCH_X86_AVX2) + +BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs) +{ + size_t i; + __m256i const zeroVec = _mm256_setzero_si256(); + __m256i sumVec = zeroVec; /* accumulates match+lit in 32-bit lanes */ + ZSTD_ALIGNED(32) U32 tmp[8]; /* temporary buffer for reduction */ + size_t mSum = 0, lSum = 0; + ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16); + + /* Process 2 structs (32 bytes) at a time */ + for (i = 0; i + 2 <= nbSeqs; i += 2) { + /* Load two consecutive ZSTD_Sequence (8×4 = 32 bytes) */ + __m256i data = _mm256_loadu_si256((const __m256i*)(const void*)&seqs[i]); + /* check end of block signal */ + __m256i cmp = _mm256_cmpeq_epi32(data, zeroVec); + int cmp_res = _mm256_movemask_epi8(cmp); + /* indices for match lengths correspond to bits [8..11], [24..27] + * => combined mask = 0x0F000F00 */ + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8); + if (cmp_res & 0x0F000F00) break; + 
/* Accumulate in sumVec */ + sumVec = _mm256_add_epi32(sumVec, data); + } + + /* Horizontal reduction */ + _mm256_store_si256((__m256i*)tmp, sumVec); + lSum = tmp[1] + tmp[5]; + mSum = tmp[2] + tmp[6]; + + /* Handle the leftover */ + for (; i < nbSeqs; i++) { + lSum += seqs[i].litLength; + mSum += seqs[i].matchLength; + if (seqs[i].matchLength == 0) break; /* end of block */ + } + + if (i==nbSeqs) { + /* reaching end of sequences: end of block signal was not present */ + BlockSummary bs; + bs.nbSequences = ERROR(externalSequences_invalid); + return bs; + } + { BlockSummary bs; + bs.nbSequences = i+1; + bs.blockSize = lSum + mSum; + bs.litSize = lSum; + return bs; + } } +#elif defined (ZSTD_ARCH_RISCV_RVV) + +BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs) +{ + size_t totalMatchSize = 0; + size_t litSize = 0; + size_t i = 0; + int found_terminator = 0; + size_t vl_max = __riscv_vsetvlmax_e32m1(); + typedef uint32_t __attribute__((may_alias)) aliased_u32; + vuint32m1_t v_lit_sum = __riscv_vmv_v_x_u32m1(0, vl_max); + vuint32m1_t v_match_sum = __riscv_vmv_v_x_u32m1(0, vl_max); + + for (; i < nbSeqs; ) { + size_t vl = __riscv_vsetvl_e32m2(nbSeqs - i); + + vuint32m2x4_t v_tuple = __riscv_vlseg4e32_v_u32m2x4( + (const aliased_u32*)((const void*)&seqs[i]), + vl + ); + vuint32m2_t v_lit = __riscv_vget_v_u32m2x4_u32m2(v_tuple, 1); + vuint32m2_t v_match = __riscv_vget_v_u32m2x4_u32m2(v_tuple, 2); + + // Check if any element has a matchLength of 0 + vbool16_t mask = __riscv_vmseq_vx_u32m2_b16(v_match, 0, vl); + int first_zero = __riscv_vfirst_m_b16(mask, vl); + + if (first_zero >= 0) { + // Found the block terminator: set the effective length to that + // index + 1, so the final (delimiter) sequence is included + vl = first_zero + 1; + + // accumulate the literal and match length sums over the truncated vector + v_lit_sum = __riscv_vredsum_vs_u32m2_u32m1(__riscv_vslidedown_vx_u32m2(v_lit, 0, vl), v_lit_sum, vl); + v_match_sum = __riscv_vredsum_vs_u32m2_u32m1(__riscv_vslidedown_vx_u32m2(v_match, 0, vl), v_match_sum, vl); + + i += vl; + found_terminator = 1; + assert(seqs[i - 1].offset == 0); + break; + } else { + + v_lit_sum = __riscv_vredsum_vs_u32m2_u32m1(v_lit, v_lit_sum, vl); + v_match_sum = __riscv_vredsum_vs_u32m2_u32m1(v_match, v_match_sum, vl); + i += vl; + } + } + litSize = __riscv_vmv_x_s_u32m1_u32(v_lit_sum); + totalMatchSize = __riscv_vmv_x_s_u32m1_u32(v_match_sum); -size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, - ZSTD_outBuffer* output, - ZSTD_inBuffer* input, - ZSTD_EndDirective endOp) + if (!found_terminator && i==nbSeqs) { + BlockSummary bs; + bs.nbSequences = ERROR(externalSequences_invalid); + return bs; + } + { BlockSummary bs; + bs.nbSequences = i; + bs.blockSize = litSize + totalMatchSize; + bs.litSize = litSize; + return bs; + } +} + +#else + +/* + * The function assumes `litMatchLength` packs `litLength` and `matchLength` + * into a single 64-bit value, as produced by MEM_read64(&seq.litLength). + * Which half holds the match length depends on the system's endianness: + * - On little-endian systems, the match length occupies the upper 32 bits, + * so the check verifies that the entire 64-bit value is at most 0xFFFFFFFF. + * - On big-endian systems, the match length occupies the lower 32 bits, + * which are checked directly. + * + * @returns 1 if the match length is zero, 0 otherwise.
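+ * + * Worked example (little-endian): litLength == 7 and matchLength == 0 pack + * into 0x0000000000000007, which is <= 0xFFFFFFFF, so the result is 1.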
+ */ +FORCE_INLINE_TEMPLATE int matchLengthHalfIsZero(U64 litMatchLength) { - DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp); - /* check conditions */ - RETURN_ERROR_IF(output->pos > output->size, GENERIC); - RETURN_ERROR_IF(input->pos > input->size, GENERIC); - assert(cctx!=NULL); + if (MEM_isLittleEndian()) { + return litMatchLength <= 0xFFFFFFFFULL; + } else { + return (U32)litMatchLength == 0; + } +} - /* transparent initialization stage */ - if (cctx->streamStage == zcss_init) { - ZSTD_CCtx_params params = cctx->requestedParams; - ZSTD_prefixDict const prefixDict = cctx->prefixDict; - FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) ); /* Init the local dict if present. */ - memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */ - assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */ - DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage"); - if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1; /* auto-fix pledgedSrcSize */ - params.cParams = ZSTD_getCParamsFromCCtxParams( - &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/); +BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs) +{ + /* Use multiple accumulators for efficient use of wide out-of-order machines. */ + U64 litMatchSize0 = 0; + U64 litMatchSize1 = 0; + U64 litMatchSize2 = 0; + U64 litMatchSize3 = 0; + size_t n = 0; + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) + 4 == offsetof(ZSTD_Sequence, matchLength)); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) + 4 == offsetof(ZSTD_Sequence, rep)); + assert(seqs); -#ifdef ZSTD_MULTITHREAD - if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) { - params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */ - } - if (params.nbWorkers > 0) { - /* mt context creation */ - if (cctx->mtctx == NULL) { - DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u", - params.nbWorkers); - cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem); - RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation); + if (nbSeqs > 3) { + /* Process the input in 4 independent streams to reach high throughput. */ + do { + /* Load `litLength` and `matchLength` as a packed `U64`. It is safe + * to use 64-bit unsigned arithmetic here because the sum of `litLength` + * and `matchLength` cannot exceed the block size, so the 32-bit + * subparts will never overflow. 
*/ + U64 litMatchLength = MEM_read64(&seqs[n].litLength); + litMatchSize0 += litMatchLength; + if (matchLengthHalfIsZero(litMatchLength)) { + assert(seqs[n].offset == 0); + goto _out; } - /* mt compression */ - DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers); - FORWARD_IF_ERROR( ZSTDMT_initCStream_internal( - cctx->mtctx, - prefixDict.dict, prefixDict.dictSize, ZSTD_dct_rawContent, - cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) ); - cctx->streamStage = zcss_load; - cctx->appliedParams.nbWorkers = params.nbWorkers; - } else -#endif - { FORWARD_IF_ERROR( ZSTD_resetCStream_internal(cctx, - prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, - cctx->cdict, - params, cctx->pledgedSrcSizePlusOne-1) ); - assert(cctx->streamStage == zcss_load); - assert(cctx->appliedParams.nbWorkers == 0); - } } - /* end of transparent initialization stage */ - /* compression stage */ -#ifdef ZSTD_MULTITHREAD - if (cctx->appliedParams.nbWorkers > 0) { - int const forceMaxProgress = (endOp == ZSTD_e_flush || endOp == ZSTD_e_end); - size_t flushMin; - assert(forceMaxProgress || endOp == ZSTD_e_continue /* Protection for a new flush type */); - if (cctx->cParamsChanged) { - ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams); - cctx->cParamsChanged = 0; - } - do { - flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp); - if ( ZSTD_isError(flushMin) - || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */ - ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only); + litMatchLength = MEM_read64(&seqs[n + 1].litLength); + litMatchSize1 += litMatchLength; + if (matchLengthHalfIsZero(litMatchLength)) { + n += 1; + assert(seqs[n].offset == 0); + goto _out; } - FORWARD_IF_ERROR(flushMin); - } while (forceMaxProgress && flushMin != 0 && output->pos < output->size); - DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic"); - /* Either we don't require maximum forward progress, we've finished the - * flush, or we are out of output space. - */ - assert(!forceMaxProgress || flushMin == 0 || output->pos == output->size); - return flushMin; + + litMatchLength = MEM_read64(&seqs[n + 2].litLength); + litMatchSize2 += litMatchLength; + if (matchLengthHalfIsZero(litMatchLength)) { + n += 2; + assert(seqs[n].offset == 0); + goto _out; + } + + litMatchLength = MEM_read64(&seqs[n + 3].litLength); + litMatchSize3 += litMatchLength; + if (matchLengthHalfIsZero(litMatchLength)) { + n += 3; + assert(seqs[n].offset == 0); + goto _out; + } + + n += 4; + } while(n < nbSeqs - 3); + } + + for (; n < nbSeqs; n++) { + U64 litMatchLength = MEM_read64(&seqs[n].litLength); + litMatchSize0 += litMatchLength; + if (matchLengthHalfIsZero(litMatchLength)) { + assert(seqs[n].offset == 0); + goto _out; + } + } + /* At this point n == nbSeqs, so no end terminator. 
+ */ + { BlockSummary bs; + bs.nbSequences = ERROR(externalSequences_invalid); + return bs; + } +_out: + litMatchSize0 += litMatchSize1 + litMatchSize2 + litMatchSize3; + { BlockSummary bs; + bs.nbSequences = n + 1; + if (MEM_isLittleEndian()) { + bs.litSize = (U32)litMatchSize0; + bs.blockSize = bs.litSize + (litMatchSize0 >> 32); + } else { + bs.litSize = litMatchSize0 >> 32; + bs.blockSize = bs.litSize + (U32)litMatchSize0; + } + return bs; } -#endif - FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) ); - DEBUGLOG(5, "completed ZSTD_compressStream2"); - return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */ } +#endif + +static size_t +ZSTD_compressSequencesAndLiterals_internal(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const ZSTD_Sequence* inSeqs, size_t nbSequences, + const void* literals, size_t litSize, size_t srcSize) -size_t ZSTD_compressStream2_simpleArgs ( - ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, size_t* dstPos, - const void* src, size_t srcSize, size_t* srcPos, - ZSTD_EndDirective endOp) { - ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; - ZSTD_inBuffer input = { src, srcSize, *srcPos }; - /* ZSTD_compressStream2() will check validity of dstPos and srcPos */ - size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); - *dstPos = output.pos; - *srcPos = input.pos; - return cErr; + size_t remaining = srcSize; + size_t cSize = 0; + BYTE* op = (BYTE*)dst; + int const repcodeResolution = (cctx->appliedParams.searchForExternalRepcodes == ZSTD_ps_enable); + assert(cctx->appliedParams.searchForExternalRepcodes != ZSTD_ps_auto); + + DEBUGLOG(4, "ZSTD_compressSequencesAndLiterals_internal: nbSeqs=%zu, litSize=%zu", nbSequences, litSize); + RETURN_ERROR_IF(nbSequences == 0, externalSequences_invalid, "Requires at least 1 end-of-block"); + + /* Special case: empty frame */ + if ((nbSequences == 1) && (inSeqs[0].litLength == 0)) { + U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1); + RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "No room for empty frame block header"); + MEM_writeLE24(op, cBlockHeader24); + op += ZSTD_blockHeaderSize; + dstCapacity -= ZSTD_blockHeaderSize; + cSize += ZSTD_blockHeaderSize; + } + + while (nbSequences) { + size_t compressedSeqsSize, cBlockSize, conversionStatus; + BlockSummary const block = ZSTD_get1BlockSummary(inSeqs, nbSequences); + U32 const lastBlock = (block.nbSequences == nbSequences); + FORWARD_IF_ERROR(block.nbSequences, "Error while trying to determine nb of sequences for a block"); + assert(block.nbSequences <= nbSequences); + RETURN_ERROR_IF(block.litSize > litSize, externalSequences_invalid, "discrepancy: Sequences require more literals than present in buffer"); + ZSTD_resetSeqStore(&cctx->seqStore); + + conversionStatus = ZSTD_convertBlockSequences(cctx, + inSeqs, block.nbSequences, + repcodeResolution); + FORWARD_IF_ERROR(conversionStatus, "Bad sequence conversion"); + inSeqs += block.nbSequences; + nbSequences -= block.nbSequences; + remaining -= block.blockSize; + + /* Note: when blockSize is very small, the other variants send it uncompressed. + * Here, we still send the sequences, because we don't have the original source to send it uncompressed. + * One could in theory imagine reproducing the source from the sequences, + * but that's complex, costly, and memory intensive, and goes against the objective of this variant.
+ */ + + RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block"); + + compressedSeqsSize = ZSTD_entropyCompressSeqStore_internal( + op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize, + literals, block.litSize, + &cctx->seqStore, + &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy, + &cctx->appliedParams, + cctx->tmpWorkspace, cctx->tmpWkspSize /* statically allocated in resetCCtx */, + cctx->bmi2); + FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed"); + /* note: the spec forbids any compressed block from being larger than the maximum block size */ + if (compressedSeqsSize > cctx->blockSizeMax) compressedSeqsSize = 0; + DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize); + litSize -= block.litSize; + literals = (const char*)literals + block.litSize; + + /* Note: it's difficult to check the source for an RLE block when only literals are provided, + * but it could be detected by analyzing the sequences directly */ + + if (compressedSeqsSize == 0) { + /* Sending uncompressed blocks is out of reach, because the source is not provided. + * In theory, one could use the sequences to regenerate the source, like a decompressor, + * but that's complex and memory hungry, defeating the purpose of this variant. + * Current outcome: generate an error code. + */ + RETURN_ERROR(cannotProduce_uncompressedBlock, "ZSTD_compressSequencesAndLiterals cannot generate an uncompressed block"); + } else { + U32 cBlockHeader; + assert(compressedSeqsSize > 1); /* no RLE */ + /* Error checking and repcodes update */ + ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState); + if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) + cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; + + /* Write block header into beginning of block*/ + cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3); + MEM_writeLE24(op, cBlockHeader); + cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; + DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize); + } + + cSize += cBlockSize; + op += cBlockSize; + dstCapacity -= cBlockSize; + cctx->isFirstBlock = 0; + DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity); + + if (lastBlock) { + assert(nbSequences == 0); + break; + } + } + + RETURN_ERROR_IF(litSize != 0, externalSequences_invalid, "literals must be entirely and exactly consumed"); + RETURN_ERROR_IF(remaining != 0, externalSequences_invalid, "Sequences must represent a total of exactly srcSize=%zu", srcSize); + DEBUGLOG(4, "cSize final total: %zu", cSize); + return cSize; } -size_t ZSTD_compress2(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) +size_t +ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const ZSTD_Sequence* inSeqs, size_t inSeqsSize, + const void* literals, size_t litSize, size_t litCapacity, + size_t decompressedSize) { - ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only); - { size_t oPos = 0; - size_t iPos = 0; - size_t const result = ZSTD_compressStream2_simpleArgs(cctx, - dst, dstCapacity, &oPos, - src, srcSize, &iPos, - ZSTD_e_end); - FORWARD_IF_ERROR(result); - if (result != 0) { /* compression not completed, due to lack of output space */ - assert(oPos == dstCapacity); - RETURN_ERROR(dstSize_tooSmall); - } - assert(iPos ==
srcSize); /* all input is expected consumed */ - return oPos; + BYTE* op = (BYTE*)dst; + size_t cSize = 0; + + /* Transparent initialization stage, same as compressStream2() */ + DEBUGLOG(4, "ZSTD_compressSequencesAndLiterals (dstCapacity=%zu)", dstCapacity); + assert(cctx != NULL); + if (litCapacity < litSize) { + RETURN_ERROR(workSpace_tooSmall, "literals buffer is not large enough: must be at least 8 bytes larger than litSize (risk of out-of-bounds read)"); + } + FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, decompressedSize), "CCtx initialization failed"); + + if (cctx->appliedParams.blockDelimiters == ZSTD_sf_noBlockDelimiters) { + RETURN_ERROR(frameParameter_unsupported, "This mode is only compatible with explicit delimiters"); } + if (cctx->appliedParams.validateSequences) { + RETURN_ERROR(parameter_unsupported, "This mode is not compatible with Sequence validation"); + } + if (cctx->appliedParams.fParams.checksumFlag) { + RETURN_ERROR(frameParameter_unsupported, "This mode is not compatible with frame checksum"); + } + + /* Begin writing output, starting with frame header */ + { size_t const frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, + &cctx->appliedParams, decompressedSize, cctx->dictID); + op += frameHeaderSize; + assert(frameHeaderSize <= dstCapacity); + dstCapacity -= frameHeaderSize; + cSize += frameHeaderSize; + } + + /* Now generate compressed blocks */ + { size_t const cBlocksSize = ZSTD_compressSequencesAndLiterals_internal(cctx, + op, dstCapacity, + inSeqs, inSeqsSize, + literals, litSize, decompressedSize); + FORWARD_IF_ERROR(cBlocksSize, "Compressing blocks failed!"); + cSize += cBlocksSize; + assert(cBlocksSize <= dstCapacity); + dstCapacity -= cBlocksSize; + } + + DEBUGLOG(4, "Final compressed size: %zu", cSize); + return cSize; } /*====== Finalize ======*/ +static ZSTD_inBuffer inBuffer_forEndFlush(const ZSTD_CStream* zcs) +{ + const ZSTD_inBuffer nullInput = { NULL, 0, 0 }; + const int stableInput = (zcs->appliedParams.inBufferMode == ZSTD_bm_stable); + return stableInput ? zcs->expectedInBuffer : nullInput; +} + /*! ZSTD_flushStream() : * @return : amount of data remaining to flush */ size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) { - ZSTD_inBuffer input = { NULL, 0, 0 }; + ZSTD_inBuffer input = inBuffer_forEndFlush(zcs); + input.size = input.pos; /* do not ingest more input during flush */ return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush); } - size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) { - ZSTD_inBuffer input = { NULL, 0, 0 }; + ZSTD_inBuffer input = inBuffer_forEndFlush(zcs); size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end); - FORWARD_IF_ERROR( remainingToFlush ); + FORWARD_IF_ERROR(remainingToFlush, "ZSTD_compressStream2(,,ZSTD_e_end) failed"); if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */ /* single thread mode : attempt to calculate remaining to flush more precisely */ { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE; - size_t const checksumSize = zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4; + size_t const checksumSize = (size_t)(zcs->frameEnded ?
0 : zcs->appliedParams.fParams.checksumFlag * 4); size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize; DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush); return toFlush; @@ -4270,147 +8187,176 @@ size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) /*-===== Pre-defined compression levels =====-*/ +#include "clevels.h" -#define ZSTD_MAX_CLEVEL 22 int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; } +int ZSTD_defaultCLevel(void) { return ZSTD_CLEVEL_DEFAULT; } -static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = { -{ /* "default" - for any srcSize > 256 KB */ - /* W, C, H, S, L, TL, strat */ - { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */ - { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */ - { 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */ - { 21, 16, 17, 1, 5, 1, ZSTD_dfast }, /* level 3 */ - { 21, 18, 18, 1, 5, 1, ZSTD_dfast }, /* level 4 */ - { 21, 18, 19, 2, 5, 2, ZSTD_greedy }, /* level 5 */ - { 21, 19, 19, 3, 5, 4, ZSTD_greedy }, /* level 6 */ - { 21, 19, 19, 3, 5, 8, ZSTD_lazy }, /* level 7 */ - { 21, 19, 19, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */ - { 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */ - { 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */ - { 22, 21, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */ - { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */ - { 22, 21, 22, 5, 5, 32, ZSTD_btlazy2 }, /* level 13 */ - { 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */ - { 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */ - { 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */ - { 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */ - { 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */ - { 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */ - { 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */ - { 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */ - { 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */ -}, -{ /* for srcSize <= 256 KB */ - /* W, C, H, S, L, T, strat */ - { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ - { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */ - { 18, 14, 14, 1, 5, 1, ZSTD_dfast }, /* level 2 */ - { 18, 16, 16, 1, 4, 1, ZSTD_dfast }, /* level 3 */ - { 18, 16, 17, 2, 5, 2, ZSTD_greedy }, /* level 4.*/ - { 18, 18, 18, 3, 5, 2, ZSTD_greedy }, /* level 5.*/ - { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/ - { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */ - { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ - { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ - { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ - { 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/ - { 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/ - { 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */ - { 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ - { 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/ - { 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/ - { 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/ - { 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/ - { 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ - { 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/ - { 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/ - { 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/ -}, -{ /* for srcSize <= 128 KB */ - /* W, C, H, S, L, T, strat */ - { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ - { 17, 12, 13, 1, 6, 0, ZSTD_fast }, 
/* level 1 */ - { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */ - { 17, 15, 16, 2, 5, 1, ZSTD_dfast }, /* level 3 */ - { 17, 17, 17, 2, 4, 1, ZSTD_dfast }, /* level 4 */ - { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */ - { 17, 17, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */ - { 17, 17, 17, 3, 4, 8, ZSTD_lazy2 }, /* level 7 */ - { 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ - { 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ - { 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ - { 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */ - { 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */ - { 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/ - { 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ - { 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/ - { 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/ - { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/ - { 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/ - { 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/ - { 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/ - { 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ - { 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/ -}, -{ /* for srcSize <= 16 KB */ - /* W, C, H, S, L, T, strat */ - { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ - { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */ - { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */ - { 14, 14, 15, 2, 4, 1, ZSTD_dfast }, /* level 3 */ - { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */ - { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/ - { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */ - { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */ - { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/ - { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/ - { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/ - { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/ - { 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/ - { 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/ - { 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/ - { 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/ - { 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/ - { 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/ - { 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/ - { 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ - { 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/ - { 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ - { 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/ -}, -}; +static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize) +{ + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict); + switch (cParams.strategy) { + case ZSTD_fast: + case ZSTD_dfast: + break; + case ZSTD_greedy: + case ZSTD_lazy: + case ZSTD_lazy2: + cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG; + break; + case ZSTD_btlazy2: + case ZSTD_btopt: + case ZSTD_btultra: + case ZSTD_btultra2: + break; + } + return cParams; +} -/*! ZSTD_getCParams() : +static int ZSTD_dedicatedDictSearch_isSupported( + ZSTD_compressionParameters const* cParams) +{ + return (cParams->strategy >= ZSTD_greedy) + && (cParams->strategy <= ZSTD_lazy2) + && (cParams->hashLog > cParams->chainLog) + && (cParams->chainLog <= 24); +} + +/** + * Reverses the adjustment applied to cparams when enabling dedicated dict + * search. This is used to recover the params set to be used in the working + * context. 
(Otherwise, those tables would also grow.) + */ +static void ZSTD_dedicatedDictSearch_revertCParams( + ZSTD_compressionParameters* cParams) { + switch (cParams->strategy) { + case ZSTD_fast: + case ZSTD_dfast: + break; + case ZSTD_greedy: + case ZSTD_lazy: + case ZSTD_lazy2: + cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG; + if (cParams->hashLog < ZSTD_HASHLOG_MIN) { + cParams->hashLog = ZSTD_HASHLOG_MIN; + } + break; + case ZSTD_btlazy2: + case ZSTD_btopt: + case ZSTD_btultra: + case ZSTD_btultra2: + break; + } +} + +static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode) +{ + switch (mode) { + case ZSTD_cpm_unknown: + case ZSTD_cpm_noAttachDict: + case ZSTD_cpm_createCDict: + break; + case ZSTD_cpm_attachDict: + dictSize = 0; + break; + default: + assert(0); + break; + } + { int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN; + size_t const addedSize = unknown && dictSize > 0 ? 500 : 0; + return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize; + } +} + +/*! ZSTD_getCParams_internal() : * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. - * Size values are optional, provide 0 if not known or unused */ -ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) + * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown. + * Use dictSize == 0 for unknown or unused. + * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_CParamMode_e`. */ +static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode) { - size_t const addedSize = srcSizeHint ? 0 : 500; - U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : ZSTD_CONTENTSIZE_UNKNOWN; /* intentional overflow for srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN */ + U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode); U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); - int row = compressionLevel; - DEBUGLOG(5, "ZSTD_getCParams (cLevel=%i)", compressionLevel); + int row; + DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel); + + /* row */ if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */ - if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */ - if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL; + else if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */ + else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL; + else row = compressionLevel; + { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row]; - if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel); /* acceleration factor */ - return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); /* refine parameters based on srcSize & dictSize */ + DEBUGLOG(5, "ZSTD_getCParams_internal selected tableID: %u row: %u strat: %u", tableID, row, (U32)cp.strategy); + /* acceleration factor */ + if (compressionLevel < 0) { + int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel); + cp.targetLength = (unsigned)(-clampedCompressionLevel); + } + /* refine parameters based on srcSize & dictSize */ + return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode, ZSTD_ps_auto); } } +/*! 
ZSTD_getCParams() : + * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. + * Size values are optional, provide 0 if not known or unused */ +ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) +{ + if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN; + return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown); +} + /*! ZSTD_getParams() : * same idea as ZSTD_getCParams() * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). * Fields of `ZSTD_frameParameters` are set to default values */ -ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { +static ZSTD_parameters +ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode) +{ ZSTD_parameters params; - ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize); + ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode); DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel); - memset(&params, 0, sizeof(params)); + ZSTD_memset(&params, 0, sizeof(params)); params.cParams = cParams; params.fParams.contentSizeFlag = 1; return params; } + +/*! ZSTD_getParams() : + * same idea as ZSTD_getCParams() + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). + * Fields of `ZSTD_frameParameters` are set to default values */ +ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) +{ + if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN; + return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown); +} + +void ZSTD_registerSequenceProducer( + ZSTD_CCtx* zc, + void* extSeqProdState, + ZSTD_sequenceProducer_F extSeqProdFunc) +{ + assert(zc != NULL); + ZSTD_CCtxParams_registerSequenceProducer( + &zc->requestedParams, extSeqProdState, extSeqProdFunc + ); +} + +void ZSTD_CCtxParams_registerSequenceProducer( + ZSTD_CCtx_params* params, + void* extSeqProdState, + ZSTD_sequenceProducer_F extSeqProdFunc) +{ + assert(params != NULL); + if (extSeqProdFunc != NULL) { + params->extSeqProdFunc = extSeqProdFunc; + params->extSeqProdState = extSeqProdState; + } else { + params->extSeqProdFunc = NULL; + params->extSeqProdState = NULL; + } +} diff --git a/lib/compress/zstd_compress_internal.h b/lib/compress/zstd_compress_internal.h index 55304fa317f..13a394b3816 100644 --- a/lib/compress/zstd_compress_internal.h +++ b/lib/compress/zstd_compress_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved.
* * This source code is licensed under both the BSD-style license (found in the @@ -18,15 +18,13 @@ /*-************************************* * Dependencies ***************************************/ -#include "zstd_internal.h" +#include "../common/zstd_internal.h" +#include "zstd_cwksp.h" #ifdef ZSTD_MULTITHREAD # include "zstdmt_compress.h" #endif - -#if defined (__cplusplus) -extern "C" { -#endif - +#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_NbCommonBytes */ +#include "zstd_preSplit.h" /* ZSTD_SLIPBLOCK_WORKSPACESIZE */ /*-************************************* * Constants @@ -38,7 +36,7 @@ extern "C" { It's not a big deal though : candidate will just be sorted again. Additionally, candidate position 1 will be lost. But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss. - The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy. + The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table reuse with a different strategy. This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */ @@ -63,7 +61,7 @@ typedef struct { } ZSTD_localDict; typedef struct { - U32 CTable[HUF_CTABLE_SIZE_U32(255)]; + HUF_CElt CTable[HUF_CTABLE_SIZE_ST(255)]; HUF_repeat repeatMode; } ZSTD_hufCTables_t; @@ -81,29 +79,159 @@ typedef struct { ZSTD_fseCTables_t fse; } ZSTD_entropyCTables_t; +/*********************************************** +* Sequences * +***********************************************/ +typedef struct SeqDef_s { + U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */ + U16 litLength; + U16 mlBase; /* mlBase == matchLength - MINMATCH */ +} SeqDef; + +/* Controls whether seqStore has a single "long" litLength or matchLength. See SeqStore_t. */ +typedef enum { + ZSTD_llt_none = 0, /* no longLengthType */ + ZSTD_llt_literalLength = 1, /* represents a long literal */ + ZSTD_llt_matchLength = 2 /* represents a long match */ +} ZSTD_longLengthType_e; + typedef struct { - U32 off; - U32 len; + SeqDef* sequencesStart; + SeqDef* sequences; /* ptr to end of sequences */ + BYTE* litStart; + BYTE* lit; /* ptr to end of literals */ + BYTE* llCode; + BYTE* mlCode; + BYTE* ofCode; + size_t maxNbSeq; + size_t maxNbLit; + + /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength + * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment + * the existing value of the litLength or matchLength by 0x10000. + */ + ZSTD_longLengthType_e longLengthType; + U32 longLengthPos; /* Index of the sequence to apply long length modification to */ +} SeqStore_t; + +typedef struct { + U32 litLength; + U32 matchLength; +} ZSTD_SequenceLength; + +/** + * Returns the ZSTD_SequenceLength for the given sequences. It handles the decoding of long sequences + * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength. 
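+ * Illustrative decoding (values assumed): with MINMATCH==3, a SeqDef storing mlBase==2
+ * at the position flagged ZSTD_llt_matchLength decodes to matchLength == 2 + 3 + 0x10000,
+ * while the same SeqDef at any other position decodes to matchLength == 2 + 3.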
+ */ +MEM_STATIC ZSTD_SequenceLength ZSTD_getSequenceLength(SeqStore_t const* seqStore, SeqDef const* seq) +{ + ZSTD_SequenceLength seqLen; + seqLen.litLength = seq->litLength; + seqLen.matchLength = seq->mlBase + MINMATCH; + if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) { + if (seqStore->longLengthType == ZSTD_llt_literalLength) { + seqLen.litLength += 0x10000; + } + if (seqStore->longLengthType == ZSTD_llt_matchLength) { + seqLen.matchLength += 0x10000; + } + } + return seqLen; +} + +const SeqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */ +int ZSTD_seqToCodes(const SeqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ + + +/*********************************************** +* Entropy buffer statistics structs and funcs * +***********************************************/ +/** ZSTD_hufCTablesMetadata_t : + * Stores Literals Block Type for a super-block in hType, and + * huffman tree description in hufDesBuffer. + * hufDesSize refers to the size of huffman tree description in bytes. + * This metadata is populated in ZSTD_buildBlockEntropyStats_literals() */ +typedef struct { + SymbolEncodingType_e hType; + BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE]; + size_t hufDesSize; +} ZSTD_hufCTablesMetadata_t; + +/** ZSTD_fseCTablesMetadata_t : + * Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and + * fse tables in fseTablesBuffer. + * fseTablesSize refers to the size of fse tables in bytes. + * This metadata is populated in ZSTD_buildBlockEntropyStats_sequences() */ +typedef struct { + SymbolEncodingType_e llType; + SymbolEncodingType_e ofType; + SymbolEncodingType_e mlType; + BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE]; + size_t fseTablesSize; + size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */ +} ZSTD_fseCTablesMetadata_t; + +typedef struct { + ZSTD_hufCTablesMetadata_t hufMetadata; + ZSTD_fseCTablesMetadata_t fseMetadata; +} ZSTD_entropyCTablesMetadata_t; + +/** ZSTD_buildBlockEntropyStats() : + * Builds entropy for the block. + * @return : 0 on success or error code */ +size_t ZSTD_buildBlockEntropyStats( + const SeqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, size_t wkspSize); + +/********************************* +* Compression internals structs * +*********************************/ + +typedef struct { + U32 off; /* Offset sumtype code for the match, using ZSTD_storeSeq() format */ + U32 len; /* Raw length of match */ } ZSTD_match_t; typedef struct { - int price; - U32 off; - U32 mlen; - U32 litlen; - U32 rep[ZSTD_REP_NUM]; + U32 offset; /* Offset of sequence */ + U32 litLength; /* Length of literals prior to match */ + U32 matchLength; /* Raw length of match */ +} rawSeq; + +typedef struct { + rawSeq* seq; /* The start of the sequences */ + size_t pos; /* The index in seq where reading stopped. pos <= size. */ + size_t posInSequence; /* The position within the sequence at seq[pos] where reading + stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */ + size_t size; /* The number of sequences. <= capacity. 
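+ * (Consumption example, illustrative: pos==1 with posInSequence==5 means seq[0] is
+ * fully consumed and reading stopped 5 bytes into seq[1]'s litLength+matchLength span.)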
*/ + size_t capacity; /* The capacity starting from `seq` pointer */ +} RawSeqStore_t; + +UNUSED_ATTR static const RawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0}; + +typedef struct { + int price; /* price from beginning of segment to this position */ + U32 off; /* offset of previous match */ + U32 mlen; /* length of previous match */ + U32 litlen; /* nb of literals since previous match */ + U32 rep[ZSTD_REP_NUM]; /* offset history after previous match */ } ZSTD_optimal_t; typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e; +#define ZSTD_OPT_SIZE (ZSTD_OPT_NUM+3) typedef struct { /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */ unsigned* litFreq; /* table of literals statistics, of size 256 */ unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */ unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */ unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */ - ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */ - ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */ + ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_SIZE */ + ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_SIZE */ U32 litSum; /* nb of literals */ U32 litLengthSum; /* nb of litLength codes */ @@ -115,7 +243,7 @@ typedef struct { U32 offCodeSumBasePrice; /* to compare to log2(offreq) */ ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */ const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */ - ZSTD_literalCompressionMode_e literalCompressionMode; + ZSTD_ParamSwitch_e literalCompressionMode; } optState_t; typedef struct { @@ -124,31 +252,72 @@ typedef struct { } ZSTD_compressedBlockState_t; typedef struct { - BYTE const* nextSrc; /* next block here to continue on current prefix */ - BYTE const* base; /* All regular indexes relative to this position */ - BYTE const* dictBase; /* extDict indexes relative to this position */ - U32 dictLimit; /* below that point, need extDict */ - U32 lowLimit; /* below that point, no more valid data */ + BYTE const* nextSrc; /* next block here to continue on current prefix */ + BYTE const* base; /* All regular indexes relative to this position */ + BYTE const* dictBase; /* extDict indexes relative to this position */ + U32 dictLimit; /* below that point, need extDict */ + U32 lowLimit; /* below that point, no more valid data */ + U32 nbOverflowCorrections; /* Number of times overflow correction has run since + * ZSTD_window_init(). Useful for debugging coredumps + * and for ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY. + */ } ZSTD_window_t; -typedef struct ZSTD_matchState_t ZSTD_matchState_t; -struct ZSTD_matchState_t { +#define ZSTD_WINDOW_START_INDEX 2 + +typedef struct ZSTD_MatchState_t ZSTD_MatchState_t; + +#define ZSTD_ROW_HASH_CACHE_SIZE 8 /* Size of prefetching hash cache for row-based matchfinder */ + +struct ZSTD_MatchState_t { ZSTD_window_t window; /* State for window round buffer management */ - U32 loadedDictEnd; /* index of end of dictionary, within context's referential. When dict referential is copied into active context (i.e. not attached), effectively same value as dictSize, since referential starts from zero */ + U32 loadedDictEnd; /* index of end of dictionary, within context's referential. 
+ * When loadedDictEnd != 0, a dictionary is in use, and still valid. + * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance. + * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity(). + * When dict referential is copied into active context (i.e. not attached), + * loadedDictEnd == dictSize, since referential starts from zero. + */ U32 nextToUpdate; /* index from which to continue table update */ - U32 hashLog3; /* dispatch table : larger == faster, more memory */ + U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */ + + U32 rowHashLog; /* For row-based matchfinder: Hashlog based on nb of rows in the hashTable.*/ + BYTE* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */ + U32 hashCache[ZSTD_ROW_HASH_CACHE_SIZE]; /* For row-based matchFinder: a cache of hashes to improve speed */ + U64 hashSalt; /* For row-based matchFinder: salts the hash for reuse of tag table */ + U32 hashSaltEntropy; /* For row-based matchFinder: collects entropy for salt generation */ + U32* hashTable; U32* hashTable3; U32* chainTable; + + int forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */ + + int dedicatedDictSearch; /* Indicates whether this matchState is using the + * dedicated dictionary search structure. + */ optState_t opt; /* optimal parser state */ - const ZSTD_matchState_t* dictMatchState; + const ZSTD_MatchState_t* dictMatchState; ZSTD_compressionParameters cParams; + const RawSeqStore_t* ldmSeqStore; + + /* Controls prefetching in some dictMatchState matchfinders. + * This behavior is controlled from the cctx ms. + * This parameter has no effect in the cdict ms. */ + int prefetchCDictTables; + + /* When == 0, lazy match finders insert every position. + * When != 0, lazy match finders only insert positions they search. + * This allows them to skip much faster over incompressible data, + * at a small cost to compression ratio. + */ + int lazySkipping; }; typedef struct { ZSTD_compressedBlockState_t* prevCBlock; ZSTD_compressedBlockState_t* nextCBlock; - ZSTD_matchState_t matchState; + ZSTD_MatchState_t matchState; } ZSTD_blockState_t; typedef struct { @@ -156,16 +325,26 @@ typedef struct { U32 checksum; } ldmEntry_t; +typedef struct { + BYTE const* split; + U32 hash; + U32 checksum; + ldmEntry_t* bucket; +} ldmMatchCandidate_t; + +#define LDM_BATCH_SIZE 64 + typedef struct { ZSTD_window_t window; /* State for the window round buffer management */ ldmEntry_t* hashTable; + U32 loadedDictEnd; BYTE* bucketOffsets; /* Next position in bucket to insert entry */ - U64 hashPower; /* Used to compute the rolling hash. - * Depends on ldmParams.minMatchLength */ + size_t splitIndices[LDM_BATCH_SIZE]; + ldmMatchCandidate_t matchCandidates[LDM_BATCH_SIZE]; } ldmState_t; typedef struct { - U32 enableLdm; /* 1 if enable long distance matching */ + ZSTD_ParamSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */ U32 hashLog; /* Log size of hashTable */ U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */ U32 minMatchLength; /* Minimum match length */ @@ -174,17 +353,11 @@ typedef struct { } ldmParams_t; typedef struct { - U32 offset; - U32 litLength; - U32 matchLength; -} rawSeq; - -typedef struct { - rawSeq* seq; /* The start of the sequences */ - size_t pos; /* The position where reading stopped. <= size. */ - size_t size; /* The number of sequences. 
<= capacity. */ - size_t capacity; /* The capacity starting from `seq` pointer */ -} rawSeqStore_t; + int collectSequences; + ZSTD_Sequence* seqStart; + size_t seqIndex; + size_t maxSequences; +} SeqCollector; struct ZSTD_CCtx_params_s { ZSTD_format_e format; @@ -194,9 +367,15 @@ struct ZSTD_CCtx_params_s { int compressionLevel; int forceWindow; /* force back-references to respect limit of * 1< 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase]; } +/* ZSTD_cParam_withinBounds: + * @return 1 if value is within cParam bounds, + * 0 otherwise */ +MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value) +{ + ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); + if (ZSTD_isError(bounds.error)) return 0; + if (value < bounds.lowerBound) return 0; + if (value > bounds.upperBound) return 0; + return 1; +} + +/* ZSTD_selectAddr: + * @return index >= lowLimit ? candidate : backup, + * tries to force branchless codegen. */ +MEM_STATIC const BYTE* +ZSTD_selectAddr(U32 index, U32 lowLimit, const BYTE* candidate, const BYTE* backup) +{ +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ( + "cmp %1, %2\n" + "cmova %3, %0\n" + : "+r"(candidate) + : "r"(index), "r"(lowLimit), "r"(backup) + ); + return candidate; +#else + return index >= lowLimit ? candidate : backup; +#endif +} + +/* ZSTD_noCompressBlock() : + * Writes uncompressed block to dst buffer from given src. + * Returns the size of the block */ +MEM_STATIC size_t +ZSTD_noCompressBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock) +{ + U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3); + DEBUGLOG(5, "ZSTD_noCompressBlock (srcSize=%zu, dstCapacity=%zu)", srcSize, dstCapacity); + RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity, + dstSize_tooSmall, "dst buf too small for uncompressed block"); + MEM_writeLE24(dst, cBlockHeader24); + ZSTD_memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize); + return ZSTD_blockHeaderSize + srcSize; +} + +MEM_STATIC size_t +ZSTD_rleCompressBlock(void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock) +{ + BYTE* const op = (BYTE*)dst; + U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3); + RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, ""); + MEM_writeLE24(op, cBlockHeader); + op[3] = src; + return 4; +} + + +/* ZSTD_minGain() : + * minimum compression required + * to generate a compress block or a compressed literals section. + * note : use same formula for both situations */ +MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat) +{ + U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6; + ZSTD_STATIC_ASSERT(ZSTD_btultra == 8); + assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat)); + return (srcSize >> minlog) + 2; +} + +MEM_STATIC int ZSTD_literalsCompressionIsDisabled(const ZSTD_CCtx_params* cctxParams) +{ + switch (cctxParams->literalCompressionMode) { + case ZSTD_ps_enable: + return 0; + case ZSTD_ps_disable: + return 1; + default: + assert(0 /* impossible: pre-validated */); + ZSTD_FALLTHROUGH; + case ZSTD_ps_auto: + return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0); + } +} + +/*! ZSTD_safecopyLiterals() : + * memcpy() function that won't read beyond more than WILDCOPY_OVERLENGTH bytes past ilimit_w. + * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single + * large copies. 
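+ * (Concretely, as the body below shows: a single ZSTD_wildcopy covers everything up
+ * to ilimit_w, where over-reading up to WILDCOPY_OVERLENGTH bytes is still safe, then
+ * a byte-by-byte loop finishes the tail.)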
+ */ +static void +ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) +{ + assert(iend > ilimit_w); + if (ip <= ilimit_w) { + ZSTD_wildcopy(op, ip, (size_t)(ilimit_w - ip), ZSTD_no_overlap); + op += ilimit_w - ip; + ip = ilimit_w; + } + while (ip < iend) *op++ = *ip++; +} + + +#define REPCODE1_TO_OFFBASE REPCODE_TO_OFFBASE(1) +#define REPCODE2_TO_OFFBASE REPCODE_TO_OFFBASE(2) +#define REPCODE3_TO_OFFBASE REPCODE_TO_OFFBASE(3) +#define REPCODE_TO_OFFBASE(r) (assert((r)>=1), assert((r)<=ZSTD_REP_NUM), (r)) /* accepts IDs 1,2,3 */ +#define OFFSET_TO_OFFBASE(o) (assert((o)>0), o + ZSTD_REP_NUM) +#define OFFBASE_IS_OFFSET(o) ((o) > ZSTD_REP_NUM) +#define OFFBASE_IS_REPCODE(o) ( 1 <= (o) && (o) <= ZSTD_REP_NUM) +#define OFFBASE_TO_OFFSET(o) (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM) +#define OFFBASE_TO_REPCODE(o) (assert(OFFBASE_IS_REPCODE(o)), (o)) /* returns ID 1,2,3 */ + +/*! ZSTD_storeSeqOnly() : + * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t. + * Literals themselves are not copied, but @litPtr is updated. + * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). + * @matchLength : must be >= MINMATCH +*/ +HINT_INLINE UNUSED_ATTR void +ZSTD_storeSeqOnly(SeqStore_t* seqStorePtr, + size_t litLength, + U32 offBase, + size_t matchLength) +{ + assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq); + + /* literal Length */ + assert(litLength <= ZSTD_BLOCKSIZE_MAX); + if (UNLIKELY(litLength>0xFFFF)) { + assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */ + seqStorePtr->longLengthType = ZSTD_llt_literalLength; + seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + } + seqStorePtr->sequences[0].litLength = (U16)litLength; + + /* match offset */ + seqStorePtr->sequences[0].offBase = offBase; + + /* match Length */ + assert(matchLength <= ZSTD_BLOCKSIZE_MAX); + assert(matchLength >= MINMATCH); + { size_t const mlBase = matchLength - MINMATCH; + if (UNLIKELY(mlBase>0xFFFF)) { + assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */ + seqStorePtr->longLengthType = ZSTD_llt_matchLength; + seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + } + seqStorePtr->sequences[0].mlBase = (U16)mlBase; + } + + seqStorePtr->sequences++; +} + /*! ZSTD_storeSeq() : - * Store a sequence (literal length, literals, offset code and match length code) into seqStore_t. - * `offsetCode` : distance to match + 3 (values 1-3 are repCodes). - * `mlBase` : matchLength - MINMATCH + * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t. + * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). + * @matchLength : must be >= MINMATCH + * Allowed to over-read literals up to litLimit. 
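+ * Illustrative calls (local variable names assumed): a repcode-1 match is stored as
+ *   ZSTD_storeSeq(&ss, litLen, lit, litLimit, REPCODE1_TO_OFFBASE, mLen);
+ * and a raw match at distance 100 as
+ *   ZSTD_storeSeq(&ss, litLen, lit, litLimit, OFFSET_TO_OFFBASE(100), mLen);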
*/ -MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t mlBase) +HINT_INLINE UNUSED_ATTR void +ZSTD_storeSeq(SeqStore_t* seqStorePtr, + size_t litLength, const BYTE* literals, const BYTE* litLimit, + U32 offBase, + size_t matchLength) { + BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH; + BYTE const* const litEnd = literals + litLength; #if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6) static const BYTE* g_start = NULL; if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */ { U32 const pos = (U32)((const BYTE*)literals - g_start); - DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u", - pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offsetCode); + DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offBase%7u", + pos, (U32)litLength, (U32)matchLength, (U32)offBase); } #endif assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq); /* copy Literals */ assert(seqStorePtr->maxNbLit <= 128 KB); assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit); - ZSTD_wildcopy(seqStorePtr->lit, literals, litLength); - seqStorePtr->lit += litLength; - - /* literal Length */ - if (litLength>0xFFFF) { - assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */ - seqStorePtr->longLengthID = 1; - seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + assert(literals + litLength <= litLimit); + if (litEnd <= litLimit_w) { + /* Common case we can use wildcopy. + * First copy 16 bytes, because literals are likely short. + */ + ZSTD_STATIC_ASSERT(WILDCOPY_OVERLENGTH >= 16); + ZSTD_copy16(seqStorePtr->lit, literals); + if (litLength > 16) { + ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, litLength-16, ZSTD_no_overlap); + } + } else { + ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w); } - seqStorePtr->sequences[0].litLength = (U16)litLength; + seqStorePtr->lit += litLength; - /* match offset */ - seqStorePtr->sequences[0].offset = offsetCode + 1; + ZSTD_storeSeqOnly(seqStorePtr, litLength, offBase, matchLength); +} - /* match Length */ - if (mlBase>0xFFFF) { - assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */ - seqStorePtr->longLengthID = 2; - seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); +/* ZSTD_updateRep() : + * updates in-place @rep (array of repeat offsets) + * @offBase : sum-type, using numeric representation of ZSTD_storeSeq() + */ +MEM_STATIC void +ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0) +{ + if (OFFBASE_IS_OFFSET(offBase)) { /* full offset */ + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = OFFBASE_TO_OFFSET(offBase); + } else { /* repcode */ + U32 const repCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0; + if (repCode > 0) { /* note : if repCode==0, no change */ + U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; + rep[2] = (repCode >= 2) ? 
rep[1] : rep[2]; + rep[1] = rep[0]; + rep[0] = currentOffset; + } else { /* repCode == 0 */ + /* nothing to do */ + } } - seqStorePtr->sequences[0].matchLength = (U16)mlBase; +} - seqStorePtr->sequences++; +typedef struct repcodes_s { + U32 rep[3]; +} Repcodes_t; + +MEM_STATIC Repcodes_t +ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0) +{ + Repcodes_t newReps; + ZSTD_memcpy(&newReps, rep, sizeof(newReps)); + ZSTD_updateRep(newReps.rep, offBase, ll0); + return newReps; } /*-************************************* * Match length counter ***************************************/ -static unsigned ZSTD_NbCommonBytes (size_t val) -{ - if (MEM_isLittleEndian()) { - if (MEM_64bits()) { -# if defined(_MSC_VER) && defined(_WIN64) - unsigned long r = 0; - _BitScanForward64( &r, (U64)val ); - return (unsigned)(r>>3); -# elif defined(__GNUC__) && (__GNUC__ >= 4) - return (__builtin_ctzll((U64)val) >> 3); -# else - static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, - 0, 3, 1, 3, 1, 4, 2, 7, - 0, 2, 3, 6, 1, 5, 3, 5, - 1, 3, 4, 4, 2, 5, 6, 7, - 7, 0, 1, 2, 3, 3, 4, 6, - 2, 6, 5, 5, 3, 4, 5, 6, - 7, 1, 2, 4, 6, 4, 4, 5, - 7, 2, 6, 5, 7, 6, 7, 7 }; - return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; -# endif - } else { /* 32 bits */ -# if defined(_MSC_VER) - unsigned long r=0; - _BitScanForward( &r, (U32)val ); - return (unsigned)(r>>3); -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (__builtin_ctz((U32)val) >> 3); -# else - static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, - 3, 2, 2, 1, 3, 2, 0, 1, - 3, 3, 1, 2, 2, 2, 2, 0, - 3, 1, 2, 0, 1, 0, 1, 1 }; - return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; -# endif - } - } else { /* Big Endian CPU */ - if (MEM_64bits()) { -# if defined(_MSC_VER) && defined(_WIN64) - unsigned long r = 0; - _BitScanReverse64( &r, val ); - return (unsigned)(r>>3); -# elif defined(__GNUC__) && (__GNUC__ >= 4) - return (__builtin_clzll(val) >> 3); -# else - unsigned r; - const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ - if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } - if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } - r += (!val); - return r; -# endif - } else { /* 32 bits */ -# if defined(_MSC_VER) - unsigned long r = 0; - _BitScanReverse( &r, (unsigned long)val ); - return (unsigned)(r>>3); -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (__builtin_clz((U32)val) >> 3); -# else - unsigned r; - if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } - r += (!val); - return r; -# endif - } } -} - - MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit) { const BYTE* const pStart = pIn; @@ -454,8 +884,8 @@ ZSTD_count_2segments(const BYTE* ip, const BYTE* match, size_t const matchLength = ZSTD_count(ip, match, vEnd); if (match + matchLength != mEnd) return matchLength; DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength); - DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match); - DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip); + DEBUGLOG(7, "distance from match beginning to end dictionary = %i", (int)(mEnd - match)); + DEBUGLOG(7, "distance from current pos to end buffer = %i", (int)(iEnd - ip)); DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart); DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, 
iEnd)); return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd); @@ -466,31 +896,43 @@ ZSTD_count_2segments(const BYTE* ip, const BYTE* match, * Hashes ***************************************/ static const U32 prime3bytes = 506832829U; -static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; } -MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */ +static U32 ZSTD_hash3(U32 u, U32 h, U32 s) { assert(h <= 32); return (((u << (32-24)) * prime3bytes) ^ s) >> (32-h) ; } +MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h, 0); } /* only in zstd_opt.h */ +MEM_STATIC size_t ZSTD_hash3PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash3(MEM_readLE32(ptr), h, s); } static const U32 prime4bytes = 2654435761U; -static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; } -static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); } +static U32 ZSTD_hash4(U32 u, U32 h, U32 s) { assert(h <= 32); return ((u * prime4bytes) ^ s) >> (32-h) ; } +static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_readLE32(ptr), h, 0); } +static size_t ZSTD_hash4PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash4(MEM_readLE32(ptr), h, s); } static const U64 prime5bytes = 889523592379ULL; -static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; } -static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); } +static size_t ZSTD_hash5(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-40)) * prime5bytes) ^ s) >> (64-h)) ; } +static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h, 0); } +static size_t ZSTD_hash5PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash5(MEM_readLE64(p), h, s); } static const U64 prime6bytes = 227718039650203ULL; -static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } -static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); } +static size_t ZSTD_hash6(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-48)) * prime6bytes) ^ s) >> (64-h)) ; } +static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h, 0); } +static size_t ZSTD_hash6PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash6(MEM_readLE64(p), h, s); } static const U64 prime7bytes = 58295818150454627ULL; -static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; } -static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); } +static size_t ZSTD_hash7(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-56)) * prime7bytes) ^ s) >> (64-h)) ; } +static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h, 0); } +static size_t ZSTD_hash7PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash7(MEM_readLE64(p), h, s); } static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; -static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; } -static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); } +static size_t ZSTD_hash8(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u) * prime8bytes) ^ s) >> (64-h)) ; } +static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return 
ZSTD_hash8(MEM_readLE64(p), h, 0); } +static size_t ZSTD_hash8PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash8(MEM_readLE64(p), h, s); } + -MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) +MEM_STATIC FORCE_INLINE_ATTR +size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) { + /* Although some of these hashes do support hBits up to 64, some do not. + * To be on the safe side, always avoid hBits > 32. */ + assert(hBits <= 32); + switch(mls) { default: @@ -502,6 +944,24 @@ MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) } } +MEM_STATIC FORCE_INLINE_ATTR +size_t ZSTD_hashPtrSalted(const void* p, U32 hBits, U32 mls, const U64 hashSalt) { + /* Although some of these hashes do support hBits up to 64, some do not. + * To be on the safe side, always avoid hBits > 32. */ + assert(hBits <= 32); + + switch(mls) + { + default: + case 4: return ZSTD_hash4PtrS(p, hBits, (U32)hashSalt); + case 5: return ZSTD_hash5PtrS(p, hBits, hashSalt); + case 6: return ZSTD_hash6PtrS(p, hBits, hashSalt); + case 7: return ZSTD_hash7PtrS(p, hBits, hashSalt); + case 8: return ZSTD_hash8PtrS(p, hBits, hashSalt); + } +} + + /** ZSTD_ipow() : * Return base^exponent. */ @@ -563,8 +1023,12 @@ MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 /*-************************************* * Round buffer management ***************************************/ -/* Max current allowed */ -#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX)) +/* Max @current value allowed: + * In 32-bit mode: we want to avoid crossing the 2 GB limit, + * reducing risks of side effects in case of signed operations on indexes. + * In 64-bit mode: we want to ensure that adding the maximum job size (512 MB) + * doesn't overflow U32 index capacity (4 GB) */ +#define ZSTD_CURRENT_MAX (MEM_64bits() ? 3500U MB : 2000U MB) /* Maximum chunk size before overflow correction needs to be called again */ #define ZSTD_CHUNKSIZE_MAX \ ( ((U32)-1) /* Maximum ending current index */ \ @@ -583,6 +1047,13 @@ MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window) window->dictLimit = end; } +MEM_STATIC U32 ZSTD_window_isEmpty(ZSTD_window_t const window) +{ + return window.dictLimit == ZSTD_WINDOW_START_INDEX && + window.lowLimit == ZSTD_WINDOW_START_INDEX && + (window.nextSrc - window.base) == ZSTD_WINDOW_START_INDEX; +} + /** * ZSTD_window_hasExtDict(): * Returns non-zero if the window has a non-empty extDict. @@ -597,25 +1068,81 @@ MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window) * Inspects the provided matchState and figures out what dictMode should be * passed to the compressor. */ -MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms) +MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_MatchState_t *ms) { return ZSTD_window_hasExtDict(ms->window) ? ZSTD_extDict : ms->dictMatchState != NULL ? - ZSTD_dictMatchState : + (ms->dictMatchState->dedicatedDictSearch ? ZSTD_dedicatedDictSearch : ZSTD_dictMatchState) : ZSTD_noDict; } +/* Defining this macro to non-zero tells zstd to run the overflow correction + * code much more frequently. This is very inefficient, and should only be + * used for tests and fuzzers. 
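+ * (For instance, a fuzzer build may define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY=1
+ * on the compile line; the #ifndef below also enables it automatically when
+ * FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION is defined.)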
+ */ +#ifndef ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY +# ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION +# define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 1 +# else +# define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 0 +# endif +#endif + +/** + * ZSTD_window_canOverflowCorrect(): + * Returns non-zero if the indices are large enough for overflow correction + * to work correctly without impacting compression ratio. + */ +MEM_STATIC U32 ZSTD_window_canOverflowCorrect(ZSTD_window_t const window, + U32 cycleLog, + U32 maxDist, + U32 loadedDictEnd, + void const* src) +{ + U32 const cycleSize = 1u << cycleLog; + U32 const curr = (U32)((BYTE const*)src - window.base); + U32 const minIndexToOverflowCorrect = cycleSize + + MAX(maxDist, cycleSize) + + ZSTD_WINDOW_START_INDEX; + + /* Adjust the min index to backoff the overflow correction frequency, + * so we don't waste too much CPU in overflow correction. If this + * computation overflows we don't really care, we just need to make + * sure it is at least minIndexToOverflowCorrect. + */ + U32 const adjustment = window.nbOverflowCorrections + 1; + U32 const adjustedIndex = MAX(minIndexToOverflowCorrect * adjustment, + minIndexToOverflowCorrect); + U32 const indexLargeEnough = curr > adjustedIndex; + + /* Only overflow correct early if the dictionary is invalidated already, + * so we don't hurt compression ratio. + */ + U32 const dictionaryInvalidated = curr > maxDist + loadedDictEnd; + + return indexLargeEnough && dictionaryInvalidated; +} + /** * ZSTD_window_needOverflowCorrection(): * Returns non-zero if the indices are getting too large and need overflow * protection. */ MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window, + U32 cycleLog, + U32 maxDist, + U32 loadedDictEnd, + void const* src, void const* srcEnd) { - U32 const current = (U32)((BYTE const*)srcEnd - window.base); - return current > ZSTD_CURRENT_MAX; + U32 const curr = (U32)((BYTE const*)srcEnd - window.base); + if (ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) { + if (ZSTD_window_canOverflowCorrect(window, cycleLog, maxDist, loadedDictEnd, src)) { + return 1; + } + } + return curr > ZSTD_CURRENT_MAX; } /** @@ -626,9 +1153,10 @@ MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window, * * The least significant cycleLog bits of the indices must remain the same, * which may be 0. Every index up to maxDist in the past must be valid. - * NOTE: (maxDist & cycleMask) must be zero. */ -MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog, +MEM_STATIC +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog, U32 maxDist, void const* src) { /* preemptive overflow correction: @@ -650,19 +1178,51 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog, * 3. (cctx->lowLimit + 1< 3<<29 + 1<base); - U32 const newCurrent = (current & cycleMask) + maxDist; - U32 const correction = current - newCurrent; - assert((maxDist & cycleMask) == 0); - assert(current > newCurrent); - /* Loose bound, should be around 1<<29 (see above) */ - assert(correction > 1<<28); + U32 const cycleSize = 1u << cycleLog; + U32 const cycleMask = cycleSize - 1; + U32 const curr = (U32)((BYTE const*)src - window->base); + U32 const currentCycle = curr & cycleMask; + /* Ensure newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX. */ + U32 const currentCycleCorrection = currentCycle < ZSTD_WINDOW_START_INDEX + ? 
MAX(cycleSize, ZSTD_WINDOW_START_INDEX) + : 0; + U32 const newCurrent = currentCycle + + currentCycleCorrection + + MAX(maxDist, cycleSize); + U32 const correction = curr - newCurrent; + /* maxDist must be a power of two so that: + * (newCurrent & cycleMask) == (curr & cycleMask) + * This is required to not corrupt the chains / binary tree. + */ + assert((maxDist & (maxDist - 1)) == 0); + assert((curr & cycleMask) == (newCurrent & cycleMask)); + assert(curr > newCurrent); + if (!ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) { + /* Loose bound, should be around 1<<29 (see above) */ + assert(correction > 1<<28); + } window->base += correction; window->dictBase += correction; - window->lowLimit -= correction; - window->dictLimit -= correction; + if (window->lowLimit < correction + ZSTD_WINDOW_START_INDEX) { + window->lowLimit = ZSTD_WINDOW_START_INDEX; + } else { + window->lowLimit -= correction; + } + if (window->dictLimit < correction + ZSTD_WINDOW_START_INDEX) { + window->dictLimit = ZSTD_WINDOW_START_INDEX; + } else { + window->dictLimit -= correction; + } + + /* Ensure we can still reference the full window. */ + assert(newCurrent >= maxDist); + assert(newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX); + /* Ensure that lowLimit and dictLimit didn't underflow. */ + assert(window->lowLimit <= newCurrent); + assert(window->dictLimit <= newCurrent); + + ++window->nbOverflowCorrections; DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction, window->lowLimit); @@ -697,7 +1257,7 @@ ZSTD_window_enforceMaxDist(ZSTD_window_t* window, const void* blockEnd, U32 maxDist, U32* loadedDictEndPtr, - const ZSTD_matchState_t** dictMatchStatePtr) + const ZSTD_MatchState_t** dictMatchStatePtr) { U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base); U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0; @@ -733,24 +1293,53 @@ ZSTD_window_enforceMaxDist(ZSTD_window_t* window, /* Similar to ZSTD_window_enforceMaxDist(), * but only invalidates dictionary - * when input progresses beyond window size. */ + * when input progresses beyond window size. + * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL) + * loadedDictEnd uses same referential as window->base + * maxDist is the window size */ MEM_STATIC void -ZSTD_checkDictValidity(ZSTD_window_t* window, +ZSTD_checkDictValidity(const ZSTD_window_t* window, const void* blockEnd, U32 maxDist, U32* loadedDictEndPtr, - const ZSTD_matchState_t** dictMatchStatePtr) + const ZSTD_MatchState_t** dictMatchStatePtr) { - U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base); - U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0; - DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u", - (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd); + assert(loadedDictEndPtr != NULL); + assert(dictMatchStatePtr != NULL); + { U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base); + U32 const loadedDictEnd = *loadedDictEndPtr; + DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u", + (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd); + assert(blockEndIdx >= loadedDictEnd); + + if (blockEndIdx > loadedDictEnd + maxDist || loadedDictEnd != window->dictLimit) { + /* On reaching window size, dictionaries are invalidated. + * For simplification, if window size is reached anywhere within next block, + * the dictionary is invalidated for the full block. 
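+ * Worked example (illustrative values): with maxDist == 1<<20 and loadedDictEnd == 1000,
+ * the dictionary stays usable until blockEndIdx exceeds 1000 + (1<<20); beyond that point
+ * no dictionary byte can lie within the window, so it is dropped for the whole block.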
+ * + * We also have to invalidate the dictionary if ZSTD_window_update() has detected + * non-contiguous segments, which means that loadedDictEnd != window->dictLimit. + * loadedDictEnd may be 0, if forceWindow is true, but in that case we never use + * dictMatchState, so setting it to NULL is not a problem. + */ + DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)"); + *loadedDictEndPtr = 0; + *dictMatchStatePtr = NULL; + } else { + if (*loadedDictEndPtr != 0) { + DEBUGLOG(6, "dictionary considered valid for current block"); + } } } +} - if (loadedDictEnd && (blockEndIdx > maxDist + loadedDictEnd)) { - /* On reaching window size, dictionaries are invalidated */ - if (loadedDictEndPtr) *loadedDictEndPtr = 0; - if (dictMatchStatePtr) *dictMatchStatePtr = NULL; - } +MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) { + ZSTD_memset(window, 0, sizeof(*window)); + window->base = (BYTE const*)" "; + window->dictBase = (BYTE const*)" "; + ZSTD_STATIC_ASSERT(ZSTD_DUBT_UNSORTED_MARK < ZSTD_WINDOW_START_INDEX); /* Start above ZSTD_DUBT_UNSORTED_MARK */ + window->dictLimit = ZSTD_WINDOW_START_INDEX; /* start from >0, so that 1st position is valid */ + window->lowLimit = ZSTD_WINDOW_START_INDEX; /* it ensures first and later CCtx usages compress the same */ + window->nextSrc = window->base + ZSTD_WINDOW_START_INDEX; /* see issue #1241 */ + window->nbOverflowCorrections = 0; } /** @@ -760,14 +1349,21 @@ ZSTD_checkDictValidity(ZSTD_window_t* window, * forget about the extDict. Handles overlap of the prefix and extDict. * Returns non-zero if the segment is contiguous. */ -MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window, - void const* src, size_t srcSize) +MEM_STATIC +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_window_update(ZSTD_window_t* window, + const void* src, size_t srcSize, + int forceNonContiguous) { BYTE const* const ip = (BYTE const*)src; U32 contiguous = 1; DEBUGLOG(5, "ZSTD_window_update"); + if (srcSize == 0) + return contiguous; + assert(window->base != NULL); + assert(window->dictBase != NULL); /* Check if blocks follow each other */ - if (src != window->nextSrc) { + if (src != window->nextSrc || forceNonContiguous) { /* not contiguous */ size_t const distanceFromBase = (size_t)(window->nextSrc - window->base); DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit); @@ -776,7 +1372,7 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window, window->dictLimit = (U32)distanceFromBase; window->dictBase = window->base; window->base = ip - distanceFromBase; - // ms->nextToUpdate = window->dictLimit; + /* ms->nextToUpdate = window->dictLimit; */ if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit; /* too small extDict */ contiguous = 0; } @@ -784,14 +1380,56 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window, /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */ if ( (ip+srcSize > window->dictBase + window->lowLimit) & (ip < window->dictBase + window->dictLimit)) { - ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase; - U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx; + size_t const highInputIdx = (size_t)((ip + srcSize) - window->dictBase); + U32 const lowLimitMax = (highInputIdx > (size_t)window->dictLimit) ? 
window->dictLimit : (U32)highInputIdx; + assert(highInputIdx < UINT_MAX); window->lowLimit = lowLimitMax; DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit); } return contiguous; } +/** + * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix. + */ +MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_MatchState_t* ms, U32 curr, unsigned windowLog) +{ + U32 const maxDistance = 1U << windowLog; + U32 const lowestValid = ms->window.lowLimit; + U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid; + U32 const isDictionary = (ms->loadedDictEnd != 0); + /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary + * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't + * valid for the entire block. So this check is sufficient to find the lowest valid match index. + */ + U32 const matchLowest = isDictionary ? lowestValid : withinWindow; + return matchLowest; +} + +/** + * Returns the lowest allowed match index in the prefix. + */ +MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_MatchState_t* ms, U32 curr, unsigned windowLog) +{ + U32 const maxDistance = 1U << windowLog; + U32 const lowestValid = ms->window.dictLimit; + U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid; + U32 const isDictionary = (ms->loadedDictEnd != 0); + /* When computing the lowest prefix index we need to take the dictionary into account to handle + * the edge case where the dictionary and the source are contiguous in memory. + */ + U32 const matchLowest = isDictionary ? lowestValid : withinWindow; + return matchLowest; +} + +/* index_safety_check: + * intentional underflow : ensure repIndex isn't overlapping dict + prefix + * @return 1 if values are not overlapping, + * 0 otherwise */ +MEM_STATIC int ZSTD_index_overlap_check(const U32 prefixLowestIndex, const U32 repIndex) { + return ((U32)((prefixLowestIndex-1) - repIndex) >= 3); +} + /* debug functions */ #if (DEBUGLEVEL>=2) @@ -824,11 +1462,76 @@ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max) #endif +/* Short Cache */ -#if defined (__cplusplus) +/* Normally, zstd matchfinders follow this flow: + * 1. Compute hash at ip + * 2. Load index from hashTable[hash] + * 3. Check if *ip == *(base + index) + * In dictionary compression, loading *(base + index) is often an L2 or even L3 miss. + * + * Short cache is an optimization which allows us to avoid step 3 most of the time + * when the data doesn't actually match. With short cache, the flow becomes: + * 1. Compute (hash, currentTag) at ip. currentTag is an 8-bit independent hash at ip. + * 2. Load (index, matchTag) from hashTable[hash]. See ZSTD_writeTaggedIndex to understand how this works. + * 3. Only if currentTag == matchTag, check *ip == *(base + index). Otherwise, continue. + * + * Currently, short cache is only implemented in CDict hashtables. Thus, its use is limited to + * dictMatchState matchfinders. + */ +#define ZSTD_SHORT_CACHE_TAG_BITS 8 +#define ZSTD_SHORT_CACHE_TAG_MASK ((1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1) + +/* Helper function for ZSTD_fillHashTable and ZSTD_fillDoubleHashTable. + * Unpacks hashAndTag into (hash, tag), then packs (index, tag) into hashTable[hash]. 
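ZSTD_writeTaggedIndex() below implements the packing half of this scheme. A self-contained sketch of the pack-then-probe flow (TAG_BITS mirrors ZSTD_SHORT_CACHE_TAG_BITS; the one-slot table and all values are illustrative, not zstd's):

```c
#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#define TAG_BITS 8
#define TAG_MASK ((1u << TAG_BITS) - 1)

int main(void)
{
    uint32_t table[1] = { 0 };                 /* stand-in for hashTable[hash] */
    size_t   const hashAndTag = ((size_t)0x1234 << TAG_BITS) | 0xAB;  /* hash | 8-bit tag */
    uint32_t const index = 0x00654321u;        /* must fit in 32 - TAG_BITS bits */

    /* pack: entry = (index << 8) | tag */
    assert((index >> (32 - TAG_BITS)) == 0);
    table[0] = (index << TAG_BITS) | (uint32_t)(hashAndTag & TAG_MASK);

    /* probe: reject on the cheap 8-bit tag before touching the candidate */
    { uint32_t const currentTag = 0xAB;
      if ((table[0] & TAG_MASK) == currentTag) {
          uint32_t const matchIndex = table[0] >> TAG_BITS;
          assert(matchIndex == index);         /* only now would *(base+index) be read */
      }
    }
    return 0;
}
```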
*/ +MEM_STATIC void ZSTD_writeTaggedIndex(U32* const hashTable, size_t hashAndTag, U32 index) { + size_t const hash = hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS; + U32 const tag = (U32)(hashAndTag & ZSTD_SHORT_CACHE_TAG_MASK); + assert(index >> (32 - ZSTD_SHORT_CACHE_TAG_BITS) == 0); + hashTable[hash] = (index << ZSTD_SHORT_CACHE_TAG_BITS) | tag; } -#endif +/* Helper function for short cache matchfinders. + * Unpacks tag1 and tag2 from lower bits of packedTag1 and packedTag2, then checks if the tags match. */ +MEM_STATIC int ZSTD_comparePackedTags(size_t packedTag1, size_t packedTag2) { + U32 const tag1 = packedTag1 & ZSTD_SHORT_CACHE_TAG_MASK; + U32 const tag2 = packedTag2 & ZSTD_SHORT_CACHE_TAG_MASK; + return tag1 == tag2; +} + +/* =============================================================== + * Shared internal declarations + * These prototypes may be called from sources not in lib/compress + * =============================================================== */ + +/* ZSTD_loadCEntropy() : + * dict : must point at beginning of a valid zstd dictionary. + * return : size of dictionary header (size of magic number + dict ID + entropy tables) + * assumptions : magic number supposed already checked + * and dictSize >= 8 */ +size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, + const void* const dict, size_t dictSize); + +void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs); + +typedef struct { + U32 idx; /* Index in array of ZSTD_Sequence */ + U32 posInSequence; /* Position within sequence at idx */ + size_t posInSrc; /* Number of bytes given by sequences provided so far */ +} ZSTD_SequencePosition; + +/* for benchmark */ +size_t ZSTD_convertBlockSequences(ZSTD_CCtx* cctx, + const ZSTD_Sequence* const inSeqs, size_t nbSequences, + int repcodeResolution); + +typedef struct { + size_t nbSequences; + size_t blockSize; + size_t litSize; +} BlockSummary; + +BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs); /* ============================================================== * Private declarations @@ -838,9 +1541,10 @@ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max) /* ZSTD_getCParamsFromCCtxParams() : * cParams are built depending on compressionLevel, src size hints, * LDM and manually set compression parameters. + * Note: srcSizeHint == 0 means 0! */ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( - const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize); + const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode); /*! ZSTD_initCStream_internal() : * Private use only. Init streaming operation. @@ -850,9 +1554,9 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, const void* dict, size_t dictSize, const ZSTD_CDict* cdict, - ZSTD_CCtx_params params, unsigned long long pledgedSrcSize); + const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize); -void ZSTD_resetSeqStore(seqStore_t* ssPtr); +void ZSTD_resetSeqStore(SeqStore_t* ssPtr); /*! 
ZSTD_getCParamsFromCDict() : * as the name implies */ @@ -865,7 +1569,7 @@ size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, const ZSTD_CDict* cdict, - ZSTD_CCtx_params params, + const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize); /* ZSTD_compress_advanced_internal() : @@ -874,7 +1578,7 @@ size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize, - ZSTD_CCtx_params params); + const ZSTD_CCtx_params* params); /* ZSTD_writeLastEmptyBlock() : @@ -891,11 +1595,42 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity); * This cannot be used when long range matching is enabled. * Zstd will use these sequences, and pass the literals to a secondary block * compressor. - * @return : An error code on failure. * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory * access and data corruption. */ -size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq); +void ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq); + +/** ZSTD_cycleLog() : + * condition for correct operation : hashLog > 1 */ +U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat); + +/** ZSTD_CCtx_trace() : + * Trace the end of a compression call. + */ +void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize); + +/* Returns 1 if an external sequence producer is registered, otherwise returns 0. */ +MEM_STATIC int ZSTD_hasExtSeqProd(const ZSTD_CCtx_params* params) { + return params->extSeqProdFunc != NULL; +} + +/* =============================================================== + * Deprecated definitions that are still used internally to avoid + * deprecation warnings. These functions are exactly equivalent to + * their public variants, but avoid the deprecation warnings. + * =============================================================== */ + +size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); + +size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize); + +size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize); + +size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); #endif /* ZSTD_COMPRESS_H */ diff --git a/lib/compress/zstd_compress_literals.c b/lib/compress/zstd_compress_literals.c new file mode 100644 index 00000000000..06036de5da5 --- /dev/null +++ b/lib/compress/zstd_compress_literals.c @@ -0,0 +1,235 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */
+
+ /*-*************************************
+ *  Dependencies
+ ***************************************/
+#include "zstd_compress_literals.h"
+
+
+/* **************************************************************
+*  Debug Traces
+****************************************************************/
+#if DEBUGLEVEL >= 2
+
+static size_t showHexa(const void* src, size_t srcSize)
+{
+    const BYTE* const ip = (const BYTE*)src;
+    size_t u;
+    for (u=0; u<srcSize; u++) {
+        RAWLOG(5, " %02X", ip[u]); (void)ip;
+    }
+    RAWLOG(5, " \n");
+    return srcSize;
+}
+
+#endif
+
+
+/* **************************************************************
+*  Literals compression - special cases
+****************************************************************/
+size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    BYTE* const ostart = (BYTE*)dst;
+    U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+    DEBUGLOG(5, "ZSTD_noCompressLiterals: srcSize=%zu, dstCapacity=%zu", srcSize, dstCapacity);
+
+    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
+
+    switch(flSize)
+    {
+        case 1: /* 2 - 1 - 5 */
+            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
+            break;
+        case 2: /* 2 - 2 - 12 */
+            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
+            break;
+        case 3: /* 2 - 2 - 20 */
+            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
+            break;
+        default: /* not necessary : flSize is {1,2,3} */
+            assert(0);
+    }
+
+    ZSTD_memcpy(ostart + flSize, src, srcSize);
+    DEBUGLOG(5, "Raw (uncompressed) literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
+    return srcSize + flSize;
+}
+
+static int allBytesIdentical(const void* src, size_t srcSize)
+{
+    assert(srcSize >= 1);
+    assert(src != NULL);
+    { const BYTE b = ((const BYTE*)src)[0];
+      size_t p;
+      for (p=1; p<srcSize; p++) {
+          if (((const BYTE*)src)[p] != b) return 0;
+      }
+      return 1;
+    }
+}
+
+size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    BYTE* const ostart = (BYTE*)dst;
+    U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+    assert(dstCapacity >= 4); (void)dstCapacity;
+    assert(allBytesIdentical(src, srcSize));
+
+    switch(flSize)
+    {
+        case 1: /* 2 - 1 - 5 */
+            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
+            break;
+        case 2: /* 2 - 2 - 12 */
+            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
+            break;
+        case 3: /* 2 - 2 - 20 */
+            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
+            break;
+        default: /* not necessary : flSize is {1,2,3} */
+            assert(0);
+    }
+
+    ostart[flSize] = *(const BYTE*)src;
+    DEBUGLOG(5, "RLE : Repeated Literal (%02X: %u times) -> %u bytes encoded", ((const BYTE*)src)[0], (U32)srcSize, (U32)flSize + 1);
+    return flSize+1;
+}
+
+/* ZSTD_minLiteralsToCompress() :
+ * returns minimal amount of literals
+ * for literal compression to even be attempted.
+ * Minimum is made tighter as compression strategy increases.
+ */
+static size_t
+ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat)
+{
+    assert((int)strategy >= 0);
+    assert((int)strategy <= 9);
+    /* btultra2 : min 8 bytes;
+     * then 2x larger for each successive compression strategy
+     * max threshold 64 bytes */
+    { int const shift = MIN(9-(int)strategy, 3);
+      size_t const mintc = (huf_repeat == HUF_repeat_valid) ?
6 : (size_t)8 << shift; + DEBUGLOG(7, "minLiteralsToCompress = %zu", mintc); + return mintc; + } +} + +size_t ZSTD_compressLiterals ( + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + void* entropyWorkspace, size_t entropyWorkspaceSize, + const ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_strategy strategy, + int disableLiteralCompression, + int suspectUncompressible, + int bmi2) +{ + size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); + BYTE* const ostart = (BYTE*)dst; + U32 singleStream = srcSize < 256; + SymbolEncodingType_e hType = set_compressed; + size_t cLitSize; + + DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i, srcSize=%u, dstCapacity=%zu)", + disableLiteralCompression, (U32)srcSize, dstCapacity); + + DEBUGLOG(6, "Completed literals listing (%zu bytes)", showHexa(src, srcSize)); + + /* Prepare nextEntropy assuming reusing the existing table */ + ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + + if (disableLiteralCompression) + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + + /* if too small, don't even attempt compression (speed opt) */ + if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode)) + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + + RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression"); + { HUF_repeat repeat = prevHuf->repeatMode; + int const flags = 0 + | (bmi2 ? HUF_flags_bmi2 : 0) + | (strategy < ZSTD_lazy && srcSize <= 1024 ? HUF_flags_preferRepeat : 0) + | (strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD ? HUF_flags_optimalDepth : 0) + | (suspectUncompressible ? HUF_flags_suspectUncompressible : 0); + + typedef size_t (*huf_compress_f)(void*, size_t, const void*, size_t, unsigned, unsigned, void*, size_t, HUF_CElt*, HUF_repeat*, int); + huf_compress_f huf_compress; + if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1; + huf_compress = singleStream ? HUF_compress1X_repeat : HUF_compress4X_repeat; + cLitSize = huf_compress(ostart+lhSize, dstCapacity-lhSize, + src, srcSize, + HUF_SYMBOLVALUE_MAX, LitHufLog, + entropyWorkspace, entropyWorkspaceSize, + (HUF_CElt*)nextHuf->CTable, + &repeat, flags); + DEBUGLOG(5, "%zu literals compressed into %zu bytes (before header)", srcSize, cLitSize); + if (repeat != HUF_repeat_none) { + /* reused the existing table */ + DEBUGLOG(5, "reusing statistics from previous huffman block"); + hType = set_repeat; + } + } + + { size_t const minGain = ZSTD_minGain(srcSize, strategy); + if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) { + ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + } } + if (cLitSize==1) { + /* A return value of 1 signals that the alphabet consists of a single symbol. + * However, in some rare circumstances, it could be the compressed size (a single byte). + * For that outcome to have a chance to happen, it's necessary that `srcSize < 8`. + * (it's also necessary to not generate statistics). + * Therefore, in such a case, actively check that all bytes are identical. 
*/ + if ((srcSize >= 8) || allBytesIdentical(src, srcSize)) { + ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); + } } + + if (hType == set_compressed) { + /* using a newly constructed table */ + nextHuf->repeatMode = HUF_repeat_check; + } + + /* Build header */ + switch(lhSize) + { + case 3: /* 2 - 2 - 10 - 10 */ + if (!singleStream) assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS); + { U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); + MEM_writeLE24(ostart, lhc); + break; + } + case 4: /* 2 - 2 - 14 - 14 */ + assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS); + { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); + MEM_writeLE32(ostart, lhc); + break; + } + case 5: /* 2 - 2 - 18 - 18 */ + assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS); + { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); + MEM_writeLE32(ostart, lhc); + ostart[4] = (BYTE)(cLitSize >> 10); + break; + } + default: /* not possible : lhSize is {3,4,5} */ + assert(0); + } + DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize)); + return lhSize+cLitSize; +} diff --git a/lib/compress/zstd_compress_literals.h b/lib/compress/zstd_compress_literals.h new file mode 100644 index 00000000000..b060c8ad218 --- /dev/null +++ b/lib/compress/zstd_compress_literals.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_COMPRESS_LITERALS_H +#define ZSTD_COMPRESS_LITERALS_H + +#include "zstd_compress_internal.h" /* ZSTD_hufCTables_t, ZSTD_minGain() */ + + +size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize); + +/* ZSTD_compressRleLiteralsBlock() : + * Conditions : + * - All bytes in @src are identical + * - dstCapacity >= 4 */ +size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize); + +/* ZSTD_compressLiterals(): + * @entropyWorkspace: must be aligned on 4-bytes boundaries + * @entropyWorkspaceSize : must be >= HUF_WORKSPACE_SIZE + * @suspectUncompressible: sampling checks, to potentially skip huffman coding + */ +size_t ZSTD_compressLiterals (void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + void* entropyWorkspace, size_t entropyWorkspaceSize, + const ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_strategy strategy, int disableLiteralCompression, + int suspectUncompressible, + int bmi2); + +#endif /* ZSTD_COMPRESS_LITERALS_H */ diff --git a/lib/compress/zstd_compress_sequences.c b/lib/compress/zstd_compress_sequences.c new file mode 100644 index 00000000000..7beb9daa603 --- /dev/null +++ b/lib/compress/zstd_compress_sequences.c @@ -0,0 +1,442 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
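The literal-header switch above packs block type, size format, regenerated size, and compressed size into 3-5 little-endian bytes. A round-trip sketch of the 3-byte (2-2-10-10) format only, with arbitrary example sizes (the other formats differ only in field widths):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t const hType = 2;        /* set_compressed */
    uint32_t const singleStream = 0; /* 0 => 4 streams, size format 1 */
    uint32_t const srcSize  = 1000;  /* regenerated size, must fit 10 bits */
    uint32_t const cLitSize = 700;   /* compressed size, must fit 10 bits */
    uint32_t const lhc = hType + ((uint32_t)(!singleStream) << 2)
                       + (srcSize << 4) + (cLitSize << 14);

    uint8_t header[3];               /* MEM_writeLE24 equivalent */
    header[0] = (uint8_t) lhc;
    header[1] = (uint8_t)(lhc >> 8);
    header[2] = (uint8_t)(lhc >> 16);

    /* decoder side: reassemble and split the fields again */
    { uint32_t const v = (uint32_t)header[0]
                       | ((uint32_t)header[1] << 8)
                       | ((uint32_t)header[2] << 16);
      assert((v & 3) == hType);
      assert(((v >> 2) & 3) == 1);            /* size format 1: 10+10-bit sizes */
      assert(((v >> 4) & 0x3FF) == srcSize);
      assert((v >> 14) == cLitSize);
    }
    return 0;
}
```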
+ */ + + /*-************************************* + * Dependencies + ***************************************/ +#include "zstd_compress_sequences.h" + +/** + * -log2(x / 256) lookup table for x in [0, 256). + * If x == 0: Return 0 + * Else: Return floor(-log2(x / 256) * 256) + */ +static unsigned const kInverseProbabilityLog256[256] = { + 0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162, + 1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889, + 874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734, + 724, 714, 704, 694, 685, 676, 667, 658, 650, 642, 633, 626, + 618, 610, 603, 595, 588, 581, 574, 567, 561, 554, 548, 542, + 535, 529, 523, 517, 512, 506, 500, 495, 489, 484, 478, 473, + 468, 463, 458, 453, 448, 443, 438, 434, 429, 424, 420, 415, + 411, 407, 402, 398, 394, 390, 386, 382, 377, 373, 370, 366, + 362, 358, 354, 350, 347, 343, 339, 336, 332, 329, 325, 322, + 318, 315, 311, 308, 305, 302, 298, 295, 292, 289, 286, 282, + 279, 276, 273, 270, 267, 264, 261, 258, 256, 253, 250, 247, + 244, 241, 239, 236, 233, 230, 228, 225, 222, 220, 217, 215, + 212, 209, 207, 204, 202, 199, 197, 194, 192, 190, 187, 185, + 182, 180, 178, 175, 173, 171, 168, 166, 164, 162, 159, 157, + 155, 153, 151, 149, 146, 144, 142, 140, 138, 136, 134, 132, + 130, 128, 126, 123, 121, 119, 117, 115, 114, 112, 110, 108, + 106, 104, 102, 100, 98, 96, 94, 93, 91, 89, 87, 85, + 83, 82, 80, 78, 76, 74, 73, 71, 69, 67, 66, 64, + 62, 61, 59, 57, 55, 54, 52, 50, 49, 47, 46, 44, + 42, 41, 39, 37, 36, 34, 33, 31, 30, 28, 26, 25, + 23, 22, 20, 19, 17, 16, 14, 13, 11, 10, 8, 7, + 5, 4, 2, 1, +}; + +static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) { + void const* ptr = ctable; + U16 const* u16ptr = (U16 const*)ptr; + U32 const maxSymbolValue = MEM_read16(u16ptr + 1); + return maxSymbolValue; +} + +/** + * Returns true if we should use ncount=-1 else we should + * use ncount=1 for low probability symbols instead. + */ +static unsigned ZSTD_useLowProbCount(size_t const nbSeq) +{ + /* Heuristic: This should cover most blocks <= 16K and + * start to fade out after 16K to about 32K depending on + * compressibility. + */ + return nbSeq >= 2048; +} + +/** + * Returns the cost in bytes of encoding the normalized count header. + * Returns an error if any of the helper functions return an error. + */ +static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max, + size_t const nbSeq, unsigned const FSELog) +{ + BYTE wksp[FSE_NCOUNTBOUND]; + S16 norm[MaxSeq + 1]; + const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); + FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), ""); + return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog); +} + +/** + * Returns the cost in bits of encoding the distribution described by count + * using the entropy bound. + */ +static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total) +{ + unsigned cost = 0; + unsigned s; + + assert(total > 0); + for (s = 0; s <= max; ++s) { + unsigned norm = (unsigned)((256 * count[s]) / total); + if (count[s] != 0 && norm == 0) + norm = 1; + assert(count[s] < total); + cost += count[s] * kInverseProbabilityLog256[norm]; + } + return cost >> 8; +} + +/** + * Returns the cost in bits of encoding the distribution in count using ctable. + * Returns an error if ctable cannot represent all the symbols in count. 
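The table prices a normalized probability at floor(-log2(x/256) * 256), i.e. in 1/256-bit units, so a count-weighted sum shifted right by 8 approximates the Shannon cost in whole bits. A toy recomputation of ZSTD_entropyCost()'s arithmetic (entries are rebuilt with libm instead of hard-coded, so link with -lm; the 60/30/10 distribution is made up):

```c
#include <math.h>
#include <stdio.h>

static unsigned invProbLog256(unsigned norm)   /* norm in [1,255] */
{
    return (unsigned)(-log2((double)norm / 256.0) * 256.0);
}

int main(void)
{
    unsigned const count[3] = { 60, 30, 10 };  /* toy histogram, total = 100 */
    unsigned long long cost = 0;
    unsigned s;
    for (s = 0; s < 3; s++) {
        unsigned norm = (256 * count[s]) / 100;
        if (count[s] && norm == 0) norm = 1;   /* never price a live symbol at 0 */
        cost += count[s] * (unsigned long long)invProbLog256(norm);
    }
    /* prints 130; the exact Shannon cost of this distribution is ~129.6 bits */
    printf("estimated cost: %llu bits\n", cost >> 8);
    return 0;
}
```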
+ */ +size_t ZSTD_fseBitCost( + FSE_CTable const* ctable, + unsigned const* count, + unsigned const max) +{ + unsigned const kAccuracyLog = 8; + size_t cost = 0; + unsigned s; + FSE_CState_t cstate; + FSE_initCState(&cstate, ctable); + if (ZSTD_getFSEMaxSymbolValue(ctable) < max) { + DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u", + ZSTD_getFSEMaxSymbolValue(ctable), max); + return ERROR(GENERIC); + } + for (s = 0; s <= max; ++s) { + unsigned const tableLog = cstate.stateLog; + unsigned const badCost = (tableLog + 1) << kAccuracyLog; + unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog); + if (count[s] == 0) + continue; + if (bitCost >= badCost) { + DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s); + return ERROR(GENERIC); + } + cost += (size_t)count[s] * bitCost; + } + return cost >> kAccuracyLog; +} + +/** + * Returns the cost in bits of encoding the distribution in count using the + * table described by norm. The max symbol support by norm is assumed >= max. + * norm must be valid for every symbol with non-zero probability in count. + */ +size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog, + unsigned const* count, unsigned const max) +{ + unsigned const shift = 8 - accuracyLog; + size_t cost = 0; + unsigned s; + assert(accuracyLog <= 8); + for (s = 0; s <= max; ++s) { + unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1; + unsigned const norm256 = normAcc << shift; + assert(norm256 > 0); + assert(norm256 < 256); + cost += count[s] * kInverseProbabilityLog256[norm256]; + } + return cost >> 8; +} + +SymbolEncodingType_e +ZSTD_selectEncodingType( + FSE_repeat* repeatMode, unsigned const* count, unsigned const max, + size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, + FSE_CTable const* prevCTable, + short const* defaultNorm, U32 defaultNormLog, + ZSTD_DefaultPolicy_e const isDefaultAllowed, + ZSTD_strategy const strategy) +{ + ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0); + if (mostFrequent == nbSeq) { + *repeatMode = FSE_repeat_none; + if (isDefaultAllowed && nbSeq <= 2) { + /* Prefer set_basic over set_rle when there are 2 or fewer symbols, + * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol. + * If basic encoding isn't possible, always choose RLE. + */ + DEBUGLOG(5, "Selected set_basic"); + return set_basic; + } + DEBUGLOG(5, "Selected set_rle"); + return set_rle; + } + if (strategy < ZSTD_lazy) { + if (isDefaultAllowed) { + size_t const staticFse_nbSeq_max = 1000; + size_t const mult = 10 - strategy; + size_t const baseLog = 3; + size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog; /* 28-36 for offset, 56-72 for lengths */ + assert(defaultNormLog >= 5 && defaultNormLog <= 6); /* xx_DEFAULTNORMLOG */ + assert(mult <= 9 && mult >= 7); + if ( (*repeatMode == FSE_repeat_valid) + && (nbSeq < staticFse_nbSeq_max) ) { + DEBUGLOG(5, "Selected set_repeat"); + return set_repeat; + } + if ( (nbSeq < dynamicFse_nbSeq_min) + || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) { + DEBUGLOG(5, "Selected set_basic"); + /* The format allows default tables to be repeated, but it isn't useful. + * When using simple heuristics to select encoding type, we don't want + * to confuse these tables with dictionaries. When running more careful + * analysis, we don't need to waste time checking both repeating tables + * and default tables. 
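For the stronger strategies, the tail of ZSTD_selectEncodingType() that follows reduces to a three-way minimum over estimated costs, where a freshly built table must also pay for its serialized NCount header (counted in bytes, hence the <<3). A toy model of just that decision, with made-up cost figures:

```c
#include <stdio.h>

typedef enum { BASIC = 0, REPEAT = 1, COMPRESSED = 2 } Choice;

static Choice pick(size_t basicCost, size_t repeatCost,
                   size_t nCountBytes, size_t entropyBits)
{
    size_t const compressedCost = (nCountBytes << 3) + entropyBits;
    if (basicCost <= repeatCost && basicCost <= compressedCost) return BASIC;
    if (repeatCost <= compressedCost) return REPEAT;
    return COMPRESSED;
}

int main(void)
{
    /* 40-byte header + 4500 bits = 4820 < 4900: building a fresh table pays off */
    printf("%d\n", (int)pick(5200, 4900, 40, 4500));  /* 2 = COMPRESSED */
    /* with a cheaper repeat table (4700 <= 4820), reuse wins instead */
    printf("%d\n", (int)pick(5200, 4700, 40, 4500));  /* 1 = REPEAT */
    return 0;
}
```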
+ */ + *repeatMode = FSE_repeat_none; + return set_basic; + } + } + } else { + size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC); + size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC); + size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog); + size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq); + + if (isDefaultAllowed) { + assert(!ZSTD_isError(basicCost)); + assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost))); + } + assert(!ZSTD_isError(NCountCost)); + assert(compressedCost < ERROR(maxCode)); + DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u", + (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost); + if (basicCost <= repeatCost && basicCost <= compressedCost) { + DEBUGLOG(5, "Selected set_basic"); + assert(isDefaultAllowed); + *repeatMode = FSE_repeat_none; + return set_basic; + } + if (repeatCost <= compressedCost) { + DEBUGLOG(5, "Selected set_repeat"); + assert(!ZSTD_isError(repeatCost)); + return set_repeat; + } + assert(compressedCost < basicCost && compressedCost < repeatCost); + } + DEBUGLOG(5, "Selected set_compressed"); + *repeatMode = FSE_repeat_check; + return set_compressed; +} + +typedef struct { + S16 norm[MaxSeq + 1]; + U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(MaxSeq, MaxFSELog)]; +} ZSTD_BuildCTableWksp; + +size_t +ZSTD_buildCTable(void* dst, size_t dstCapacity, + FSE_CTable* nextCTable, U32 FSELog, SymbolEncodingType_e type, + unsigned* count, U32 max, + const BYTE* codeTable, size_t nbSeq, + const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax, + const FSE_CTable* prevCTable, size_t prevCTableSize, + void* entropyWorkspace, size_t entropyWorkspaceSize) +{ + BYTE* op = (BYTE*)dst; + const BYTE* const oend = op + dstCapacity; + DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity); + + switch (type) { + case set_rle: + FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), ""); + RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space"); + *op = codeTable[0]; + return 1; + case set_repeat: + ZSTD_memcpy(nextCTable, prevCTable, prevCTableSize); + return 0; + case set_basic: + FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), ""); /* note : could be pre-calculated */ + return 0; + case set_compressed: { + ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace; + size_t nbSeq_1 = nbSeq; + const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); + if (count[codeTable[nbSeq-1]] > 1) { + count[codeTable[nbSeq-1]]--; + nbSeq_1--; + } + assert(nbSeq_1 > 1); + assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp)); + (void)entropyWorkspaceSize; + FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "FSE_normalizeCount failed"); + assert(oend >= op); + { size_t const NCountSize = FSE_writeNCount(op, (size_t)(oend - op), wksp->norm, max, tableLog); /* overflow protected */ + FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed"); + FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "FSE_buildCTable_wksp failed"); + return NCountSize; + } + } + default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach"); + } +} + +FORCE_INLINE_TEMPLATE size_t +ZSTD_encodeSequences_body( + 
+        void* dst, size_t dstCapacity,
+        FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+        FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+        FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+        SeqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+    BIT_CStream_t blockStream;
+    FSE_CState_t stateMatchLength;
+    FSE_CState_t stateOffsetBits;
+    FSE_CState_t stateLitLength;
+
+    RETURN_ERROR_IF(
+        ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
+        dstSize_tooSmall, "not enough space remaining");
+    DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)",
+                (int)(blockStream.endPtr - blockStream.startPtr),
+                (unsigned)dstCapacity);
+
+    /* first symbols */
+    FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
+    FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]);
+    FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
+    BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
+    if (MEM_32bits()) BIT_flushBits(&blockStream);
+    BIT_addBits(&blockStream, sequences[nbSeq-1].mlBase, ML_bits[mlCodeTable[nbSeq-1]]);
+    if (MEM_32bits()) BIT_flushBits(&blockStream);
+    if (longOffsets) {
+        U32 const ofBits = ofCodeTable[nbSeq-1];
+        unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+        if (extraBits) {
+            BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, extraBits);
+            BIT_flushBits(&blockStream);
+        }
+        BIT_addBits(&blockStream, sequences[nbSeq-1].offBase >> extraBits,
+                    ofBits - extraBits);
+    } else {
+        BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, ofCodeTable[nbSeq-1]);
+    }
+    BIT_flushBits(&blockStream);
+
+    { size_t n;
+      for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
+          BYTE const llCode = llCodeTable[n];
+          BYTE const ofCode = ofCodeTable[n];
+          BYTE const mlCode = mlCodeTable[n];
+          U32 const llBits = LL_bits[llCode];
+          U32 const ofBits = ofCode;
+          U32 const mlBits = ML_bits[mlCode];
+          DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
+                      (unsigned)sequences[n].litLength,
+                      (unsigned)sequences[n].mlBase + MINMATCH,
+                      (unsigned)sequences[n].offBase);
+                                                                      /* 32b*/  /* 64b*/
+                                                                      /* (7)*/  /* (7)*/
+          FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);   /* 15 */  /* 15 */
+          FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);  /* 24 */  /* 24 */
+          if (MEM_32bits()) BIT_flushBits(&blockStream);              /* (7)*/
+          FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);    /* 16 */  /* 33 */
+          if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
+              BIT_flushBits(&blockStream); /* (7)*/
+          BIT_addBits(&blockStream, sequences[n].litLength, llBits);
+          if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
+          BIT_addBits(&blockStream, sequences[n].mlBase, mlBits);
+          if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
+          if (longOffsets) {
+              unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+              if (extraBits) {
+                  BIT_addBits(&blockStream, sequences[n].offBase, extraBits);
+                  BIT_flushBits(&blockStream); /* (7)*/
+              }
+              BIT_addBits(&blockStream, sequences[n].offBase >> extraBits,
+                          ofBits - extraBits); /* 31 */
+          } else {
+              BIT_addBits(&blockStream, sequences[n].offBase, ofBits); /* 31 */
+          }
+          BIT_flushBits(&blockStream); /* (7)*/
+          DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
+    } }
+
+    DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
+    FSE_flushCState(&blockStream, &stateMatchLength);
+    DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
+    FSE_flushCState(&blockStream, &stateOffsetBits);
+    DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
+    FSE_flushCState(&blockStream, &stateLitLength);
+
+    { size_t const streamSize = BIT_closeCStream(&blockStream);
+      RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
+      return streamSize;
+    }
+}
+
+static size_t
+ZSTD_encodeSequences_default(
+        void* dst, size_t dstCapacity,
+        FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+        FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+        FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+        SeqDef const* sequences, size_t nbSeq, int
longOffsets) +{ + return ZSTD_encodeSequences_body(dst, dstCapacity, + CTable_MatchLength, mlCodeTable, + CTable_OffsetBits, ofCodeTable, + CTable_LitLength, llCodeTable, + sequences, nbSeq, longOffsets); +} + + +#if DYNAMIC_BMI2 + +static BMI2_TARGET_ATTRIBUTE size_t +ZSTD_encodeSequences_bmi2( + void* dst, size_t dstCapacity, + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, + SeqDef const* sequences, size_t nbSeq, int longOffsets) +{ + return ZSTD_encodeSequences_body(dst, dstCapacity, + CTable_MatchLength, mlCodeTable, + CTable_OffsetBits, ofCodeTable, + CTable_LitLength, llCodeTable, + sequences, nbSeq, longOffsets); +} + +#endif + +size_t ZSTD_encodeSequences( + void* dst, size_t dstCapacity, + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, + SeqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2) +{ + DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity); +#if DYNAMIC_BMI2 + if (bmi2) { + return ZSTD_encodeSequences_bmi2(dst, dstCapacity, + CTable_MatchLength, mlCodeTable, + CTable_OffsetBits, ofCodeTable, + CTable_LitLength, llCodeTable, + sequences, nbSeq, longOffsets); + } +#endif + (void)bmi2; + return ZSTD_encodeSequences_default(dst, dstCapacity, + CTable_MatchLength, mlCodeTable, + CTable_OffsetBits, ofCodeTable, + CTable_LitLength, llCodeTable, + sequences, nbSeq, longOffsets); +} diff --git a/lib/compress/zstd_compress_sequences.h b/lib/compress/zstd_compress_sequences.h new file mode 100644 index 00000000000..4be8e91f248 --- /dev/null +++ b/lib/compress/zstd_compress_sequences.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
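The DYNAMIC_BMI2 pair above is a compile-twice/dispatch-at-runtime pattern: one force-inlined body, instantiated once normally and once under a BMI2 target attribute. A sketch of the bare pattern in plain GCC/Clang C (zstd's real macros, CPU detection, and kernel are more involved; the summing body is a stand-in):

```c
#include <stddef.h>

static inline size_t body(const unsigned char* p, size_t n)
{
    size_t acc = 0, i;
    for (i = 0; i < n; i++) acc += p[i];   /* stand-in for the real kernel */
    return acc;
}

static size_t impl_default(const unsigned char* p, size_t n) { return body(p, n); }

#if defined(__GNUC__) && defined(__x86_64__)
__attribute__((target("bmi2")))            /* same body, BMI2 codegen allowed */
static size_t impl_bmi2(const unsigned char* p, size_t n) { return body(p, n); }
#endif

size_t dispatch(const unsigned char* p, size_t n, int hasBmi2)
{
#if defined(__GNUC__) && defined(__x86_64__)
    if (hasBmi2) return impl_bmi2(p, n);
#endif
    (void)hasBmi2;
    return impl_default(p, n);
}
```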
+ */ + +#ifndef ZSTD_COMPRESS_SEQUENCES_H +#define ZSTD_COMPRESS_SEQUENCES_H + +#include "zstd_compress_internal.h" /* SeqDef */ +#include "../common/fse.h" /* FSE_repeat, FSE_CTable */ +#include "../common/zstd_internal.h" /* SymbolEncodingType_e, ZSTD_strategy */ + +typedef enum { + ZSTD_defaultDisallowed = 0, + ZSTD_defaultAllowed = 1 +} ZSTD_DefaultPolicy_e; + +SymbolEncodingType_e +ZSTD_selectEncodingType( + FSE_repeat* repeatMode, unsigned const* count, unsigned const max, + size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, + FSE_CTable const* prevCTable, + short const* defaultNorm, U32 defaultNormLog, + ZSTD_DefaultPolicy_e const isDefaultAllowed, + ZSTD_strategy const strategy); + +size_t +ZSTD_buildCTable(void* dst, size_t dstCapacity, + FSE_CTable* nextCTable, U32 FSELog, SymbolEncodingType_e type, + unsigned* count, U32 max, + const BYTE* codeTable, size_t nbSeq, + const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax, + const FSE_CTable* prevCTable, size_t prevCTableSize, + void* entropyWorkspace, size_t entropyWorkspaceSize); + +size_t ZSTD_encodeSequences( + void* dst, size_t dstCapacity, + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, + SeqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2); + +size_t ZSTD_fseBitCost( + FSE_CTable const* ctable, + unsigned const* count, + unsigned const max); + +size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog, + unsigned const* count, unsigned const max); +#endif /* ZSTD_COMPRESS_SEQUENCES_H */ diff --git a/lib/compress/zstd_compress_superblock.c b/lib/compress/zstd_compress_superblock.c new file mode 100644 index 00000000000..6f57345be62 --- /dev/null +++ b/lib/compress/zstd_compress_superblock.c @@ -0,0 +1,688 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + /*-************************************* + * Dependencies + ***************************************/ +#include "zstd_compress_superblock.h" + +#include "../common/zstd_internal.h" /* ZSTD_getSequenceLength */ +#include "hist.h" /* HIST_countFast_wksp */ +#include "zstd_compress_internal.h" /* ZSTD_[huf|fse|entropy]CTablesMetadata_t */ +#include "zstd_compress_sequences.h" +#include "zstd_compress_literals.h" + +/** ZSTD_compressSubBlock_literal() : + * Compresses literals section for a sub-block. + * When we have to write the Huffman table we will sometimes choose a header + * size larger than necessary. This is because we have to pick the header size + * before we know the table size + compressed size, so we have a bound on the + * table size. If we guessed incorrectly, we fall back to uncompressed literals. + * + * We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded + * in writing the header, otherwise it is set to 0. + * + * hufMetadata->hType has literals block type info. + * If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block. + * If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block. 
+ *  If it is set_compressed, the first sub-block's literals section will be Compressed_Literals_Block,
+ *  and the following sub-blocks' literals sections will be Treeless_Literals_Block.
+ *  If it is set_repeat, all sub-blocks' literals sections will be Treeless_Literals_Block.
+ *  @return : compressed size of literals section of a sub-block
+ *            Or 0 if unable to compress.
+ *            Or error code */
+static size_t
+ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
+                              const ZSTD_hufCTablesMetadata_t* hufMetadata,
+                              const BYTE* literals, size_t litSize,
+                              void* dst, size_t dstSize,
+                              const int bmi2, int writeEntropy, int* entropyWritten)
+{
+    size_t const header = writeEntropy ? 200 : 0;
+    size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* const oend = ostart + dstSize;
+    BYTE* op = ostart + lhSize;
+    U32 const singleStream = lhSize == 3;
+    SymbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
+    size_t cLitSize = 0;
+
+    DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);
+
+    *entropyWritten = 0;
+    if (litSize == 0 || hufMetadata->hType == set_basic) {
+        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
+        return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
+    } else if (hufMetadata->hType == set_rle) {
+        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
+        return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
+    }
+
+    assert(litSize > 0);
+    assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);
+
+    if (writeEntropy && hufMetadata->hType == set_compressed) {
+        ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
+        op += hufMetadata->hufDesSize;
+        cLitSize += hufMetadata->hufDesSize;
+        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
+    }
+
+    { int const flags = bmi2 ? HUF_flags_bmi2 : 0;
+      const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, (size_t)(oend-op), literals, litSize, hufTable, flags)
+                                        : HUF_compress4X_usingCTable(op, (size_t)(oend-op), literals, litSize, hufTable, flags);
+      op += cSize;
+      cLitSize += cSize;
+      if (cSize == 0 || ERR_isError(cSize)) {
+          DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
+          return 0;
+      }
+      /* If we expand and we aren't writing a header then emit uncompressed */
+      if (!writeEntropy && cLitSize >= litSize) {
+          DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
+          return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
+      }
+      /* If we are writing headers then allow expansion that doesn't change our header size. */
+      if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
+          assert(cLitSize > litSize);
+          DEBUGLOG(5, "Literals expanded beyond allowed header size");
+          return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
+      }
+      DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
+    }
+
+    /* Build header */
+    switch(lhSize)
+    {
+        case 3: /* 2 - 2 - 10 - 10 */
+            { U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
+              MEM_writeLE24(ostart, lhc);
+              break;
+            }
+        case 4: /* 2 - 2 - 14 - 14 */
+            { U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
+              MEM_writeLE32(ostart, lhc);
+              break;
+            }
+        case 5: /* 2 - 2 - 18 - 18 */
+            { U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
+              MEM_writeLE32(ostart, lhc);
+              ostart[4] = (BYTE)(cLitSize >> 10);
+              break;
+            }
+        default: /* not possible : lhSize is {3,4,5} */
+            assert(0);
+    }
+    *entropyWritten = 1;
+    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
+    return (size_t)(op-ostart);
+}
+
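The lhSize computation at the top of the function above deserves a closer look: when entropy tables must be written, their worst-case size (the 200-byte constant) is charged against the thresholds that decide whether a smaller header format still fits. A standalone sketch with the KB thresholds hard-coded:

```c
#include <assert.h>
#include <stddef.h>

static size_t litHeaderSize(size_t litSize, int writeEntropy)
{
    size_t const header = writeEntropy ? 200 : 0;
    return 3 + (litSize >= (1024 - header)) + (litSize >= (16384 - header));
}

int main(void)
{
    assert(litHeaderSize(900, 0) == 3);    /* under 1 KB: 10-bit size fields */
    assert(litHeaderSize(900, 1) == 4);    /* 900 >= 1024-200: bumped to 14-bit */
    assert(litHeaderSize(20000, 0) == 5);  /* large block: 18-bit fields */
    return 0;
}
```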
+static size_t
+ZSTD_seqDecompressedSize(SeqStore_t const* seqStore,
+                         const SeqDef* sequences, size_t nbSeqs,
+                         size_t litSize, int lastSubBlock)
+{
+    size_t matchLengthSum = 0;
+    size_t litLengthSum = 0;
+    size_t n;
+    for (n=0; n<nbSeqs; n++) {
+        const ZSTD_SequenceLength seqLen = ZSTD_getSequenceLength(seqStore, sequences+n);
+        litLengthSum += seqLen.litLength;
+        matchLengthSum += seqLen.matchLength;
+    }
+    DEBUGLOG(5, "ZSTD_seqDecompressedSize: %u sequences from %p: %u literals + %u matchlength",
+                (unsigned)nbSeqs, (const void*)sequences,
+                (unsigned)litLengthSum, (unsigned)matchLengthSum);
+    if (!lastSubBlock)
+        assert(litSize == litLengthSum);
+    else
+        assert(litSize >= litLengthSum);
+    return matchLengthSum + litSize;
+}
+
+/** ZSTD_compressSubBlock_sequences() :
+ *  Compresses sequences section for a sub-block.
+ *  fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
+ *  symbol compression modes for the super-block.
+ *  The first successfully compressed block will have these in its header.
+ *  We set entropyWritten=1 when we succeed in compressing the sequences.
+ *  The following sub-blocks will always have repeat mode.
+ *  @return : compressed size of sequences section of a sub-block
+ *            Or 0 if it is unable to compress
+ *            Or error code. */
+static size_t
+ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
+                                const ZSTD_fseCTablesMetadata_t* fseMetadata,
+                                const SeqDef* sequences, size_t nbSeq,
+                                const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
+                                const ZSTD_CCtx_params* cctxParams,
+                                void* dst, size_t dstCapacity,
+                                const int bmi2, int writeEntropy, int* entropyWritten)
+{
+    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* const oend = ostart + dstCapacity;
+    BYTE* op = ostart;
+    BYTE* seqHead;
+
+    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);
+
+    *entropyWritten = 0;
+    /* Sequences Header */
+    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
+                    dstSize_tooSmall, "");
+    if (nbSeq < 128)
+        *op++ = (BYTE)nbSeq;
+    else if (nbSeq < LONGNBSEQ)
+        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
+    else
+        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
+    if (nbSeq==0) {
+        return (size_t)(op - ostart);
+    }
+
+    /* seqHead : flags for FSE encoding type */
+    seqHead = op++;
+
+    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));
+
+    if (writeEntropy) {
+        const U32 LLtype = fseMetadata->llType;
+        const U32 Offtype = fseMetadata->ofType;
+        const U32 MLtype = fseMetadata->mlType;
+        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
+        *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
+        ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
+        op += fseMetadata->fseTablesSize;
+    } else {
+        const U32 repeat = set_repeat;
+        *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
+    }
+
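The sequence-count header just written is variable-length. A round-trippable sketch of the same encoding rule as a standalone helper (LONGNBSEQ redefined locally; bounds checks omitted):

```c
#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#define LONGNBSEQ 0x7F00

static size_t writeNbSeq(uint8_t* op, size_t nbSeq)
{
    if (nbSeq < 128) {                       /* 1 byte, high bit clear */
        op[0] = (uint8_t)nbSeq;
        return 1;
    }
    if (nbSeq < LONGNBSEQ) {                 /* 2 bytes, high bit set */
        op[0] = (uint8_t)((nbSeq >> 8) + 0x80);
        op[1] = (uint8_t)nbSeq;
        return 2;
    }
    op[0] = 0xFF;                            /* 3 bytes: 0xFF + LE16 remainder */
    op[1] = (uint8_t)((nbSeq - LONGNBSEQ) & 0xFF);
    op[2] = (uint8_t)((nbSeq - LONGNBSEQ) >> 8);
    return 3;
}

int main(void)
{
    uint8_t buf[3];
    assert(writeNbSeq(buf, 100)   == 1 && buf[0] == 100);
    assert(writeNbSeq(buf, 1000)  == 2 && buf[0] == 0x83 && buf[1] == (1000 & 0xFF));
    assert(writeNbSeq(buf, 40000) == 3 && buf[0] == 0xFF);
    return 0;
}
```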
{ size_t const bitstreamSize = ZSTD_encodeSequences( + op, (size_t)(oend - op), + fseTables->matchlengthCTable, mlCode, + fseTables->offcodeCTable, ofCode, + fseTables->litlengthCTable, llCode, + sequences, nbSeq, + longOffsets, bmi2); + FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed"); + op += bitstreamSize; + /* zstd versions <= 1.3.4 mistakenly report corruption when + * FSE_readNCount() receives a buffer < 4 bytes. + * Fixed by https://github.com/facebook/zstd/pull/1146. + * This can happen when the last set_compressed table present is 2 + * bytes and the bitstream is only one byte. + * In this exceedingly rare case, we will simply emit an uncompressed + * block, since it isn't worth optimizing. + */ +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) { + /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */ + assert(fseMetadata->lastCountSize + bitstreamSize == 3); + DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by " + "emitting an uncompressed block."); + return 0; + } +#endif + DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize); + } + + /* zstd versions <= 1.4.0 mistakenly report error when + * sequences section body size is less than 3 bytes. + * Fixed by https://github.com/facebook/zstd/pull/1664. + * This can happen when the previous sequences section block is compressed + * with rle mode and the current block's sequences section is compressed + * with repeat mode where sequences section body size can be 1 byte. + */ +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + if (op-seqHead < 4) { + DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting " + "an uncompressed block when sequences are < 4 bytes"); + return 0; + } +#endif + + *entropyWritten = 1; + return (size_t)(op - ostart); +} + +/** ZSTD_compressSubBlock() : + * Compresses a single sub-block. + * @return : compressed size of the sub-block + * Or 0 if it failed to compress. 
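ZSTD_compressSubBlock() below finishes each sub-block by prepending the standard 3-byte zstd block header. A sketch of that bit layout (sizes are arbitrary; bt_compressed == 2 per the format):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t const lastBlock     = 1;       /* bit 0 */
    uint32_t const bt_compressed = 2;       /* bits 1-2: block type */
    uint32_t const cSize         = 12345;   /* bits 3-23: on-wire size, < 1<<21 */
    uint32_t const header24 = lastBlock + (bt_compressed << 1) + (cSize << 3);

    assert((header24 & 1) == lastBlock);
    assert(((header24 >> 1) & 3) == bt_compressed);
    assert((header24 >> 3) == cSize);       /* written LE via MEM_writeLE24 */
    return 0;
}
```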
*/ +static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy, + const ZSTD_entropyCTablesMetadata_t* entropyMetadata, + const SeqDef* sequences, size_t nbSeq, + const BYTE* literals, size_t litSize, + const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode, + const ZSTD_CCtx_params* cctxParams, + void* dst, size_t dstCapacity, + const int bmi2, + int writeLitEntropy, int writeSeqEntropy, + int* litEntropyWritten, int* seqEntropyWritten, + U32 lastBlock) +{ + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = ostart + dstCapacity; + BYTE* op = ostart + ZSTD_blockHeaderSize; + DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)", + litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock); + { size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable, + &entropyMetadata->hufMetadata, literals, litSize, + op, (size_t)(oend-op), + bmi2, writeLitEntropy, litEntropyWritten); + FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed"); + if (cLitSize == 0) return 0; + op += cLitSize; + } + { size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse, + &entropyMetadata->fseMetadata, + sequences, nbSeq, + llCode, mlCode, ofCode, + cctxParams, + op, (size_t)(oend-op), + bmi2, writeSeqEntropy, seqEntropyWritten); + FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed"); + if (cSeqSize == 0) return 0; + op += cSeqSize; + } + /* Write block header */ + { size_t cSize = (size_t)(op-ostart) - ZSTD_blockHeaderSize; + U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); + MEM_writeLE24(ostart, cBlockHeader24); + } + return (size_t)(op-ostart); +} + +static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize, + const ZSTD_hufCTables_t* huf, + const ZSTD_hufCTablesMetadata_t* hufMetadata, + void* workspace, size_t wkspSize, + int writeEntropy) +{ + unsigned* const countWksp = (unsigned*)workspace; + unsigned maxSymbolValue = 255; + size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */ + + if (hufMetadata->hType == set_basic) return litSize; + else if (hufMetadata->hType == set_rle) return 1; + else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) { + size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize); + if (ZSTD_isError(largest)) return litSize; + { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue); + if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize; + return cLitSizeEstimate + literalSectionHeaderSize; + } } + assert(0); /* impossible */ + return 0; +} + +static size_t ZSTD_estimateSubBlockSize_symbolType(SymbolEncodingType_e type, + const BYTE* codeTable, unsigned maxCode, + size_t nbSeq, const FSE_CTable* fseCTable, + const U8* additionalBits, + short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, + void* workspace, size_t wkspSize) +{ + unsigned* const countWksp = (unsigned*)workspace; + const BYTE* ctp = codeTable; + const BYTE* const ctStart = ctp; + const BYTE* const ctEnd = ctStart + nbSeq; + size_t cSymbolTypeSizeEstimateInBits = 0; + unsigned max = maxCode; + + HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */ + if (type == set_basic) { + /* We selected this encoding type, so it must be valid. 
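The per-symbol-type estimator that follows prices the FSE-coded part with a cost model, then walks the code table adding each code's raw extra bits (offset codes are their own extra-bit counts, hence the NULL additionalBits). A toy version with a flat stand-in for the FSE cost:

```c
#include <stdio.h>
#include <stddef.h>

static size_t estimateBytes(const unsigned char* codes, size_t n,
                            const unsigned char* additionalBits,
                            unsigned fseBitsPerCode)  /* stand-in for ZSTD_fseBitCost() */
{
    size_t bits = (size_t)fseBitsPerCode * n;
    size_t i;
    for (i = 0; i < n; i++)
        bits += additionalBits ? additionalBits[codes[i]] : codes[i];
    return bits / 8;
}

int main(void)
{
    unsigned char const ofCodes[4] = { 5, 7, 3, 10 };  /* offset code == nb extra bits */
    /* (4*4 + 5+7+3+10) bits = 41 bits -> 5 bytes */
    printf("~%zu bytes\n", estimateBytes(ofCodes, 4, NULL, 4));
    return 0;
}
```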
 */
+        assert(max <= defaultMax);
+        cSymbolTypeSizeEstimateInBits = max <= defaultMax
+                ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
+                : ERROR(GENERIC);
+    } else if (type == set_rle) {
+        cSymbolTypeSizeEstimateInBits = 0;
+    } else if (type == set_compressed || type == set_repeat) {
+        cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
+    }
+    if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
+    while (ctp < ctEnd) {
+        if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
+        else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
+        ctp++;
+    }
+    return cSymbolTypeSizeEstimateInBits / 8;
+}
+
+static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
+                                                  const BYTE* llCodeTable,
+                                                  const BYTE* mlCodeTable,
+                                                  size_t nbSeq,
+                                                  const ZSTD_fseCTables_t* fseTables,
+                                                  const ZSTD_fseCTablesMetadata_t* fseMetadata,
+                                                  void* workspace, size_t wkspSize,
+                                                  int writeEntropy)
+{
+    size_t const sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
+    size_t cSeqSizeEstimate = 0;
+    if (nbSeq == 0) return sequencesSectionHeaderSize;
+    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
+                                         nbSeq, fseTables->offcodeCTable, NULL,
+                                         OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+                                         workspace, wkspSize);
+    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
+                                         nbSeq, fseTables->litlengthCTable, LL_bits,
+                                         LL_defaultNorm, LL_defaultNormLog, MaxLL,
+                                         workspace, wkspSize);
+    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
+                                         nbSeq, fseTables->matchlengthCTable, ML_bits,
+                                         ML_defaultNorm, ML_defaultNormLog, MaxML,
+                                         workspace, wkspSize);
+    if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
+    return cSeqSizeEstimate + sequencesSectionHeaderSize;
+}
+
+typedef struct {
+    size_t estLitSize;
+    size_t estBlockSize;
+} EstimatedBlockSize;
+static EstimatedBlockSize ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
+                                                    const BYTE* ofCodeTable,
+                                                    const BYTE* llCodeTable,
+                                                    const BYTE* mlCodeTable,
+                                                    size_t nbSeq,
+                                                    const ZSTD_entropyCTables_t* entropy,
+                                                    const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+                                                    void* workspace, size_t wkspSize,
+                                                    int writeLitEntropy, int writeSeqEntropy)
+{
+    EstimatedBlockSize ebs;
+    ebs.estLitSize = ZSTD_estimateSubBlockSize_literal(literals, litSize,
+                                                       &entropy->huf, &entropyMetadata->hufMetadata,
+                                                       workspace, wkspSize, writeLitEntropy);
+    ebs.estBlockSize = ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
+                                                           nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
+                                                           workspace, wkspSize, writeSeqEntropy);
+    ebs.estBlockSize += ebs.estLitSize + ZSTD_blockHeaderSize;
+    return ebs;
+}
+
+static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
+{
+    if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
+        return 1;
+    if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
+        return 1;
+    if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
+        return 1;
+    return 0;
+}
+
+static size_t countLiterals(SeqStore_t const* seqStore, const SeqDef* sp, size_t seqCount)
+{
+    size_t n, total = 0;
+    assert(sp != NULL);
+    for (n=0; n<seqCount; n++) {
+        total += ZSTD_getSequenceLength(seqStore, sp+n).litLength;
+    }
+    DEBUGLOG(6, "countLiterals for %zu sequences from %p => %zu bytes", seqCount, (const void*)sp, total);
+    return total;
+}
+
+#define BYTESCALE 256
+
+static size_t sizeBlockSequences(const
SeqDef* sp, size_t nbSeqs,
+                                 size_t targetBudget, size_t avgLitCost, size_t avgSeqCost,
+                                 int firstSubBlock)
+{
+    size_t n, budget = 0, inSize=0;
+    /* entropy headers */
+    size_t const headerSize = (size_t)firstSubBlock * 120 * BYTESCALE; /* generous estimate */
+    assert(firstSubBlock==0 || firstSubBlock==1);
+    budget += headerSize;
+
+    /* first sequence => at least one sequence*/
+    budget += sp[0].litLength * avgLitCost + avgSeqCost;
+    if (budget > targetBudget) return 1;
+    inSize = sp[0].litLength + (sp[0].mlBase+MINMATCH);
+
+    /* loop over sequences */
+    for (n=1; n<nbSeqs; n++) {
+        size_t const currentCost = sp[n].litLength * avgLitCost + avgSeqCost;
+        budget += currentCost;
+        inSize += sp[n].litLength + (sp[n].mlBase+MINMATCH);
+        /* stop when sub-block budget is reached */
+        if ( (budget > targetBudget)
+            /* though continue to expand until the sub-block is deemed compressible */
+          && (budget < inSize * BYTESCALE) )
+            break;
+    }
+
+    return n;
+}
+
+/** ZSTD_compressSubBlock_multi() :
+ *  Breaks super-block into multiple sub-blocks and compresses them.
+ *  Entropy will be written into the first block.
+ *  The following blocks use repeat_mode to compress.
+ *  Sub-blocks are all compressed, except the last one when beneficial.
+ *  @return : compressed size of the super block (which features multiple ZSTD blocks)
+ *            or 0 if it failed to compress. */
+static size_t ZSTD_compressSubBlock_multi(const SeqStore_t* seqStorePtr,
+                            const ZSTD_compressedBlockState_t* prevCBlock,
+                            ZSTD_compressedBlockState_t* nextCBlock,
+                            const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+                            const ZSTD_CCtx_params* cctxParams,
+                            void* dst, size_t dstCapacity,
+                            const void* src, size_t srcSize,
+                            const int bmi2, U32 lastBlock,
+                            void* workspace, size_t wkspSize)
+{
+    const SeqDef* const sstart = seqStorePtr->sequencesStart;
+    const SeqDef* const send = seqStorePtr->sequences;
+    const SeqDef* sp = sstart; /* tracks progress within seqStorePtr->sequences */
+    size_t const nbSeqs = (size_t)(send - sstart);
+    const BYTE* const lstart = seqStorePtr->litStart;
+    const BYTE* const lend = seqStorePtr->lit;
+    const BYTE* lp = lstart;
+    size_t const nbLiterals = (size_t)(lend - lstart);
+    BYTE const* ip = (BYTE const*)src;
+    BYTE const* const iend = ip + srcSize;
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* const oend = ostart + dstCapacity;
+    BYTE* op = ostart;
+    const BYTE* llCodePtr = seqStorePtr->llCode;
+    const BYTE* mlCodePtr = seqStorePtr->mlCode;
+    const BYTE* ofCodePtr = seqStorePtr->ofCode;
+    size_t const minTarget = ZSTD_TARGETCBLOCKSIZE_MIN; /* enforce minimum size, to reduce undesirable side effects */
+    size_t const targetCBlockSize = MAX(minTarget, cctxParams->targetCBlockSize);
+    int writeLitEntropy = (entropyMetadata->hufMetadata.hType == set_compressed);
+    int writeSeqEntropy = 1;
+
+    DEBUGLOG(5, "ZSTD_compressSubBlock_multi (srcSize=%u, litSize=%u, nbSeq=%u)",
+               (unsigned)srcSize, (unsigned)(lend-lstart), (unsigned)(send-sstart));
+
+    /* let's start by a general estimation for the full block */
+    if (nbSeqs > 0) {
+        EstimatedBlockSize const ebs =
+                ZSTD_estimateSubBlockSize(lp, nbLiterals,
+                                          ofCodePtr, llCodePtr, mlCodePtr, nbSeqs,
+                                          &nextCBlock->entropy, entropyMetadata,
+                                          workspace, wkspSize,
+                                          writeLitEntropy, writeSeqEntropy);
+        /* quick estimation */
+        size_t const avgLitCost = nbLiterals ?
(ebs.estLitSize * BYTESCALE) / nbLiterals : BYTESCALE; + size_t const avgSeqCost = ((ebs.estBlockSize - ebs.estLitSize) * BYTESCALE) / nbSeqs; + const size_t nbSubBlocks = MAX((ebs.estBlockSize + (targetCBlockSize/2)) / targetCBlockSize, 1); + size_t n, avgBlockBudget, blockBudgetSupp=0; + avgBlockBudget = (ebs.estBlockSize * BYTESCALE) / nbSubBlocks; + DEBUGLOG(5, "estimated fullblock size=%u bytes ; avgLitCost=%.2f ; avgSeqCost=%.2f ; targetCBlockSize=%u, nbSubBlocks=%u ; avgBlockBudget=%.0f bytes", + (unsigned)ebs.estBlockSize, (double)avgLitCost/BYTESCALE, (double)avgSeqCost/BYTESCALE, + (unsigned)targetCBlockSize, (unsigned)nbSubBlocks, (double)avgBlockBudget/BYTESCALE); + /* simplification: if the estimate states that the full superblock doesn't compress, just bail out immediately; + * this will result in the production of a single uncompressed block covering @srcSize. */ + if (ebs.estBlockSize > srcSize) return 0; + + /* compress and write sub-blocks */ + assert(nbSubBlocks>0); + for (n=0; n < nbSubBlocks-1; n++) { + /* determine nb of sequences for current sub-block + nbLiterals from next sequence */ + size_t const seqCount = sizeBlockSequences(sp, (size_t)(send-sp), + avgBlockBudget + blockBudgetSupp, avgLitCost, avgSeqCost, n==0); + /* if reached last sequence : break to last sub-block (simplification) */ + assert(seqCount <= (size_t)(send-sp)); + if (sp + seqCount == send) break; + assert(seqCount > 0); + /* compress sub-block */ + { int litEntropyWritten = 0; + int seqEntropyWritten = 0; + size_t litSize = countLiterals(seqStorePtr, sp, seqCount); + const size_t decompressedSize = + ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 0); + size_t const cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata, + sp, seqCount, + lp, litSize, + llCodePtr, mlCodePtr, ofCodePtr, + cctxParams, + op, (size_t)(oend-op), + bmi2, writeLitEntropy, writeSeqEntropy, + &litEntropyWritten, &seqEntropyWritten, + 0); + FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed"); + + /* check compressibility, update state components */ + if (cSize > 0 && cSize < decompressedSize) { + DEBUGLOG(5, "Committed sub-block compressing %u bytes => %u bytes", + (unsigned)decompressedSize, (unsigned)cSize); + assert(ip + decompressedSize <= iend); + ip += decompressedSize; + lp += litSize; + op += cSize; + llCodePtr += seqCount; + mlCodePtr += seqCount; + ofCodePtr += seqCount; + /* Entropy only needs to be written once */ + if (litEntropyWritten) { + writeLitEntropy = 0; + } + if (seqEntropyWritten) { + writeSeqEntropy = 0; + } + sp += seqCount; + blockBudgetSupp = 0; + } } + /* otherwise : do not compress yet, coalesce current sub-block with following one */ + } + } /* if (nbSeqs > 0) */ + + /* write last block */ + DEBUGLOG(5, "Generate last sub-block: %u sequences remaining", (unsigned)(send - sp)); + { int litEntropyWritten = 0; + int seqEntropyWritten = 0; + size_t litSize = (size_t)(lend - lp); + size_t seqCount = (size_t)(send - sp); + const size_t decompressedSize = + ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 1); + size_t const cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata, + sp, seqCount, + lp, litSize, + llCodePtr, mlCodePtr, ofCodePtr, + cctxParams, + op, (size_t)(oend-op), + bmi2, writeLitEntropy, writeSeqEntropy, + &litEntropyWritten, &seqEntropyWritten, + lastBlock); + FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed"); + + /* update pointers, the nb of literals borrowed from next sequence must be preserved */ + if (cSize > 0 && cSize
< decompressedSize) { + DEBUGLOG(5, "Last sub-block compressed %u bytes => %u bytes", + (unsigned)decompressedSize, (unsigned)cSize); + assert(ip + decompressedSize <= iend); + ip += decompressedSize; + lp += litSize; + op += cSize; + llCodePtr += seqCount; + mlCodePtr += seqCount; + ofCodePtr += seqCount; + /* Entropy only needs to be written once */ + if (litEntropyWritten) { + writeLitEntropy = 0; + } + if (seqEntropyWritten) { + writeSeqEntropy = 0; + } + sp += seqCount; + } + } + + + if (writeLitEntropy) { + DEBUGLOG(5, "Literal entropy tables were never written"); + ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf)); + } + if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) { + /* If we haven't written our entropy tables, then we've violated our contract and + * must emit an uncompressed block. + */ + DEBUGLOG(5, "Sequence entropy tables were never written => cancel, emit an uncompressed block"); + return 0; + } + + if (ip < iend) { + /* some data left : last part of the block sent uncompressed */ + size_t const rSize = (size_t)((iend - ip)); + size_t const cSize = ZSTD_noCompressBlock(op, (size_t)(oend - op), ip, rSize, lastBlock); + DEBUGLOG(5, "Generate last uncompressed sub-block of %u bytes", (unsigned)(rSize)); + FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); + assert(cSize != 0); + op += cSize; + /* We have to regenerate the repcodes because we've skipped some sequences */ + if (sp < send) { + const SeqDef* seq; + Repcodes_t rep; + ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep)); + for (seq = sstart; seq < sp; ++seq) { + ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0); + } + ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep)); + } + } + + DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed all subBlocks: total compressed size = %u", + (unsigned)(op-ostart)); + return (size_t)(op-ostart); +} + +size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + unsigned lastBlock) +{ + ZSTD_entropyCTablesMetadata_t entropyMetadata; + + FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore, + &zc->blockState.prevCBlock->entropy, + &zc->blockState.nextCBlock->entropy, + &zc->appliedParams, + &entropyMetadata, + zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */), ""); + + return ZSTD_compressSubBlock_multi(&zc->seqStore, + zc->blockState.prevCBlock, + zc->blockState.nextCBlock, + &entropyMetadata, + &zc->appliedParams, + dst, dstCapacity, + src, srcSize, + zc->bmi2, lastBlock, + zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */); +} diff --git a/lib/compress/zstd_compress_superblock.h b/lib/compress/zstd_compress_superblock.h new file mode 100644 index 00000000000..8e494f0d5e6 --- /dev/null +++ b/lib/compress/zstd_compress_superblock.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef ZSTD_COMPRESS_ADVANCED_H +#define ZSTD_COMPRESS_ADVANCED_H + +/*-************************************* +* Dependencies +***************************************/ + +#include "../zstd.h" /* ZSTD_CCtx */ + +/*-************************************* +* Target Compressed Block Size +***************************************/ + +/* ZSTD_compressSuperBlock() : + * Used to compress a super block when targetCBlockSize is being used. + * The given block will be compressed into multiple sub-blocks that are around targetCBlockSize. */ +size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + void const* src, size_t srcSize, + unsigned lastBlock); + +#endif /* ZSTD_COMPRESS_ADVANCED_H */ diff --git a/lib/compress/zstd_cwksp.h b/lib/compress/zstd_cwksp.h new file mode 100644 index 00000000000..77518002d00 --- /dev/null +++ b/lib/compress/zstd_cwksp.h @@ -0,0 +1,765 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_CWKSP_H +#define ZSTD_CWKSP_H + +/*-************************************* +* Dependencies +***************************************/ +#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customFree */ +#include "../common/zstd_internal.h" +#include "../common/portability_macros.h" +#include "../common/compiler.h" /* ZSTD_isPower2 */ + +/*-************************************* +* Constants +***************************************/ + +/* Since the workspace is effectively its own little malloc implementation / + * arena, when we run under ASAN, we should similarly insert redzones between + * each internal element of the workspace, so ASAN will catch overruns that + * reach outside an object but that stay inside the workspace. + * + * This defines the size of that redzone. + */ +#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE +#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128 +#endif + + +/* Set our tables and aligned allocations to align on 64 bytes */ +#define ZSTD_CWKSP_ALIGNMENT_BYTES 64 + +/*-************************************* +* Structures +***************************************/ +typedef enum { + ZSTD_cwksp_alloc_objects, + ZSTD_cwksp_alloc_aligned_init_once, + ZSTD_cwksp_alloc_aligned, + ZSTD_cwksp_alloc_buffers +} ZSTD_cwksp_alloc_phase_e; + +/** + * Used to describe whether the workspace is statically allocated (and will not + * necessarily ever be freed), or if it's dynamically allocated and we can + * expect a well-formed caller to free this. + */ +typedef enum { + ZSTD_cwksp_dynamic_alloc, + ZSTD_cwksp_static_alloc +} ZSTD_cwksp_static_alloc_e; + +/** + * Zstd fits all its internal data structures into a single contiguous buffer, + * so that it only needs to perform a single OS allocation (or so that a buffer + * can be provided to it and it can perform no allocations at all). This buffer + * is called the workspace. + * + * Several optimizations complicate that process of allocating memory ranges + * from this workspace for each internal data structure: + * + * - These different internal data structures have different setup requirements: + * + * - The static objects need to be cleared once and can then be trivially + * reused for each compression.
+ * + * - Various buffers don't need to be initialized at all--they are always + * written into before they're read. + * + * - The matchstate tables have a unique requirement that they don't need + * their memory to be totally cleared, but they do need the memory to have + * some bound, i.e., a guarantee that all values in the memory they've been + * allocated are less than some maximum value (which is the starting value + * for the indices that they will then use for compression). When this + * guarantee is provided to them, they can use the memory without any setup + * work. When it can't, they have to clear the area. + * + * - These buffers also have different alignment requirements. + * + * - We would like to reuse the objects in the workspace for multiple + * compressions without having to perform any expensive reallocation or + * reinitialization work. + * + * - We would like to be able to efficiently reuse the workspace across + * multiple compressions **even when the compression parameters change** and + * we need to resize some of the objects (where possible). + * + * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp + * abstraction was created. It works as follows: + * + * Workspace Layout: + * + * [ ... workspace ... ] + * [objects][tables ->] free space [<- buffers][<- aligned][<- init once] + * + * The various objects that live in the workspace are divided into the + * following categories, and are allocated separately: + * + * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict, + * so that literally everything fits in a single buffer. Note: if present, + * this must be the first object in the workspace, since ZSTD_customFree{CCtx, + * CDict}() rely on a pointer comparison to see whether one or two frees are + * required. + * + * - Fixed size objects: these are fixed-size, fixed-count objects that are + * nonetheless "dynamically" allocated in the workspace so that we can + * control how they're initialized separately from the broader ZSTD_CCtx. + * Examples: + * - Entropy Workspace + * - 2 x ZSTD_compressedBlockState_t + * - CDict dictionary contents + * + * - Tables: these are any of several different data structures (hash tables, + * chain tables, binary trees) that all respect a common format: they are + * uint32_t arrays, all of whose values are between 0 and (nextSrc - base). + * Their sizes depend on the cparams. These tables are 64-byte aligned. + * + * - Init once: these buffers need to be initialized at least once before + * use. They should be used when we want to skip memory initialization + * while not triggering memory checkers (like Valgrind) when reading from + * this memory without writing to it first. + * These buffers should be used carefully as they might contain data + * from previous compressions. + * Buffers are aligned to 64 bytes. + * + * - Aligned: these buffers don't require any initialization before they're + * used. The user of the buffer should make sure they write into a buffer + * location before reading from it. + * Buffers are aligned to 64 bytes. + * + * - Buffers: these buffers are used for various purposes that don't require + * any alignment or initialization before they're used. This means they can + * be moved around at no cost for a new compression. + * + * Allocating Memory: + * + * The various types of objects must be allocated in order, so they can be + * correctly packed into the workspace buffer. That order is: + * + * 1. Objects + * 2. Init once / Tables + * 3.
Aligned / Tables + * 4. Buffers / Tables + * + * Attempts to reserve objects of different types out of order will fail. + */ +typedef struct { + void* workspace; + void* workspaceEnd; + + void* objectEnd; + void* tableEnd; + void* tableValidEnd; + void* allocStart; + void* initOnceStart; + + BYTE allocFailed; + int workspaceOversizedDuration; + ZSTD_cwksp_alloc_phase_e phase; + ZSTD_cwksp_static_alloc_e isStatic; +} ZSTD_cwksp; + +/*-************************************* +* Functions +***************************************/ + +MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws); +MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws); + +MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) { + (void)ws; + assert(ws->workspace <= ws->objectEnd); + assert(ws->objectEnd <= ws->tableEnd); + assert(ws->objectEnd <= ws->tableValidEnd); + assert(ws->tableEnd <= ws->allocStart); + assert(ws->tableValidEnd <= ws->allocStart); + assert(ws->allocStart <= ws->workspaceEnd); + assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws)); + assert(ws->workspace <= ws->initOnceStart); +#if ZSTD_MEMORY_SANITIZER + { + intptr_t const offset = __msan_test_shadow(ws->initOnceStart, + (U8*)ZSTD_cwksp_initialAllocStart(ws) - (U8*)ws->initOnceStart); + (void)offset; +#if defined(ZSTD_MSAN_PRINT) + if(offset!=-1) { + __msan_print_shadow((U8*)ws->initOnceStart + offset - 8, 32); + } +#endif + assert(offset==-1); + } +#endif +} + +/** + * Align must be a power of 2. + */ +MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t align) { + size_t const mask = align - 1; + assert(ZSTD_isPower2(align)); + return (size + mask) & ~mask; +} + +/** + * Use this to determine how much space in the workspace we will consume to + * allocate this object. (Normally it should be exactly the size of the object, + * but under special conditions, like ASAN, where we pad each object, it might + * be larger.) + * + * Since tables aren't currently redzoned, you don't need to call through this + * to figure out how much space you need for the matchState tables. Everything + * else is though. + * + * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned64_alloc_size(). + */ +MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) { + if (size == 0) + return 0; +#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) + return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; +#else + return size; +#endif +} + +MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size, size_t alignment) { + return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, alignment)); +} + +/** + * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes. + * Used to determine the number of bytes required for a given "aligned". + */ +MEM_STATIC size_t ZSTD_cwksp_aligned64_alloc_size(size_t size) { + return ZSTD_cwksp_aligned_alloc_size(size, ZSTD_CWKSP_ALIGNMENT_BYTES); +} + +/** + * Returns the amount of additional space the cwksp must allocate + * for internal purposes (currently only alignment). + */ +MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) { + /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES + * bytes to align the beginning of the tables section and the end of the buffers. + */ + size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2; + return slackSpace; +} + + +/** + * Return the number of additional bytes required to align a pointer to the given number of bytes. + * alignBytes must be a power of two.
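+ * + * Illustration (an editorial worked example, not part of the contract): with alignBytes = 64, + * the computation below reduces to two mask operations. For ptr = 0x1010: + * bytes = (64 - (0x1010 & 63)) & 63 = (64 - 16) & 63 = 48, + * and 0x1010 + 48 = 0x1040, the next 64-byte boundary. The outer mask maps the + * already-aligned case (remainder 0) to 0 rather than to alignBytes.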
+ */ +MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) { + size_t const alignBytesMask = alignBytes - 1; + size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask; + assert(ZSTD_isPower2(alignBytes)); + assert(bytes < alignBytes); + return bytes; +} + +/** + * Returns the initial value for allocStart which is used to determine the position from + * which we can allocate from the end of the workspace. + */ +MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) +{ + char* endPtr = (char*)ws->workspaceEnd; + assert(ZSTD_isPower2(ZSTD_CWKSP_ALIGNMENT_BYTES)); + endPtr = endPtr - ((size_t)endPtr % ZSTD_CWKSP_ALIGNMENT_BYTES); + return (void*)endPtr; +} + +/** + * Internal function. Do not use directly. + * Reserves the given number of bytes within the aligned/buffer segment of the wksp, + * which counts from the end of the wksp (as opposed to the object/table segment). + * + * Returns a pointer to the beginning of that space. + */ +MEM_STATIC void* +ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes) +{ + void* const alloc = (BYTE*)ws->allocStart - bytes; + void* const bottom = ws->tableEnd; + DEBUGLOG(5, "cwksp: reserving [0x%p]:%zd bytes; %zd bytes remaining", + alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); + ZSTD_cwksp_assert_internal_consistency(ws); + assert(alloc >= bottom); + if (alloc < bottom) { + DEBUGLOG(4, "cwksp: alloc failed!"); + ws->allocFailed = 1; + return NULL; + } + /* the area is reserved from the end of wksp. + * If it overlaps with tableValidEnd, it voids guarantees on values' range */ + if (alloc < ws->tableValidEnd) { + ws->tableValidEnd = alloc; + } + ws->allocStart = alloc; + return alloc; +} + +/** + * Moves the cwksp to the next phase, and does any necessary allocations. + * cwksp initialization must necessarily go through each phase in order. + * Returns 0 on success, or a zstd error code. + */ +MEM_STATIC size_t +ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) +{ + assert(phase >= ws->phase); + if (phase > ws->phase) { + /* Going from allocating objects to allocating initOnce / tables */ + if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once && + phase >= ZSTD_cwksp_alloc_aligned_init_once) { + ws->tableValidEnd = ws->objectEnd; + ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws); + + { /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */ + void *const alloc = ws->objectEnd; + size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES); + void *const objectEnd = (BYTE *) alloc + bytesToAlign; + DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign); + RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation, + "table phase - alignment initial allocation failed!"); + ws->objectEnd = objectEnd; + ws->tableEnd = objectEnd; /* table area starts being empty */ + if (ws->tableValidEnd < ws->tableEnd) { + ws->tableValidEnd = ws->tableEnd; + } + } + } + ws->phase = phase; + ZSTD_cwksp_assert_internal_consistency(ws); + } + return 0; +} + +/** + * Returns whether this object/buffer/etc was allocated in this workspace. + */ +MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) +{ + return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd); +} + +/** + * Internal function. Do not use directly.
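+ * Under ASAN (and unless ZSTD_ASAN_DONT_POISON_WORKSPACE is defined), each + * reservation is conceptually laid out as (an illustrative sketch only): + * [ 128-byte redzone ][ bytes usable by the caller ][ 128-byte redzone ] + * and the returned pointer points just past the leading redzone, so that + * small overruns on either side land in poisoned memory and are reported.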
+ */ +MEM_STATIC void* +ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) +{ + void* alloc; + if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) { + return NULL; + } + +#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) + /* over-reserve space */ + bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; +#endif + + alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes); + +#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) + /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on + * either side. */ + if (alloc) { + alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE; + if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { + /* We need to keep the redzone poisoned while unpoisoning the bytes that + * are actually allocated. */ + __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE); + } + } +#endif + + return alloc; +} + +/** + * Reserves and returns unaligned memory. + */ +MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) +{ + return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers); +} + +/** + * Reserves and returns memory sized and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). + * This memory has been initialized at least once in the past. + * This doesn't mean it has been initialized this time, and it might contain data from previous + * operations. + * The main usage is for algorithms that might need read access into uninitialized memory. + * The algorithm must maintain safety under these conditions and must make sure it doesn't + * leak any of the past data (directly or in side channels). + */ +MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes) +{ + size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES); + void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once); + assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); + if(ptr && ptr < ws->initOnceStart) { + /* We assume the memory following the current allocation is either: + * 1. Not usable as initOnce memory (end of workspace) + * 2. Another initOnce buffer that has been allocated before (and so was previously memset) + * 3. An ASAN redzone, in which case we don't want to write on it + * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart. + * Note that we assume here that MSAN and ASAN cannot run at the same time. */ + ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes)); + ws->initOnceStart = ptr; + } +#if ZSTD_MEMORY_SANITIZER + assert(__msan_test_shadow(ptr, bytes) == -1); +#endif + return ptr; +} + +/** + * Reserves and returns memory sized and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). + */ +MEM_STATIC void* ZSTD_cwksp_reserve_aligned64(ZSTD_cwksp* ws, size_t bytes) +{ + void* const ptr = ZSTD_cwksp_reserve_internal(ws, + ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES), + ZSTD_cwksp_alloc_aligned); + assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); + return ptr; +} + +/** + * Aligned on 64 bytes. These buffers have the special property that + * their values remain constrained, allowing us to reuse them without + * memset()-ing them.
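+ * + * A minimal usage sketch (hypothetical sizes, for illustration only): + * size_t const hSize = ((size_t)1 << cParams.hashLog) * sizeof(U32); + * U32* const hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize); + * The requested size must be a multiple of sizeof(U32) and is expected to + * already be a multiple of 64 bytes; both properties are asserted below. + * The returned pointer is 64-byte aligned.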
+ */ +MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) +{ + const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once; + void* alloc; + void* end; + void* top; + + /* We can only start allocating tables after we are done reserving space for objects at the + * start of the workspace */ + if(ws->phase < phase) { + if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) { + return NULL; + } + } + alloc = ws->tableEnd; + end = (BYTE *)alloc + bytes; + top = ws->allocStart; + + DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining", + alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); + assert((bytes & (sizeof(U32)-1)) == 0); + ZSTD_cwksp_assert_internal_consistency(ws); + assert(end <= top); + if (end > top) { + DEBUGLOG(4, "cwksp: table alloc failed!"); + ws->allocFailed = 1; + return NULL; + } + ws->tableEnd = end; + +#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) + if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { + __asan_unpoison_memory_region(alloc, bytes); + } +#endif + + assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); + assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); + return alloc; +} + +/** + * Aligned on sizeof(void*). + * Note : should happen only once, at workspace first initialization + */ +MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) +{ + size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*)); + void* alloc = ws->objectEnd; + void* end = (BYTE*)alloc + roundedBytes; + +#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) + /* over-reserve space */ + end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; +#endif + + DEBUGLOG(4, + "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining", + alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes); + assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0); + assert(bytes % ZSTD_ALIGNOF(void*) == 0); + ZSTD_cwksp_assert_internal_consistency(ws); + /* we must be in the first phase, no advance is possible */ + if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) { + DEBUGLOG(3, "cwksp: object alloc failed!"); + ws->allocFailed = 1; + return NULL; + } + ws->objectEnd = end; + ws->tableEnd = end; + ws->tableValidEnd = end; + +#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) + /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on + * either side. */ + alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE; + if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { + __asan_unpoison_memory_region(alloc, bytes); + } +#endif + + return alloc; +} +/** + * Same as ZSTD_cwksp_reserve_object(), but with alignment control. + * Note : should happen only once, at workspace first initialization + */ +MEM_STATIC void* ZSTD_cwksp_reserve_object_aligned(ZSTD_cwksp* ws, size_t byteSize, size_t alignment) +{ + size_t const mask = alignment - 1; + size_t const surplus = (alignment > sizeof(void*)) ?
alignment - sizeof(void*) : 0; + void* const start = ZSTD_cwksp_reserve_object(ws, byteSize + surplus); + if (start == NULL) return NULL; + if (surplus == 0) return start; + assert(ZSTD_isPower2(alignment)); + return (void*)(((size_t)start + surplus) & ~mask); +} + +MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) +{ + DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty"); + +#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) + /* To validate that the table reuse logic is sound, and that we don't + * access table space that we haven't cleaned, we re-"poison" the table + * space every time we mark it dirty. + * Since tableValidEnd space and initOnce space may overlap, we don't poison + * the initOnce portion, as that would break its promise. This means that this + * poisoning check isn't always applied fully. */ + { + size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd; + assert(__msan_test_shadow(ws->objectEnd, size) == -1); + if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) { + __msan_poison(ws->objectEnd, size); + } else { + assert(ws->initOnceStart >= ws->objectEnd); + __msan_poison(ws->objectEnd, (BYTE*)ws->initOnceStart - (BYTE*)ws->objectEnd); + } + } +#endif + + assert(ws->tableValidEnd >= ws->objectEnd); + assert(ws->tableValidEnd <= ws->allocStart); + ws->tableValidEnd = ws->objectEnd; + ZSTD_cwksp_assert_internal_consistency(ws); +} + +MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) { + DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean"); + assert(ws->tableValidEnd >= ws->objectEnd); + assert(ws->tableValidEnd <= ws->allocStart); + if (ws->tableValidEnd < ws->tableEnd) { + ws->tableValidEnd = ws->tableEnd; + } + ZSTD_cwksp_assert_internal_consistency(ws); +} + +/** + * Zero the part of the allocated tables not already marked clean. + */ +MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) { + DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables"); + assert(ws->tableValidEnd >= ws->objectEnd); + assert(ws->tableValidEnd <= ws->allocStart); + if (ws->tableValidEnd < ws->tableEnd) { + ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd)); + } + ZSTD_cwksp_mark_tables_clean(ws); +} + +/** + * Invalidates table allocations. + * All other allocations remain valid. + */ +MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) +{ + DEBUGLOG(4, "cwksp: clearing tables!"); + +#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) + /* We don't do this when the workspace is statically allocated, because + * when that is the case, we have no capability to hook into the end of the + * workspace's lifecycle to unpoison the memory. + */ + if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { + size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd; + __asan_poison_memory_region(ws->objectEnd, size); + } +#endif + + ws->tableEnd = ws->objectEnd; + ZSTD_cwksp_assert_internal_consistency(ws); +} + +/** + * Invalidates all buffer, aligned, and table allocations. + * Object allocations remain valid. + */ +MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) { + DEBUGLOG(4, "cwksp: clearing!"); + +#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) + /* To validate that the context reuse logic is sound, and that we don't + * access stuff that this compression hasn't initialized, we re-"poison" + * the workspace except for the areas in which we expect memory reuse + * without initialization (objects, valid tables area and init once + * memory).
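+ * Concretely, the span re-poisoned below is [tableValidEnd, initOnceStart), + * i.e. everything between the last byte of still-valid table data and the + * first init-once byte, and only when tableValidEnd < initOnceStart; both + * boundaries move as allocations are made.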
*/ + { + if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) { + size_t size = (BYTE*)ws->initOnceStart - (BYTE*)ws->tableValidEnd; + __msan_poison(ws->tableValidEnd, size); + } + } +#endif + +#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) + /* We don't do this when the workspace is statically allocated, because + * when that is the case, we have no capability to hook into the end of the + * workspace's lifecycle to unpoison the memory. + */ + if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { + size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd; + __asan_poison_memory_region(ws->objectEnd, size); + } +#endif + + ws->tableEnd = ws->objectEnd; + ws->allocStart = ZSTD_cwksp_initialAllocStart(ws); + ws->allocFailed = 0; + if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) { + ws->phase = ZSTD_cwksp_alloc_aligned_init_once; + } + ZSTD_cwksp_assert_internal_consistency(ws); +} + +MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) { + return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace); +} + +MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) { + return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace) + + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart); +} + +/** + * The provided workspace takes ownership of the buffer [start, start+size). + * Any existing values in the workspace are ignored (the previously managed + * buffer, if present, must be separately freed). + */ +MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) { + DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size); + assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */ + ws->workspace = start; + ws->workspaceEnd = (BYTE*)start + size; + ws->objectEnd = ws->workspace; + ws->tableValidEnd = ws->objectEnd; + ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws); + ws->phase = ZSTD_cwksp_alloc_objects; + ws->isStatic = isStatic; + ZSTD_cwksp_clear(ws); + ws->workspaceOversizedDuration = 0; + ZSTD_cwksp_assert_internal_consistency(ws); +} + +MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) { + void* workspace = ZSTD_customMalloc(size, customMem); + DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size); + RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!"); + ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc); + return 0; +} + +MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) { + void *ptr = ws->workspace; + DEBUGLOG(4, "cwksp: freeing workspace"); +#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE) + if (ptr != NULL && customMem.customFree != NULL) { + __msan_unpoison(ptr, ZSTD_cwksp_sizeof(ws)); + } +#endif + ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp)); + ZSTD_customFree(ptr, customMem); +} + +/** + * Moves the management of a workspace from one cwksp to another. The src cwksp + * is left in an invalid state (src must be re-init()'ed before it's used again). 
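+ * + * A minimal usage sketch (illustrative only, assuming ws1 currently manages + * a buffer and ws2 is a fresh ZSTD_cwksp): + * ZSTD_cwksp ws2; + * ZSTD_cwksp_move(&ws2, &ws1); (ws2 now owns the buffer) + * (ws1 is zeroed by the move; it must be re-init'ed before any further use)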
+ */ +MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) { + *dst = *src; + ZSTD_memset(src, 0, sizeof(ZSTD_cwksp)); +} + +MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) { + return ws->allocFailed; +} + +/*-************************************* +* Functions Checking Free Space +***************************************/ + +/* ZSTD_cwksp_estimated_space_within_bounds() : + * Returns whether the estimated space needed for a wksp is within an acceptable limit of the + * actual amount of space used. + */ +MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp *const ws, size_t const estimatedSpace) { + /* We have alignment space between objects and tables, and between tables and buffers, so we can have up to twice + * the alignment bytes difference between estimation and actual usage */ + return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) && + ZSTD_cwksp_used(ws) <= estimatedSpace; +} + + +MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) { + return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd); +} + +MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) { + return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace; +} + +MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) { + return ZSTD_cwksp_check_available( + ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR); +} + +MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) { + return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace) + && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION; +} + +MEM_STATIC void ZSTD_cwksp_bump_oversized_duration( + ZSTD_cwksp* ws, size_t additionalNeededSpace) { + if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) { + ws->workspaceOversizedDuration++; + } else { + ws->workspaceOversizedDuration = 0; + } +} + +#endif /* ZSTD_CWKSP_H */ diff --git a/lib/compress/zstd_double_fast.c b/lib/compress/zstd_double_fast.c index f156da9dfaf..1a266e7d955 100644 --- a/lib/compress/zstd_double_fast.c +++ b/lib/compress/zstd_double_fast.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,8 +11,49 @@ #include "zstd_compress_internal.h" #include "zstd_double_fast.h" +#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR -void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_fillDoubleHashTableForCDict(ZSTD_MatchState_t* ms, + void const* end, ZSTD_dictTableLoadMethod_e dtlm) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const hashLarge = ms->hashTable; + U32 const hBitsL = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS; + U32 const mls = cParams->minMatch; + U32* const hashSmall = ms->chainTable; + U32 const hBitsS = cParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS; + const BYTE* const base = ms->window.base; + const BYTE* ip = base + ms->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const U32 fastHashFillStep = 3; + + /* Always insert every fastHashFillStep position into the hash tables. + * Insert the other positions into the large hash table if their entry + * is empty.
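+ * + * Illustration of the fill pattern (an editorial note): with + * fastHashFillStep == 3, positions 0, 3, 6, ... are inserted into both + * tables; the in-between positions are only candidates for the large table, + * and only when their slot is still empty. Under ZSTD_dtlm_fast, the inner + * loop below breaks after i == 0, so just one position per step is inserted.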
+ */ + for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) { + U32 const curr = (U32)(ip - base); + U32 i; + for (i = 0; i < fastHashFillStep; ++i) { + size_t const smHashAndTag = ZSTD_hashPtr(ip + i, hBitsS, mls); + size_t const lgHashAndTag = ZSTD_hashPtr(ip + i, hBitsL, 8); + if (i == 0) { + ZSTD_writeTaggedIndex(hashSmall, smHashAndTag, curr + i); + } + if (i == 0 || hashLarge[lgHashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { + ZSTD_writeTaggedIndex(hashLarge, lgHashAndTag, curr + i); + } + /* Only load extra positions for ZSTD_dtlm_full */ + if (dtlm == ZSTD_dtlm_fast) + break; + } } +} + +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_fillDoubleHashTableForCCtx(ZSTD_MatchState_t* ms, void const* end, ZSTD_dictTableLoadMethod_e dtlm) { const ZSTD_compressionParameters* const cParams = &ms->cParams; @@ -31,27 +72,263 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, * is empty. */ for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) { - U32 const current = (U32)(ip - base); + U32 const curr = (U32)(ip - base); U32 i; for (i = 0; i < fastHashFillStep; ++i) { size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls); size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8); if (i == 0) - hashSmall[smHash] = current + i; + hashSmall[smHash] = curr + i; if (i == 0 || hashLarge[lgHash] == 0) - hashLarge[lgHash] = current + i; + hashLarge[lgHash] = curr + i; /* Only load extra positions for ZSTD_dtlm_full */ if (dtlm == ZSTD_dtlm_fast) break; - } } + } } +} + +void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms, + const void* const end, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp) +{ + if (tfp == ZSTD_tfp_forCDict) { + ZSTD_fillDoubleHashTableForCDict(ms, end, dtlm); + } else { + ZSTD_fillDoubleHashTableForCCtx(ms, end, dtlm); + } } FORCE_INLINE_TEMPLATE -size_t ZSTD_compressBlock_doubleFast_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_compressBlock_doubleFast_noDict_generic( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize, U32 const mls /* template */) +{ + ZSTD_compressionParameters const* cParams = &ms->cParams; + U32* const hashLong = ms->hashTable; + const U32 hBitsL = cParams->hashLog; + U32* const hashSmall = ms->chainTable; + const U32 hBitsS = cParams->chainLog; + const BYTE* const base = ms->window.base; + const BYTE* const istart = (const BYTE*)src; + const BYTE* anchor = istart; + const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); + /* presumes that, if there is a dictionary, it must be using Attach mode */ + const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); + const BYTE* const prefixLowest = base + prefixLowestIndex; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - HASH_READ_SIZE; + U32 offset_1=rep[0], offset_2=rep[1]; + U32 offsetSaved1 = 0, offsetSaved2 = 0; + + size_t mLength; + U32 offset; + U32 curr; + + /* how many positions to search before increasing step size */ + const size_t kStepIncr = 1 << kSearchStrength; + /* the position at which to increment the step size if no match is found */ + const BYTE* nextStep; + size_t step; /* the current step size */ + + size_t hl0; /* the long hash at ip */ + size_t hl1; /* the long hash at ip1 */ + + U32 idxl0; /* the long match index for ip */ + U32 idxl1; /* the long match index for ip1 */ + + const BYTE* matchl0; /* the long match for ip */ + const BYTE* 
matchs0; /* the short match for ip */ + const BYTE* matchl1; /* the long match for ip1 */ + const BYTE* matchs0_safe; /* matchs0 or safe address */ + + const BYTE* ip = istart; /* the current position */ + const BYTE* ip1; /* the next position */ + /* Array of ~random data; it should have a low probability of matching actual data. + * We load from here instead of from the tables when matchl0/matchl1 are + * invalid indices. Used to avoid unpredictable branches. */ + const BYTE dummy[] = {0x12,0x34,0x56,0x78,0x9a,0xbc,0xde,0xf0,0xe2,0xb4}; + + DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic"); + + /* init */ + ip += ((ip - prefixLowest) == 0); + { + U32 const current = (U32)(ip - base); + U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog); + U32 const maxRep = current - windowLow; + if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0; + } + + /* Outer Loop: one iteration per match found and stored */ + while (1) { + step = 1; + nextStep = ip + kStepIncr; + ip1 = ip + step; + + if (ip1 > ilimit) { + goto _cleanup; + } + + hl0 = ZSTD_hashPtr(ip, hBitsL, 8); + idxl0 = hashLong[hl0]; + matchl0 = base + idxl0; + + /* Inner Loop: one iteration per search / position */ + do { + const size_t hs0 = ZSTD_hashPtr(ip, hBitsS, mls); + const U32 idxs0 = hashSmall[hs0]; + curr = (U32)(ip-base); + matchs0 = base + idxs0; + + hashLong[hl0] = hashSmall[hs0] = curr; /* update hash tables */ + + /* check noDict repcode */ + if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { + mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; + ip++; + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength); + goto _match_stored; + } + + hl1 = ZSTD_hashPtr(ip1, hBitsL, 8); + + /* idxl0 > prefixLowestIndex is a (somewhat) unpredictable branch. + * However, the expression below compiles into a conditional move. Since + * a match is unlikely and we only *branch* on idxl0 > prefixLowestIndex + * if there is a match, all branches become predictable. */ + { const BYTE* const matchl0_safe = ZSTD_selectAddr(idxl0, prefixLowestIndex, matchl0, &dummy[0]); + + /* check prefix long match */ + if (MEM_read64(matchl0_safe) == MEM_read64(ip) && matchl0_safe == matchl0) { + mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8; + offset = (U32)(ip-matchl0); + while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */ + goto _match_found; + } } + + idxl1 = hashLong[hl1]; + matchl1 = base + idxl1; + + /* Same optimization as matchl0 above */ + matchs0_safe = ZSTD_selectAddr(idxs0, prefixLowestIndex, matchs0, &dummy[0]); + + /* check prefix short match */ + if(MEM_read32(matchs0_safe) == MEM_read32(ip) && matchs0_safe == matchs0) { + goto _search_next_long; + } + + if (ip1 >= nextStep) { + PREFETCH_L1(ip1 + 64); + PREFETCH_L1(ip1 + 128); + step++; + nextStep += kStepIncr; + } + ip = ip1; + ip1 += step; + + hl0 = hl1; + idxl0 = idxl1; + matchl0 = matchl1; + #if defined(__aarch64__) + PREFETCH_L1(ip+256); + #endif + } while (ip1 <= ilimit); + +_cleanup: + /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0), + * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */ + offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2; + + /* save reps for next block */ + rep[0] = offset_1 ? offset_1 : offsetSaved1; + rep[1] = offset_2 ?
offset_2 : offsetSaved2; + + /* Return the last literals size */ + return (size_t)(iend - anchor); + +_search_next_long: + + /* short match found: let's check for a longer one */ + mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4; + offset = (U32)(ip - matchs0); + + /* check long match at +1 position */ + if ((idxl1 > prefixLowestIndex) && (MEM_read64(matchl1) == MEM_read64(ip1))) { + size_t const l1len = ZSTD_count(ip1+8, matchl1+8, iend) + 8; + if (l1len > mLength) { + /* use the long match instead */ + ip = ip1; + mLength = l1len; + offset = (U32)(ip-matchl1); + matchs0 = matchl1; + } + } + + while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* complete backward */ + + /* fall-through */ + +_match_found: /* requires ip, offset, mLength */ + offset_2 = offset_1; + offset_1 = offset; + + if (step < 4) { + /* It is unsafe to write this value back to the hashtable when ip1 is + * greater than or equal to the new ip we will have after we're done + * processing this match. Rather than perform that test directly + * (ip1 >= ip + mLength), which costs speed in practice, we do a simpler, + * more predictable test. The minimum match length is 4 bytes even when we + * take a short match, so as long as step (the initial distance between ip + * and ip1) is less than 4, we know ip1 < the new ip. */ + hashLong[hl1] = (U32)(ip1 - base); + } + + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); + +_match_stored: + /* match found */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Complementary insertion */ + /* done after iLimit test, as candidates could be > iend-8 */ + { U32 const indexToInsert = curr+2; + hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert; + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); + hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert; + hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base); + } + + /* check immediate repcode */ + while ( (ip <= ilimit) + && ( (offset_2>0) + & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { + /* store sequence */ + size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; + U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */ + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, rLength); + ip += rLength; + anchor = ip; + continue; /* faster when present ... (?) */ + } + } + } +} + + +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize, - U32 const mls /* template */, ZSTD_dictMode_e const dictMode) + U32 const mls /* template */) { ZSTD_compressionParameters const* cParams = &ms->cParams; U32* const hashLong = ms->hashTable; @@ -63,62 +340,45 @@ size_t ZSTD_compressBlock_doubleFast_generic( const BYTE* ip = istart; const BYTE* anchor = istart; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); - const U32 lowestValid = ms->window.dictLimit; - const U32 maxDistance = 1U << cParams->windowLog; - const U32 prefixLowestIndex = (endIndex - lowestValid > maxDistance) ?
endIndex - maxDistance : lowestValid; + /* presumes that, if there is a dictionary, it must be using Attach mode */ + const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); const BYTE* const prefixLowest = base + prefixLowestIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; - U32 offsetSaved = 0; - - const ZSTD_matchState_t* const dms = ms->dictMatchState; - const ZSTD_compressionParameters* const dictCParams = - dictMode == ZSTD_dictMatchState ? - &dms->cParams : NULL; - const U32* const dictHashLong = dictMode == ZSTD_dictMatchState ? - dms->hashTable : NULL; - const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ? - dms->chainTable : NULL; - const U32 dictStartIndex = dictMode == ZSTD_dictMatchState ? - dms->window.dictLimit : 0; - const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ? - dms->window.base : NULL; - const BYTE* const dictStart = dictMode == ZSTD_dictMatchState ? - dictBase + dictStartIndex : NULL; - const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ? - dms->window.nextSrc : NULL; - const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ? - prefixLowestIndex - (U32)(dictEnd - dictBase) : - 0; - const U32 dictHBitsL = dictMode == ZSTD_dictMatchState ? - dictCParams->hashLog : hBitsL; - const U32 dictHBitsS = dictMode == ZSTD_dictMatchState ? - dictCParams->chainLog : hBitsS; - const U32 dictAndPrefixLength = (U32)(ip - prefixLowest + dictEnd - dictStart); - - DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic"); - - assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState); + + const ZSTD_MatchState_t* const dms = ms->dictMatchState; + const ZSTD_compressionParameters* const dictCParams = &dms->cParams; + const U32* const dictHashLong = dms->hashTable; + const U32* const dictHashSmall = dms->chainTable; + const U32 dictStartIndex = dms->window.dictLimit; + const BYTE* const dictBase = dms->window.base; + const BYTE* const dictStart = dictBase + dictStartIndex; + const BYTE* const dictEnd = dms->window.nextSrc; + const U32 dictIndexDelta = prefixLowestIndex - (U32)(dictEnd - dictBase); + const U32 dictHBitsL = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS; + const U32 dictHBitsS = dictCParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS; + const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart)); + + DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic"); /* if a dictionary is attached, it must be within window range */ - if (dictMode == ZSTD_dictMatchState) { - assert(lowestValid + maxDistance >= endIndex); + assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex); + + if (ms->prefetchCDictTables) { + size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32); + size_t const chainTableBytes = (((size_t)1) << dictCParams->chainLog) * sizeof(U32); + PREFETCH_AREA(dictHashLong, hashTableBytes); + PREFETCH_AREA(dictHashSmall, chainTableBytes); } /* init */ ip += (dictAndPrefixLength == 0); - if (dictMode == ZSTD_noDict) { - U32 const maxRep = (U32)(ip - prefixLowest); - if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; - if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; - } - if (dictMode == ZSTD_dictMatchState) { - /* dictMatchState repCode checks don't currently handle repCode == 0 - * disabling. 
*/ - assert(offset_1 <= dictAndPrefixLength); - assert(offset_2 <= dictAndPrefixLength); - } + + /* dictMatchState repCode checks don't currently handle repCode == 0 + * disabling. */ + assert(offset_1 <= dictAndPrefixLength); + assert(offset_2 <= dictAndPrefixLength); /* Main Search Loop */ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ @@ -126,69 +386,60 @@ size_t ZSTD_compressBlock_doubleFast_generic( U32 offset; size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); - size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8); - size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls); - U32 const current = (U32)(ip-base); + size_t const dictHashAndTagL = ZSTD_hashPtr(ip, dictHBitsL, 8); + size_t const dictHashAndTagS = ZSTD_hashPtr(ip, dictHBitsS, mls); + U32 const dictMatchIndexAndTagL = dictHashLong[dictHashAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS]; + U32 const dictMatchIndexAndTagS = dictHashSmall[dictHashAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS]; + int const dictTagsMatchL = ZSTD_comparePackedTags(dictMatchIndexAndTagL, dictHashAndTagL); + int const dictTagsMatchS = ZSTD_comparePackedTags(dictMatchIndexAndTagS, dictHashAndTagS); + U32 const curr = (U32)(ip-base); U32 const matchIndexL = hashLong[h2]; U32 matchIndexS = hashSmall[h]; const BYTE* matchLong = base + matchIndexL; const BYTE* match = base + matchIndexS; - const U32 repIndex = current + 1 - offset_1; - const BYTE* repMatch = (dictMode == ZSTD_dictMatchState - && repIndex < prefixLowestIndex) ? + const U32 repIndex = curr + 1 - offset_1; + const BYTE* repMatch = (repIndex < prefixLowestIndex) ? dictBase + (repIndex - dictIndexDelta) : base + repIndex; - hashLong[h2] = hashSmall[h] = current; /* update hash tables */ + hashLong[h2] = hashSmall[h] = curr; /* update hash tables */ - /* check dictMatchState repcode */ - if (dictMode == ZSTD_dictMatchState - && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) + /* check repcode */ + if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength); goto _match_stored; } - /* check noDict repcode */ - if ( dictMode == ZSTD_noDict - && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) { - mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; - ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH); - goto _match_stored; - } - - if (matchIndexL > prefixLowestIndex) { + if ((matchIndexL >= prefixLowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { /* check prefix long match */ - if (MEM_read64(matchLong) == MEM_read64(ip)) { - mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8; - offset = (U32)(ip-matchLong); - while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ - goto _match_found; - } - } else if (dictMode == ZSTD_dictMatchState) { + mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8; + offset = (U32)(ip-matchLong); + while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ + goto _match_found; + } else if (dictTagsMatchL) { /* check dictMatchState long match */ - U32 const dictMatchIndexL = dictHashLong[dictHL]; + U32 const dictMatchIndexL = dictMatchIndexAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS; const BYTE* dictMatchL = dictBase + dictMatchIndexL; assert(dictMatchL < dictEnd); if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) { mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8; - offset = (U32)(current - dictMatchIndexL - dictIndexDelta); + offset = (U32)(curr - dictMatchIndexL - dictIndexDelta); while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */ goto _match_found; } } if (matchIndexS > prefixLowestIndex) { - /* check prefix short match */ + /* short match candidate */ if (MEM_read32(match) == MEM_read32(ip)) { goto _search_next_long; } - } else if (dictMode == ZSTD_dictMatchState) { + } else if (dictTagsMatchS) { /* check dictMatchState short match */ - U32 const dictMatchIndexS = dictHashSmall[dictHS]; + U32 const dictMatchIndexS = dictMatchIndexAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS; match = dictBase + dictMatchIndexS; matchIndexS = dictMatchIndexS + dictIndexDelta; @@ -197,42 +448,44 @@ size_t ZSTD_compressBlock_doubleFast_generic( } } ip += ((ip-anchor) >> kSearchStrength) + 1; +#if defined(__aarch64__) + PREFETCH_L1(ip+256); +#endif continue; _search_next_long: - { size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8); - size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8); + size_t const dictHashAndTagL3 = ZSTD_hashPtr(ip+1, dictHBitsL, 8); U32 const matchIndexL3 = hashLong[hl3]; + U32 const dictMatchIndexAndTagL3 = dictHashLong[dictHashAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS]; + int const dictTagsMatchL3 = ZSTD_comparePackedTags(dictMatchIndexAndTagL3, dictHashAndTagL3); const BYTE* matchL3 = base + matchIndexL3; - hashLong[hl3] = current + 1; + hashLong[hl3] = curr + 1; /* check prefix long +1 match */ - if (matchIndexL3 > prefixLowestIndex) { - if (MEM_read64(matchL3) == MEM_read64(ip+1)) { - mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8; - ip++; - offset = (U32)(ip-matchL3); - while (((ip>anchor) & (matchL3>prefixLowest)) && 
(ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ - goto _match_found; - } - } else if (dictMode == ZSTD_dictMatchState) { + if ((matchIndexL3 >= prefixLowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1))) { + mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8; + ip++; + offset = (U32)(ip-matchL3); + while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ + goto _match_found; + } else if (dictTagsMatchL3) { /* check dict long +1 match */ - U32 const dictMatchIndexL3 = dictHashLong[dictHLNext]; + U32 const dictMatchIndexL3 = dictMatchIndexAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS; const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3; assert(dictMatchL3 < dictEnd); if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) { mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8; ip++; - offset = (U32)(current + 1 - dictMatchIndexL3 - dictIndexDelta); + offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta); while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */ goto _match_found; } } } /* if no long +1 match, explore the short match we found */ - if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) { + if (matchIndexS < prefixLowestIndex) { mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4; - offset = (U32)(current - matchIndexS); + offset = (U32)(curr - matchIndexS); while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ } else { mLength = ZSTD_count(ip+4, match+4, iend) + 4; @@ -240,13 +493,11 @@ size_t ZSTD_compressBlock_doubleFast_generic( while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ } - /* fall-through */ - _match_found: offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); _match_stored: /* match found */ @@ -254,63 +505,68 @@ size_t ZSTD_compressBlock_doubleFast_generic( anchor = ip; if (ip <= ilimit) { - /* Fill Table */ - hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = - hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; /* here because current+2 could be > iend-8 */ - hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = - hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base); + /* Complementary insertion */ + /* done after iLimit test, as candidates could be > iend-8 */ + { U32 const indexToInsert = curr+2; + hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert; + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); + hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert; + hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base); + } /* check immediate repcode */ - if (dictMode == ZSTD_dictMatchState) { - while (ip <= ilimit) { - U32 const current2 = (U32)(ip-base); - U32 const repIndex2 = current2 - offset_2; - const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState - && repIndex2 < prefixLowestIndex ? 
- dictBase - dictIndexDelta + repIndex2 : - base + repIndex2; - if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) - && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { - const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend; - size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4; - U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH); - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; - hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; - ip += repLength2; - anchor = ip; - continue; - } - break; - } } - - if (dictMode == ZSTD_noDict) { - while ( (ip <= ilimit) - && ( (offset_2>0) - & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { - /* store sequence */ - size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; - U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */ - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); - hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); - ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH); - ip += rLength; + while (ip <= ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - offset_2; + const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ? + dictBase + repIndex2 - dictIndexDelta : + base + repIndex2; + if ( (ZSTD_index_overlap_check(prefixLowestIndex, repIndex2)) + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4; + U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2); + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; + ip += repLength2; anchor = ip; - continue; /* faster when present ... (?) */ - } } } + continue; + } + break; + } + } } /* while (ip < ilimit) */ /* save reps for next block */ - rep[0] = offset_1 ? offset_1 : offsetSaved; - rep[1] = offset_2 ? 
offset_2 : offsetSaved; + rep[0] = offset_1; + rep[1] = offset_2; /* Return the last literals size */ return (size_t)(iend - anchor); } +#define ZSTD_GEN_DFAST_FN(dictMode, mls) \ + static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \ + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ + void const* src, size_t srcSize) \ + { \ + return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \ + } + +ZSTD_GEN_DFAST_FN(noDict, 4) +ZSTD_GEN_DFAST_FN(noDict, 5) +ZSTD_GEN_DFAST_FN(noDict, 6) +ZSTD_GEN_DFAST_FN(noDict, 7) + +ZSTD_GEN_DFAST_FN(dictMatchState, 4) +ZSTD_GEN_DFAST_FN(dictMatchState, 5) +ZSTD_GEN_DFAST_FN(dictMatchState, 6) +ZSTD_GEN_DFAST_FN(dictMatchState, 7) + size_t ZSTD_compressBlock_doubleFast( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { const U32 mls = ms->cParams.minMatch; @@ -318,19 +574,19 @@ size_t ZSTD_compressBlock_doubleFast( { default: /* includes case 3 */ case 4 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict); + return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize); case 5 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict); + return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize); case 6 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict); + return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize); case 7 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict); + return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize); } } size_t ZSTD_compressBlock_doubleFast_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { const U32 mls = ms->cParams.minMatch; @@ -338,19 +594,21 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState( { default: /* includes case 3 */ case 4 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState); + return ZSTD_compressBlock_doubleFast_dictMatchState_4(ms, seqStore, rep, src, srcSize); case 5 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState); + return ZSTD_compressBlock_doubleFast_dictMatchState_5(ms, seqStore, rep, src, srcSize); case 6 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState); + return ZSTD_compressBlock_doubleFast_dictMatchState_6(ms, seqStore, rep, src, srcSize); case 7 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState); + return ZSTD_compressBlock_doubleFast_dictMatchState_7(ms, seqStore, rep, src, srcSize); } } -static size_t ZSTD_compressBlock_doubleFast_extDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_compressBlock_doubleFast_extDict_generic( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls /* template */) { @@ -366,9 +624,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( const BYTE* const ilimit = iend - 8; const BYTE* const base = 
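ZSTD_GEN_DFAST_FN stamps one concrete function per (dictMode, mls) pair out of the inlined generic template, so each hot loop is compiled with its parameters as compile-time constants and the only runtime cost is a single switch in the dispatcher. The same pattern in isolation, under hypothetical names:

```c
#include <stddef.h>

/* Generic template: each stamped copy sees `mls` as a constant
 * (the real code adds FORCE_INLINE_TEMPLATE to guarantee inlining). */
static inline size_t demo_generic(const void* src, size_t srcSize, unsigned mls)
{
    (void)src;
    return srcSize / mls;   /* placeholder for the specialized hot loop */
}

#define GEN_DEMO_FN(mls)                                      \
    static size_t demo_##mls(const void* src, size_t srcSize) \
    { return demo_generic(src, srcSize, mls); }

GEN_DEMO_FN(4)
GEN_DEMO_FN(5)
GEN_DEMO_FN(6)
GEN_DEMO_FN(7)

/* One runtime switch, then a fully specialized body, mirroring the shape
 * of ZSTD_compressBlock_doubleFast(). */
static size_t demo_dispatch(const void* src, size_t srcSize, unsigned minMatch)
{
    switch (minMatch) {
    default: /* includes 3 */
    case 4: return demo_4(src, srcSize);
    case 5: return demo_5(src, srcSize);
    case 6: return demo_6(src, srcSize);
    case 7: return demo_7(src, srcSize);
    }
}
```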
ms->window.base; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); - const U32 maxDistance = 1U << cParams->windowLog; - const U32 lowestValid = ms->window.lowLimit; - const U32 lowLimit = (endIndex - lowestValid > maxDistance) ? endIndex - maxDistance : lowestValid; + const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); const U32 dictStartIndex = lowLimit; const U32 dictLimit = ms->window.dictLimit; const U32 prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit; @@ -382,7 +638,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( /* if extDict is invalidated due to maxDistance, switch to "regular" variant */ if (prefixStartIndex == dictStartIndex) - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict); + return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize); /* Search Loop */ while (ip < ilimit) { /* < instead of <=, because (ip+1) */ @@ -396,31 +652,31 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base; const BYTE* matchLong = matchLongBase + matchLongIndex; - const U32 current = (U32)(ip-base); - const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ + const U32 curr = (U32)(ip-base); + const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; size_t mLength; - hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */ + hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */ - if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */ - & (repIndex > dictStartIndex)) + if (((ZSTD_index_overlap_check(prefixStartIndex, repIndex)) + & (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength); } else { if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend; const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart; U32 offset; mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8; - offset = current - matchLongIndex; + offset = curr - matchLongIndex; while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) { size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); @@ -428,52 +684,56 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? 
dictBase : base; const BYTE* match3 = match3Base + matchIndex3; U32 offset; - hashLong[h3] = current + 1; + hashLong[h3] = curr + 1; if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) { const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend; const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart; mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8; ip++; - offset = current+1 - matchIndex3; + offset = curr+1 - matchIndex3; while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */ } else { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend; const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart; mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4; - offset = current - matchIndex; + offset = curr - matchIndex; while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ } offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); } else { ip += ((ip-anchor) >> kSearchStrength) + 1; continue; } } - /* found a match : store it */ + /* move to next sequence start */ ip += mLength; anchor = ip; if (ip <= ilimit) { - /* Fill Table */ - hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; - hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2; - hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base); - hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); + /* Complementary insertion */ + /* done after iLimit test, as candidates could be > iend-8 */ + { U32 const indexToInsert = curr+2; + hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert; + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); + hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert; + hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base); + } + /* check immediate repcode */ while (ip <= ilimit) { U32 const current2 = (U32)(ip-base); U32 const repIndex2 = current2 - offset_2; const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; - if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */ - & (repIndex2 > dictStartIndex)) + if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2)) + & (offset_2 <= current2 - dictStartIndex)) && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? 
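ZSTD_count_2segments(), called throughout these extDict paths, measures a match that may begin in the old segment and, on reaching its end, continue at the start of the current prefix. A simplified byte-wise model of that behavior (the real function compares a word at a time but takes the same five arguments):

```c
#include <stddef.h>
#include <stdint.h>

/* Byte-wise stand-in for ZSTD_count(). */
static size_t countEq(const uint8_t* a, const uint8_t* b, const uint8_t* aEnd)
{
    const uint8_t* const aStart = a;
    while (a < aEnd && *a == *b) { a++; b++; }
    return (size_t)(a - aStart);
}

/* Match length when `match` lives in a segment ending at mEnd (e.g. the ext
 * dictionary): if equality holds all the way to mEnd, counting continues at
 * the start of the current prefix, iStart. Same argument shape as
 * ZSTD_count_2segments(ip, match, iEnd, mEnd, iStart). */
static size_t count2Segments(const uint8_t* ip, const uint8_t* match,
                             const uint8_t* iEnd, const uint8_t* mEnd,
                             const uint8_t* iStart)
{
    const uint8_t* const segLimit =
        (iEnd - ip) < (mEnd - match) ? iEnd : ip + (mEnd - match);
    size_t const seg1 = countEq(ip, match, segLimit);
    if (match + seg1 != mEnd) return seg1;           /* stopped inside segment 1 */
    return seg1 + countEq(ip + seg1, iStart, iEnd);  /* continue in the prefix */
}
```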
dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH); + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2); hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; ip += repLength2; @@ -491,9 +751,13 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( return (size_t)(iend - anchor); } +ZSTD_GEN_DFAST_FN(extDict, 4) +ZSTD_GEN_DFAST_FN(extDict, 5) +ZSTD_GEN_DFAST_FN(extDict, 6) +ZSTD_GEN_DFAST_FN(extDict, 7) size_t ZSTD_compressBlock_doubleFast_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; @@ -501,12 +765,14 @@ size_t ZSTD_compressBlock_doubleFast_extDict( { default: /* includes case 3 */ case 4 : - return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4); + return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize); case 5 : - return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5); + return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize); case 6 : - return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6); + return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize); case 7 : - return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7); + return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize); } } + +#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */ diff --git a/lib/compress/zstd_double_fast.h b/lib/compress/zstd_double_fast.h index 4fa31acfc0d..cd562fea8ef 100644 --- a/lib/compress/zstd_double_fast.h +++ b/lib/compress/zstd_double_fast.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -11,28 +11,32 @@ #ifndef ZSTD_DOUBLE_FAST_H #define ZSTD_DOUBLE_FAST_H -#if defined (__cplusplus) -extern "C" { -#endif - -#include "mem.h" /* U32 */ +#include "../common/mem.h" /* U32 */ #include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */ -void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, - void const* end, ZSTD_dictTableLoadMethod_e dtlm); +#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR + +void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms, + void const* end, ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp); + size_t ZSTD_compressBlock_doubleFast( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_doubleFast_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_doubleFast_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); - -#if defined (__cplusplus) -} -#endif +#define ZSTD_COMPRESSBLOCK_DOUBLEFAST ZSTD_compressBlock_doubleFast +#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE ZSTD_compressBlock_doubleFast_dictMatchState +#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT ZSTD_compressBlock_doubleFast_extDict +#else +#define ZSTD_COMPRESSBLOCK_DOUBLEFAST NULL +#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE NULL +#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT NULL +#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */ #endif /* ZSTD_DOUBLE_FAST_H */ diff --git a/lib/compress/zstd_fast.c b/lib/compress/zstd_fast.c index a05b8a47f14..ee25bcbac8d 100644 --- a/lib/compress/zstd_fast.c +++ b/lib/compress/zstd_fast.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -8,11 +8,49 @@ * You may select, at your option, one of the above-listed licenses. */ -#include "zstd_compress_internal.h" +#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */ #include "zstd_fast.h" +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_fillHashTableForCDict(ZSTD_MatchState_t* ms, + const void* const end, + ZSTD_dictTableLoadMethod_e dtlm) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const hashTable = ms->hashTable; + U32 const hBits = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS; + U32 const mls = cParams->minMatch; + const BYTE* const base = ms->window.base; + const BYTE* ip = base + ms->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const U32 fastHashFillStep = 3; + + /* Currently, we always use ZSTD_dtlm_full for filling CDict tables. + * Feel free to remove this assert if there's a good reason! */ + assert(dtlm == ZSTD_dtlm_full); + + /* Always insert every fastHashFillStep position into the hash table. + * Insert the other positions if their hash entry is empty. 
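The header change above compiles the whole double-fast compressor out under ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR, mapping the ZSTD_COMPRESSBLOCK_DOUBLEFAST* macros to NULL so that any dispatch table referencing them still builds. A sketch of the same pattern with hypothetical names:

```c
#include <stddef.h>

typedef size_t (*blockCompressor)(const void* src, size_t srcSize);

#ifndef DEMO_EXCLUDE_DFAST
static size_t demo_dfast(const void* src, size_t srcSize)
{
    (void)src;
    return srcSize;   /* placeholder for the real block compressor */
}
#define DEMO_DFAST demo_dfast
#else
#define DEMO_DFAST NULL   /* compiled out, but the dispatch slot stays valid */
#endif

/* Strategy tables build unconditionally; the build (or a runtime check)
 * ensures an excluded strategy is never actually selected. */
static const blockCompressor kStrategyTable[] = { DEMO_DFAST };
```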
+ */ + for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) { + U32 const curr = (U32)(ip - base); + { size_t const hashAndTag = ZSTD_hashPtr(ip, hBits, mls); + ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr); } + + if (dtlm == ZSTD_dtlm_fast) continue; + /* Only load extra positions for ZSTD_dtlm_full */ + { U32 p; + for (p = 1; p < fastHashFillStep; ++p) { + size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls); + if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { /* not yet filled */ + ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p); + } } } } +} -void ZSTD_fillHashTable(ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_fillHashTableForCCtx(ZSTD_MatchState_t* ms, const void* const end, ZSTD_dictTableLoadMethod_e dtlm) { @@ -25,180 +63,426 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms, const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; const U32 fastHashFillStep = 3; + /* Currently, we always use ZSTD_dtlm_fast for filling CCtx tables. + * Feel free to remove this assert if there's a good reason! */ + assert(dtlm == ZSTD_dtlm_fast); + /* Always insert every fastHashFillStep position into the hash table. * Insert the other positions if their hash entry is empty. */ for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) { - U32 const current = (U32)(ip - base); + U32 const curr = (U32)(ip - base); size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls); - hashTable[hash0] = current; + hashTable[hash0] = curr; if (dtlm == ZSTD_dtlm_fast) continue; /* Only load extra positions for ZSTD_dtlm_full */ { U32 p; for (p = 1; p < fastHashFillStep; ++p) { size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls); if (hashTable[hash] == 0) { /* not yet filled */ - hashTable[hash] = current + p; + hashTable[hash] = curr + p; } } } } } +void ZSTD_fillHashTable(ZSTD_MatchState_t* ms, + const void* const end, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp) +{ + if (tfp == ZSTD_tfp_forCDict) { + ZSTD_fillHashTableForCDict(ms, end, dtlm); + } else { + ZSTD_fillHashTableForCCtx(ms, end, dtlm); + } +} + +typedef int (*ZSTD_match4Found) (const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit); + +static int +ZSTD_match4Found_cmov(const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit) +{ + /* Array of ~random data, should have low probability of matching data. + * Load from here if the index is invalid. + * Used to avoid unpredictable branches. */ + static const BYTE dummy[] = {0x12,0x34,0x56,0x78}; + + /* currentIdx >= lowLimit is a (somewhat) unpredictable branch. + * However expression below compiles into conditional move. + */ + const BYTE* mvalAddr = ZSTD_selectAddr(matchIdx, idxLowLimit, matchAddress, dummy); + /* Note: this used to be written as : return test1 && test2; + * Unfortunately, once inlined, these tests become branches, + * in which case it becomes critical that they are executed in the right order (test1 then test2). + * So we have to write these tests in a specific manner to ensure their ordering. 
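ZSTD_writeTaggedIndex() and ZSTD_comparePackedTags(), used when filling and probing CDict tables, pack a short hash "tag" into the low bits of each entry so that most non-matches are rejected without touching the (cold) dictionary content. A sketch of the packing, assuming an 8-bit tag width for ZSTD_SHORT_CACHE_TAG_BITS (the constant is defined elsewhere in the patch):

```c
#include <stdint.h>
#include <stddef.h>

#define TAG_BITS 8u   /* assumed width of ZSTD_SHORT_CACHE_TAG_BITS */
#define TAG_MASK ((1u << TAG_BITS) - 1)

/* The dictionary hash is computed with hashLog + TAG_BITS bits: the top
 * bits pick the bucket, the low bits are a tag stored inside the entry. */
static void writeTaggedIndex(uint32_t* table, size_t hashAndTag, uint32_t index)
{
    size_t   const bucket = hashAndTag >> TAG_BITS;
    uint32_t const tag    = (uint32_t)hashAndTag & TAG_MASK;
    table[bucket] = (index << TAG_BITS) | tag;
}

/* Cheap prefilter, playing the role of ZSTD_comparePackedTags(): only a tag
 * hit justifies loading the candidate's bytes for the real comparison. */
static int tagsMatch(uint32_t packedEntry, size_t probeHashAndTag)
{
    return (packedEntry & TAG_MASK) == ((uint32_t)probeHashAndTag & TAG_MASK);
}

/* Recover the index, as in dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS. */
static uint32_t taggedIndex(uint32_t packedEntry)
{
    return packedEntry >> TAG_BITS;
}
```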
+ */ + if (MEM_read32(currentPtr) != MEM_read32(mvalAddr)) return 0; + /* force ordering of these tests, which matters once the function is inlined, as they become branches */ +#if defined(__GNUC__) + __asm__(""); +#endif + return matchIdx >= idxLowLimit; +} + +static int +ZSTD_match4Found_branch(const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit) +{ + /* using a branch instead of a cmov, + * because it's faster in scenarios where matchIdx >= idxLowLimit is generally true, + * aka almost all candidates are within range */ + U32 mval; + if (matchIdx >= idxLowLimit) { + mval = MEM_read32(matchAddress); + } else { + mval = MEM_read32(currentPtr) ^ 1; /* guaranteed to not match. */ + } + + return (MEM_read32(currentPtr) == mval); +} + + +/** + * If you squint hard enough (and ignore repcodes), the search operation at any + * given position is broken into 4 stages: + * + * 1. Hash (map position to hash value via input read) + * 2. Lookup (map hash val to index via hashtable read) + * 3. Load (map index to value at that position via input read) + * 4. Compare + * + * Each of these steps involves a memory read at an address which is computed + * from the previous step. This means these steps must be sequenced and their + * latencies are cumulative. + * + * Rather than do 1->2->3->4 sequentially for a single position before moving + * onto the next, this implementation interleaves these operations across the + * next few positions: + * + * R = Repcode Read & Compare + * H = Hash + * T = Table Lookup + * M = Match Read & Compare + * + * Pos | Time --> + * ----+------------------- + * N | ... M + * N+1 | ... TM + * N+2 | R H T M + * N+3 | H TM + * N+4 | R H T M + * N+5 | H ... + * N+6 | R ... + * + * This is very much analogous to the pipelining of execution in a CPU. And just + * like a CPU, we have to dump the pipeline when we find a match (i.e., take a + * branch). + * + * When this happens, we throw away our current state, and do the following prep + * to re-enter the loop: + * + * Pos | Time --> + * ----+------------------- + * N | H T + * N+1 | H + * + * This is also the work we do at the beginning to enter the loop initially. + */ FORCE_INLINE_TEMPLATE -size_t ZSTD_compressBlock_fast_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_compressBlock_fast_noDict_generic( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, - U32 const mls) + U32 const mls, int useCmov) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; - /* support stepSize of 0 */ - size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; + size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; /* min 2 */ const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; - /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */ - const BYTE* ip0 = istart; - const BYTE* ip1; - const BYTE* anchor = istart; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); - const U32 maxDistance = 1U << cParams->windowLog; - const U32 validStartIndex = ms->window.dictLimit; - const U32 prefixStartIndex = (endIndex - validStartIndex > maxDistance) ? 
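The two ZSTD_match4Found variants above trade the same check two ways: the cmov form avoids a hard-to-predict "is the candidate index in range" branch by selecting between the real candidate address and a dummy buffer, and forces the cheap 4-byte compare to happen first; the branch form is cheaper when candidates are almost always in range. A self-contained sketch of both, with ZSTD_selectAddr assumed to compile to a conditional move:

```c
#include <stdint.h>
#include <string.h>

static uint32_t read32(const void* p) { uint32_t v; memcpy(&v, p, 4); return v; }

/* Branchless form: always load from *some* valid address. */
static int match4_cmov(const uint8_t* ip, const uint8_t* cand,
                       uint32_t candIdx, uint32_t lowLimit)
{
    static const uint8_t dummy[4] = {0x12, 0x34, 0x56, 0x78}; /* ~never matches */
    const uint8_t* const addr = (candIdx >= lowLimit) ? cand : dummy; /* expected cmov */
    if (read32(ip) != read32(addr)) return 0;   /* cheap reject first */
#if defined(__GNUC__)
    __asm__("");  /* barrier: keep the two tests ordered once inlined, as in the patch */
#endif
    return candIdx >= lowLimit;
}

/* Branchy form: `x ^ 1 != x` manufactures a guaranteed mismatch for
 * out-of-range candidates, so a single compare does both jobs. */
static int match4_branch(const uint8_t* ip, const uint8_t* cand,
                         uint32_t candIdx, uint32_t lowLimit)
{
    uint32_t const mval = (candIdx >= lowLimit) ? read32(cand)
                                                : read32(ip) ^ 1;
    return read32(ip) == mval;
}
```

The dispatcher later in the patch chooses per block: windowLog < 19 selects the cmov form, on the theory that the in-range test is then hard to predict.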
endIndex - maxDistance : validStartIndex; + const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; - U32 offset_1=rep[0], offset_2=rep[1]; - U32 offsetSaved = 0; - /* init */ + const BYTE* anchor = istart; + const BYTE* ip0 = istart; + const BYTE* ip1; + const BYTE* ip2; + const BYTE* ip3; + U32 current0; + + U32 rep_offset1 = rep[0]; + U32 rep_offset2 = rep[1]; + U32 offsetSaved1 = 0, offsetSaved2 = 0; + + size_t hash0; /* hash for ip0 */ + size_t hash1; /* hash for ip1 */ + U32 matchIdx; /* match idx for ip0 */ + + U32 offcode; + const BYTE* match0; + size_t mLength; + + /* ip0 and ip1 are always adjacent. The targetLength skipping and + * uncompressibility acceleration is applied to every other position, + * matching the behavior of #1562. step therefore represents the gap + * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */ + size_t step; + const BYTE* nextStep; + const size_t kStepIncr = (1 << (kSearchStrength - 1)); + const ZSTD_match4Found matchFound = useCmov ? ZSTD_match4Found_cmov : ZSTD_match4Found_branch; + + DEBUGLOG(5, "ZSTD_compressBlock_fast_generic"); ip0 += (ip0 == prefixStart); + { U32 const curr = (U32)(ip0 - base); + U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog); + U32 const maxRep = curr - windowLow; + if (rep_offset2 > maxRep) offsetSaved2 = rep_offset2, rep_offset2 = 0; + if (rep_offset1 > maxRep) offsetSaved1 = rep_offset1, rep_offset1 = 0; + } + + /* start each op */ +_start: /* Requires: ip0 */ + + step = stepSize; + nextStep = ip0 + kStepIncr; + + /* calculate positions, ip0 - anchor == 0, so we skip step calc */ ip1 = ip0 + 1; - { - U32 const maxRep = (U32)(ip0 - prefixStart); - if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; - if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + ip2 = ip0 + step; + ip3 = ip2 + 1; + + if (ip3 >= ilimit) { + goto _cleanup; } - /* Main Search Loop */ - while (ip1 < ilimit) { /* < instead of <=, because check at ip0+2 */ - size_t mLength; - BYTE const* ip2 = ip0 + 2; - size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls); - U32 const val0 = MEM_read32(ip0); - size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls); - U32 const val1 = MEM_read32(ip1); - U32 const current0 = (U32)(ip0-base); - U32 const current1 = (U32)(ip1-base); - U32 const matchIndex0 = hashTable[h0]; - U32 const matchIndex1 = hashTable[h1]; - BYTE const* repMatch = ip2-offset_1; - const BYTE* match0 = base + matchIndex0; - const BYTE* match1 = base + matchIndex1; - U32 offcode; - hashTable[h0] = current0; /* update hash table */ - hashTable[h1] = current1; /* update hash table */ - - assert(ip0 + 1 == ip1); - - if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) { - mLength = ip2[-1] == repMatch[-1] ? 
1 : 0; - ip0 = ip2 - mLength; - match0 = repMatch - mLength; - offcode = 0; + hash0 = ZSTD_hashPtr(ip0, hlog, mls); + hash1 = ZSTD_hashPtr(ip1, hlog, mls); + + matchIdx = hashTable[hash0]; + + do { + /* load repcode match for ip[2]*/ + const U32 rval = MEM_read32(ip2 - rep_offset1); + + /* write back hash table entry */ + current0 = (U32)(ip0 - base); + hashTable[hash0] = current0; + + /* check repcode at ip[2] */ + if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) { + ip0 = ip2; + match0 = ip0 - rep_offset1; + mLength = ip0[-1] == match0[-1]; + ip0 -= mLength; + match0 -= mLength; + offcode = REPCODE1_TO_OFFBASE; + mLength += 4; + + /* Write next hash table entry: it's already calculated. + * This write is known to be safe because ip1 is before the + * repcode (ip2). */ + hashTable[hash1] = (U32)(ip1 - base); + goto _match; } - if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) { - /* found a regular match */ + + if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) { + /* Write next hash table entry (it's already calculated). + * This write is known to be safe because the ip1 == ip0 + 1, + * so searching will resume after ip1 */ + hashTable[hash1] = (U32)(ip1 - base); + goto _offset; } - if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) { - /* found a regular match after one literal */ - ip0 = ip1; - match0 = match1; + + /* lookup ip[1] */ + matchIdx = hashTable[hash1]; + + /* hash ip[2] */ + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + + /* advance to next positions */ + ip0 = ip1; + ip1 = ip2; + ip2 = ip3; + + /* write back hash table entry */ + current0 = (U32)(ip0 - base); + hashTable[hash0] = current0; + + if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) { + /* Write next hash table entry, since it's already calculated */ + if (step <= 4) { + /* Avoid writing an index if it's >= position where search will resume. + * The minimum possible match has length 4, so search can resume at ip0 + 4. + */ + hashTable[hash1] = (U32)(ip1 - base); + } goto _offset; } - { - size_t const step = ((ip0-anchor) >> (kSearchStrength - 1)) + stepSize; - assert(step >= 2); - ip0 += step; - ip1 += step; - continue; + + /* lookup ip[1] */ + matchIdx = hashTable[hash1]; + + /* hash ip[2] */ + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + + /* advance to next positions */ + ip0 = ip1; + ip1 = ip2; + ip2 = ip0 + step; + ip3 = ip1 + step; + + /* calculate step */ + if (ip2 >= nextStep) { + step++; + PREFETCH_L1(ip1 + 64); + PREFETCH_L1(ip1 + 128); + nextStep += kStepIncr; } -_offset: /* Requires: ip0, match0 */ - /* Compute the offset code */ - offset_2 = offset_1; - offset_1 = (U32)(ip0-match0); - offcode = offset_1 + ZSTD_REP_MOVE; - mLength = 0; - /* Count the backwards match length */ - while (((ip0>anchor) & (match0>prefixStart)) - && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */ + } while (ip3 < ilimit); + +_cleanup: + /* Note that there are probably still a couple positions one could search. + * However, it seems to be a meaningful performance hit to try to search + * them. So let's not. */ + + /* When the repcodes are outside of the prefix, we set them to zero before the loop. + * When the offsets are still zero, we need to restore them after the block to have a correct + * repcode history. If only one offset was invalid, it is easy. The tricky case is when both + * offsets were invalid. We need to figure out which offset to refill with. 
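Concretely, this loop body keeps the stages from the R/H/T/M diagram in flight for neighboring positions: while the candidate for ip0 is being compared, the table entry for ip1 has already been fetched and the hash for ip2 is being computed. A stripped-down model of the rolling pattern (no repcodes, no stepping; index 0 doubles as "empty" here, where the real code checks against a low-limit index instead):

```c
#include <stdint.h>
#include <stddef.h>
#include <string.h>

typedef uint32_t U32;

static size_t hashPos(const uint8_t* p, unsigned hlog)   /* hypothetical hash */
{
    uint64_t v; memcpy(&v, p, 8);
    return (size_t)((v * 0x9E3779B185EBCA87ULL) >> (64 - hlog));
}
static U32 read32at(const uint8_t* p) { U32 v; memcpy(&v, p, 4); return v; }

/* Returns the first position with a 4-byte table match, or iend. */
static const uint8_t* findFirstMatch(U32* table, unsigned hlog,
                                     const uint8_t* base,
                                     const uint8_t* ip, const uint8_t* iend)
{
    size_t h0, h1;
    U32 idx;
    if (iend - ip < 10) return iend;     /* need 8 readable bytes at ip+2 */
    h0  = hashPos(ip, hlog);
    h1  = hashPos(ip + 1, hlog);
    idx = table[h0];                     /* lookup for ip already in flight */
    while (ip + 10 <= iend) {
        table[h0] = (U32)(ip - base);            /* write back entry for ip */
        if (idx != 0 && read32at(base + idx) == read32at(ip))
            return ip;                           /* stage M: match confirmed */
        idx = table[h1];                         /* stage T: lookup for ip+1 */
        h0  = h1;
        h1  = hashPos(ip + 2, hlog);             /* stage H: hash for ip+2 */
        ip++;
    }
    return iend;
}
```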
+ * - If both offsets are zero they are in the same order. + * - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`. + * - If only one is zero, we need to decide which offset to restore. + * - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1. + * - It is impossible for rep_offset2 to be non-zero. + * + * So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then + * set rep[0] = rep_offset1 and rep[1] = offsetSaved1. + */ + offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2; + + /* save reps for next block */ + rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1; + rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2; + + /* Return the last literals size */ + return (size_t)(iend - anchor); + +_offset: /* Requires: ip0, idx */ + + /* Compute the offset code. */ + match0 = base + matchIdx; + rep_offset2 = rep_offset1; + rep_offset1 = (U32)(ip0-match0); + offcode = OFFSET_TO_OFFBASE(rep_offset1); + mLength = 4; + + /* Count the backwards match length. */ + while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) { + ip0--; + match0--; + mLength++; + } _match: /* Requires: ip0, match0, offcode */ - /* Count the forward length */ - mLength += ZSTD_count(ip0+mLength+4, match0+mLength+4, iend) + 4; - ZSTD_storeSeq(seqStore, ip0-anchor, anchor, offcode, mLength-MINMATCH); - /* match found */ - ip0 += mLength; - anchor = ip0; - ip1 = ip0 + 1; - if (ip0 <= ilimit) { - /* Fill Table */ - assert(base+current0+2 > istart); /* check base overflow */ - hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */ - hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); + /* Count the forward length. */ + mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend); + + ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength); - while ( (ip0 <= ilimit) - && ( (offset_2>0) - & (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) )) { + ip0 += mLength; + anchor = ip0; + + /* Fill table and check for immediate repcode. */ + if (ip0 <= ilimit) { + /* Fill Table */ + assert(base+current0+2 > istart); /* check base overflow */ + hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */ + hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); + + if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */ + while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) { /* store sequence */ - size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4; - U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */ + size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4; + { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); ip0 += rLength; - ip1 = ip0 + 1; - ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH); + ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, REPCODE1_TO_OFFBASE, rLength); anchor = ip0; continue; /* faster when present (confirmed on gcc-8) ... (?) */ - } - } - } - - /* save reps for next block */ - rep[0] = offset_1 ? offset_1 : offsetSaved; - rep[1] = offset_2 ? 
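The restore rule spelled out in that comment reduces to two steps; a condensed restatement, with names mirroring the patch:

```c
#include <stdint.h>

/* Save-reps logic at _cleanup: only when rep_offset1 started out of range
 * (offsetSaved1 != 0) and was later validated by a real match
 * (rep_offset1 != 0) does its original value slide into the second slot. */
static void saveReps(uint32_t rep[2],
                     uint32_t rep_offset1, uint32_t rep_offset2,
                     uint32_t offsetSaved1, uint32_t offsetSaved2)
{
    if ((offsetSaved1 != 0) && (rep_offset1 != 0))
        offsetSaved2 = offsetSaved1;

    rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1;
    rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2;
}
```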
offset_2 : offsetSaved; + } } } - /* Return the last literals size */ - return (size_t)(iend - anchor); + goto _start; } +#define ZSTD_GEN_FAST_FN(dictMode, mml, cmov) \ + static size_t ZSTD_compressBlock_fast_##dictMode##_##mml##_##cmov( \ + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ + void const* src, size_t srcSize) \ + { \ + return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mml, cmov); \ + } + +ZSTD_GEN_FAST_FN(noDict, 4, 1) +ZSTD_GEN_FAST_FN(noDict, 5, 1) +ZSTD_GEN_FAST_FN(noDict, 6, 1) +ZSTD_GEN_FAST_FN(noDict, 7, 1) + +ZSTD_GEN_FAST_FN(noDict, 4, 0) +ZSTD_GEN_FAST_FN(noDict, 5, 0) +ZSTD_GEN_FAST_FN(noDict, 6, 0) +ZSTD_GEN_FAST_FN(noDict, 7, 0) size_t ZSTD_compressBlock_fast( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - ZSTD_compressionParameters const* cParams = &ms->cParams; - U32 const mls = cParams->minMatch; + U32 const mml = ms->cParams.minMatch; + /* use cmov when "candidate in range" branch is likely unpredictable */ + int const useCmov = ms->cParams.windowLog < 19; assert(ms->dictMatchState == NULL); - switch(mls) - { - default: /* includes case 3 */ - case 4 : - return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4); - case 5 : - return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5); - case 6 : - return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6); - case 7 : - return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7); + if (useCmov) { + switch(mml) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize); + case 5 : + return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize); + case 6 : + return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize); + case 7 : + return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize); + } + } else { + /* use a branch instead */ + switch(mml) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize); + case 5 : + return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize); + case 6 : + return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize); + case 7 : + return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize); + } } } FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_fast_dictMatchState_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize, U32 const mls) + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize, U32 const mls, U32 const hasStep) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; @@ -207,16 +491,16 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( U32 const stepSize = cParams->targetLength + !(cParams->targetLength); const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; + const BYTE* ip0 = istart; + const BYTE* ip1 = ip0 + stepSize; /* we assert below that stepSize >= 1 */ const BYTE* anchor = istart; const U32 prefixStartIndex = ms->window.dictLimit; const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const iend = istart + srcSize; const BYTE* const 
ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; - U32 offsetSaved = 0; - const ZSTD_matchState_t* const dms = ms->dictMatchState; + const ZSTD_MatchState_t* const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dictCParams = &dms->cParams ; const U32* const dictHashTable = dms->hashTable; const U32 dictStartIndex = dms->window.dictLimit; @@ -224,164 +508,219 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( const BYTE* const dictStart = dictBase + dictStartIndex; const BYTE* const dictEnd = dms->window.nextSrc; const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase); - const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart); - const U32 dictHLog = dictCParams->hashLog; + const U32 dictAndPrefixLength = (U32)(istart - prefixStart + dictEnd - dictStart); + const U32 dictHBits = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS; /* if a dictionary is still attached, it necessarily means that * it is within window size. So we just check it. */ const U32 maxDistance = 1U << cParams->windowLog; - const U32 endIndex = (U32)((size_t)(ip - base) + srcSize); + const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); assert(endIndex - prefixStartIndex <= maxDistance); (void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */ - /* ensure there will be no no underflow + (void)hasStep; /* not currently specialized on whether it's accelerated */ + + /* ensure there will be no underflow * when translating a dict index into a local index */ assert(prefixStartIndex >= (U32)(dictEnd - dictBase)); + if (ms->prefetchCDictTables) { + size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32); + PREFETCH_AREA(dictHashTable, hashTableBytes); + } + /* init */ - ip += (dictAndPrefixLength == 0); + DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic"); + ip0 += (dictAndPrefixLength == 0); /* dictMatchState repCode checks don't currently handle repCode == 0 * disabling. */ assert(offset_1 <= dictAndPrefixLength); assert(offset_2 <= dictAndPrefixLength); - /* Main Search Loop */ - while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ + /* Outer search loop */ + assert(stepSize >= 1); + while (ip1 <= ilimit) { /* repcode check at (ip0 + 1) is safe because ip0 < ip1 */ size_t mLength; - size_t const h = ZSTD_hashPtr(ip, hlog, mls); - U32 const current = (U32)(ip-base); - U32 const matchIndex = hashTable[h]; - const BYTE* match = base + matchIndex; - const U32 repIndex = current + 1 - offset_1; - const BYTE* repMatch = (repIndex < prefixStartIndex) ? - dictBase + (repIndex - dictIndexDelta) : - base + repIndex; - hashTable[h] = current; /* update hash table */ - - if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */ - && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { - const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? 
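When ms->prefetchCDictTables is set, PREFETCH_AREA walks the attached dictionary's hash table before the search loop so the first lookups don't all miss cache. A plausible shape for such an area prefetch, assuming __builtin_prefetch and a 64-byte line (the real macro lives in zstd's common headers):

```c
#include <stddef.h>

#define CACHE_LINE 64   /* assumed line size */

/* Touch every cache line of [ptr, ptr+bytes) with a read prefetch,
 * mirroring the intent of PREFETCH_AREA(dictHashTable, hashTableBytes). */
static void prefetchArea(const void* ptr, size_t bytes)
{
#if defined(__GNUC__)
    const char* p   = (const char*)ptr;
    const char* end = p + bytes;
    for (; p < end; p += CACHE_LINE)
        __builtin_prefetch(p, 0 /*read*/, 3 /*high temporal locality*/);
#else
    (void)ptr; (void)bytes;   /* no-op where no prefetch intrinsic is known */
#endif
}
```

A caller would pass the analog of ((size_t)1 << dictCParams->hashLog) * sizeof(U32) for `bytes`, as the patch does.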
dictEnd : iend; - mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; - ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH); - } else if ( (matchIndex <= prefixStartIndex) ) { - size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls); - U32 const dictMatchIndex = dictHashTable[dictHash]; - const BYTE* dictMatch = dictBase + dictMatchIndex; - if (dictMatchIndex <= dictStartIndex || - MEM_read32(dictMatch) != MEM_read32(ip)) { - assert(stepSize >= 1); - ip += ((ip-anchor) >> kSearchStrength) + stepSize; - continue; - } else { - /* found a dict match */ - U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta); - mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4; - while (((ip>anchor) & (dictMatch>dictStart)) - && (ip[-1] == dictMatch[-1])) { - ip--; dictMatch--; mLength++; + size_t hash0 = ZSTD_hashPtr(ip0, hlog, mls); + + size_t const dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls); + U32 dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> ZSTD_SHORT_CACHE_TAG_BITS]; + int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0); + + U32 matchIndex = hashTable[hash0]; + U32 curr = (U32)(ip0 - base); + size_t step = stepSize; + const size_t kStepIncr = 1 << kSearchStrength; + const BYTE* nextStep = ip0 + kStepIncr; + + /* Inner search loop */ + while (1) { + const BYTE* match = base + matchIndex; + const U32 repIndex = curr + 1 - offset_1; + const BYTE* repMatch = (repIndex < prefixStartIndex) ? + dictBase + (repIndex - dictIndexDelta) : + base + repIndex; + const size_t hash1 = ZSTD_hashPtr(ip1, hlog, mls); + size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls); + hashTable[hash0] = curr; /* update hash table */ + + if ((ZSTD_index_overlap_check(prefixStartIndex, repIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) { + const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? 
dictEnd : iend; + mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4; + ip0++; + ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength); + break; + } + + if (dictTagsMatch) { + /* Found a possible dict match */ + const U32 dictMatchIndex = dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS; + const BYTE* dictMatch = dictBase + dictMatchIndex; + if (dictMatchIndex > dictStartIndex && + MEM_read32(dictMatch) == MEM_read32(ip0)) { + /* To replicate extDict parse behavior, we only use dict matches when the normal matchIndex is invalid */ + if (matchIndex <= prefixStartIndex) { + U32 const offset = (U32) (curr - dictMatchIndex - dictIndexDelta); + mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4; + while (((ip0 > anchor) & (dictMatch > dictStart)) + && (ip0[-1] == dictMatch[-1])) { + ip0--; + dictMatch--; + mLength++; + } /* catch up */ + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); + break; + } + } + } + + if (ZSTD_match4Found_cmov(ip0, match, matchIndex, prefixStartIndex)) { + /* found a regular match of size >= 4 */ + U32 const offset = (U32) (ip0 - match); + mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4; + while (((ip0 > anchor) & (match > prefixStart)) + && (ip0[-1] == match[-1])) { + ip0--; + match--; + mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); + break; } - } else if (MEM_read32(match) != MEM_read32(ip)) { - /* it's not a match, and we're not going to check the dictionary */ - assert(stepSize >= 1); - ip += ((ip-anchor) >> kSearchStrength) + stepSize; - continue; - } else { - /* found a regular match */ - U32 const offset = (U32)(ip-match); - mLength = ZSTD_count(ip+4, match+4, iend) + 4; - while (((ip>anchor) & (match>prefixStart)) - && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ - offset_2 = offset_1; - offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); - } + + /* Prepare for next iteration */ + dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> ZSTD_SHORT_CACHE_TAG_BITS]; + dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1); + matchIndex = hashTable[hash1]; + + if (ip1 >= nextStep) { + step++; + nextStep += kStepIncr; + } + ip0 = ip1; + ip1 = ip1 + step; + if (ip1 > ilimit) goto _cleanup; + + curr = (U32)(ip0 - base); + hash0 = hash1; + } /* end inner search loop */ /* match found */ - ip += mLength; - anchor = ip; + assert(mLength); + ip0 += mLength; + anchor = ip0; - if (ip <= ilimit) { + if (ip0 <= ilimit) { /* Fill Table */ - assert(base+current+2 > istart); /* check base overflow */ - hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2; /* here because current+2 could be > iend-8 */ - hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base); + assert(base+curr+2 > istart); /* check base overflow */ + hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; /* here because curr+2 could be > iend-8 */ + hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); /* check immediate repcode */ - while (ip <= ilimit) { - U32 const current2 = (U32)(ip-base); + while (ip0 <= ilimit) { + U32 const current2 = 
(U32)(ip0-base); U32 const repIndex2 = current2 - offset_2; const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase - dictIndexDelta + repIndex2 : base + repIndex2; - if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) - && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + if ( (ZSTD_index_overlap_check(prefixStartIndex, repIndex2)) + && (MEM_read32(repMatch2) == MEM_read32(ip0))) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; - size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; + size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH); - hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; - ip += repLength2; - anchor = ip; + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2); + hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2; + ip0 += repLength2; + anchor = ip0; continue; } break; } } + + /* Prepare for next iteration */ + assert(ip0 == anchor); + ip1 = ip0 + stepSize; } +_cleanup: /* save reps for next block */ - rep[0] = offset_1 ? offset_1 : offsetSaved; - rep[1] = offset_2 ? offset_2 : offsetSaved; + rep[0] = offset_1; + rep[1] = offset_2; /* Return the last literals size */ return (size_t)(iend - anchor); } + +ZSTD_GEN_FAST_FN(dictMatchState, 4, 0) +ZSTD_GEN_FAST_FN(dictMatchState, 5, 0) +ZSTD_GEN_FAST_FN(dictMatchState, 6, 0) +ZSTD_GEN_FAST_FN(dictMatchState, 7, 0) + size_t ZSTD_compressBlock_fast_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - ZSTD_compressionParameters const* cParams = &ms->cParams; - U32 const mls = cParams->minMatch; + U32 const mls = ms->cParams.minMatch; assert(ms->dictMatchState != NULL); switch(mls) { default: /* includes case 3 */ case 4 : - return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4); + return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize); case 5 : - return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5); + return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize); case 6 : - return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6); + return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize); case 7 : - return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7); + return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize); } } -static size_t ZSTD_compressBlock_fast_extDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize, U32 const mls) +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_compressBlock_fast_extDict_generic( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize, U32 const mls, U32 const hasStep) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; /* support stepSize of 0 */ - U32 const stepSize = cParams->targetLength + !(cParams->targetLength); + size_t const stepSize = cParams->targetLength + 
!(cParams->targetLength) + 1; const BYTE* const base = ms->window.base; const BYTE* const dictBase = ms->window.dictBase; const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; const BYTE* anchor = istart; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); - const U32 maxDistance = 1U << cParams->windowLog; - const U32 validLow = ms->window.lowLimit; - const U32 lowLimit = (endIndex - validLow > maxDistance) ? endIndex - maxDistance : validLow; + const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); const U32 dictStartIndex = lowLimit; const BYTE* const dictStart = dictBase + dictStartIndex; const U32 dictLimit = ms->window.dictLimit; @@ -391,101 +730,256 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - 8; U32 offset_1=rep[0], offset_2=rep[1]; + U32 offsetSaved1 = 0, offsetSaved2 = 0; + + const BYTE* ip0 = istart; + const BYTE* ip1; + const BYTE* ip2; + const BYTE* ip3; + U32 current0; + + + size_t hash0; /* hash for ip0 */ + size_t hash1; /* hash for ip1 */ + U32 idx; /* match idx for ip0 */ + const BYTE* idxBase; /* base pointer for idx */ + + U32 offcode; + const BYTE* match0; + size_t mLength; + const BYTE* matchEnd = 0; /* initialize to avoid warning, assert != 0 later */ + + size_t step; + const BYTE* nextStep; + const size_t kStepIncr = (1 << (kSearchStrength - 1)); + + (void)hasStep; /* not currently specialized on whether it's accelerated */ + + DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1); /* switch to "regular" variant if extDict is invalidated due to maxDistance */ if (prefixStartIndex == dictStartIndex) - return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls); - - /* Search Loop */ - while (ip < ilimit) { /* < instead of <=, because (ip+1) */ - const size_t h = ZSTD_hashPtr(ip, hlog, mls); - const U32 matchIndex = hashTable[h]; - const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base; - const BYTE* match = matchBase + matchIndex; - const U32 current = (U32)(ip-base); - const U32 repIndex = current + 1 - offset_1; - const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base; - const BYTE* const repMatch = repBase + repIndex; - size_t mLength; - hashTable[h] = current; /* update hash table */ - assert(offset_1 <= current +1); /* check repIndex */ - - if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex)) - && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { - const BYTE* repMatchEnd = repIndex < prefixStartIndex ? 
dictEnd : iend; - mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; - ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH); - } else { - if ( (matchIndex < dictStartIndex) || - (MEM_read32(match) != MEM_read32(ip)) ) { - assert(stepSize >= 1); - ip += ((ip-anchor) >> kSearchStrength) + stepSize; - continue; + return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize); + + { U32 const curr = (U32)(ip0 - base); + U32 const maxRep = curr - dictStartIndex; + if (offset_2 >= maxRep) offsetSaved2 = offset_2, offset_2 = 0; + if (offset_1 >= maxRep) offsetSaved1 = offset_1, offset_1 = 0; + } + + /* start each op */ +_start: /* Requires: ip0 */ + + step = stepSize; + nextStep = ip0 + kStepIncr; + + /* calculate positions, ip0 - anchor == 0, so we skip step calc */ + ip1 = ip0 + 1; + ip2 = ip0 + step; + ip3 = ip2 + 1; + + if (ip3 >= ilimit) { + goto _cleanup; + } + + hash0 = ZSTD_hashPtr(ip0, hlog, mls); + hash1 = ZSTD_hashPtr(ip1, hlog, mls); + + idx = hashTable[hash0]; + idxBase = idx < prefixStartIndex ? dictBase : base; + + do { + { /* load repcode match for ip[2] */ + U32 const current2 = (U32)(ip2 - base); + U32 const repIndex = current2 - offset_1; + const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base; + U32 rval; + if ( ((U32)(prefixStartIndex - repIndex) >= 4) /* intentional underflow */ + & (offset_1 > 0) ) { + rval = MEM_read32(repBase + repIndex); + } else { + rval = MEM_read32(ip2) ^ 1; /* guaranteed to not match. */ } - { const BYTE* matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend; - const BYTE* lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart; - U32 offset; - mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4; - while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ - offset = current - matchIndex; - offset_2 = offset_1; - offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + + /* write back hash table entry */ + current0 = (U32)(ip0 - base); + hashTable[hash0] = current0; + + /* check repcode at ip[2] */ + if (MEM_read32(ip2) == rval) { + ip0 = ip2; + match0 = repBase + repIndex; + matchEnd = repIndex < prefixStartIndex ? dictEnd : iend; + assert((match0 != prefixStart) & (match0 != dictStart)); + mLength = ip0[-1] == match0[-1]; + ip0 -= mLength; + match0 -= mLength; + offcode = REPCODE1_TO_OFFBASE; + mLength += 4; + goto _match; } } - /* found a match : store it */ - ip += mLength; - anchor = ip; + { /* load match for ip[0] */ + U32 const mval = idx >= dictStartIndex ? + MEM_read32(idxBase + idx) : + MEM_read32(ip0) ^ 1; /* guaranteed not to match */ - if (ip <= ilimit) { - /* Fill Table */ - hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2; - hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base); - /* check immediate repcode */ - while (ip <= ilimit) { - U32 const current2 = (U32)(ip-base); - U32 const repIndex2 = current2 - offset_2; - const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; - if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex)) /* intentional overflow */ - && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { - const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; - size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; - U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH); - hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; - ip += repLength2; - anchor = ip; - continue; - } - break; - } } } + /* check match at ip[0] */ + if (MEM_read32(ip0) == mval) { + /* found a match! */ + goto _offset; + } } + + /* lookup ip[1] */ + idx = hashTable[hash1]; + idxBase = idx < prefixStartIndex ? dictBase : base; + + /* hash ip[2] */ + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + + /* advance to next positions */ + ip0 = ip1; + ip1 = ip2; + ip2 = ip3; + + /* write back hash table entry */ + current0 = (U32)(ip0 - base); + hashTable[hash0] = current0; + + { /* load match for ip[0] */ + U32 const mval = idx >= dictStartIndex ? + MEM_read32(idxBase + idx) : + MEM_read32(ip0) ^ 1; /* guaranteed not to match */ + + /* check match at ip[0] */ + if (MEM_read32(ip0) == mval) { + /* found a match! */ + goto _offset; + } } + + /* lookup ip[1] */ + idx = hashTable[hash1]; + idxBase = idx < prefixStartIndex ? dictBase : base; + + /* hash ip[2] */ + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + + /* advance to next positions */ + ip0 = ip1; + ip1 = ip2; + ip2 = ip0 + step; + ip3 = ip1 + step; + + /* calculate step */ + if (ip2 >= nextStep) { + step++; + PREFETCH_L1(ip1 + 64); + PREFETCH_L1(ip1 + 128); + nextStep += kStepIncr; + } + } while (ip3 < ilimit); + +_cleanup: + /* Note that there are probably still a couple positions we could search. + * However, it seems to be a meaningful performance hit to try to search + * them. So let's not. */ + + /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0), + * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */ + offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2; /* save reps for next block */ - rep[0] = offset_1; - rep[1] = offset_2; + rep[0] = offset_1 ? offset_1 : offsetSaved1; + rep[1] = offset_2 ? offset_2 : offsetSaved2; /* Return the last literals size */ return (size_t)(iend - anchor); + +_offset: /* Requires: ip0, idx, idxBase */ + + /* Compute the offset code. */ + { U32 const offset = current0 - idx; + const BYTE* const lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart; + matchEnd = idx < prefixStartIndex ? dictEnd : iend; + match0 = idxBase + idx; + offset_2 = offset_1; + offset_1 = offset; + offcode = OFFSET_TO_OFFBASE(offset); + mLength = 4; + + /* Count the backwards match length. */ + while (((ip0>anchor) & (match0>lowMatchPtr)) && (ip0[-1] == match0[-1])) { + ip0--; + match0--; + mLength++; + } } + +_match: /* Requires: ip0, match0, offcode, matchEnd */ + + /* Count the forward length. */ + assert(matchEnd != 0); + mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart); + + ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength); + + ip0 += mLength; + anchor = ip0; + + /* write next hash table entry */ + if (ip1 < ip0) { + hashTable[hash1] = (U32)(ip1 - base); + } + + /* Fill table and check for immediate repcode. 
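Both rewritten fast loops accelerate through incompressible regions the same way: every kStepIncr bytes without a match, the gap between probed position pairs grows by one and the upcoming input is prefetched. A toy model of that schedule (kSearchStrength assumed to be 8, its value in zstd_compress_internal.h):

```c
#include <stddef.h>

/* Count positions probed while scanning a match-free block of `blockSize`
 * bytes: ip0/ip1 are adjacent, the gap to the next pair is `step`, and step
 * grows by 1 each time the cursor passes another kStepIncr boundary. */
static size_t countProbedPositions(size_t blockSize, size_t initialStep)
{
    size_t const kSearchStrength = 8;                        /* assumed */
    size_t const kStepIncr = (size_t)1 << (kSearchStrength - 1); /* 128 */
    size_t step = initialStep, nextStep = kStepIncr;
    size_t pos = 0, probes = 0;
    while (pos < blockSize) {
        probes += 2;              /* ip0 and ip1 = ip0 + 1 */
        if (pos >= nextStep) {    /* long run without a match: speed up */
            step++;               /* (the real loop also prefetches ahead) */
            nextStep += kStepIncr;
        }
        pos += step;              /* jump to the next pair */
    }
    return probes;
}
```

The effect is that fully incompressible data is scanned in ever-larger strides, while step resets to stepSize whenever a match restarts the loop at _start.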
*/ + if (ip0 <= ilimit) { + /* Fill Table */ + assert(base+current0+2 > istart); /* check base overflow */ + hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */ + hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); + + while (ip0 <= ilimit) { + U32 const repIndex2 = (U32)(ip0-base) - offset_2; + const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; + if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2)) & (offset_2 > 0)) + && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) { + const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; + { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, REPCODE1_TO_OFFBASE, repLength2); + hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); + ip0 += repLength2; + anchor = ip0; + continue; + } + break; + } } + + goto _start; } +ZSTD_GEN_FAST_FN(extDict, 4, 0) +ZSTD_GEN_FAST_FN(extDict, 5, 0) +ZSTD_GEN_FAST_FN(extDict, 6, 0) +ZSTD_GEN_FAST_FN(extDict, 7, 0) size_t ZSTD_compressBlock_fast_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - ZSTD_compressionParameters const* cParams = &ms->cParams; - U32 const mls = cParams->minMatch; + U32 const mls = ms->cParams.minMatch; + assert(ms->dictMatchState == NULL); switch(mls) { default: /* includes case 3 */ case 4 : - return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4); + return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize); case 5 : - return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5); + return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize); case 6 : - return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6); + return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize); case 7 : - return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7); + return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize); } } diff --git a/lib/compress/zstd_fast.h b/lib/compress/zstd_fast.h index b74a88c57c8..216821ac33b 100644 --- a/lib/compress/zstd_fast.h +++ b/lib/compress/zstd_fast.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -11,27 +11,20 @@ #ifndef ZSTD_FAST_H #define ZSTD_FAST_H -#if defined (__cplusplus) -extern "C" { -#endif - -#include "mem.h" /* U32 */ +#include "../common/mem.h" /* U32 */ #include "zstd_compress_internal.h" -void ZSTD_fillHashTable(ZSTD_matchState_t* ms, - void const* end, ZSTD_dictTableLoadMethod_e dtlm); +void ZSTD_fillHashTable(ZSTD_MatchState_t* ms, + void const* end, ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp); size_t ZSTD_compressBlock_fast( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_fast_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_fast_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -#if defined (__cplusplus) -} -#endif - #endif /* ZSTD_FAST_H */ diff --git a/lib/compress/zstd_lazy.c b/lib/compress/zstd_lazy.c index 94d906c01f1..18b7b43948f 100644 --- a/lib/compress/zstd_lazy.c +++ b/lib/compress/zstd_lazy.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -10,14 +10,23 @@ #include "zstd_compress_internal.h" #include "zstd_lazy.h" +#include "../common/bits.h" /* ZSTD_countTrailingZeros64 */ + +#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) + +#define kLazySkippingStep 8 /*-************************************* * Binary Tree search ***************************************/ -static void -ZSTD_updateDUBT(ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_updateDUBT(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend, U32 mls) { @@ -58,11 +67,12 @@ ZSTD_updateDUBT(ZSTD_matchState_t* ms, /** ZSTD_insertDUBT1() : * sort one already inserted but unsorted position - * assumption : current >= btlow == (current - btmask) + * assumption : curr >= btlow == (curr - btmask) * doesn't fail */ -static void -ZSTD_insertDUBT1(ZSTD_matchState_t* ms, - U32 current, const BYTE* inputEnd, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_insertDUBT1(const ZSTD_MatchState_t* ms, + U32 curr, const BYTE* inputEnd, U32 nbCompares, U32 btLow, const ZSTD_dictMode_e dictMode) { @@ -74,41 +84,41 @@ ZSTD_insertDUBT1(ZSTD_matchState_t* ms, const BYTE* const base = ms->window.base; const BYTE* const dictBase = ms->window.dictBase; const U32 dictLimit = ms->window.dictLimit; - const BYTE* const ip = (current>=dictLimit) ? base + current : dictBase + current; - const BYTE* const iend = (current>=dictLimit) ? inputEnd : dictBase + dictLimit; + const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr; + const BYTE* const iend = (curr>=dictLimit) ? 
inputEnd : dictBase + dictLimit; const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const prefixStart = base + dictLimit; const BYTE* match; - U32* smallerPtr = bt + 2*(current&btMask); + U32* smallerPtr = bt + 2*(curr&btMask); U32* largerPtr = smallerPtr + 1; U32 matchIndex = *smallerPtr; /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */ U32 dummy32; /* to be nullified at the end */ U32 const windowValid = ms->window.lowLimit; U32 const maxDistance = 1U << cParams->windowLog; - U32 const windowLow = (current - windowValid > maxDistance) ? current - maxDistance : windowValid; + U32 const windowLow = (curr - windowValid > maxDistance) ? curr - maxDistance : windowValid; DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)", - current, dictLimit, windowLow); - assert(current >= btLow); + curr, dictLimit, windowLow); + assert(curr >= btLow); assert(ip < iend); /* condition for ZSTD_count */ - while (nbCompares-- && (matchIndex > windowLow)) { + for (; nbCompares && (matchIndex > windowLow); --nbCompares) { U32* const nextPtr = bt + 2*(matchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ - assert(matchIndex < current); + assert(matchIndex < curr); /* note : all candidates are now supposed sorted, * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */ if ( (dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit) /* both in current segment*/ - || (current < dictLimit) /* both in extDict */) { + || (curr < dictLimit) /* both in extDict */) { const BYTE* const mBase = ( (dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) ? 
base : dictBase; assert( (matchIndex+matchLength >= dictLimit) /* might be wrong if extDict is incorrectly set to 0 */ - || (current < dictLimit) ); + || (curr < dictLimit) ); match = mBase + matchIndex; matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend); } else { @@ -119,7 +129,7 @@ ZSTD_insertDUBT1(ZSTD_matchState_t* ms, } DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ", - current, matchIndex, (U32)matchLength); + curr, matchIndex, (U32)matchLength); if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */ @@ -149,9 +159,10 @@ ZSTD_insertDUBT1(ZSTD_matchState_t* ms, } -static size_t -ZSTD_DUBT_findBetterDictMatch ( - ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_DUBT_findBetterDictMatch ( + const ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iend, size_t* offsetPtr, size_t bestLength, @@ -159,7 +170,7 @@ ZSTD_DUBT_findBetterDictMatch ( U32 const mls, const ZSTD_dictMode_e dictMode) { - const ZSTD_matchState_t * const dms = ms->dictMatchState; + const ZSTD_MatchState_t * const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dmsCParams = &dms->cParams; const U32 * const dictHashTable = dms->hashTable; U32 const hashLog = dmsCParams->hashLog; @@ -168,7 +179,7 @@ ZSTD_DUBT_findBetterDictMatch ( const BYTE* const base = ms->window.base; const BYTE* const prefixStart = base + ms->window.dictLimit; - U32 const current = (U32)(ip-base); + U32 const curr = (U32)(ip-base); const BYTE* const dictBase = dms->window.base; const BYTE* const dictEnd = dms->window.nextSrc; U32 const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base); @@ -185,7 +196,7 @@ ZSTD_DUBT_findBetterDictMatch ( (void)dictMode; assert(dictMode == ZSTD_dictMatchState); - while (nbCompares-- && (dictMatchIndex > dictLowLimit)) { + for (; nbCompares && (dictMatchIndex > dictLowLimit); --nbCompares) { U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ const BYTE* match = dictBase + dictMatchIndex; @@ -195,10 +206,10 @@ ZSTD_DUBT_findBetterDictMatch ( if (matchLength > bestLength) { U32 matchIndex = dictMatchIndex + dictIndexDelta; - if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) { + if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) { DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)", - current, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + current - matchIndex, dictMatchIndex, matchIndex); - bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex; + curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, OFFSET_TO_OFFBASE(curr - matchIndex), dictMatchIndex, matchIndex); + bestLength = matchLength, *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex); } if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */ break; /* drop, to guarantee consistency (miss a little bit of compression) */ @@ -218,19 +229,20 @@ ZSTD_DUBT_findBetterDictMatch ( } if (bestLength >= MINMATCH) { - U32 const mIndex = current - 
((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex; + U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offsetPtr); (void)mIndex; DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)", - current, (U32)bestLength, (U32)*offsetPtr, mIndex); + curr, (U32)bestLength, (U32)*offsetPtr, mIndex); } return bestLength; } -static size_t -ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_DUBT_findBestMatch(ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iend, - size_t* offsetPtr, + size_t* offBasePtr, U32 const mls, const ZSTD_dictMode_e dictMode) { @@ -241,15 +253,13 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, U32 matchIndex = hashTable[h]; const BYTE* const base = ms->window.base; - U32 const current = (U32)(ip-base); - U32 const maxDistance = 1U << cParams->windowLog; - U32 const windowValid = ms->window.lowLimit; - U32 const windowLow = (current - windowValid > maxDistance) ? current - maxDistance : windowValid; + U32 const curr = (U32)(ip-base); + U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog); U32* const bt = ms->chainTable; U32 const btLog = cParams->chainLog - 1; U32 const btMask = (1 << btLog) - 1; - U32 const btLow = (btMask >= current) ? 0 : current - btMask; + U32 const btLow = (btMask >= curr) ? 0 : curr - btMask; U32 const unsortLimit = MAX(btLow, windowLow); U32* nextCandidate = bt + 2*(matchIndex&btMask); @@ -258,8 +268,9 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, U32 nbCandidates = nbCompares; U32 previousCandidate = 0; - DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", current); + DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr); assert(ip <= iend-8); /* required for h calculation */ + assert(dictMode != ZSTD_dedicatedDictSearch); /* reach end of unsorted candidates list */ while ( (matchIndex > unsortLimit) @@ -301,16 +312,16 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, const U32 dictLimit = ms->window.dictLimit; const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const prefixStart = base + dictLimit; - U32* smallerPtr = bt + 2*(current&btMask); - U32* largerPtr = bt + 2*(current&btMask) + 1; - U32 matchEndIdx = current + 8 + 1; + U32* smallerPtr = bt + 2*(curr&btMask); + U32* largerPtr = bt + 2*(curr&btMask) + 1; + U32 matchEndIdx = curr + 8 + 1; U32 dummy32; /* to be nullified at the end */ size_t bestLength = 0; matchIndex = hashTable[h]; - hashTable[h] = current; /* Update Hash Table */ + hashTable[h] = curr; /* Update Hash Table */ - while (nbCompares-- && (matchIndex > windowLow)) { + for (; nbCompares && (matchIndex > windowLow); --nbCompares) { U32* const nextPtr = bt + 2*(matchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ const BYTE* match; @@ -328,8 +339,8 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, if (matchLength > bestLength) { if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; - if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) - bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex; + if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)*offBasePtr)) ) + bestLength = matchLength, *offBasePtr = OFFSET_TO_OFFBASE(curr - matchIndex); if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ if (dictMode == 
ZSTD_dictMatchState) { nbCompares = 0; /* in addition to avoiding checking any @@ -358,19 +369,20 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, *smallerPtr = *largerPtr = 0; + assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */ if (dictMode == ZSTD_dictMatchState && nbCompares) { bestLength = ZSTD_DUBT_findBetterDictMatch( ms, ip, iend, - offsetPtr, bestLength, nbCompares, + offBasePtr, bestLength, nbCompares, mls, dictMode); } - assert(matchEndIdx > current+8); /* ensure nextToUpdate is increased */ + assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */ ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */ if (bestLength >= MINMATCH) { - U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex; + U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offBasePtr); (void)mIndex; DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)", - current, (U32)bestLength, (U32)*offsetPtr, mIndex); + curr, (U32)bestLength, (U32)*offBasePtr, mIndex); } return bestLength; } @@ -378,69 +390,236 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ -FORCE_INLINE_TEMPLATE size_t -ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms, +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_BtFindBestMatch( ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, - size_t* offsetPtr, + size_t* offBasePtr, const U32 mls /* template */, const ZSTD_dictMode_e dictMode) { DEBUGLOG(7, "ZSTD_BtFindBestMatch"); if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */ ZSTD_updateDUBT(ms, ip, iLimit, mls); - return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode); + return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offBasePtr, mls, dictMode); } +/*********************************** +* Dedicated dict search +***********************************/ -static size_t -ZSTD_BtFindBestMatch_selectMLS ( ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) +void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState_t* ms, const BYTE* const ip) { - switch(ms->cParams.minMatch) + const BYTE* const base = ms->window.base; + U32 const target = (U32)(ip - base); + U32* const hashTable = ms->hashTable; + U32* const chainTable = ms->chainTable; + U32 const chainSize = 1 << ms->cParams.chainLog; + U32 idx = ms->nextToUpdate; + U32 const minChain = chainSize < target - idx ? target - chainSize : idx; + U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG; + U32 const cacheSize = bucketSize - 1; + U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize; + U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts; + + /* We know the hashtable is oversized by a factor of `bucketSize`. + * We are going to temporarily pretend `bucketSize == 1`, keeping only a + * single entry. We will use the rest of the space to construct a temporary + * chaintable. + */ + U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG; + U32* const tmpHashTable = hashTable; + U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog); + U32 const tmpChainSize = (U32)((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog; + U32 const tmpMinChain = tmpChainSize < target ? 
target - tmpChainSize : idx; + U32 hashIdx; + + assert(ms->cParams.chainLog <= 24); + assert(ms->cParams.hashLog > ms->cParams.chainLog); + assert(idx != 0); + assert(tmpMinChain <= minChain); + + /* fill conventional hash table and conventional chain table */ + for ( ; idx < target; idx++) { + U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch); + if (idx >= tmpMinChain) { + tmpChainTable[idx - tmpMinChain] = hashTable[h]; + } + tmpHashTable[h] = idx; + } + + /* sort chains into ddss chain table */ { - default : /* includes case 3 */ - case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict); - case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict); - case 7 : - case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict); + U32 chainPos = 0; + for (hashIdx = 0; hashIdx < (1U << hashLog); hashIdx++) { + U32 count; + U32 countBeyondMinChain = 0; + U32 i = tmpHashTable[hashIdx]; + for (count = 0; i >= tmpMinChain && count < cacheSize; count++) { + /* skip through the chain to the first position that won't be + * in the hash cache bucket */ + if (i < minChain) { + countBeyondMinChain++; + } + i = tmpChainTable[i - tmpMinChain]; + } + if (count == cacheSize) { + for (count = 0; count < chainLimit;) { + if (i < minChain) { + if (!i || ++countBeyondMinChain > cacheSize) { + /* only allow pulling `cacheSize` number of entries + * into the cache or chainTable beyond `minChain`, + * to replace the entries pulled out of the + * chainTable into the cache. This lets us reach + * back further without increasing the total number + * of entries in the chainTable, guaranteeing the + * DDSS chain table will fit into the space + * allocated for the regular one. */ + break; + } + } + chainTable[chainPos++] = i; + count++; + if (i < tmpMinChain) { + break; + } + i = tmpChainTable[i - tmpMinChain]; + } + } else { + count = 0; + } + if (count) { + tmpHashTable[hashIdx] = ((chainPos - count) << 8) + count; + } else { + tmpHashTable[hashIdx] = 0; + } + } + assert(chainPos <= chainSize); /* I believe this is guaranteed... */ + } + + /* move chain pointers into the last entry of each hash bucket */ + for (hashIdx = (1 << hashLog); hashIdx; ) { + U32 const bucketIdx = --hashIdx << ZSTD_LAZY_DDSS_BUCKET_LOG; + U32 const chainPackedPointer = tmpHashTable[hashIdx]; + U32 i; + for (i = 0; i < cacheSize; i++) { + hashTable[bucketIdx + i] = 0; + } + hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer; } + + /* fill the buckets of the hash table */ + for (idx = ms->nextToUpdate; idx < target; idx++) { + U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch) + << ZSTD_LAZY_DDSS_BUCKET_LOG; + U32 i; + /* Shift hash cache down 1. */ + for (i = cacheSize - 1; i; i--) + hashTable[h + i] = hashTable[h + i - 1]; + hashTable[h] = idx; + } + + ms->nextToUpdate = target; } +/* Returns the longest match length found in the dedicated dict search structure. + * If none are longer than the argument ml, then ml will be returned. 
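+ * The last slot of each DDS bucket holds a packed chain pointer, built by + * ZSTD_dedicatedDictSearch_lazy_loadDictionary() above as ((chainPos - count) << 8) + count. + * For example, a chain of 7 entries starting at chainTable[0x1A2] is stored as 0x1A207: + * chain index in the high 24 bits, chain length in the low 8.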
+ */ +FORCE_INLINE_TEMPLATE +size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts, + const ZSTD_MatchState_t* const dms, + const BYTE* const ip, const BYTE* const iLimit, + const BYTE* const prefixStart, const U32 curr, + const U32 dictLimit, const size_t ddsIdx) { + const U32 ddsLowestIndex = dms->window.dictLimit; + const BYTE* const ddsBase = dms->window.base; + const BYTE* const ddsEnd = dms->window.nextSrc; + const U32 ddsSize = (U32)(ddsEnd - ddsBase); + const U32 ddsIndexDelta = dictLimit - ddsSize; + const U32 bucketSize = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG); + const U32 bucketLimit = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1; + U32 ddsAttempt; + U32 matchIndex; + + for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) { + PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]); + } -static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - switch(ms->cParams.minMatch) { - default : /* includes case 3 */ - case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState); - case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState); - case 7 : - case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState); + U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1]; + U32 const chainIndex = chainPackedPointer >> 8; + + PREFETCH_L1(&dms->chainTable[chainIndex]); } -} + for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) { + size_t currentMl=0; + const BYTE* match; + matchIndex = dms->hashTable[ddsIdx + ddsAttempt]; + match = ddsBase + matchIndex; + + if (!matchIndex) { + return ml; + } + + /* guaranteed by table construction */ + (void)ddsLowestIndex; + assert(matchIndex >= ddsLowestIndex); + assert(match+4 <= ddsEnd); + if (MEM_read32(match) == MEM_read32(ip)) { + /* assumption : matchIndex <= dictLimit-4 (by table construction) */ + currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4; + } + + /* save best solution */ + if (currentMl > ml) { + ml = currentMl; + *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta)); + if (ip+currentMl == iLimit) { + /* best possible, avoids read overflow on next attempt */ + return ml; + } + } + } -static size_t ZSTD_BtFindBestMatch_extDict_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - switch(ms->cParams.minMatch) { - default : /* includes case 3 */ - case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict); - case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict); - case 7 : - case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict); + U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1]; + U32 chainIndex = chainPackedPointer >> 8; + U32 const chainLength = chainPackedPointer & 0xFF; + U32 const chainAttempts = nbAttempts - ddsAttempt; + U32 const chainLimit = chainAttempts > chainLength ? 
chainLength : chainAttempts; + U32 chainAttempt; + + for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) { + PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]); + } + + for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) { + size_t currentMl=0; + const BYTE* match; + matchIndex = dms->chainTable[chainIndex]; + match = ddsBase + matchIndex; + + /* guaranteed by table construction */ + assert(matchIndex >= ddsLowestIndex); + assert(match+4 <= ddsEnd); + if (MEM_read32(match) == MEM_read32(ip)) { + /* assumption : matchIndex <= dictLimit-4 (by table construction) */ + currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4; + } + + /* save best solution */ + if (currentMl > ml) { + ml = currentMl; + *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta)); + if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ + } + } } + return ml; } - /* ********************************* * Hash Chain ***********************************/ @@ -448,10 +627,12 @@ static size_t ZSTD_BtFindBestMatch_extDict_selectMLS ( /* Update chains up to ip (excluded) Assumption : always within prefix (i.e. not within extDict) */ -static U32 ZSTD_insertAndFindFirstIndex_internal( - ZSTD_matchState_t* ms, +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_insertAndFindFirstIndex_internal( + ZSTD_MatchState_t* ms, const ZSTD_compressionParameters* const cParams, - const BYTE* ip, U32 const mls) + const BYTE* ip, U32 const mls, U32 const lazySkipping) { U32* const hashTable = ms->hashTable; const U32 hashLog = cParams->hashLog; @@ -466,22 +647,25 @@ static U32 ZSTD_insertAndFindFirstIndex_internal( NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; hashTable[h] = idx; idx++; + /* Stop inserting every position when in the lazy skipping mode. */ + if (lazySkipping) + break; } ms->nextToUpdate = target; return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; } -U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) { +U32 ZSTD_insertAndFindFirstIndex(ZSTD_MatchState_t* ms, const BYTE* ip) { const ZSTD_compressionParameters* const cParams = &ms->cParams; - return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch); + return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch, /* lazySkipping*/ 0); } - /* inlining is important to hardwire a hot branch (template emulation) */ FORCE_INLINE_TEMPLATE -size_t ZSTD_HcFindBestMatch_generic ( - ZSTD_matchState_t* ms, +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_HcFindBestMatch( + ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, size_t* offsetPtr, const U32 mls, const ZSTD_dictMode_e dictMode) @@ -495,23 +679,39 @@ size_t ZSTD_HcFindBestMatch_generic ( const U32 dictLimit = ms->window.dictLimit; const BYTE* const prefixStart = base + dictLimit; const BYTE* const dictEnd = dictBase + dictLimit; - const U32 current = (U32)(ip-base); + const U32 curr = (U32)(ip-base); const U32 maxDistance = 1U << cParams->windowLog; - const U32 lowValid = ms->window.lowLimit; - const U32 lowLimit = (current - lowValid > maxDistance) ? current - maxDistance : lowValid; - const U32 minChain = current > chainSize ? current - chainSize : 0; + const U32 lowestValid = ms->window.lowLimit; + const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid; + const U32 isDictionary = (ms->loadedDictEnd != 0); + const U32 lowLimit = isDictionary ? 
lowestValid : withinMaxDistance; + const U32 minChain = curr > chainSize ? curr - chainSize : 0; U32 nbAttempts = 1U << cParams->searchLog; size_t ml=4-1; + const ZSTD_MatchState_t* const dms = ms->dictMatchState; + const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch + ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0; + const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch + ? ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0; + + U32 matchIndex; + + if (dictMode == ZSTD_dedicatedDictSearch) { + const U32* entry = &dms->hashTable[ddsIdx]; + PREFETCH_L1(entry); + } + /* HC4 match finder */ - U32 matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls); + matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls, ms->lazySkipping); - for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) { + for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) { size_t currentMl=0; if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) { const BYTE* const match = base + matchIndex; assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */ - if (match[ml] == ip[ml]) /* potentially better */ + /* read 4B starting from (match + ml + 1 - sizeof(U32)) */ + if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) /* potentially better */ currentMl = ZSTD_count(ip, match, iLimit); } else { const BYTE* const match = dictBase + matchIndex; @@ -523,7 +723,7 @@ size_t ZSTD_HcFindBestMatch_generic ( /* save best solution */ if (currentMl > ml) { ml = currentMl; - *offsetPtr = current - matchIndex + ZSTD_REP_MOVE; + *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex); if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } @@ -531,8 +731,11 @@ size_t ZSTD_HcFindBestMatch_generic ( matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); } - if (dictMode == ZSTD_dictMatchState) { - const ZSTD_matchState_t* const dms = ms->dictMatchState; + assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. 
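+ nbAttempts was decremented once per candidate visited above, so the DDS/DMS passes below only spend whatever search budget that walk left over.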
*/ + if (dictMode == ZSTD_dedicatedDictSearch) { + ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms, + ip, iLimit, prefixStart, curr, dictLimit, ddsIdx); + } else if (dictMode == ZSTD_dictMatchState) { const U32* const dmsChainTable = dms->chainTable; const U32 dmsChainSize = (1 << dms->cParams.chainLog); const U32 dmsChainMask = dmsChainSize - 1; @@ -545,7 +748,7 @@ size_t ZSTD_HcFindBestMatch_generic ( matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)]; - for ( ; (matchIndex>dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) { + for ( ; (matchIndex>=dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) { size_t currentMl=0; const BYTE* const match = dmsBase + matchIndex; assert(match+4 <= dmsEnd); @@ -555,11 +758,13 @@ size_t ZSTD_HcFindBestMatch_generic ( /* save best solution */ if (currentMl > ml) { ml = currentMl; - *offsetPtr = current - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE; + assert(curr > matchIndex + dmsIndexDelta); + *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta)); if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } if (matchIndex <= dmsMinChain) break; + matchIndex = dmsChainTable[matchIndex & dmsChainMask]; } } @@ -567,125 +772,869 @@ size_t ZSTD_HcFindBestMatch_generic ( return ml; } +/* ********************************* +* (SIMD) Row-based matchfinder +***********************************/ +/* Constants for row-based hash */ +#define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1) +#define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */ + +#define ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1) + +typedef U64 ZSTD_VecMask; /* Clarifies when we are interacting with a U64 representing a mask of matches */ + +/* ZSTD_VecMask_next(): + * Starting from the LSB, returns the idx of the next non-zero bit. + * Basically counting the nb of trailing zeroes. + */ +MEM_STATIC U32 ZSTD_VecMask_next(ZSTD_VecMask val) { + return ZSTD_countTrailingZeros64(val); +} + +/* ZSTD_row_nextIndex(): + * Returns the next index to insert at within a tagTable row, and updates the "head" + * value to reflect the update. Essentially cycles backwards from [1, {entries per row}) + */ +FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) { + U32 next = (*tagRow-1) & rowMask; + next += (next == 0) ? rowMask : 0; /* skip first position */ + *tagRow = (BYTE)next; + return next; +} + +/* ZSTD_isAligned(): + * Checks that a pointer is aligned to "align" bytes which must be a power of 2. + */ +MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) { + assert((align & (align - 1)) == 0); + return (((size_t)ptr) & (align - 1)) == 0; +} -FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) +/* ZSTD_row_prefetch(): + * Performs prefetching for the hashTable and tagTable at a given row. 
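+ * A row holds (1 << rowLog) U32 hashTable entries (64 to 256 bytes) plus one tag byte per entry, + * which is why the larger rowLog values get the extra prefetches issued below to cover the whole row.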
+ */ +FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, BYTE const* tagTable, U32 const relRow, U32 const rowLog) { + PREFETCH_L1(hashTable + relRow); + if (rowLog >= 5) { + PREFETCH_L1(hashTable + relRow + 16); + /* Note: prefetching more of the hash table does not appear to be beneficial for 128-entry rows */ + } + PREFETCH_L1(tagTable + relRow); + if (rowLog == 6) { + PREFETCH_L1(tagTable + relRow + 32); + } + assert(rowLog == 4 || rowLog == 5 || rowLog == 6); + assert(ZSTD_isAligned(hashTable + relRow, 64)); /* prefetched hash row always 64-byte aligned */ + assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on correct multiple of bytes (32,64,128) */ +} + +/* ZSTD_row_fillHashCache(): + * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries, + * but not beyond iLimit. + */ +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_row_fillHashCache(ZSTD_MatchState_t* ms, const BYTE* base, + U32 const rowLog, U32 const mls, + U32 idx, const BYTE* const iLimit) { - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict); - case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict); - case 7 : - case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict); + U32 const* const hashTable = ms->hashTable; + BYTE const* const tagTable = ms->tagTable; + U32 const hashLog = ms->rowHashLog; + U32 const maxElemsToPrefetch = (base + idx) > iLimit ? 0 : (U32)(iLimit - (base + idx) + 1); + U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch); + + for (; idx < lim; ++idx) { + U32 const hash = (U32)ZSTD_hashPtrSalted(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt); + U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; + ZSTD_row_prefetch(hashTable, tagTable, row, rowLog); + ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash; } + + DEBUGLOG(6, "ZSTD_row_fillHashCache(): [%u %u %u %u %u %u %u %u]", ms->hashCache[0], ms->hashCache[1], + ms->hashCache[2], ms->hashCache[3], ms->hashCache[4], + ms->hashCache[5], ms->hashCache[6], ms->hashCache[7]); } +/* ZSTD_row_nextCachedHash(): + * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at + * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable. + */ +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable, + BYTE const* tagTable, BYTE const* base, + U32 idx, U32 const hashLog, + U32 const rowLog, U32 const mls, + U64 const hashSalt) +{ + U32 const newHash = (U32)ZSTD_hashPtrSalted(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt); + U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; + ZSTD_row_prefetch(hashTable, tagTable, row, rowLog); + { U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK]; + cache[idx & ZSTD_ROW_HASH_CACHE_MASK] = newHash; + return hash; + } +} -static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) +/* ZSTD_row_update_internalImpl(): + * Updates the hash table with positions starting from updateStartIdx until updateEndIdx. 
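+ * For each position it stores two things: the low ZSTD_ROW_HASH_TAG_BITS of the hash as a tag byte + * in the row's circular tag buffer, and the position itself in the matching hashTable slot.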
+ */ +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_row_update_internalImpl(ZSTD_MatchState_t* ms, + U32 updateStartIdx, U32 const updateEndIdx, + U32 const mls, U32 const rowLog, + U32 const rowMask, U32 const useCache) { - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState); - case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState); - case 7 : - case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState); + U32* const hashTable = ms->hashTable; + BYTE* const tagTable = ms->tagTable; + U32 const hashLog = ms->rowHashLog; + const BYTE* const base = ms->window.base; + + DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx); + for (; updateStartIdx < updateEndIdx; ++updateStartIdx) { + U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls, ms->hashSalt) + : (U32)ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt); + U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; + U32* const row = hashTable + relRow; + BYTE* tagRow = tagTable + relRow; + U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask); + + assert(hash == ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt)); + tagRow[pos] = hash & ZSTD_ROW_HASH_TAG_MASK; + row[pos] = updateStartIdx; + } +} + +/* ZSTD_row_update_internal(): + * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate. + * Skips sections of long matches as is necessary. + */ +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_row_update_internal(ZSTD_MatchState_t* ms, const BYTE* ip, + U32 const mls, U32 const rowLog, + U32 const rowMask, U32 const useCache) +{ + U32 idx = ms->nextToUpdate; + const BYTE* const base = ms->window.base; + const U32 target = (U32)(ip - base); + const U32 kSkipThreshold = 384; + const U32 kMaxMatchStartPositionsToUpdate = 96; + const U32 kMaxMatchEndPositionsToUpdate = 32; + + if (useCache) { + /* Only skip positions when using hash cache, i.e. + * if we are loading a dict, don't skip anything. + * If we decide to skip, then we only update a set number + * of positions at the beginning and end of the match. + */ + if (UNLIKELY(target - idx > kSkipThreshold)) { + U32 const bound = idx + kMaxMatchStartPositionsToUpdate; + ZSTD_row_update_internalImpl(ms, idx, bound, mls, rowLog, rowMask, useCache); + idx = target - kMaxMatchEndPositionsToUpdate; + ZSTD_row_fillHashCache(ms, base, rowLog, mls, idx, ip+1); + } } + assert(target >= idx); + ZSTD_row_update_internalImpl(ms, idx, target, mls, rowLog, rowMask, useCache); + ms->nextToUpdate = target; } +/* ZSTD_row_update(): + * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary + * processing. 
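+ * It always passes useCache=0: dictionary loading visits every position exactly once, so the hash + * cache and the skip heuristic of the internal variant are deliberately bypassed.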
+ */ +void ZSTD_row_update(ZSTD_MatchState_t* const ms, const BYTE* ip) { + const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); + const U32 rowMask = (1u << rowLog) - 1; + const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */); -FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) + DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog); + ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* don't use cache */); +} + +/* Returns the mask width of bits group of which will be set to 1. Given not all + * architectures have easy movemask instruction, this helps to iterate over + * groups of bits easier and faster. + */ +FORCE_INLINE_TEMPLATE U32 +ZSTD_row_matchMaskGroupWidth(const U32 rowEntries) { - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict); - case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict); - case 7 : - case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict); + assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64); + assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES); + (void)rowEntries; +#if defined(ZSTD_ARCH_ARM_NEON) + /* NEON path only works for little endian */ + if (!MEM_isLittleEndian()) { + return 1; + } + if (rowEntries == 16) { + return 4; + } + if (rowEntries == 32) { + return 2; + } + if (rowEntries == 64) { + return 1; + } +#endif + return 1; +} + +#if defined(ZSTD_ARCH_X86_SSE2) +FORCE_INLINE_TEMPLATE ZSTD_VecMask +ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U32 head) +{ + const __m128i comparisonMask = _mm_set1_epi8((char)tag); + int matches[4] = {0}; + int i; + assert(nbChunks == 1 || nbChunks == 2 || nbChunks == 4); + for (i=0; i<nbChunks; i++) { + const __m128i chunk = _mm_loadu_si128((const __m128i*)(const void*)(src + 16*i)); + const __m128i equalMask = _mm_cmpeq_epi8(chunk, comparisonMask); + matches[i] = _mm_movemask_epi8(equalMask); + } + if (nbChunks == 1) return ZSTD_rotateRight_U16((U16)matches[0], head); + if (nbChunks == 2) return ZSTD_rotateRight_U32((U32)matches[1] << 16 | (U32)matches[0], head); + assert(nbChunks == 4); + return ZSTD_rotateRight_U64((U64)matches[3] << 48 | (U64)matches[2] << 32 | (U64)matches[1] << 16 | (U64)matches[0], head); +} +#endif + +#if defined(ZSTD_ARCH_ARM_NEON) +FORCE_INLINE_TEMPLATE ZSTD_VecMask +ZSTD_row_getNEONMask(const U32 rowEntries, const BYTE* const src, const BYTE tag, const U32 headGrouped) +{ + assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64); + if (rowEntries == 16) { + /* vshrn_n_u16 shifts by 4 every u16 and narrows to 8 lower bits. + * After that groups of 4 bits represent the equalMask. We lower + * all bits except the highest in these groups by doing AND with + * 0x88 = 0b10001000. + */ + const uint8x16_t chunk = vld1q_u8(src); + const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag))); + const uint8x8_t res = vshrn_n_u16(equalMask, 4); + const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0); + return ZSTD_rotateRight_U64(matches, headGrouped) & 0x8888888888888888ull; + } else if (rowEntries == 32) { + /* Same idea as with rowEntries == 16 but doing AND with + * 0x55 = 0b01010101. + */ + const uint16x8x2_t chunk = vld2q_u16((const uint16_t*)(const void*)src); + const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]); + const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]); + const uint8x16_t dup = vdupq_n_u8(tag); + const uint8x8_t t0 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk0, dup)), 6); + const uint8x8_t t1 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk1, dup)), 6); + const uint8x8_t res = vsli_n_u8(t0, t1, 4); + const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0); + return ZSTD_rotateRight_U64(matches, headGrouped) & 0x5555555555555555ull; + } else { /* rowEntries == 64 */ + const uint8x16x4_t chunk = vld4q_u8(src); + const uint8x16_t dup = vdupq_n_u8(tag); + const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup); + const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup); + const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup); + const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup); + + const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1); + const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1); + const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2); + const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4); + const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4); + const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0); + return ZSTD_rotateRight_U64(matches, headGrouped); + } +} +#endif + +/* Returns a ZSTD_VecMask (U64) that has the nth group (determined by + * ZSTD_row_matchMaskGroupWidth) of bits set to 1 if the newly read tag + * matches the hash at the nth position in a row of the tagTable. + * Each row is a circular buffer beginning at the value of "headGrouped". So we + * must rotate the "matches" bitfield to match up with the actual layout of the + * entries within the hashTable */ +FORCE_INLINE_TEMPLATE ZSTD_VecMask +ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGrouped, const U32 rowEntries) +{ + const BYTE* const src = tagRow; + assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64); + assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES); + assert(ZSTD_row_matchMaskGroupWidth(rowEntries) * rowEntries <= sizeof(ZSTD_VecMask) * 8); + +#if defined(ZSTD_ARCH_X86_SSE2) + return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, headGrouped); +#else /* SW or NEON implementation */ +#if defined(ZSTD_ARCH_ARM_NEON) + /* This NEON path only works for little endian - otherwise use SWAR below */ + if (MEM_isLittleEndian()) { + return ZSTD_row_getNEONMask(rowEntries, src, tag, headGrouped); + } +#endif + { const size_t chunkSize = sizeof(size_t); + const size_t shiftAmount = ((chunkSize * 8) - chunkSize); + const size_t xFF = ~((size_t)0); + const size_t x01 = xFF / 0xFF; + const size_t x80 = x01 << 7; + const size_t splatChar = tag * x01; + ZSTD_VecMask matches = 0; + int i = rowEntries - chunkSize; + assert((sizeof(size_t) == 4) || (sizeof(size_t) == 8)); + if (MEM_isLittleEndian()) { /* runtime check so have two loops */ + const size_t extractMagic = (xFF / 0x7F) >> chunkSize; + do { + size_t chunk = MEM_readST(&src[i]); + chunk ^= splatChar; + chunk = (((chunk | x80) - x01) | chunk) & x80; + matches <<= chunkSize; + matches |= (chunk * extractMagic) >> shiftAmount; + i -= chunkSize; + } while (i >= 0); + } else { /* big endian: reverse bits during extraction */ + const size_t msb = xFF ^ (xFF >> 1); + const size_t extractMagic = (msb / 0x1FF) | msb; + do { + size_t chunk = MEM_readST(&src[i]); + chunk ^= splatChar; + chunk = (((chunk | x80) - x01) | chunk) & x80; + matches <<= chunkSize; + matches |= ((chunk >> 7) * extractMagic) >> shiftAmount; + i -= chunkSize; + } while (i >= 0); + } + matches = ~matches; + if (rowEntries == 16) { + return ZSTD_rotateRight_U16((U16)matches, headGrouped); + } else if (rowEntries == 32) { + return ZSTD_rotateRight_U32((U32)matches, headGrouped); + } else { + return ZSTD_rotateRight_U64((U64)matches, headGrouped); + } + } +#endif +} + +/* The high-level approach of the SIMD row based match finder is as follows: + * - Figure out where to insert the new entry: + * - Generate a hash for current input position and split it into a one byte of tag and `rowHashLog` bits of index. + * - The hash is salted by a value that changes on every context reset, so when the same table is used + * we will avoid collisions that would otherwise slow us down by introducing phantom matches. + * - The hashTable is effectively split into groups or "rows" of 15 or 31 entries of U32, and the index determines + * which row to insert into. + * - Determine the correct position within the row to insert the entry into.
Each row of 15 or 31 can + * be considered as a circular buffer with a "head" index that resides in the tagTable (overall 16 or 32 bytes + * per row). + * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte tag calculated for the position and + * generate a bitfield that we can cycle through to check the collisions in the hash table. + * - Pick the longest match. + * - Insert the tag into the equivalent row and position in the tagTable. + */ +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_RowFindBestMatch( + ZSTD_MatchState_t* ms, + const BYTE* const ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 mls, const ZSTD_dictMode_e dictMode, + const U32 rowLog) +{ + U32* const hashTable = ms->hashTable; + BYTE* const tagTable = ms->tagTable; + U32* const hashCache = ms->hashCache; + const U32 hashLog = ms->rowHashLog; + const ZSTD_compressionParameters* const cParams = &ms->cParams; + const BYTE* const base = ms->window.base; + const BYTE* const dictBase = ms->window.dictBase; + const U32 dictLimit = ms->window.dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const U32 curr = (U32)(ip-base); + const U32 maxDistance = 1U << cParams->windowLog; + const U32 lowestValid = ms->window.lowLimit; + const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid; + const U32 isDictionary = (ms->loadedDictEnd != 0); + const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance; + const U32 rowEntries = (1U << rowLog); + const U32 rowMask = rowEntries - 1; + const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */ + const U32 groupWidth = ZSTD_row_matchMaskGroupWidth(rowEntries); + const U64 hashSalt = ms->hashSalt; + U32 nbAttempts = 1U << cappedSearchLog; + size_t ml=4-1; + U32 hash; + + /* DMS/DDS variables that may be referenced later */ + const ZSTD_MatchState_t* const dms = ms->dictMatchState; + + /* Initialize the following variables to satisfy static analyzer */ + size_t ddsIdx = 0; + U32 ddsExtraAttempts = 0; /* cctx hash tables are limited in searches, but allow extra searches into DDS */ + U32 dmsTag = 0; + U32* dmsRow = NULL; + BYTE* dmsTagRow = NULL; + + if (dictMode == ZSTD_dedicatedDictSearch) { + const U32 ddsHashLog = dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG; + { /* Prefetch DDS hashtable entry */ + ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG; + PREFETCH_L1(&dms->hashTable[ddsIdx]); + } + ddsExtraAttempts = cParams->searchLog > rowLog ?
1U << (cParams->searchLog - rowLog) : 0; + } + + if (dictMode == ZSTD_dictMatchState) { + /* Prefetch DMS rows */ + U32* const dmsHashTable = dms->hashTable; + BYTE* const dmsTagTable = dms->tagTable; + U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls); + U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; + dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK; + dmsTagRow = (BYTE*)(dmsTagTable + dmsRelRow); + dmsRow = dmsHashTable + dmsRelRow; + ZSTD_row_prefetch(dmsHashTable, dmsTagTable, dmsRelRow, rowLog); + } + + /* Update the hashTable and tagTable up to (but not including) ip */ + if (!ms->lazySkipping) { + ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */); + hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls, hashSalt); + } else { + /* Stop inserting every position when in the lazy skipping mode. + * The hash cache is also not kept up to date in this mode. + */ + hash = (U32)ZSTD_hashPtrSalted(ip, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt); + ms->nextToUpdate = curr; + } + ms->hashSaltEntropy += hash; /* collect salt entropy */ + + { /* Get the hash for ip, compute the appropriate row */ + U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; + U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK; + U32* const row = hashTable + relRow; + BYTE* tagRow = (BYTE*)(tagTable + relRow); + U32 const headGrouped = (*tagRow & rowMask) * groupWidth; + U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES]; + size_t numMatches = 0; + size_t currMatch = 0; + ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, headGrouped, rowEntries); + + /* Cycle through the matches and prefetch */ + for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) { + U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask; + U32 const matchIndex = row[matchPos]; + if(matchPos == 0) continue; + assert(numMatches < rowEntries); + if (matchIndex < lowLimit) + break; + if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) { + PREFETCH_L1(base + matchIndex); + } else { + PREFETCH_L1(dictBase + matchIndex); + } + matchBuffer[numMatches++] = matchIndex; + --nbAttempts; + } + + /* Speed opt: insert current byte into hashtable too. This allows us to avoid one iteration of the loop + in ZSTD_row_update_internal() at the next search. 
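+ The entry is stamped exactly as ZSTD_row_update_internalImpl() would stamp it: the tag byte goes into the circular tag buffer and ms->nextToUpdate (== curr at this point) into the hashTable row.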
*/ + { + U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask); + tagRow[pos] = (BYTE)tag; + row[pos] = ms->nextToUpdate++; + } + + /* Return the longest match */ + for (; currMatch < numMatches; ++currMatch) { + U32 const matchIndex = matchBuffer[currMatch]; + size_t currentMl=0; + assert(matchIndex < curr); + assert(matchIndex >= lowLimit); + + if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) { + const BYTE* const match = base + matchIndex; + assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */ + /* read 4B starting from (match + ml + 1 - sizeof(U32)) */ + if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) /* potentially better */ + currentMl = ZSTD_count(ip, match, iLimit); + } else { + const BYTE* const match = dictBase + matchIndex; + assert(match+4 <= dictEnd); + if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ + currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4; + } + + /* Save best solution */ + if (currentMl > ml) { + ml = currentMl; + *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex); + if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ + } + } + } + + assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */ + if (dictMode == ZSTD_dedicatedDictSearch) { + ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms, + ip, iLimit, prefixStart, curr, dictLimit, ddsIdx); + } else if (dictMode == ZSTD_dictMatchState) { + /* TODO: Measure and potentially add prefetching to DMS */ + const U32 dmsLowestIndex = dms->window.dictLimit; + const BYTE* const dmsBase = dms->window.base; + const BYTE* const dmsEnd = dms->window.nextSrc; + const U32 dmsSize = (U32)(dmsEnd - dmsBase); + const U32 dmsIndexDelta = dictLimit - dmsSize; + + { U32 const headGrouped = (*dmsTagRow & rowMask) * groupWidth; + U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES]; + size_t numMatches = 0; + size_t currMatch = 0; + ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, headGrouped, rowEntries); + + for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) { + U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask; + U32 const matchIndex = dmsRow[matchPos]; + if(matchPos == 0) continue; + if (matchIndex < dmsLowestIndex) + break; + PREFETCH_L1(dmsBase + matchIndex); + matchBuffer[numMatches++] = matchIndex; + --nbAttempts; + } + + /* Return the longest match */ + for (; currMatch < numMatches; ++currMatch) { + U32 const matchIndex = matchBuffer[currMatch]; + size_t currentMl=0; + assert(matchIndex >= dmsLowestIndex); + assert(matchIndex < curr); + + { const BYTE* const match = dmsBase + matchIndex; + assert(match+4 <= dmsEnd); + if (MEM_read32(match) == MEM_read32(ip)) + currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4; + } + + if (currentMl > ml) { + ml = currentMl; + assert(curr > matchIndex + dmsIndexDelta); + *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta)); + if (ip+currentMl == iLimit) break; + } + } + } + } + return ml; +} + + +/** + * Generate search functions templated on (dictMode, mls, rowLog). + * These functions are outlined for code size & compilation time. + * ZSTD_searchMax() dispatches to the correct implementation function. 
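+ * For example, the (search_rowHash, extDict, mls=4, rowLog=5) combination resolves, via the + * token-pasting macros below, to a concrete outlined function named ZSTD_RowFindBestMatch_extDict_4_5.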
+ * + * TODO: The start of the search function involves loading and calculating a + * bunch of constants from the ZSTD_MatchState_t. These computations could be + * done in an initialization function, and saved somewhere in the match state. + * Then we could pass a pointer to the saved state instead of the match state, + * and avoid duplicate computations. + * + * TODO: Move the match re-winding into searchMax. This improves compression + * ratio, and unlocks further simplifications with the next TODO. + * + * TODO: Try moving the repcode search into searchMax. After the re-winding + * and repcode search are in searchMax, there is no more logic in the match + * finder loop that requires knowledge about the dictMode. So we should be + * able to avoid force inlining it, and we can join the extDict loop with + * the single segment loop. It should go in searchMax instead of its own + * function to avoid having multiple virtual function calls per search. + */ + +#define ZSTD_BT_SEARCH_FN(dictMode, mls) ZSTD_BtFindBestMatch_##dictMode##_##mls +#define ZSTD_HC_SEARCH_FN(dictMode, mls) ZSTD_HcFindBestMatch_##dictMode##_##mls +#define ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog + +#define ZSTD_SEARCH_FN_ATTRS FORCE_NOINLINE + +#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) \ + ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)( \ + ZSTD_MatchState_t* ms, \ + const BYTE* ip, const BYTE* const iLimit, \ + size_t* offBasePtr) \ + { \ + assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, mls, ZSTD_##dictMode); \ + } \ + +#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) \ + ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)( \ + ZSTD_MatchState_t* ms, \ + const BYTE* ip, const BYTE* const iLimit, \ + size_t* offsetPtr) \ + { \ + assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \ + } \ + +#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) \ + ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)( \ + ZSTD_MatchState_t* ms, \ + const BYTE* ip, const BYTE* const iLimit, \ + size_t* offsetPtr) \ + { \ + assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ + assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog); \ + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \ + } \ + +#define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) \ + X(dictMode, mls, 4) \ + X(dictMode, mls, 5) \ + X(dictMode, mls, 6) + +#define ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode) \ + ZSTD_FOR_EACH_ROWLOG(X, dictMode, 4) \ + ZSTD_FOR_EACH_ROWLOG(X, dictMode, 5) \ + ZSTD_FOR_EACH_ROWLOG(X, dictMode, 6) + +#define ZSTD_FOR_EACH_MLS(X, dictMode) \ + X(dictMode, 4) \ + X(dictMode, 5) \ + X(dictMode, 6) + +#define ZSTD_FOR_EACH_DICT_MODE(X, ...) 
\ + X(__VA_ARGS__, noDict) \ + X(__VA_ARGS__, extDict) \ + X(__VA_ARGS__, dictMatchState) \ + X(__VA_ARGS__, dedicatedDictSearch) + +/* Generate row search fns for each combination of (dictMode, mls, rowLog) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_SEARCH_FN) +/* Generate binary Tree search fns for each combination of (dictMode, mls) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_SEARCH_FN) +/* Generate hash chain search fns for each combination of (dictMode, mls) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_SEARCH_FN) + +typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e; + +#define GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls) \ + case mls: \ + return ZSTD_BT_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr); +#define GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls) \ + case mls: \ + return ZSTD_HC_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr); +#define GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog) \ + case rowLog: \ + return ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)(ms, ip, iend, offsetPtr); + +#define ZSTD_SWITCH_MLS(X, dictMode) \ + switch (mls) { \ + ZSTD_FOR_EACH_MLS(X, dictMode) \ + } + +#define ZSTD_SWITCH_ROWLOG(dictMode, mls) \ + case mls: \ + switch (rowLog) { \ + ZSTD_FOR_EACH_ROWLOG(GEN_ZSTD_CALL_ROW_SEARCH_FN, dictMode, mls) \ + } \ + ZSTD_UNREACHABLE; \ + break; + +#define ZSTD_SWITCH_SEARCH_METHOD(dictMode) \ + switch (searchMethod) { \ + case search_hashChain: \ + ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_HC_SEARCH_FN, dictMode) \ + break; \ + case search_binaryTree: \ + ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_BT_SEARCH_FN, dictMode) \ + break; \ + case search_rowHash: \ + ZSTD_SWITCH_MLS(ZSTD_SWITCH_ROWLOG, dictMode) \ + break; \ + } \ + ZSTD_UNREACHABLE; + +/** + * Searches for the longest match at @p ip. + * Dispatches to the correct implementation function based on the + * (searchMethod, dictMode, mls, rowLog). We use switch statements + * here instead of using an indirect function call through a function + * pointer because after Spectre and Meltdown mitigations, indirect + * function calls can be very costly, especially in the kernel. + * + * NOTE: dictMode and searchMethod should be templated, so those switch + * statements should be optimized out. Only the mls & rowLog switches + * should be left. + * + * @param ms The match state. + * @param ip The position to search at. + * @param iend The end of the input data. + * @param[out] offsetPtr Stores the match offset into this pointer. + * @param mls The minimum search length, in the range [4, 6]. + * @param rowLog The row log (if applicable), in the range [4, 6]. + * @param searchMethod The search method to use (templated). + * @param dictMode The dictMode (templated). + * + * @returns The length of the longest match found, or < mls if no match is found. + * If a match is found its offset is stored in @p offsetPtr. 
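+ * + * Note: the value stored through @p offsetPtr is an OFFBASE produced by OFFSET_TO_OFFBASE(), + * not a raw match distance; OFFBASE_TO_OFFSET() recovers the distance when needed.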
+ */ +FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax( + ZSTD_MatchState_t* ms, + const BYTE* ip, + const BYTE* iend, + size_t* offsetPtr, + U32 const mls, + U32 const rowLog, + searchMethod_e const searchMethod, + ZSTD_dictMode_e const dictMode) +{ + if (dictMode == ZSTD_noDict) { + ZSTD_SWITCH_SEARCH_METHOD(noDict) + } else if (dictMode == ZSTD_extDict) { + ZSTD_SWITCH_SEARCH_METHOD(extDict) + } else if (dictMode == ZSTD_dictMatchState) { + ZSTD_SWITCH_SEARCH_METHOD(dictMatchState) + } else if (dictMode == ZSTD_dedicatedDictSearch) { + ZSTD_SWITCH_SEARCH_METHOD(dedicatedDictSearch) + } + ZSTD_UNREACHABLE; + return 0; +} /* ******************************* * Common parser - lazy strategy *********************************/ + FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_lazy_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, - const U32 searchMethod, const U32 depth, + const searchMethod_e searchMethod, const U32 depth, ZSTD_dictMode_e const dictMode) { const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - 8; + const BYTE* const ilimit = (searchMethod == search_rowHash) ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8; const BYTE* const base = ms->window.base; const U32 prefixLowestIndex = ms->window.dictLimit; const BYTE* const prefixLowest = base + prefixLowestIndex; + const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6); + const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); - typedef size_t (*searchMax_f)( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); - searchMax_f const searchMax = dictMode == ZSTD_dictMatchState ? - (searchMethod ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) : - (searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS); - U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0; - - const ZSTD_matchState_t* const dms = ms->dictMatchState; - const U32 dictLowestIndex = dictMode == ZSTD_dictMatchState ? - dms->window.dictLimit : 0; - const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ? - dms->window.base : NULL; - const BYTE* const dictLowest = dictMode == ZSTD_dictMatchState ? - dictBase + dictLowestIndex : NULL; - const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ? - dms->window.nextSrc : NULL; - const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ? + U32 offset_1 = rep[0], offset_2 = rep[1]; + U32 offsetSaved1 = 0, offsetSaved2 = 0; + + const int isDMS = dictMode == ZSTD_dictMatchState; + const int isDDS = dictMode == ZSTD_dedicatedDictSearch; + const int isDxS = isDMS || isDDS; + const ZSTD_MatchState_t* const dms = ms->dictMatchState; + const U32 dictLowestIndex = isDxS ? dms->window.dictLimit : 0; + const BYTE* const dictBase = isDxS ? dms->window.base : NULL; + const BYTE* const dictLowest = isDxS ? dictBase + dictLowestIndex : NULL; + const BYTE* const dictEnd = isDxS ? dms->window.nextSrc : NULL; + const U32 dictIndexDelta = isDxS ? 
prefixLowestIndex - (U32)(dictEnd - dictBase) : 0; - const U32 dictAndPrefixLength = (U32)(ip - prefixLowest + dictEnd - dictLowest); + const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest)); - /* init */ + DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u) (searchFunc=%u)", (U32)dictMode, (U32)searchMethod); ip += (dictAndPrefixLength == 0); if (dictMode == ZSTD_noDict) { - U32 const maxRep = (U32)(ip - prefixLowest); - if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0; - if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0; + U32 const curr = (U32)(ip - base); + U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog); + U32 const maxRep = curr - windowLow; + if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0; } - if (dictMode == ZSTD_dictMatchState) { + if (isDxS) { /* dictMatchState repCode checks don't currently handle repCode == 0 * disabling. */ assert(offset_1 <= dictAndPrefixLength); assert(offset_2 <= dictAndPrefixLength); } + /* Reset the lazy skipping state */ + ms->lazySkipping = 0; + + if (searchMethod == search_rowHash) { + ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit); + } + /* Match Loop */ +#if defined(__GNUC__) && defined(__x86_64__) + /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the + * code alignment is perturbed. To fix the instability, align the loop on 32 bytes. + */ + __asm__(".p2align 5"); +#endif while (ip < ilimit) { size_t matchLength=0; - size_t offset=0; + size_t offBase = REPCODE1_TO_OFFBASE; const BYTE* start=ip+1; + DEBUGLOG(7, "search baseline (depth 0)"); /* check repCode */ - if (dictMode == ZSTD_dictMatchState) { + if (isDxS) { const U32 repIndex = (U32)(ip - base) + 1 - offset_1; - const BYTE* repMatch = (dictMode == ZSTD_dictMatchState + const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch) && repIndex < prefixLowestIndex) ? dictBase + (repIndex - dictIndexDelta) : base + repIndex; - if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) + if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; @@ -699,132 +1648,148 @@ size_t ZSTD_compressBlock_lazy_generic( } /* first search (depth 0) */ - { size_t offsetFound = 999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offsetFound); + { size_t offbaseFound = 999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offbaseFound, mls, rowLog, searchMethod, dictMode); if (ml2 > matchLength) - matchLength = ml2, start = ip, offset=offsetFound; + matchLength = ml2, start = ip, offBase = offbaseFound; } if (matchLength < 4) { - ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ + size_t const step = ((size_t)(ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ + ip += step; + /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time. + * In this mode we stop inserting every position into our tables, and only insert + * positions that we search, which is one in step positions. + * The exact cutoff is flexible, I've just chosen a number that is reasonably high, + * so we minimize the compression ratio loss in "normal" scenarios.
This mode gets + * triggered once we've gone 2KB without finding any matches. + */ + ms->lazySkipping = step > kLazySkippingStep; continue; } /* let's try to find a better solution */ if (depth>=1) while (ip<ilimit) { + DEBUGLOG(7, "search depth 1"); ip ++; if ( (dictMode == ZSTD_noDict) - && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { + && (offBase) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; int const gain2 = (int)(mlRep * 3); - int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1); if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offset = 0, start = ip; + matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip; } - if (dictMode == ZSTD_dictMatchState) { + if (isDxS) { const U32 repIndex = (U32)(ip - base) - offset_1; const BYTE* repMatch = repIndex < prefixLowestIndex ? dictBase + (repIndex - dictIndexDelta) : base + repIndex; - if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) + if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; int const gain2 = (int)(mlRep * 3); - int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1); if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offset = 0, start = ip; + matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip; } } - { size_t offset2=999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offset2); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); + { size_t ofbCandidate=999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4); if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offset = offset2, start = ip; + matchLength = ml2, offBase = ofbCandidate, start = ip; continue; /* search a better one */ } } /* let's find an even better one */ if ((depth==2) && (ip<ilimit)) { + DEBUGLOG(7, "search depth 2"); ip ++; if ( (dictMode == ZSTD_noDict) - && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { + && (offBase) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; int const gain2 = (int)(mlRep * 4); - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1); if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offset = 0, start = ip; + matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip; } - if (dictMode == ZSTD_dictMatchState) { + if (isDxS) { const U32 repIndex = (U32)(ip - base) - offset_1; const BYTE* repMatch = repIndex < prefixLowestIndex ? dictBase + (repIndex - dictIndexDelta) : base + repIndex; - if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) + if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ?
dictEnd : iend; size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; int const gain2 = (int)(mlRep * 4); - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1); if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offset = 0, start = ip; + matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip; } } - { size_t offset2=999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offset2); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); + { size_t ofbCandidate=999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7); if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offset = offset2, start = ip; + matchLength = ml2, offBase = ofbCandidate, start = ip; continue; } } } break; /* nothing found : store previous solution */ } /* NOTE: - * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior. - * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which - * overflows the pointer, which is undefined behavior. + * Note that `start[-value]` can lead to undefined behavior, + * notably when `value` is unsigned: `-value` then wraps to a very large positive offset. */ /* catch up */ - if (offset) { + if (OFFBASE_IS_OFFSET(offBase)) { if (dictMode == ZSTD_noDict) { - while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest)) - && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) ) /* only search for offset within prefix */ + while ( ((start > anchor) & (start - OFFBASE_TO_OFFSET(offBase) > prefixLowest)) + && (start[-1] == (start-OFFBASE_TO_OFFSET(offBase))[-1]) ) /* only search for offset within prefix */ { start--; matchLength++; } } - if (dictMode == ZSTD_dictMatchState) { - U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE)); + if (isDxS) { + U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase)); const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex; const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest; while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ } - offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); + offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase); } /* store sequence */ _storeSequence: - { size_t const litLength = start - anchor; - ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH); + { size_t const litLength = (size_t)(start - anchor); + ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength); anchor = ip = start + matchLength; } + if (ms->lazySkipping) { + /* We've found a match, disable lazy skipping mode, and refill the hash cache.
*/ + if (searchMethod == search_rowHash) { + ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit); + } + ms->lazySkipping = 0; + } /* check immediate repcode */ - if (dictMode == ZSTD_dictMatchState) { + if (isDxS) { while (ip <= ilimit) { U32 const current2 = (U32)(ip-base); U32 const repIndex = current2 - offset_2; - const BYTE* repMatch = dictMode == ZSTD_dictMatchState - && repIndex < prefixLowestIndex ? + const BYTE* repMatch = repIndex < prefixLowestIndex ? dictBase - dictIndexDelta + repIndex : base + repIndex; - if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */) + if ( (ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4; - offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH); + offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength); ip += matchLength; anchor = ip; continue; @@ -838,121 +1803,235 @@ size_t ZSTD_compressBlock_lazy_generic( && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) { /* store sequence */ matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; - offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */ - ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH); + offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap repcodes */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength); ip += matchLength; anchor = ip; continue; /* faster when present ... (?) */ } } } - /* Save reps for next block */ - rep[0] = offset_1 ? offset_1 : savedOffset; - rep[1] = offset_2 ? offset_2 : savedOffset; + /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0), + * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */ + offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2; + + /* save reps for next block */ + rep[0] = offset_1 ? offset_1 : offsetSaved1; + rep[1] = offset_2 ? 
offset_2 : offsetSaved2; /* Return the last literals size */ - return iend - anchor; + return (size_t)(iend - anchor); } +#endif /* build exclusions */ -size_t ZSTD_compressBlock_btlazy2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_greedy( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict); } -size_t ZSTD_compressBlock_lazy2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState); +} + +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch); +} + +size_t ZSTD_compressBlock_greedy_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict); +} + +size_t ZSTD_compressBlock_greedy_dictMatchState_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState); +} + +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch); } +#endif +#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_lazy( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict); } -size_t ZSTD_compressBlock_greedy( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState); } -size_t ZSTD_compressBlock_btlazy2_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 
1, ZSTD_dedicatedDictSearch); +} + +size_t ZSTD_compressBlock_lazy_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict); +} + +size_t ZSTD_compressBlock_lazy_dictMatchState_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState); +} + +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch); +} +#endif + +#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_lazy2( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict); } size_t ZSTD_compressBlock_lazy2_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState); } -size_t ZSTD_compressBlock_lazy_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch); } -size_t ZSTD_compressBlock_greedy_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict); +} + +size_t ZSTD_compressBlock_lazy2_dictMatchState_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState); +} + +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch); } +#endif +#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_btlazy2( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, 
ZSTD_noDict); +} + +size_t ZSTD_compressBlock_btlazy2_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState); +} +#endif +#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_lazy_extDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, - const U32 searchMethod, const U32 depth) + const searchMethod_e searchMethod, const U32 depth) { const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - 8; + const BYTE* const ilimit = searchMethod == search_rowHash ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8; const BYTE* const base = ms->window.base; const U32 dictLimit = ms->window.dictLimit; - const U32 lowestIndex = ms->window.lowLimit; const BYTE* const prefixStart = base + dictLimit; const BYTE* const dictBase = ms->window.dictBase; const BYTE* const dictEnd = dictBase + dictLimit; - const BYTE* const dictStart = dictBase + lowestIndex; - - typedef size_t (*searchMax_f)( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); - searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS; + const BYTE* const dictStart = dictBase + ms->window.lowLimit; + const U32 windowLog = ms->cParams.windowLog; + const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6); + const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); U32 offset_1 = rep[0], offset_2 = rep[1]; + DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod); + + /* Reset the lazy skipping state */ + ms->lazySkipping = 0; + /* init */ ip += (ip == prefixStart); + if (searchMethod == search_rowHash) { + ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit); + } /* Match Loop */ +#if defined(__GNUC__) && defined(__x86_64__) + /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the + * code alignment is perturbed. To fix the instability, align the loop on 32 bytes. + */ + __asm__(".p2align 5"); +#endif while (ip < ilimit) { size_t matchLength=0; - size_t offset=0; + size_t offBase = REPCODE1_TO_OFFBASE; const BYTE* start=ip+1; - U32 current = (U32)(ip-base); + U32 curr = (U32)(ip-base); /* check repCode */ - { const U32 repIndex = (U32)(current+1 - offset_1); + { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog); + const U32 repIndex = (U32)(curr+1 - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; - if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if ( (ZSTD_index_overlap_check(dictLimit, repIndex)) + & (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */ if (MEM_read32(ip+1) == MEM_read32(repMatch)) { /* repcode detected we should take it */ const BYTE* const repEnd = repIndex < dictLimit ?
dictEnd : iend; @@ -961,14 +2040,23 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( } } /* first search (depth 0) */ - { size_t offsetFound = 999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offsetFound); + { size_t ofbCandidate = 999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict); if (ml2 > matchLength) - matchLength = ml2, start = ip, offset=offsetFound; + matchLength = ml2, start = ip, offBase = ofbCandidate; } - if (matchLength < 4) { - ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ + if (matchLength < 4) { + size_t const step = ((size_t)(ip-anchor) >> kSearchStrength); + ip += step + 1; /* jump faster over incompressible sections */ + /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time. + * In this mode we stop inserting every position into our tables, and only insert + * positions that we search, which is one in step positions. + * The exact cutoff is flexible, I've just chosen a number that is reasonably high, + * so we minimize the compression ratio loss in "normal" scenarios. This mode gets + * triggered once we've gone 2KB without finding any matches. + */ + ms->lazySkipping = step > kLazySkippingStep; continue; } @@ -976,93 +2064,107 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( if (depth>=1) while (ip<ilimit) { ip ++; - current++; + curr++; /* check repCode */ - { const U32 repIndex = (U32)(current - offset_1); + { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); + const U32 repIndex = (U32)(curr - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; - if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if ( (ZSTD_index_overlap_check(dictLimit, repIndex)) & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; int const gain2 = (int)(repLength * 3); - int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1); if ((repLength >= 4) && (gain2 > gain1)) - matchLength = repLength, offset = 0, start = ip; + matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip; } } /* search match, depth 1 */ - { size_t offset2=999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offset2); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); + { size_t ofbCandidate = 999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4); if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offset = offset2, start = ip; + matchLength = ml2, offBase = ofbCandidate, start = ip; continue; /* search a better one */ } } /* let's find an even better one */ if ((depth==2) && (ip<ilimit)) { ip ++; - current++; + curr++; /* check repCode */ - { const U32 repIndex = (U32)(current - offset_1); + { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); + const U32 repIndex = (U32)(curr - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; - if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if ( (ZSTD_index_overlap_check(dictLimit, repIndex)) & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected */ const BYTE* const repEnd = repIndex < dictLimit ?
dictEnd : iend; size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; int const gain2 = (int)(repLength * 4); - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1); if ((repLength >= 4) && (gain2 > gain1)) - matchLength = repLength, offset = 0, start = ip; + matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip; } } /* search match, depth 2 */ - { size_t offset2=999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offset2); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); + { size_t ofbCandidate = 999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7); if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offset = offset2, start = ip; + matchLength = ml2, offBase = ofbCandidate, start = ip; continue; } } } break; /* nothing found : store previous solution */ } /* catch up */ - if (offset) { - U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE)); + if (OFFBASE_IS_OFFSET(offBase)) { + U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase)); const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex; const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart; while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ - offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); + offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase); } /* store sequence */ _storeSequence: - { size_t const litLength = start - anchor; - ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH); + { size_t const litLength = (size_t)(start - anchor); + ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength); anchor = ip = start + matchLength; } + if (ms->lazySkipping) { + /* We've found a match, disable lazy skipping mode, and refill the hash cache. */ + if (searchMethod == search_rowHash) { + ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit); + } + ms->lazySkipping = 0; + } /* check immediate repcode */ while (ip <= ilimit) { - const U32 repIndex = (U32)((ip-base) - offset_2); + const U32 repCurrent = (U32)(ip-base); + const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog); + const U32 repIndex = repCurrent - offset_2; const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; - if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if ( (ZSTD_index_overlap_check(dictLimit, repIndex)) + & (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected we should take it */ const BYTE* const repEnd = repIndex < dictLimit ? 
dictEnd : iend; matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; - offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */ - ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH); + offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap offset history */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength); ip += matchLength; anchor = ip; continue; /* faster when present ... (?) */ @@ -1075,37 +2177,67 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( rep[1] = offset_2; /* Return the last literals size */ - return iend - anchor; + return (size_t)(iend - anchor); } +#endif /* build exclusions */ - +#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_greedy_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0); +} + +size_t ZSTD_compressBlock_greedy_extDict_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 0); + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0); } +#endif +#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_lazy_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) + +{ + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1); +} + +size_t ZSTD_compressBlock_lazy_extDict_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 1); + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1); } +#endif +#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_lazy2_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 2); + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2); +} + +size_t ZSTD_compressBlock_lazy2_extDict_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2); } +#endif +#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_btlazy2_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 1, 2); + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2); } +#endif diff --git a/lib/compress/zstd_lazy.h b/lib/compress/zstd_lazy.h index bb1763069f3..bd8dc49e64c 100644 --- a/lib/compress/zstd_lazy.h +++ b/lib/compress/zstd_lazy.h @@ -1,5 +1,5 @@ /* 
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,57 +11,183 @@ #ifndef ZSTD_LAZY_H #define ZSTD_LAZY_H -#if defined (__cplusplus) -extern "C" { -#endif - #include "zstd_compress_internal.h" -U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip); +/** + * Dedicated Dictionary Search Structure bucket log. In the + * ZSTD_dedicatedDictSearch mode, the hashTable has + * 2 ** ZSTD_LAZY_DDSS_BUCKET_LOG entries in each bucket, rather than just + * one. + */ +#define ZSTD_LAZY_DDSS_BUCKET_LOG 2 + +#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */ + +#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) +U32 ZSTD_insertAndFindFirstIndex(ZSTD_MatchState_t* ms, const BYTE* ip); +void ZSTD_row_update(ZSTD_MatchState_t* const ms, const BYTE* ip); + +void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState_t* ms, const BYTE* const ip); void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */ +#endif -size_t ZSTD_compressBlock_btlazy2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_greedy( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dictMatchState_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_greedy_extDict( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_greedy_extDict_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_btlazy2_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#define ZSTD_COMPRESSBLOCK_GREEDY ZSTD_compressBlock_greedy +#define ZSTD_COMPRESSBLOCK_GREEDY_ROW ZSTD_compressBlock_greedy_row +#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE ZSTD_compressBlock_greedy_dictMatchState +#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW ZSTD_compressBlock_greedy_dictMatchState_row +#define 
ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH ZSTD_compressBlock_greedy_dedicatedDictSearch +#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_greedy_dedicatedDictSearch_row +#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT ZSTD_compressBlock_greedy_extDict +#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW ZSTD_compressBlock_greedy_extDict_row +#else +#define ZSTD_COMPRESSBLOCK_GREEDY NULL +#define ZSTD_COMPRESSBLOCK_GREEDY_ROW NULL +#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE NULL +#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW NULL +#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH NULL +#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW NULL +#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT NULL +#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW NULL +#endif + +#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_lazy( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_dictMatchState_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); - -size_t ZSTD_compressBlock_greedy_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy_extDict_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); + +#define ZSTD_COMPRESSBLOCK_LAZY ZSTD_compressBlock_lazy +#define ZSTD_COMPRESSBLOCK_LAZY_ROW ZSTD_compressBlock_lazy_row +#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE ZSTD_compressBlock_lazy_dictMatchState +#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW ZSTD_compressBlock_lazy_dictMatchState_row +#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH ZSTD_compressBlock_lazy_dedicatedDictSearch +#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_lazy_dedicatedDictSearch_row +#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT ZSTD_compressBlock_lazy_extDict +#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW ZSTD_compressBlock_lazy_extDict_row +#else +#define ZSTD_COMPRESSBLOCK_LAZY NULL +#define ZSTD_COMPRESSBLOCK_LAZY_ROW NULL +#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE NULL +#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW NULL +#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH NULL +#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW NULL +#define 
ZSTD_COMPRESSBLOCK_LAZY_EXTDICT NULL +#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW NULL +#endif + +#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_lazy2( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy2_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy2_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy2_dictMatchState_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy2_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy2_extDict_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); + +#define ZSTD_COMPRESSBLOCK_LAZY2 ZSTD_compressBlock_lazy2 +#define ZSTD_COMPRESSBLOCK_LAZY2_ROW ZSTD_compressBlock_lazy2_row +#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE ZSTD_compressBlock_lazy2_dictMatchState +#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW ZSTD_compressBlock_lazy2_dictMatchState_row +#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH ZSTD_compressBlock_lazy2_dedicatedDictSearch +#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_lazy2_dedicatedDictSearch_row +#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT ZSTD_compressBlock_lazy2_extDict +#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW ZSTD_compressBlock_lazy2_extDict_row +#else +#define ZSTD_COMPRESSBLOCK_LAZY2 NULL +#define ZSTD_COMPRESSBLOCK_LAZY2_ROW NULL +#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE NULL +#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW NULL +#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH NULL +#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW NULL +#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT NULL +#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW NULL +#endif + +#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_btlazy2( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_btlazy2_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btlazy2_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -#if defined (__cplusplus) -} +#define ZSTD_COMPRESSBLOCK_BTLAZY2 ZSTD_compressBlock_btlazy2 +#define ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE ZSTD_compressBlock_btlazy2_dictMatchState +#define ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT ZSTD_compressBlock_btlazy2_extDict +#else +#define ZSTD_COMPRESSBLOCK_BTLAZY2 NULL +#define ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE NULL +#define ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT NULL #endif #endif /* ZSTD_LAZY_H */ 
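A note on the dispatch pattern used throughout zstd_lazy above: the old searchMax_f function pointers are replaced by macro-generated specializations (GEN_ZSTD_BT_SEARCH_FN, GEN_ZSTD_HC_SEARCH_FN, GEN_ZSTD_ROW_SEARCH_FN) that are selected through nested switches (ZSTD_SWITCH_SEARCH_METHOD), since indirect calls are costly under Spectre/Meltdown mitigations. The following is a minimal, self-contained C sketch of the same generate-then-switch technique; the names (GEN_SUM_FN, sumEveryN) are hypothetical and are not part of zstd:

#include <stddef.h>

/* Generate one specialized function per supported parameter value,
 * mirroring GEN_ZSTD_HC_SEARCH_FN: inside each generated body the
 * parameter is a compile-time constant, so the loop can be unrolled. */
#define GEN_SUM_FN(step)                                           \
    static size_t sumEveryN_##step(const int* v, size_t n)         \
    {                                                              \
        size_t total = 0, i;                                       \
        for (i = 0; i < n; i += step) total += (size_t)v[i];       \
        return total;                                              \
    }

GEN_SUM_FN(4)
GEN_SUM_FN(5)
GEN_SUM_FN(6)

/* Mirrors ZSTD_SWITCH_MLS / ZSTD_SWITCH_SEARCH_METHOD: a switch picks
 * the specialization, so the compiler emits direct (or inlined) calls
 * instead of an indirect call through a function pointer. */
static size_t sumEveryN(const int* v, size_t n, unsigned step)
{
    switch (step) {
        case 4: return sumEveryN_4(v, n);
        case 5: return sumEveryN_5(v, n);
        case 6: return sumEveryN_6(v, n);
        default: return 0;  /* unsupported value: plays the role of ZSTD_UNREACHABLE */
    }
}

When such a dispatcher is force-inlined into a caller that passes compile-time constants (the role searchMethod and dictMode play in ZSTD_compressBlock_lazy_generic), the corresponding switches fold away entirely, leaving only the runtime mls and rowLog switches in the generated code.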
diff --git a/lib/compress/zstd_ldm.c b/lib/compress/zstd_ldm.c index 784d20f3ab7..070551cad81 100644 --- a/lib/compress/zstd_ldm.c +++ b/lib/compress/zstd_ldm.c @@ -1,46 +1,167 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ #include "zstd_ldm.h" -#include "debug.h" +#include "../common/debug.h" +#include "../common/xxhash.h" #include "zstd_fast.h" /* ZSTD_fillHashTable() */ #include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */ +#include "zstd_ldm_geartab.h" -#define LDM_BUCKET_SIZE_LOG 3 +#define LDM_BUCKET_SIZE_LOG 4 #define LDM_MIN_MATCH_LENGTH 64 #define LDM_HASH_RLOG 7 -#define LDM_HASH_CHAR_OFFSET 10 + +typedef struct { + U64 rolling; + U64 stopMask; +} ldmRollingHashState_t; + +/** ZSTD_ldm_gear_init(): + * + * Initializes the rolling hash state such that it will honor the + * settings in params. */ +static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params) +{ + unsigned maxBitsInMask = MIN(params->minMatchLength, 64); + unsigned hashRateLog = params->hashRateLog; + + state->rolling = ~(U32)0; + + /* The choice of the splitting criterion is subject to two conditions: + * 1. it has to trigger on average every 2^(hashRateLog) bytes; + * 2. ideally, it has to depend on a window of minMatchLength bytes. + * + * In the gear hash algorithm, bit n depends on the last n bytes; + * so in order to obtain a good quality splitting criterion it is + * preferable to use bits with high weight. + * + * To match condition 1 we use a mask with hashRateLog bits set + * and, because of the previous remark, we make sure these bits + * have the highest possible weight while still respecting + * condition 2. + */ + if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) { + state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog); + } else { + /* In this degenerate case we simply honor the hash rate. */ + state->stopMask = ((U64)1 << hashRateLog) - 1; + } +} + +/** ZSTD_ldm_gear_reset() + * Feeds [data, data + minMatchLength) into the hash without registering any + * splits. This effectively resets the hash state. This is used when skipping + * over data, either at the beginning of a block, or skipping sections. + */ +static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state, + BYTE const* data, size_t minMatchLength) +{ + U64 hash = state->rolling; + size_t n = 0; + +#define GEAR_ITER_ONCE() do { \ + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \ + n += 1; \ + } while (0) + while (n + 3 < minMatchLength) { + GEAR_ITER_ONCE(); + GEAR_ITER_ONCE(); + GEAR_ITER_ONCE(); + GEAR_ITER_ONCE(); + } + while (n < minMatchLength) { + GEAR_ITER_ONCE(); + } +#undef GEAR_ITER_ONCE +} + +/** ZSTD_ldm_gear_feed(): + * + * Registers in the splits array all the split points found in the first + * size bytes following the data pointer. This function terminates when + * either all the data has been processed or LDM_BATCH_SIZE splits are + * present in the splits array. + * + * Precondition: The splits array must not be full. + * Returns: The number of bytes processed. 
*/ +static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state, + BYTE const* data, size_t size, + size_t* splits, unsigned* numSplits) +{ + size_t n; + U64 hash, mask; + + hash = state->rolling; + mask = state->stopMask; + n = 0; + +#define GEAR_ITER_ONCE() do { \ + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \ + n += 1; \ + if (UNLIKELY((hash & mask) == 0)) { \ + splits[*numSplits] = n; \ + *numSplits += 1; \ + if (*numSplits == LDM_BATCH_SIZE) \ + goto done; \ + } \ + } while (0) + + while (n + 3 < size) { + GEAR_ITER_ONCE(); + GEAR_ITER_ONCE(); + GEAR_ITER_ONCE(); + GEAR_ITER_ONCE(); + } + while (n < size) { + GEAR_ITER_ONCE(); + } + +#undef GEAR_ITER_ONCE + +done: + state->rolling = hash; + return n; +} void ZSTD_ldm_adjustParameters(ldmParams_t* params, - ZSTD_compressionParameters const* cParams) + const ZSTD_compressionParameters* cParams) { params->windowLog = cParams->windowLog; ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX); DEBUGLOG(4, "ZSTD_ldm_adjustParameters"); - if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG; - if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH; - if (cParams->strategy >= ZSTD_btopt) { - /* Get out of the way of the optimal parser */ - U32 const minMatch = MAX(cParams->targetLength, params->minMatchLength); - assert(minMatch >= ZSTD_LDM_MINMATCH_MIN); - assert(minMatch <= ZSTD_LDM_MINMATCH_MAX); - params->minMatchLength = minMatch; + if (params->hashRateLog == 0) { + if (params->hashLog > 0) { + /* if params->hashLog is set, derive hashRateLog from it */ + assert(params->hashLog <= ZSTD_HASHLOG_MAX); + if (params->windowLog > params->hashLog) { + params->hashRateLog = params->windowLog - params->hashLog; + } + } else { + assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9); + /* mapping from [fast, rate7] to [btultra2, rate4] */ + params->hashRateLog = 7 - (cParams->strategy/3); + } } if (params->hashLog == 0) { - params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG); - assert(params->hashLog <= ZSTD_HASHLOG_MAX); + params->hashLog = BOUNDED(ZSTD_HASHLOG_MIN, params->windowLog - params->hashRateLog, ZSTD_HASHLOG_MAX); } - if (params->hashRateLog == 0) { - params->hashRateLog = params->windowLog < params->hashLog - ? 0 - : params->windowLog - params->hashLog; + if (params->minMatchLength == 0) { + params->minMatchLength = LDM_MIN_MATCH_LENGTH; + if (cParams->strategy >= ZSTD_btultra) + params->minMatchLength /= 2; + } + if (params->bucketSizeLog==0) { + assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9); + params->bucketSizeLog = BOUNDED(LDM_BUCKET_SIZE_LOG, (U32)cParams->strategy, ZSTD_LDM_BUCKETSIZELOG_MAX); } params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog); } @@ -49,98 +170,37 @@ size_t ZSTD_ldm_getTableSize(ldmParams_t params) { size_t const ldmHSize = ((size_t)1) << params.hashLog; size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog); - size_t const ldmBucketSize = - ((size_t)1) << (params.hashLog - ldmBucketSizeLog); - size_t const totalSize = ldmBucketSize + ldmHSize * sizeof(ldmEntry_t); - return params.enableLdm ? totalSize : 0; + size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog); + size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize) + + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t)); + return params.enableLdm == ZSTD_ps_enable ? 
totalSize : 0; } size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize) { - return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0; -} - -/** ZSTD_ldm_getSmallHash() : - * numBits should be <= 32 - * If numBits==0, returns 0. - * @return : the most significant numBits of value. */ -static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits) -{ - assert(numBits <= 32); - return numBits == 0 ? 0 : (U32)(value >> (64 - numBits)); -} - -/** ZSTD_ldm_getChecksum() : - * numBitsToDiscard should be <= 32 - * @return : the next most significant 32 bits after numBitsToDiscard */ -static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard) -{ - assert(numBitsToDiscard <= 32); - return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF; -} - -/** ZSTD_ldm_getTag() ; - * Given the hash, returns the most significant numTagBits bits - * after (32 + hbits) bits. - * - * If there are not enough bits remaining, return the last - * numTagBits bits. */ -static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits) -{ - assert(numTagBits < 32 && hbits <= 32); - if (32 - hbits < numTagBits) { - return hash & (((U32)1 << numTagBits) - 1); - } else { - return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1); - } + return params.enableLdm == ZSTD_ps_enable ? (maxChunkSize / params.minMatchLength) : 0; } /** ZSTD_ldm_getBucket() : * Returns a pointer to the start of the bucket associated with hash. */ static ldmEntry_t* ZSTD_ldm_getBucket( - ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams) + const ldmState_t* ldmState, size_t hash, U32 const bucketSizeLog) { - return ldmState->hashTable + (hash << ldmParams.bucketSizeLog); + return ldmState->hashTable + (hash << bucketSizeLog); } /** ZSTD_ldm_insertEntry() : * Insert the entry with corresponding hash into the hash table */ static void ZSTD_ldm_insertEntry(ldmState_t* ldmState, size_t const hash, const ldmEntry_t entry, - ldmParams_t const ldmParams) + U32 const bucketSizeLog) { - BYTE* const bucketOffsets = ldmState->bucketOffsets; - *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry; - bucketOffsets[hash]++; - bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1; -} + BYTE* const pOffset = ldmState->bucketOffsets + hash; + unsigned const offset = *pOffset; + + *(ZSTD_ldm_getBucket(ldmState, hash, bucketSizeLog) + offset) = entry; + *pOffset = (BYTE)((offset + 1) & ((1u << bucketSizeLog) - 1)); -/** ZSTD_ldm_makeEntryAndInsertByTag() : - * - * Gets the small hash, checksum, and tag from the rollingHash. - * - * If the tag matches (1 << ldmParams.hashRateLog)-1, then - * creates an ldmEntry from the offset, and inserts it into the hash table. - * - * hBits is the length of the small hash, which is the most significant hBits - * of rollingHash. The checksum is the next 32 most significant bits, followed - * by ldmParams.hashRateLog bits that make up the tag. 
*/ -static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState, - U64 const rollingHash, - U32 const hBits, - U32 const offset, - ldmParams_t const ldmParams) -{ - U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashRateLog); - U32 const tagMask = ((U32)1 << ldmParams.hashRateLog) - 1; - if (tag == tagMask) { - U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits); - U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits); - ldmEntry_t entry; - entry.offset = offset; - entry.checksum = checksum; - ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams); - } } /** ZSTD_ldm_countBackwardsMatch() : @@ -149,10 +209,10 @@ static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState, * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */ static size_t ZSTD_ldm_countBackwardsMatch( const BYTE* pIn, const BYTE* pAnchor, - const BYTE* pMatch, const BYTE* pBase) + const BYTE* pMatch, const BYTE* pMatchBase) { size_t matchLength = 0; - while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) { + while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) { pIn--; pMatch--; matchLength++; @@ -160,6 +220,27 @@ static size_t ZSTD_ldm_countBackwardsMatch( return matchLength; } +/** ZSTD_ldm_countBackwardsMatch_2segments() : + * Returns the number of bytes that match backwards from pMatch, + * even with the backwards match spanning 2 different segments. + * + * On reaching `pMatchBase`, start counting from mEnd */ +static size_t ZSTD_ldm_countBackwardsMatch_2segments( + const BYTE* pIn, const BYTE* pAnchor, + const BYTE* pMatch, const BYTE* pMatchBase, + const BYTE* pExtDictStart, const BYTE* pExtDictEnd) +{ + size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase); + if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) { + /* If backwards match is entirely in the extDict or prefix, immediately return */ + return matchLength; + } + DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength); + matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart); + DEBUGLOG(7, "final backwards match length = %zu", matchLength); + return matchLength; +} + /** ZSTD_ldm_fillFastTables() : * * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies. @@ -167,7 +248,7 @@ static size_t ZSTD_ldm_countBackwardsMatch( * * The tables for the other strategies are filled within their * block compressors. */ -static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms, +static size_t ZSTD_ldm_fillFastTables(ZSTD_MatchState_t* ms, void const* end) { const BYTE* const iend = (const BYTE*)end; @@ -175,11 +256,15 @@ static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms, switch(ms->cParams.strategy) { case ZSTD_fast: - ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast); + ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx); break; case ZSTD_dfast: - ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast); +#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR + ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx); +#else + assert(0); /* shouldn't be called: cparams should've been adjusted. */ +#endif break; case ZSTD_greedy: @@ -197,30 +282,44 @@ static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms, return 0; } -/** ZSTD_ldm_fillLdmHashTable() : - * - * Fills hashTable from (lastHashed + 1) to iend (non-inclusive). - * lastHash is the rolling hash that corresponds to lastHashed. 
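Also worth calling out before the rewritten helpers continue: in the new ZSTD_ldm_insertEntry() a few hunks above, each hash value owns 2^bucketSizeLog consecutive slots and bucketOffsets[hash] is a ring cursor, so inserting always evicts the oldest entry of that bucket. A compilable sketch of the layout (toy types standing in for ldmState_t and ldmEntry_t):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct { uint32_t offset, checksum; } toyEntry;
    typedef struct {
        toyEntry* hashTable;     /* (1 << hashLog) buckets, each (1 << bucketSizeLog) wide */
        uint8_t*  bucketOffsets; /* one ring cursor per bucket */
    } toyLdmState;

    static void toy_insertEntry(toyLdmState* st, size_t hash, toyEntry e,
                                unsigned bucketSizeLog)
    {
        toyEntry* const bucket = st->hashTable + (hash << bucketSizeLog);
        uint8_t*  const cursor = st->bucketOffsets + hash;
        bucket[*cursor] = e;     /* overwrite the oldest slot of this bucket */
        *cursor = (uint8_t)((*cursor + 1) & ((1u << bucketSizeLog) - 1));
    }

FIFO replacement keeps the most recent candidates per bucket without any search on insert, which is why the lookup loop can simply scan all 2^bucketSizeLog entries.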
- * - * Returns the rolling hash corresponding to position iend-1. */ -static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state, - U64 lastHash, const BYTE* lastHashed, - const BYTE* iend, const BYTE* base, - U32 hBits, ldmParams_t const ldmParams) +void ZSTD_ldm_fillHashTable( + ldmState_t* ldmState, const BYTE* ip, + const BYTE* iend, ldmParams_t const* params) { - U64 rollingHash = lastHash; - const BYTE* cur = lastHashed + 1; - - while (cur < iend) { - rollingHash = ZSTD_rollingHash_rotate(rollingHash, cur[-1], - cur[ldmParams.minMatchLength-1], - state->hashPower); - ZSTD_ldm_makeEntryAndInsertByTag(state, - rollingHash, hBits, - (U32)(cur - base), ldmParams); - ++cur; + U32 const minMatchLength = params->minMatchLength; + U32 const bucketSizeLog = params->bucketSizeLog; + U32 const hBits = params->hashLog - bucketSizeLog; + BYTE const* const base = ldmState->window.base; + BYTE const* const istart = ip; + ldmRollingHashState_t hashState; + size_t* const splits = ldmState->splitIndices; + unsigned numSplits; + + DEBUGLOG(5, "ZSTD_ldm_fillHashTable"); + + ZSTD_ldm_gear_init(&hashState, params); + while (ip < iend) { + size_t hashed; + unsigned n; + + numSplits = 0; + hashed = ZSTD_ldm_gear_feed(&hashState, ip, (size_t)(iend - ip), splits, &numSplits); + + for (n = 0; n < numSplits; n++) { + if (ip + splits[n] >= istart + minMatchLength) { + BYTE const* const split = ip + splits[n] - minMatchLength; + U64 const xxhash = XXH64(split, minMatchLength, 0); + U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1)); + ldmEntry_t entry; + + entry.offset = (U32)(split - base); + entry.checksum = (U32)(xxhash >> 32); + ZSTD_ldm_insertEntry(ldmState, hash, entry, params->bucketSizeLog); + } + } + + ip += hashed; } - return rollingHash; } @@ -229,27 +328,26 @@ static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state, * Sets cctx->nextToUpdate to a position corresponding closer to anchor * if it is far away * (after a long match, only update tables a limited amount). */ -static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor) +static void ZSTD_ldm_limitTableUpdate(ZSTD_MatchState_t* ms, const BYTE* anchor) { - U32 const current = (U32)(anchor - ms->window.base); - if (current > ms->nextToUpdate + 1024) { + U32 const curr = (U32)(anchor - ms->window.base); + if (curr > ms->nextToUpdate + 1024) { ms->nextToUpdate = - current - MIN(512, current - ms->nextToUpdate - 1024); + curr - MIN(512, curr - ms->nextToUpdate - 1024); } } -static size_t ZSTD_ldm_generateSequences_internal( - ldmState_t* ldmState, rawSeqStore_t* rawSeqStore, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_ldm_generateSequences_internal( + ldmState_t* ldmState, RawSeqStore_t* rawSeqStore, ldmParams_t const* params, void const* src, size_t srcSize) { /* LDM parameters */ int const extDict = ZSTD_window_hasExtDict(ldmState->window); U32 const minMatchLength = params->minMatchLength; - U64 const hashPower = ldmState->hashPower; + U32 const entsPerBucket = 1U << params->bucketSizeLog; U32 const hBits = params->hashLog - params->bucketSizeLog; - U32 const ldmBucketSize = 1U << params->bucketSizeLog; - U32 const hashRateLog = params->hashRateLog; - U32 const ldmTagMask = (1U << params->hashRateLog) - 1; /* Prefix and extDict parameters */ U32 const dictLimit = ldmState->window.dictLimit; U32 const lowestIndex = extDict ? 
ldmState->window.lowLimit : dictLimit; @@ -261,45 +359,69 @@ static size_t ZSTD_ldm_generateSequences_internal( /* Input bounds */ BYTE const* const istart = (BYTE const*)src; BYTE const* const iend = istart + srcSize; - BYTE const* const ilimit = iend - MAX(minMatchLength, HASH_READ_SIZE); + BYTE const* const ilimit = iend - HASH_READ_SIZE; /* Input positions */ BYTE const* anchor = istart; BYTE const* ip = istart; - /* Rolling hash */ - BYTE const* lastHashed = NULL; - U64 rollingHash = 0; - - while (ip <= ilimit) { - size_t mLength; - U32 const current = (U32)(ip - base); - size_t forwardMatchLength = 0, backwardMatchLength = 0; - ldmEntry_t* bestEntry = NULL; - if (ip != istart) { - rollingHash = ZSTD_rollingHash_rotate(rollingHash, lastHashed[0], - lastHashed[minMatchLength], - hashPower); - } else { - rollingHash = ZSTD_rollingHash_compute(ip, minMatchLength); + /* Rolling hash state */ + ldmRollingHashState_t hashState; + /* Arrays for staged-processing */ + size_t* const splits = ldmState->splitIndices; + ldmMatchCandidate_t* const candidates = ldmState->matchCandidates; + unsigned numSplits; + + if (srcSize < minMatchLength) + return iend - anchor; + + /* Initialize the rolling hash state with the first minMatchLength bytes */ + ZSTD_ldm_gear_init(&hashState, params); + ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength); + ip += minMatchLength; + + while (ip < ilimit) { + size_t hashed; + unsigned n; + + numSplits = 0; + hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip, + splits, &numSplits); + + for (n = 0; n < numSplits; n++) { + BYTE const* const split = ip + splits[n] - minMatchLength; + U64 const xxhash = XXH64(split, minMatchLength, 0); + U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1)); + + candidates[n].split = split; + candidates[n].hash = hash; + candidates[n].checksum = (U32)(xxhash >> 32); + candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, params->bucketSizeLog); + PREFETCH_L1(candidates[n].bucket); } - lastHashed = ip; - /* Do not insert and do not look for a match */ - if (ZSTD_ldm_getTag(rollingHash, hBits, hashRateLog) != ldmTagMask) { - ip++; - continue; - } + for (n = 0; n < numSplits; n++) { + size_t forwardMatchLength = 0, backwardMatchLength = 0, + bestMatchLength = 0, mLength; + U32 offset; + BYTE const* const split = candidates[n].split; + U32 const checksum = candidates[n].checksum; + U32 const hash = candidates[n].hash; + ldmEntry_t* const bucket = candidates[n].bucket; + ldmEntry_t const* cur; + ldmEntry_t const* bestEntry = NULL; + ldmEntry_t newEntry; + + newEntry.offset = (U32)(split - base); + newEntry.checksum = checksum; + + /* If a split point would generate a sequence overlapping with + * the previous one, we merely register it in the hash table and + * move on */ + if (split < anchor) { + ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog); + continue; + } - /* Get the best entry and compute the match lengths */ - { - ldmEntry_t* const bucket = - ZSTD_ldm_getBucket(ldmState, - ZSTD_ldm_getSmallHash(rollingHash, hBits), - *params); - ldmEntry_t* cur; - size_t bestMatchLength = 0; - U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits); - - for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) { + for (cur = bucket; cur < bucket + entsPerBucket; cur++) { size_t curForwardMatchLength, curBackwardMatchLength, curTotalMatchLength; if (cur->checksum != checksum || cur->offset <= lowestIndex) { @@ -313,30 +435,23 @@ static size_t ZSTD_ldm_generateSequences_internal( cur->offset < dictLimit ? 
dictEnd : iend; BYTE const* const lowMatchPtr = cur->offset < dictLimit ? dictStart : lowPrefixPtr; - - curForwardMatchLength = ZSTD_count_2segments( - ip, pMatch, iend, - matchEnd, lowPrefixPtr); + curForwardMatchLength = + ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr); if (curForwardMatchLength < minMatchLength) { continue; } - curBackwardMatchLength = - ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch, - lowMatchPtr); - curTotalMatchLength = curForwardMatchLength + - curBackwardMatchLength; + curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments( + split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd); } else { /* !extDict */ BYTE const* const pMatch = base + cur->offset; - curForwardMatchLength = ZSTD_count(ip, pMatch, iend); + curForwardMatchLength = ZSTD_count(split, pMatch, iend); if (curForwardMatchLength < minMatchLength) { continue; } curBackwardMatchLength = - ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch, - lowPrefixPtr); - curTotalMatchLength = curForwardMatchLength + - curBackwardMatchLength; + ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr); } + curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength; if (curTotalMatchLength > bestMatchLength) { bestMatchLength = curTotalMatchLength; @@ -345,57 +460,54 @@ static size_t ZSTD_ldm_generateSequences_internal( bestEntry = cur; } } - } - /* No match found -- continue searching */ - if (bestEntry == NULL) { - ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, - hBits, current, - *params); - ip++; - continue; - } - - /* Match found */ - mLength = forwardMatchLength + backwardMatchLength; - ip -= backwardMatchLength; + /* No match found -- insert an entry into the hash table + * and process the next candidate match */ + if (bestEntry == NULL) { + ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog); + continue; + } - { - /* Store the sequence: - * ip = current - backwardMatchLength - * The match is at (bestEntry->offset - backwardMatchLength) - */ - U32 const matchIndex = bestEntry->offset; - U32 const offset = current - matchIndex; - rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size; - - /* Out of sequence storage */ - if (rawSeqStore->size == rawSeqStore->capacity) - return ERROR(dstSize_tooSmall); - seq->litLength = (U32)(ip - anchor); - seq->matchLength = (U32)mLength; - seq->offset = offset; - rawSeqStore->size++; - } + /* Match found */ + offset = (U32)(split - base) - bestEntry->offset; + mLength = forwardMatchLength + backwardMatchLength; + { + rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size; + + /* Out of sequence storage */ + if (rawSeqStore->size == rawSeqStore->capacity) + return ERROR(dstSize_tooSmall); + seq->litLength = (U32)(split - backwardMatchLength - anchor); + seq->matchLength = (U32)mLength; + seq->offset = offset; + rawSeqStore->size++; + } - /* Insert the current entry into the hash table */ - ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits, - (U32)(lastHashed - base), - *params); + /* Insert the current entry into the hash table --- it must be + * done after the previous block to avoid clobbering bestEntry */ + ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog); - assert(ip + backwardMatchLength == lastHashed); + anchor = split + forwardMatchLength; - /* Fill the hash table from lastHashed+1 to ip+mLength*/ - /* Heuristic: don't need to fill the entire table at end of block */ - if (ip + mLength <= ilimit) { - rollingHash = ZSTD_ldm_fillLdmHashTable( - ldmState, rollingHash, 
lastHashed, - ip + mLength, base, hBits, *params); - lastHashed = ip + mLength - 1; + /* If we find a match that ends after the data that we've hashed + * then we have a repeating, overlapping pattern. E.g. all zeros. + * If one repetition of the pattern matches our `stopMask` then all + * repetitions will. We don't need to insert them all into our table, + * only the first one. So skip over overlapping matches. + * This is a major speed boost (20x) for compressing a single byte + * repeated, when that byte ends up in the table. + */ + if (anchor > ip + hashed) { + ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength); + /* Continue the outer loop at anchor (ip + hashed == anchor). */ + ip = anchor - hashed; + break; + } } - ip += mLength; - anchor = ip; + + ip += hashed; } + return iend - anchor; } @@ -412,7 +524,7 @@ static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size, } size_t ZSTD_ldm_generateSequences( - ldmState_t* ldmState, rawSeqStore_t* sequences, + ldmState_t* ldmState, RawSeqStore_t* sequences, ldmParams_t const* params, void const* src, size_t srcSize) { U32 const maxDist = 1U << params->windowLog; @@ -444,11 +556,13 @@ size_t ZSTD_ldm_generateSequences( assert(chunkStart < iend); /* 1. Perform overflow correction if necessary. */ - if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) { + if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd)) { U32 const ldmHSize = 1U << params->hashLog; U32 const correction = ZSTD_window_correctOverflow( - &ldmState->window, /* cycleLog */ 0, maxDist, src); + &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart); ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction); + /* invalidate dictionaries on overflow correction */ + ldmState->loadedDictEnd = 0; } /* 2. We enforce the maximum offset allowed. * * kMaxChunkSize should be small enough, that we don't lose too much of * the window through early invalidation. * TODO: * Test the chunk size. * * Try invalidation after the sequence generation and test the - * the offset against maxDist directly. + * offset against maxDist directly. + * + * NOTE: Because of dictionaries + sequence splitting we MUST make sure + * that any offset used is valid at the END of the sequence, since it may + * be split into two sequences. This condition holds when using + * ZSTD_window_enforceMaxDist(), but if we move to checking offsets + * against maxDist directly, we'll have to carefully handle that case. */ - ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, NULL, NULL); + ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL); /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */ newLeftoverSize = ZSTD_ldm_generateSequences_internal( ldmState, sequences, params, chunkStart, chunkSize); @@ -480,7 +600,9 @@ size_t ZSTD_ldm_generateSequences( return 0; } -void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) { +void +ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) +{ while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) { rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos; if (srcSize <= seq->litLength) { @@ -515,7 +637,7 @@ void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 cons * Returns the current sequence to handle, or if the rest of the block should * be literals, it returns a sequence with offset == 0. 
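maybeSplitSequence(), whose updated signature follows, implements that rule. A simplified model of the splitting decision (toy types are hypothetical; the real function also advances the rawSeqStore cursor and hands the leftover bytes to ZSTD_ldm_skipSequences()):

    #include <stdint.h>

    typedef struct { uint32_t litLength, matchLength, offset; } toySeq;

    /* Decide what the current block can emit when at most `remaining` bytes of
     * the sequence fit; offset == 0 signals "emit the rest as literals". */
    static toySeq toy_splitSequence(toySeq seq, uint32_t remaining, uint32_t minMatch)
    {
        if (remaining >= seq.litLength + seq.matchLength)
            return seq;                                   /* fits whole, no split */
        if (remaining <= seq.litLength) {
            seq.offset = 0;                               /* only literals fit */
        } else {
            seq.matchLength = remaining - seq.litLength;  /* truncate the match */
            if (seq.matchLength < minMatch)
                seq.offset = 0;                           /* too short to keep */
        }
        return seq;
    }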
*/ -static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore, +static rawSeq maybeSplitSequence(RawSeqStore_t* rawSeqStore, U32 const remaining, U32 const minMatch) { rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos]; @@ -539,14 +661,32 @@ static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore, return sequence; } -size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes) { + U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes); + while (currPos && rawSeqStore->pos < rawSeqStore->size) { + rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos]; + if (currPos >= currSeq.litLength + currSeq.matchLength) { + currPos -= currSeq.litLength + currSeq.matchLength; + rawSeqStore->pos++; + } else { + rawSeqStore->posInSequence = currPos; + break; + } + } + if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) { + rawSeqStore->posInSequence = 0; + } +} + +size_t ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore, + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_ParamSwitch_e useRowMatchFinder, void const* src, size_t srcSize) { const ZSTD_compressionParameters* const cParams = &ms->cParams; unsigned const minMatch = cParams->minMatch; - ZSTD_blockCompressor const blockCompressor = - ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms)); + ZSTD_BlockCompressor_f const blockCompressor = + ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms)); /* Input bounds */ BYTE const* const istart = (BYTE const*)src; BYTE const* const iend = istart + srcSize; @@ -554,27 +694,35 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, BYTE const* ip = istart; DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize); + /* If using opt parser, use LDMs only as candidates rather than always accepting them */ + if (cParams->strategy >= ZSTD_btopt) { + size_t lastLLSize; + ms->ldmSeqStore = rawSeqStore; + lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize); + ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize); + return lastLLSize; + } + assert(rawSeqStore->pos <= rawSeqStore->size); assert(rawSeqStore->size <= rawSeqStore->capacity); - /* Loop through each sequence and apply the block compressor to the lits */ + /* Loop through each sequence and apply the block compressor to the literals */ while (rawSeqStore->pos < rawSeqStore->size && ip < iend) { /* maybeSplitSequence updates rawSeqStore->pos */ rawSeq const sequence = maybeSplitSequence(rawSeqStore, (U32)(iend - ip), minMatch); - int i; /* End signal */ if (sequence.offset == 0) break; - assert(sequence.offset <= (1U << cParams->windowLog)); assert(ip + sequence.litLength + sequence.matchLength <= iend); /* Fill tables for block compressor */ ZSTD_ldm_limitTableUpdate(ms, ip); ZSTD_ldm_fillFastTables(ms, ip); /* Run the block compressor */ - DEBUGLOG(5, "calling block compressor on segment of size %u", sequence.litLength); + DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength); { + int i; size_t const newLitLength = blockCompressor(ms, seqStore, rep, ip, sequence.litLength); ip += sequence.litLength; @@ -583,9 +731,9 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, rep[i] = rep[i-1]; rep[0] = sequence.offset; /* Store the sequence */ - ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, - sequence.offset + 
ZSTD_REP_MOVE, - sequence.matchLength - MINMATCH); + ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend, + OFFSET_TO_OFFBASE(sequence.offset), + sequence.matchLength); ip += sequence.matchLength; } } diff --git a/lib/compress/zstd_ldm.h b/lib/compress/zstd_ldm.h index a47846128b2..42736231aa8 100644 --- a/lib/compress/zstd_ldm.h +++ b/lib/compress/zstd_ldm.h @@ -1,21 +1,18 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_LDM_H #define ZSTD_LDM_H -#if defined (__cplusplus) -extern "C" { -#endif - #include "zstd_compress_internal.h" /* ldmParams_t, U32 */ -#include "zstd.h" /* ZSTD_CCtx, size_t */ +#include "../zstd.h" /* ZSTD_CCtx, size_t */ /*-************************************* * Long distance matching ***************************************/ @@ -23,6 +20,10 @@ extern "C" { #define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT +void ZSTD_ldm_fillHashTable( + ldmState_t* state, const BYTE* ip, + const BYTE* iend, ldmParams_t const* params); + /** * ZSTD_ldm_generateSequences(): * @@ -38,7 +39,7 @@ extern "C" { * sequences. */ size_t ZSTD_ldm_generateSequences( - ldmState_t* ldms, rawSeqStore_t* sequences, + ldmState_t* ldms, RawSeqStore_t* sequences, ldmParams_t const* params, void const* src, size_t srcSize); /** @@ -59,8 +60,9 @@ size_t ZSTD_ldm_generateSequences( * two. We handle that case correctly, and update `rawSeqStore` appropriately. * NOTE: This function does not return any errors. */ -size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore, + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_ParamSwitch_e useRowMatchFinder, void const* src, size_t srcSize); /** @@ -68,11 +70,17 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, * * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`. * Avoids emitting matches less than `minMatch` bytes. - * Must be called for data with is not passed to ZSTD_ldm_blockCompress(). + * Must be called for data that is not passed to ZSTD_ldm_blockCompress(). */ -void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, +void ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch); +/* ZSTD_ldm_skipRawSeqStoreBytes(): + * Moves forward in rawSeqStore by nbBytes, updating fields 'pos' and 'posInSequence'. + * Not to be used in conjunction with ZSTD_ldm_skipSequences(). + * Must be called for data that is not passed to ZSTD_ldm_blockCompress(). 
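A worked example of the pos/posInSequence bookkeeping described here: with queued sequences {litLength=5, matchLength=10} and {litLength=3, matchLength=20}, skipping nbBytes=17 consumes the first sequence whole (15 bytes, pos advances to 1) and stops 2 bytes into the second (posInSequence=2). A compilable model of the cursor arithmetic (toy types; field names follow the RawSeqStore_t usage in this diff):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct { uint32_t litLength, matchLength; } toyRawSeq;
    typedef struct {
        const toyRawSeq* seq;
        size_t pos;             /* index of the current sequence */
        size_t posInSequence;   /* bytes already consumed inside it */
        size_t size;
    } toyRawSeqStore;

    static void toy_skipRawSeqStoreBytes(toyRawSeqStore* store, size_t nbBytes)
    {
        size_t currPos = store->posInSequence + nbBytes;
        while (currPos && store->pos < store->size) {
            toyRawSeq const curr = store->seq[store->pos];
            if (currPos >= curr.litLength + curr.matchLength) {
                currPos -= curr.litLength + curr.matchLength;  /* consume whole seq */
                store->pos++;
            } else {
                store->posInSequence = currPos;                /* stop mid-sequence */
                return;
            }
        }
        store->posInSequence = 0;  /* consumed exactly, or ran past the end */
    }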
+ */ +void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes); /** ZSTD_ldm_getTableSize() : * Estimate the space needed for long distance matching tables or 0 if LDM is @@ -98,8 +106,4 @@ size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize); void ZSTD_ldm_adjustParameters(ldmParams_t* params, ZSTD_compressionParameters const* cParams); -#if defined (__cplusplus) -} -#endif - #endif /* ZSTD_FAST_H */ diff --git a/lib/compress/zstd_ldm_geartab.h b/lib/compress/zstd_ldm_geartab.h new file mode 100644 index 00000000000..ef34bc5c923 --- /dev/null +++ b/lib/compress/zstd_ldm_geartab.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_LDM_GEARTAB_H +#define ZSTD_LDM_GEARTAB_H + +#include "../common/compiler.h" /* UNUSED_ATTR */ +#include "../common/mem.h" /* U64 */ + +static UNUSED_ATTR const U64 ZSTD_ldm_gearTab[256] = { + 0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc, + 0xb065bb4b114fb1de, 0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05, + 0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e, + 0x9c8528f65badeaca, 0x86563706e2097529, 0x2902475fa375d889, + 0xafb32a9739a5ebe6, 0xce2714da3883e639, 0x21eaf821722e69e, + 0x37b628620b628, 0x49a8d455d88caf5, 0x8556d711e6958140, + 0x4f7ae74fc605c1f, 0x829f0c3468bd3a20, 0x4ffdc885c625179e, + 0x8473de048a3daf1b, 0x51008822b05646b2, 0x69d75d12b2d1cc5f, + 0x8c9d4a19159154bc, 0xc3cc10f4abbd4003, 0xd06ddc1cecb97391, + 0xbe48e6e7ed80302e, 0x3481db31cee03547, 0xacc3f67cdaa1d210, + 0x65cb771d8c7f96cc, 0x8eb27177055723dd, 0xc789950d44cd94be, + 0x934feadc3700b12b, 0x5e485f11edbdf182, 0x1e2e2a46fd64767a, + 0x2969ca71d82efa7c, 0x9d46e9935ebbba2e, 0xe056b67e05e6822b, + 0x94d73f55739d03a0, 0xcd7010bdb69b5a03, 0x455ef9fcd79b82f4, + 0x869cb54a8749c161, 0x38d1a4fa6185d225, 0xb475166f94bbe9bb, + 0xa4143548720959f1, 0x7aed4780ba6b26ba, 0xd0ce264439e02312, + 0x84366d746078d508, 0xa8ce973c72ed17be, 0x21c323a29a430b01, + 0x9962d617e3af80ee, 0xab0ce91d9c8cf75b, 0x530e8ee6d19a4dbc, + 0x2ef68c0cf53f5d72, 0xc03a681640a85506, 0x496e4e9f9c310967, + 0x78580472b59b14a0, 0x273824c23b388577, 0x66bf923ad45cb553, + 0x47ae1a5a2492ba86, 0x35e304569e229659, 0x4765182a46870b6f, + 0x6cbab625e9099412, 0xddac9a2e598522c1, 0x7172086e666624f2, + 0xdf5003ca503b7837, 0x88c0c1db78563d09, 0x58d51865acfc289d, + 0x177671aec65224f1, 0xfb79d8a241e967d7, 0x2be1e101cad9a49a, + 0x6625682f6e29186b, 0x399553457ac06e50, 0x35dffb4c23abb74, + 0x429db2591f54aade, 0xc52802a8037d1009, 0x6acb27381f0b25f3, + 0xf45e2551ee4f823b, 0x8b0ea2d99580c2f7, 0x3bed519cbcb4e1e1, + 0xff452823dbb010a, 0x9d42ed614f3dd267, 0x5b9313c06257c57b, + 0xa114b8008b5e1442, 0xc1fe311c11c13d4b, 0x66e8763ea34c5568, + 0x8b982af1c262f05d, 0xee8876faaa75fbb7, 0x8a62a4d0d172bb2a, + 0xc13d94a3b7449a97, 0x6dbbba9dc15d037c, 0xc786101f1d92e0f1, + 0xd78681a907a0b79b, 0xf61aaf2962c9abb9, 0x2cfd16fcd3cb7ad9, + 0x868c5b6744624d21, 0x25e650899c74ddd7, 0xba042af4a7c37463, + 0x4eb1a539465a3eca, 0xbe09dbf03b05d5ca, 0x774e5a362b5472ba, + 0x47a1221229d183cd, 0x504b0ca18ef5a2df, 0xdffbdfbde2456eb9, + 0x46cd2b2fbee34634, 0xf2aef8fe819d98c3, 0x357f5276d4599d61, + 0x24a5483879c453e3, 0x88026889192b4b9, 0x28da96671782dbec, + 0x4ef37c40588e9aaa, 
0x8837b90651bc9fb3, 0xc164f741d3f0e5d6, + 0xbc135a0a704b70ba, 0x69cd868f7622ada, 0xbc37ba89e0b9c0ab, + 0x47c14a01323552f6, 0x4f00794bacee98bb, 0x7107de7d637a69d5, + 0x88af793bb6f2255e, 0xf3c6466b8799b598, 0xc288c616aa7f3b59, + 0x81ca63cf42fca3fd, 0x88d85ace36a2674b, 0xd056bd3792389e7, + 0xe55c396c4e9dd32d, 0xbefb504571e6c0a6, 0x96ab32115e91e8cc, + 0xbf8acb18de8f38d1, 0x66dae58801672606, 0x833b6017872317fb, + 0xb87c16f2d1c92864, 0xdb766a74e58b669c, 0x89659f85c61417be, + 0xc8daad856011ea0c, 0x76a4b565b6fe7eae, 0xa469d085f6237312, + 0xaaf0365683a3e96c, 0x4dbb746f8424f7b8, 0x638755af4e4acc1, + 0x3d7807f5bde64486, 0x17be6d8f5bbb7639, 0x903f0cd44dc35dc, + 0x67b672eafdf1196c, 0xa676ff93ed4c82f1, 0x521d1004c5053d9d, + 0x37ba9ad09ccc9202, 0x84e54d297aacfb51, 0xa0b4b776a143445, + 0x820d471e20b348e, 0x1874383cb83d46dc, 0x97edeec7a1efe11c, + 0xb330e50b1bdc42aa, 0x1dd91955ce70e032, 0xa514cdb88f2939d5, + 0x2791233fd90db9d3, 0x7b670a4cc50f7a9b, 0x77c07d2a05c6dfa5, + 0xe3778b6646d0a6fa, 0xb39c8eda47b56749, 0x933ed448addbef28, + 0xaf846af6ab7d0bf4, 0xe5af208eb666e49, 0x5e6622f73534cd6a, + 0x297daeca42ef5b6e, 0x862daef3d35539a6, 0xe68722498f8e1ea9, + 0x981c53093dc0d572, 0xfa09b0bfbf86fbf5, 0x30b1e96166219f15, + 0x70e7d466bdc4fb83, 0x5a66736e35f2a8e9, 0xcddb59d2b7c1baef, + 0xd6c7d247d26d8996, 0xea4e39eac8de1ba3, 0x539c8bb19fa3aff2, + 0x9f90e4c5fd508d8, 0xa34e5956fbaf3385, 0x2e2f8e151d3ef375, + 0x173691e9b83faec1, 0xb85a8d56bf016379, 0x8382381267408ae3, + 0xb90f901bbdc0096d, 0x7c6ad32933bcec65, 0x76bb5e2f2c8ad595, + 0x390f851a6cf46d28, 0xc3e6064da1c2da72, 0xc52a0c101cfa5389, + 0xd78eaf84a3fbc530, 0x3781b9e2288b997e, 0x73c2f6dea83d05c4, + 0x4228e364c5b5ed7, 0x9d7a3edf0da43911, 0x8edcfeda24686756, + 0x5e7667a7b7a9b3a1, 0x4c4f389fa143791d, 0xb08bc1023da7cddc, + 0x7ab4be3ae529b1cc, 0x754e6132dbe74ff9, 0x71635442a839df45, + 0x2f6fb1643fbe52de, 0x961e0a42cf7a8177, 0xf3b45d83d89ef2ea, + 0xee3de4cf4a6e3e9b, 0xcd6848542c3295e7, 0xe4cee1664c78662f, + 0x9947548b474c68c4, 0x25d73777a5ed8b0b, 0xc915b1d636b7fc, + 0x21c2ba75d9b0d2da, 0x5f6b5dcf608a64a1, 0xdcf333255ff9570c, + 0x633b922418ced4ee, 0xc136dde0b004b34a, 0x58cc83b05d4b2f5a, + 0x5eb424dda28e42d2, 0x62df47369739cd98, 0xb4e0b42485e4ce17, + 0x16e1f0c1f9a8d1e7, 0x8ec3916707560ebf, 0x62ba6e2df2cc9db3, + 0xcbf9f4ff77d83a16, 0x78d9d7d07d2bbcc4, 0xef554ce1e02c41f4, + 0x8d7581127eccf94d, 0xa9b53336cb3c8a05, 0x38c42c0bf45c4f91, + 0x640893cdf4488863, 0x80ec34bc575ea568, 0x39f324f5b48eaa40, + 0xe9d9ed1f8eff527f, 0x9224fc058cc5a214, 0xbaba00b04cfe7741, + 0x309a9f120fcf52af, 0xa558f3ec65626212, 0x424bec8b7adabe2f, + 0x41622513a6aea433, 0xb88da2d5324ca798, 0xd287733b245528a4, + 0x9a44697e6d68aec3, 0x7b1093be2f49bb28, 0x50bbec632e3d8aad, + 0x6cd90723e1ea8283, 0x897b9e7431b02bf3, 0x219efdcb338a7047, + 0x3b0311f0a27c0656, 0xdb17bf91c0db96e7, 0x8cd4fd6b4e85a5b2, + 0xfab071054ba6409d, 0x40d6fe831fa9dfd9, 0xaf358debad7d791e, + 0xeb8d0e25a65e3e58, 0xbbcbd3df14e08580, 0xcf751f27ecdab2b, + 0x2b4da14f2613d8f4 +}; + +#endif /* ZSTD_LDM_GEARTAB_H */ diff --git a/lib/compress/zstd_opt.c b/lib/compress/zstd_opt.c index 39858a82845..562a6b17bc9 100644 --- a/lib/compress/zstd_opt.c +++ b/lib/compress/zstd_opt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -12,42 +12,52 @@ #include "hist.h" #include "zstd_opt.h" +#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR) #define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */ -#define ZSTD_FREQ_DIV 4 /* log factor when using previous stats to init next stats */ #define ZSTD_MAX_PRICE (1<<30) -#define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */ +#define ZSTD_PREDEF_THRESHOLD 8 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */ /*-************************************* * Price functions for optimal parser ***************************************/ -#if 0 /* approximation at bit level */ +#if 0 /* approximation at bit level (for tests) */ # define BITCOST_ACCURACY 0 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat) ((void)opt, ZSTD_bitWeight(stat)) -#elif 0 /* fractional bit accuracy */ +# define WEIGHT(stat, opt) ((void)(opt), ZSTD_bitWeight(stat)) +#elif 0 /* fractional bit accuracy (for tests) */ # define BITCOST_ACCURACY 8 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat)) +# define WEIGHT(stat,opt) ((void)(opt), ZSTD_fracWeight(stat)) #else /* opt==approx, ultra==accurate */ # define BITCOST_ACCURACY 8 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat)) +# define WEIGHT(stat,opt) ((opt) ? 
ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat)) #endif +/* ZSTD_bitWeight() : + * provide estimated "cost" of a stat in full bits only */ MEM_STATIC U32 ZSTD_bitWeight(U32 stat) { return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER); } +/* ZSTD_fracWeight() : + * provide fractional-bit "cost" of a stat, + * using linear interpolation approximation */ MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat) { U32 const stat = rawStat + 1; U32 const hb = ZSTD_highbit32(stat); U32 const BWeight = hb * BITCOST_MULTIPLIER; + /* Fweight was meant for "Fractional weight" + * but it's effectively a value between 1 and 2 + * using fixed point arithmetic */ U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb; U32 const weight = BWeight + FWeight; assert(hb + BITCOST_ACCURACY < 31); @@ -58,7 +68,7 @@ MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat) /* debugging function, * @return price in bytes as fractional value * for debug messages only */ -MEM_STATIC double ZSTD_fCost(U32 price) +MEM_STATIC double ZSTD_fCost(int price) { return (double)price / (BITCOST_MULTIPLIER*8); } @@ -66,7 +76,7 @@ MEM_STATIC double ZSTD_fCost(U32 price) static int ZSTD_compressedLiterals(optState_t const* const optPtr) { - return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed; + return optPtr->literalCompressionMode != ZSTD_ps_disable; } static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel) @@ -79,25 +89,52 @@ static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel) } -/* ZSTD_downscaleStat() : - * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus) - * return the resulting sum of elements */ -static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus) +static U32 sum_u32(const unsigned table[], size_t nbElts) +{ + size_t n; + U32 total = 0; + for (n=0; n<nbElts; n++) { + total += table[n]; + } + return total; +} + +typedef enum { base_0possible=0, base_1guaranteed=1 } base_directive_e; + +static U32 +ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift, base_directive_e base1) { U32 s, sum=0; - DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1); - assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31); + DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", + (unsigned)lastEltIndex+1, (unsigned)shift ); + assert(shift < 30); for (s=0; s<lastEltIndex+1; s++) { - table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus)); - sum += table[s]; + unsigned const base = base1 ? 1 : (table[s]>0); + unsigned const newStat = base + (table[s] >> shift); + sum += newStat; + table[s] = newStat; } return sum; } +/* ZSTD_scaleStats() : + * reduce all elt frequencies in table if sum too large + * return the resulting sum of elements */ +static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget) +{ + U32 const prevsum = sum_u32(table, lastEltIndex+1); + U32 const factor = prevsum >> logTarget; + DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget); + assert(logTarget < 30); + if (factor <= 1) return prevsum; + return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor), base_1guaranteed); +} + /* ZSTD_rescaleFreqs() : * if first block (detected by optPtr->litLengthSum == 0) : init statistics * take hints from dictionary if there is one - * or init from zero, using src for literals stats, or flat 1 for match symbols + * and init from zero if there is none, + * using src for literals stats, and baseline stats for sequence symbols * otherwise downscale existing stats, to be used as seed for next block. 
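Before ZSTD_rescaleFreqs() continues below, a note on the new scaling pair above: the old code shrank every table by a fixed 2^(ZSTD_FREQ_DIV+malus) factor each block, while ZSTD_scaleStats() now leaves a table alone until its total exceeds 2^logTarget and then shifts each frequency down by exactly the power of two that brings the total back near the target. A standalone sketch of the same arithmetic, simplified to the base_1guaranteed case where every symbol keeps a count of at least 1:

    #include <stddef.h>

    /* Shrink every frequency by `shift` bits, flooring at 1, and return the
     * new total: the ZSTD_downscaleStats() idea without the base_0possible
     * variant used for literal stats. */
    static unsigned toy_downscale(unsigned* table, size_t nbElts, unsigned shift)
    {
        size_t s; unsigned sum = 0;
        for (s = 0; s < nbElts; s++) {
            table[s] = 1 + (table[s] >> shift);
            sum += table[s];
        }
        return sum;
    }

    /* Rescale only when the total exceeds 2^logTarget, by the smallest shift
     * that lands back near the target, mirroring ZSTD_scaleStats(). */
    static unsigned toy_scaleStats(unsigned* table, size_t nbElts, unsigned logTarget)
    {
        unsigned total = 0, factor, shift = 0;
        size_t s;
        for (s = 0; s < nbElts; s++) total += table[s];
        factor = total >> logTarget;
        if (factor <= 1) return total;   /* small enough: keep stats as-is */
        while (factor >>= 1) shift++;    /* shift = floor(log2(factor)) */
        return toy_downscale(table, nbElts, shift);
    }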
*/ static void @@ -109,24 +146,28 @@ ZSTD_rescaleFreqs(optState_t* const optPtr, DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize); optPtr->priceType = zop_dynamic; - if (optPtr->litLengthSum == 0) { /* first block : init */ - if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */ - DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef"); + if (optPtr->litLengthSum == 0) { /* no literals stats collected -> first block assumed -> init */ + + /* heuristic: use pre-defined stats for too small inputs */ + if (srcSize <= ZSTD_PREDEF_THRESHOLD) { + DEBUGLOG(5, "srcSize <= %i : use predefined stats", ZSTD_PREDEF_THRESHOLD); optPtr->priceType = zop_predef; } assert(optPtr->symbolCosts != NULL); if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) { - /* huffman table presumed generated by dictionary */ + + /* huffman stats covering the full value set : table presumed generated by dictionary */ optPtr->priceType = zop_dynamic; if (compressedLiterals) { + /* generate literals statistics from huffman table */ unsigned lit; assert(optPtr->litFreq != NULL); optPtr->litSum = 0; for (lit=0; lit<=MaxLit; lit++) { U32 const scaleLog = 11; /* scale to 2K */ - U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit); + U32 const bitCost = HUF_getNbBitsFromCTable(optPtr->symbolCosts->huf.CTable, lit); assert(bitCost <= scaleLog); optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; optPtr->litSum += optPtr->litFreq[lit]; @@ -168,20 +209,26 @@ ZSTD_rescaleFreqs(optState_t* const optPtr, optPtr->offCodeSum += optPtr->offCodeFreq[of]; } } - } else { /* not a dictionary */ + } else { /* first block, no dictionary */ assert(optPtr->litFreq != NULL); if (compressedLiterals) { + /* base initial cost of literals on direct frequency within src */ unsigned lit = MaxLit; HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */ - optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1); + optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8, base_0possible); } - { unsigned ll; - for (ll=0; ll<=MaxLL; ll++) - optPtr->litLengthFreq[ll] = 1; + { unsigned const baseLLfreqs[MaxLL+1] = { + 4, 2, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1 + }; + ZSTD_memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(baseLLfreqs)); + optPtr->litLengthSum = sum_u32(baseLLfreqs, MaxLL+1); } - optPtr->litLengthSum = MaxLL+1; { unsigned ml; for (ml=0; ml<=MaxML; ml++) @@ -189,21 +236,25 @@ ZSTD_rescaleFreqs(optState_t* const optPtr, } optPtr->matchLengthSum = MaxML+1; - { unsigned of; - for (of=0; of<=MaxOff; of++) - optPtr->offCodeFreq[of] = 1; + { unsigned const baseOFCfreqs[MaxOff+1] = { + 6, 2, 1, 1, 2, 3, 4, 4, + 4, 3, 2, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1 + }; + ZSTD_memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(baseOFCfreqs)); + optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1); } - optPtr->offCodeSum = MaxOff+1; } - } else { /* new block : re-use previous statistics, scaled down */ + } else { /* new block : scale down accumulated statistics */ if (compressedLiterals) - optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1); - optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0); - optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0); - optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0); + optPtr->litSum = 
ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12); + optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, MaxLL, 11); + optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, MaxML, 11); + optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, MaxOff, 11); } ZSTD_setBasePrices(optPtr, optLevel); @@ -216,6 +267,7 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength, const optState_t* const optPtr, int optLevel) { + DEBUGLOG(8, "ZSTD_rawLiteralsCost (%u literals)", litLength); if (litLength == 0) return 0; if (!ZSTD_compressedLiterals(optPtr)) @@ -225,11 +277,14 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength, return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bit per literal - no statistic used */ /* dynamic statistics */ - { U32 price = litLength * optPtr->litSumBasePrice; + { U32 price = optPtr->litSumBasePrice * litLength; + U32 const litPriceMax = optPtr->litSumBasePrice - BITCOST_MULTIPLIER; U32 u; + assert(optPtr->litSumBasePrice >= BITCOST_MULTIPLIER); for (u=0; u < litLength; u++) { - assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice); /* literal cost should never be negative */ - price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel); + U32 litPrice = WEIGHT(optPtr->litFreq[literals[u]], optLevel); + if (UNLIKELY(litPrice > litPriceMax)) litPrice = litPriceMax; + price -= litPrice; } return price; } @@ -239,7 +294,17 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength, * cost of literalLength symbol */ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel) { - if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel); + assert(litLength <= ZSTD_BLOCKSIZE_MAX); + if (optPtr->priceType == zop_predef) + return WEIGHT(litLength, optLevel); + + /* ZSTD_LLcode() can't compute litLength price for sizes >= ZSTD_BLOCKSIZE_MAX + * because it isn't representable in the zstd format. + * So instead just pretend it would cost 1 bit more than ZSTD_BLOCKSIZE_MAX - 1. + * In such a case, the block would be all literals. + */ + if (litLength == ZSTD_BLOCKSIZE_MAX) + return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel); /* dynamic statistics */ { U32 const llCode = ZSTD_LLcode(litLength); @@ -249,57 +314,26 @@ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optP } } -/* ZSTD_litLengthContribution() : - * @return ( cost(litlength) - cost(0) ) - * this value can then be added to rawLiteralsCost() - * to provide a cost which is directly comparable to a match ending at same position */ -static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr, int optLevel) -{ - if (optPtr->priceType >= zop_predef) return (int)WEIGHT(litLength, optLevel); - - /* dynamic statistics */ - { U32 const llCode = ZSTD_LLcode(litLength); - int const contribution = (int)(LL_bits[llCode] * BITCOST_MULTIPLIER) - + (int)WEIGHT(optPtr->litLengthFreq[0], optLevel) /* note: log2litLengthSum cancel out */ - - (int)WEIGHT(optPtr->litLengthFreq[llCode], optLevel); -#if 1 - return contribution; -#else - return MAX(0, contribution); /* sometimes better, sometimes not ... 
*/ -#endif - } -} - -/* ZSTD_literalsContribution() : - * creates a fake cost for the literals part of a sequence - * which can be compared to the ending cost of a match - * should a new match start at this position */ -static int ZSTD_literalsContribution(const BYTE* const literals, U32 const litLength, - const optState_t* const optPtr, - int optLevel) -{ - int const contribution = (int)ZSTD_rawLiteralsCost(literals, litLength, optPtr, optLevel) - + ZSTD_litLengthContribution(litLength, optPtr, optLevel); - return contribution; -} - /* ZSTD_getMatchPrice() : - * Provides the cost of the match part (offset + matchLength) of a sequence + * Provides the cost of the match part (offset + matchLength) of a sequence. * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence. - * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */ + * @offBase : sumtype, representing an offset or a repcode, and using numeric representation of ZSTD_storeSeq() + * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) + */ FORCE_INLINE_TEMPLATE U32 -ZSTD_getMatchPrice(U32 const offset, +ZSTD_getMatchPrice(U32 const offBase, U32 const matchLength, const optState_t* const optPtr, int const optLevel) { U32 price; - U32 const offCode = ZSTD_highbit32(offset+1); + U32 const offCode = ZSTD_highbit32(offBase); U32 const mlBase = matchLength - MINMATCH; assert(matchLength >= MINMATCH); - if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */ - return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER); + if (optPtr->priceType == zop_predef) /* fixed scheme, does not use statistics */ + return WEIGHT(mlBase, optLevel) + + ((16 + offCode) * BITCOST_MULTIPLIER); /* emulated offset cost */ /* dynamic statistics */ price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel)); @@ -318,10 +352,10 @@ ZSTD_getMatchPrice(U32 const offset, } /* ZSTD_updateStats() : - * assumption : literals + litLengtn <= iend */ + * assumption : literals + litLength <= iend */ static void ZSTD_updateStats(optState_t* const optPtr, U32 litLength, const BYTE* literals, - U32 offsetCode, U32 matchLength) + U32 offBase, U32 matchLength) { /* literals */ if (ZSTD_compressedLiterals(optPtr)) { @@ -337,8 +371,8 @@ static void ZSTD_updateStats(optState_t* const optPtr, optPtr->litLengthSum++; } - /* match offset code (0-2=>repCode; 3+=>offset+2) */ - { U32 const offCode = ZSTD_highbit32(offsetCode+1); + /* offset code : follows storeSeq() numeric representation */ + { U32 const offCode = ZSTD_highbit32(offBase); assert(offCode <= MaxOff); optPtr->offCodeFreq[offCode]++; optPtr->offCodeSum++; @@ -372,9 +406,11 @@ MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length) /* Update hashTable3 up to ip (excluded) Assumption : always within prefix (i.e. not within extDict) */ -static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, - U32* nextToUpdate3, - const BYTE* const ip) +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_MatchState_t* ms, + U32* nextToUpdate3, + const BYTE* const ip) { U32* const hashTable3 = ms->hashTable3; U32 const hashLog3 = ms->hashLog3; @@ -398,11 +434,15 @@ static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, * Binary Tree search ***************************************/ /** ZSTD_insertBt1() : add one or multiple positions to tree. - * ip : assumed <= iend-8 . 
+ * @param ip assumed <= iend-8 . + * @param target The target of ZSTD_updateTree_internal() - we are filling to this position * @return : nb of positions added */ -static U32 ZSTD_insertBt1( - ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_insertBt1( + const ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iend, + U32 const target, U32 const mls, const int extDict) { const ZSTD_compressionParameters* const cParams = &ms->cParams; @@ -420,32 +460,36 @@ static U32 ZSTD_insertBt1( const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const prefixStart = base + dictLimit; const BYTE* match; - const U32 current = (U32)(ip-base); - const U32 btLow = btMask >= current ? 0 : current - btMask; - U32* smallerPtr = bt + 2*(current&btMask); + const U32 curr = (U32)(ip-base); + const U32 btLow = btMask >= curr ? 0 : curr - btMask; + U32* smallerPtr = bt + 2*(curr&btMask); U32* largerPtr = smallerPtr + 1; U32 dummy32; /* to be nullified at the end */ - U32 const windowLow = ms->window.lowLimit; - U32 matchEndIdx = current+8+1; + /* windowLow is based on target because + * we only need positions that will be in the window at the end of the tree update. + */ + U32 const windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog); + U32 matchEndIdx = curr+8+1; size_t bestLength = 8; U32 nbCompares = 1U << cParams->searchLog; #ifdef ZSTD_C_PREDICT - U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0); - U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1); + U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0); + U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1); predictedSmall += (predictedSmall>0); predictedLarge += (predictedLarge>0); #endif /* ZSTD_C_PREDICT */ - DEBUGLOG(8, "ZSTD_insertBt1 (%u)", current); + DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr); + assert(curr <= target); assert(ip <= iend-8); /* required for h calculation */ - hashTable[h] = current; /* Update Hash Table */ + hashTable[h] = curr; /* Update Hash Table */ assert(windowLow > 0); - while (nbCompares-- && (matchIndex >= windowLow)) { + for (; nbCompares && (matchIndex >= windowLow); --nbCompares) { U32* const nextPtr = bt + 2*(matchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ - assert(matchIndex < current); + assert(matchIndex < curr); #ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */ const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */ @@ -508,48 +552,56 @@ static U32 ZSTD_insertBt1( *smallerPtr = *largerPtr = 0; { U32 positions = 0; if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384)); /* speed optimization */ - assert(matchEndIdx > current + 8); - return MAX(positions, matchEndIdx - (current + 8)); + assert(matchEndIdx > curr + 8); + return MAX(positions, matchEndIdx - (curr + 8)); } } FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR void ZSTD_updateTree_internal( - ZSTD_matchState_t* ms, + ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iend, const U32 mls, const ZSTD_dictMode_e dictMode) { const BYTE* const base = ms->window.base; U32 const target = (U32)(ip - base); U32 idx = ms->nextToUpdate; - DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)", + DEBUGLOG(7, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)", idx, target, dictMode); - while(idx < target) - idx += ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict); + while(idx < 
target) { + U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, target, mls, dictMode == ZSTD_extDict); + assert(idx < (U32)(idx + forward)); + idx += forward; + } + assert((size_t)(ip - base) <= (size_t)(U32)(-1)); + assert((size_t)(iend - base) <= (size_t)(U32)(-1)); ms->nextToUpdate = target; } -void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) { +void ZSTD_updateTree(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend) { ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict); } FORCE_INLINE_TEMPLATE -U32 ZSTD_insertBtAndGetAllMatches ( - ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */ - ZSTD_matchState_t* ms, - U32* nextToUpdate3, - const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode, - const U32 rep[ZSTD_REP_NUM], - U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */ - const U32 lengthToBeat, - U32 const mls /* template */) +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 +ZSTD_insertBtAndGetAllMatches ( + ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */ + ZSTD_MatchState_t* ms, + U32* nextToUpdate3, + const BYTE* const ip, const BYTE* const iLimit, + const ZSTD_dictMode_e dictMode, + const U32 rep[ZSTD_REP_NUM], + const U32 ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */ + const U32 lengthToBeat, + const U32 mls /* template */) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); - U32 const maxDistance = 1U << cParams->windowLog; const BYTE* const base = ms->window.base; - U32 const current = (U32)(ip-base); + U32 const curr = (U32)(ip-base); U32 const hashLog = cParams->hashLog; U32 const minMatch = (mls==3) ? 3 : 4; U32* const hashTable = ms->hashTable; @@ -563,18 +615,17 @@ U32 ZSTD_insertBtAndGetAllMatches ( U32 const dictLimit = ms->window.dictLimit; const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const prefixStart = base + dictLimit; - U32 const btLow = (btMask >= current) ? 0 : current - btMask; - U32 const windowValid = ms->window.lowLimit; - U32 const windowLow = ((current - windowValid) > maxDistance) ? current - maxDistance : windowValid; + U32 const btLow = (btMask >= curr) ? 0 : curr - btMask; + U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog); U32 const matchLow = windowLow ? windowLow : 1; - U32* smallerPtr = bt + 2*(current&btMask); - U32* largerPtr = bt + 2*(current&btMask) + 1; - U32 matchEndIdx = current+8+1; /* farthest referenced position of any match => detects repetitive patterns */ + U32* smallerPtr = bt + 2*(curr&btMask); + U32* largerPtr = bt + 2*(curr&btMask) + 1; + U32 matchEndIdx = curr+8+1; /* farthest referenced position of any match => detects repetitive patterns */ U32 dummy32; /* to be nullified at the end */ U32 mnum = 0; U32 nbCompares = 1U << cParams->searchLog; - const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL; + const ZSTD_MatchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL; const ZSTD_compressionParameters* const dmsCParams = dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL; const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? 
dms->window.base : NULL; @@ -588,7 +639,7 @@ U32 ZSTD_insertBtAndGetAllMatches ( U32 const dmsBtLow = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit; size_t bestLength = lengthToBeat-1; - DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", current); + DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr); /* check repCode */ assert(ll0 <= 1); /* necessarily 1 or 0 */ @@ -596,27 +647,30 @@ U32 ZSTD_insertBtAndGetAllMatches ( U32 repCode; for (repCode = ll0; repCode < lastR; repCode++) { U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; - U32 const repIndex = current - repOffset; + U32 const repIndex = curr - repOffset; U32 repLen = 0; - assert(current >= dictLimit); - if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < current-dictLimit) { /* equivalent to `current > repIndex >= dictLimit` */ - if (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch)) { + assert(curr >= dictLimit); + if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) { /* equivalent to `curr > repIndex >= dictLimit` */ + /* We must validate the repcode offset because when we're using a dictionary the + * valid offset range shrinks when the dictionary goes out of bounds. + */ + if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) { repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch; } - } else { /* repIndex < dictLimit || repIndex >= current */ + } else { /* repIndex < dictLimit || repIndex >= curr */ const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ? dmsBase + repIndex - dmsIndexDelta : dictBase + repIndex; - assert(current >= windowLow); + assert(curr >= windowLow); if ( dictMode == ZSTD_extDict - && ( ((repOffset-1) /*intentional overflow*/ < current - windowLow) /* equivalent to `current > repIndex >= windowLow` */ - & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */) + && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow) /* equivalent to `curr > repIndex >= windowLow` */ + & (ZSTD_index_overlap_check(dictLimit, repIndex)) ) && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch; } if (dictMode == ZSTD_dictMatchState - && ( ((repOffset-1) /*intentional overflow*/ < current - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `current > repIndex >= dmsLowLimit` */ - & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */ + && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `curr > repIndex >= dmsLowLimit` */ + & (ZSTD_index_overlap_check(dictLimit, repIndex)) ) && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch; } } @@ -625,7 +679,7 @@ U32 ZSTD_insertBtAndGetAllMatches ( DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u", repCode, ll0, repOffset, repLen); bestLength = repLen; - matches[mnum].off = repCode - ll0; + matches[mnum].off = REPCODE_TO_OFFBASE(repCode - ll0 + 1); /* expect value between 1 and 3 */ matches[mnum].len = (U32)repLen; mnum++; if ( (repLen > 
sufficient_len) @@ -637,7 +691,7 @@ U32 ZSTD_insertBtAndGetAllMatches ( if ((mls == 3) /*static*/ && (bestLength < mls)) { U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip); if ((matchIndex3 >= matchLow) - & (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) { + & (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) { size_t mlen; if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) { const BYTE* const match = base + matchIndex3; @@ -652,54 +706,55 @@ U32 ZSTD_insertBtAndGetAllMatches ( DEBUGLOG(8, "found small match with hlog3, of length %u", (U32)mlen); bestLength = mlen; - assert(current > matchIndex3); + assert(curr > matchIndex3); assert(mnum==0); /* no prior solution */ - matches[0].off = (current - matchIndex3) + ZSTD_REP_MOVE; + matches[0].off = OFFSET_TO_OFFBASE(curr - matchIndex3); matches[0].len = (U32)mlen; mnum = 1; if ( (mlen > sufficient_len) | (ip+mlen == iLimit) ) { /* best possible length */ - ms->nextToUpdate = current+1; /* skip insertion */ + ms->nextToUpdate = curr+1; /* skip insertion */ return 1; } } } /* no dictMatchState lookup: dicts don't have a populated HC3 table */ - } + } /* if (mls == 3) */ - hashTable[h] = current; /* Update Hash Table */ + hashTable[h] = curr; /* Update Hash Table */ - while (nbCompares-- && (matchIndex >= matchLow)) { + for (; nbCompares && (matchIndex >= matchLow); --nbCompares) { U32* const nextPtr = bt + 2*(matchIndex & btMask); - size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ const BYTE* match; - assert(current > matchIndex); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + assert(curr > matchIndex); if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) { assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */ match = base + matchIndex; + if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit); } else { match = dictBase + matchIndex; + assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart); if (matchIndex+matchLength >= dictLimit) - match = base + matchIndex; /* prepare for match[matchLength] */ + match = base + matchIndex; /* prepare for match[matchLength] read */ } if (matchLength > bestLength) { - DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)", - (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE); + DEBUGLOG(8, "found match of length %u at distance %u (offBase=%u)", + (U32)matchLength, curr - matchIndex, OFFSET_TO_OFFBASE(curr - matchIndex)); assert(matchEndIdx > matchIndex); if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; bestLength = matchLength; - matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE; + matches[mnum].off = OFFSET_TO_OFFBASE(curr - matchIndex); matches[mnum].len = (U32)matchLength; mnum++; if ( (matchLength > ZSTD_OPT_NUM) | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) { if (dictMode == ZSTD_dictMatchState) nbCompares = 
0; /* break should also skip searching dms */ break; /* drop, to preserve bt consistency (miss a little bit of compression) */ - } - } + } } if (match[matchLength] < ip[matchLength]) { /* match smaller than current */ @@ -718,12 +773,13 @@ U32 ZSTD_insertBtAndGetAllMatches ( *smallerPtr = *largerPtr = 0; + assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */ if (dictMode == ZSTD_dictMatchState && nbCompares) { size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls); U32 dictMatchIndex = dms->hashTable[dmsH]; const U32* const dmsBt = dms->chainTable; commonLengthSmaller = commonLengthLarger = 0; - while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) { + for (; nbCompares && (dictMatchIndex > dmsLowLimit); --nbCompares) { const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ const BYTE* match = dmsBase + dictMatchIndex; @@ -733,19 +789,18 @@ U32 ZSTD_insertBtAndGetAllMatches ( if (matchLength > bestLength) { matchIndex = dictMatchIndex + dmsIndexDelta; - DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)", - (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE); + DEBUGLOG(8, "found dms match of length %u at distance %u (offBase=%u)", + (U32)matchLength, curr - matchIndex, OFFSET_TO_OFFBASE(curr - matchIndex)); if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; bestLength = matchLength; - matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE; + matches[mnum].off = OFFSET_TO_OFFBASE(curr - matchIndex); matches[mnum].len = (U32)matchLength; mnum++; if ( (matchLength > ZSTD_OPT_NUM) | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) { break; /* drop, to guarantee consistency (miss a little bit of compression) */ - } - } + } } if (dictMatchIndex <= dmsBtLow) { break; } /* beyond tree size, stop the search */ if (match[matchLength] < ip[matchLength]) { @@ -755,76 +810,246 @@ U32 ZSTD_insertBtAndGetAllMatches ( /* match is larger than current */ commonLengthLarger = matchLength; dictMatchIndex = nextPtr[0]; - } - } - } + } } } /* if (dictMode == ZSTD_dictMatchState) */ - assert(matchEndIdx > current+8); + assert(matchEndIdx > curr+8); ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */ return mnum; } +typedef U32 (*ZSTD_getAllMatchesFn)( + ZSTD_match_t*, + ZSTD_MatchState_t*, + U32*, + const BYTE*, + const BYTE*, + const U32 rep[ZSTD_REP_NUM], + U32 const ll0, + U32 const lengthToBeat); -FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches ( - ZSTD_match_t* matches, /* store result (match found, increasing size) in this table */ - ZSTD_matchState_t* ms, - U32* nextToUpdate3, - const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode, - const U32 rep[ZSTD_REP_NUM], - U32 const ll0, - U32 const lengthToBeat) +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_btGetAllMatches_internal( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + U32* nextToUpdate3, + const BYTE* ip, + const BYTE* const iHighLimit, + const U32 rep[ZSTD_REP_NUM], + U32 const ll0, + U32 const lengthToBeat, + const ZSTD_dictMode_e dictMode, + const U32 mls) { - const ZSTD_compressionParameters* const cParams = &ms->cParams; - U32 const matchLengthSearch = cParams->minMatch; - DEBUGLOG(8, "ZSTD_BtGetAllMatches"); - if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */ - ZSTD_updateTree_internal(ms, ip, 
iHighLimit, matchLengthSearch, dictMode); - switch(matchLengthSearch) - { - case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3); - default : - case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4); - case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5); - case 7 : - case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6); - } + assert(BOUNDED(3, ms->cParams.minMatch, 6) == mls); + DEBUGLOG(8, "ZSTD_BtGetAllMatches(dictMode=%d, mls=%u)", (int)dictMode, mls); + if (ip < ms->window.base + ms->nextToUpdate) + return 0; /* skipped area */ + ZSTD_updateTree_internal(ms, ip, iHighLimit, mls, dictMode); + return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, mls); } +#define ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls) ZSTD_btGetAllMatches_##dictMode##_##mls + +#define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) \ + static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)( \ + ZSTD_match_t* matches, \ + ZSTD_MatchState_t* ms, \ + U32* nextToUpdate3, \ + const BYTE* ip, \ + const BYTE* const iHighLimit, \ + const U32 rep[ZSTD_REP_NUM], \ + U32 const ll0, \ + U32 const lengthToBeat) \ + { \ + return ZSTD_btGetAllMatches_internal( \ + matches, ms, nextToUpdate3, ip, iHighLimit, \ + rep, ll0, lengthToBeat, ZSTD_##dictMode, mls); \ + } -/*-******************************* -* Optimal parser -*********************************/ -typedef struct repcodes_s { - U32 rep[3]; -} repcodes_t; +#define GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode) \ + GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 3) \ + GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 4) \ + GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 5) \ + GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 6) + +GEN_ZSTD_BT_GET_ALL_MATCHES(noDict) +GEN_ZSTD_BT_GET_ALL_MATCHES(extDict) +GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState) + +#define ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMode) \ + { \ + ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 3), \ + ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 4), \ + ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 5), \ + ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 6) \ + } -static repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0) +static ZSTD_getAllMatchesFn +ZSTD_selectBtGetAllMatches(ZSTD_MatchState_t const* ms, ZSTD_dictMode_e const dictMode) { - repcodes_t newReps; - if (offset >= ZSTD_REP_NUM) { /* full offset */ - newReps.rep[2] = rep[1]; - newReps.rep[1] = rep[0]; - newReps.rep[0] = offset - ZSTD_REP_MOVE; - } else { /* repcode */ - U32 const repCode = offset + ll0; - if (repCode > 0) { /* note : if repCode==0, no change */ - U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; - newReps.rep[2] = (repCode >= 2) ? 
rep[1] : rep[2]; - newReps.rep[1] = rep[0]; - newReps.rep[0] = currentOffset; - } else { /* repCode == 0 */ - memcpy(&newReps, rep, sizeof(newReps)); + ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = { + ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict), + ZSTD_BT_GET_ALL_MATCHES_ARRAY(extDict), + ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMatchState) + }; + U32 const mls = BOUNDED(3, ms->cParams.minMatch, 6); + assert((U32)dictMode < 3); + assert(mls - 3 < 4); + return getAllMatchesFns[(int)dictMode][mls - 3]; +} + +/************************* +* LDM helper functions * +*************************/ + +/* Struct containing info needed to make decision about ldm inclusion */ +typedef struct { + RawSeqStore_t seqStore; /* External match candidates store for this block */ + U32 startPosInBlock; /* Start position of the current match candidate */ + U32 endPosInBlock; /* End position of the current match candidate */ + U32 offset; /* Offset of the match candidate */ +} ZSTD_optLdm_t; + +/* ZSTD_optLdm_skipRawSeqStoreBytes(): + * Moves forward in @rawSeqStore by @nbBytes, + * which will update the fields 'pos' and 'posInSequence'. + */ +static void ZSTD_optLdm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes) +{ + U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes); + while (currPos && rawSeqStore->pos < rawSeqStore->size) { + rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos]; + if (currPos >= currSeq.litLength + currSeq.matchLength) { + currPos -= currSeq.litLength + currSeq.matchLength; + rawSeqStore->pos++; + } else { + rawSeqStore->posInSequence = currPos; + break; } } - return newReps; + if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) { + rawSeqStore->posInSequence = 0; + } } +/* ZSTD_opt_getNextMatchAndUpdateSeqStore(): + * Calculates the beginning and end of the next match in the current block. + * Updates 'pos' and 'posInSequence' of the ldmSeqStore. + */ +static void +ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock, + U32 blockBytesRemaining) +{ + rawSeq currSeq; + U32 currBlockEndPos; + U32 literalsBytesRemaining; + U32 matchBytesRemaining; + + /* Setting match end position to MAX to ensure we never use an LDM during this block */ + if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) { + optLdm->startPosInBlock = UINT_MAX; + optLdm->endPosInBlock = UINT_MAX; + return; + } + /* Calculate appropriate bytes left in matchLength and litLength + * after adjusting based on ldmSeqStore->posInSequence */ + currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos]; + assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength); + currBlockEndPos = currPosInBlock + blockBytesRemaining; + literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ? + currSeq.litLength - (U32)optLdm->seqStore.posInSequence : + 0; + matchBytesRemaining = (literalsBytesRemaining == 0) ? + currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) : + currSeq.matchLength; + + /* If there are more literal bytes than bytes remaining in block, no ldm is possible */ + if (literalsBytesRemaining >= blockBytesRemaining) { + optLdm->startPosInBlock = UINT_MAX; + optLdm->endPosInBlock = UINT_MAX; + ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining); + return; + } + + /* Matches may be < minMatch by this process. 
In that case, we will reject them + when we are deciding whether or not to add the ldm */ + optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining; + optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining; + optLdm->offset = currSeq.offset; + + if (optLdm->endPosInBlock > currBlockEndPos) { + /* Match ends after the block ends; we can't use the whole match */ + optLdm->endPosInBlock = currBlockEndPos; + ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock); + } else { + /* Consume the number of bytes remaining in the sequence */ + ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining); + } +} -static U32 ZSTD_totalLen(ZSTD_optimal_t sol) +/* ZSTD_optLdm_maybeAddMatch(): + * Adds a match if it's long enough, + * based on its 'startPosInBlock' and 'endPosInBlock', + * into 'matches'. Maintains the correct ordering of 'matches'. + */ +static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches, + const ZSTD_optLdm_t* optLdm, U32 currPosInBlock, + U32 minMatch) { - return sol.litlen + sol.mlen; + U32 const posDiff = currPosInBlock - optLdm->startPosInBlock; + /* Note: ZSTD_match_t actually contains offBase and matchLength (before subtracting MINMATCH) */ + U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff; + + /* Ensure that current block position is not outside of the match */ + if (currPosInBlock < optLdm->startPosInBlock + || currPosInBlock >= optLdm->endPosInBlock + || candidateMatchLength < minMatch) { + return; + } + + if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) { + U32 const candidateOffBase = OFFSET_TO_OFFBASE(optLdm->offset); + DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offBase: %u matchLength %u) at block position=%u", + candidateOffBase, candidateMatchLength, currPosInBlock); + matches[*nbMatches].len = candidateMatchLength; + matches[*nbMatches].off = candidateOffBase; + (*nbMatches)++; + } } +/* ZSTD_optLdm_processMatchCandidate(): + * Wrapper function to update ldm seq store and call ldm functions as necessary. + */ +static void +ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, + ZSTD_match_t* matches, U32* nbMatches, + U32 currPosInBlock, U32 remainingBytes, + U32 minMatch) +{ + if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) { + return; + } + + if (currPosInBlock >= optLdm->endPosInBlock) { + if (currPosInBlock > optLdm->endPosInBlock) { + /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily + * at the end of a match from the ldm seq store, and will often be some bytes + * beyond endPosInBlock.
As such, we need to correct for these "overshoots" + */ + U32 const posOvershoot = currPosInBlock - optLdm->endPosInBlock; + ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot); + } + ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes); + } + ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock, minMatch); +} + + +/*-******************************* +* Optimal parser +*********************************/ + #if 0 /* debug */ static void @@ -834,7 +1059,7 @@ listStats(const U32* table, int lastEltID) int enb; for (enb=0; enb < nbElts; enb++) { (void)table; - //RAWLOG(2, "%3i:%3i, ", enb, table[enb]); + /* RAWLOG(2, "%3i:%3i, ", enb, table[enb]); */ RAWLOG(2, "%4i,", table[enb]); } RAWLOG(2, " \n"); @@ -842,9 +1067,15 @@ listStats(const U32* table, int lastEltID) #endif -FORCE_INLINE_TEMPLATE size_t -ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, - seqStore_t* seqStore, +#define LIT_PRICE(_p) (int)ZSTD_rawLiteralsCost(_p, 1, optStatePtr, optLevel) +#define LL_PRICE(_l) (int)ZSTD_litLengthPrice(_l, optStatePtr, optLevel) +#define LL_INCPRICE(_l) (LL_PRICE(_l) - LL_PRICE(_l-1)) + +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t +ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, const int optLevel, @@ -860,13 +1091,22 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, const BYTE* const prefixStart = base + ms->window.dictLimit; const ZSTD_compressionParameters* const cParams = &ms->cParams; + ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode); + U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4; U32 nextToUpdate3 = ms->nextToUpdate; ZSTD_optimal_t* const opt = optStatePtr->priceTable; ZSTD_match_t* const matches = optStatePtr->matchTable; - ZSTD_optimal_t lastSequence; + ZSTD_optimal_t lastStretch; + ZSTD_optLdm_t optLdm; + + ZSTD_memset(&lastStretch, 0, sizeof(ZSTD_optimal_t)); + + optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore; + optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0; + ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip)); /* init */ DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u", @@ -882,88 +1122,144 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, /* find first match */ { U32 const litlen = (U32)(ip - anchor); U32 const ll0 = !litlen; - U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch); - if (!nbMatches) { ip++; continue; } + U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch); + ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches, + (U32)(ip-istart), (U32)(iend-ip), + minMatch); + if (!nbMatches) { + DEBUGLOG(8, "no match found at cPos %u", (unsigned)(ip-istart)); + ip++; + continue; + } + + /* Match found: let's store this solution, and eventually find more candidates. + * During this forward pass, @opt is used to store stretches, + * defined as "a match followed by N literals". + * Note how this is different from a Sequence, which is "N literals followed by a match". + * Storing stretches allows us to store different match predecessors + * for each literal position part of a literals run. 
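To make the stretch/sequence distinction above concrete, here is a small illustration (field names follow ZSTD_optimal_t; the block content and values are invented for the example):

    /* Input block:  M M M M L L N N N
     *   M = 4-byte match, L = literal, N = 3-byte match.
     * Forward pass, stretches ("a match, then literals"):
     *   opt[4] = { mlen=4, off=offM, litlen=0 }   // match M reached, no literals yet
     *   opt[6] = { mlen=4, off=offM, litlen=2 }   // same match M, now followed by L L
     *   opt[9] = { mlen=3, off=offN, litlen=0 }   // match N, reached from opt[6]
     * Reverse pass, sequences ("literals, then a match"):
     *   (litlen=0, mlen=4, offM), (litlen=2, mlen=3, offN)
     * i.e. the two literals migrate from the match that precedes them (M)
     * to the match that follows them (N), which is the layout the seqStore expects. */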
*/ /* initialize opt[0] */ - { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; } /* large match -> immediate encoding */ { U32 const maxML = matches[nbMatches-1].len; - U32 const maxOffset = matches[nbMatches-1].off; - DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series", - nbMatches, maxML, maxOffset, (U32)(ip-prefixStart)); + U32 const maxOffBase = matches[nbMatches-1].off; + DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffBase=%u at cPos=%u => start new series", + nbMatches, maxML, maxOffBase, (U32)(ip-prefixStart)); if (maxML > sufficient_len) { - lastSequence.litlen = litlen; - lastSequence.mlen = maxML; - lastSequence.off = maxOffset; - DEBUGLOG(6, "large match (%u>%u), immediate encoding", + lastStretch.litlen = 0; + lastStretch.mlen = maxML; + lastStretch.off = maxOffBase; + DEBUGLOG(6, "large match (%u>%u) => immediate encoding", maxML, sufficient_len); cur = 0; - last_pos = ZSTD_totalLen(lastSequence); + last_pos = maxML; goto _shortestPath; } } /* set prices for first matches starting position == 0 */ - { U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel); - U32 pos; + assert(opt[0].price >= 0); + { U32 pos; U32 matchNb; for (pos = 1; pos < minMatch; pos++) { - opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */ + opt[pos].price = ZSTD_MAX_PRICE; + opt[pos].mlen = 0; + opt[pos].litlen = litlen + pos; } for (matchNb = 0; matchNb < nbMatches; matchNb++) { - U32 const offset = matches[matchNb].off; + U32 const offBase = matches[matchNb].off; U32 const end = matches[matchNb].len; - repcodes_t const repHistory = ZSTD_updateRep(rep, offset, ll0); for ( ; pos <= end ; pos++ ) { - U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel); - U32 const sequencePrice = literalsPrice + matchPrice; + int const matchPrice = (int)ZSTD_getMatchPrice(offBase, pos, optStatePtr, optLevel); + int const sequencePrice = opt[0].price + matchPrice; DEBUGLOG(7, "rPos:%u => set initial price : %.2f", pos, ZSTD_fCost(sequencePrice)); opt[pos].mlen = pos; - opt[pos].off = offset; - opt[pos].litlen = litlen; - opt[pos].price = sequencePrice; - ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory)); - memcpy(opt[pos].rep, &repHistory, sizeof(repHistory)); - } } + opt[pos].off = offBase; + opt[pos].litlen = 0; /* end of match */ + opt[pos].price = sequencePrice + LL_PRICE(0); + } + } last_pos = pos-1; + opt[pos].price = ZSTD_MAX_PRICE; } } /* check further positions */ for (cur = 1; cur <= last_pos; cur++) { const BYTE* const inr = ip + cur; - assert(cur < ZSTD_OPT_NUM); - DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur) + assert(cur <= ZSTD_OPT_NUM); + DEBUGLOG(7, "cPos:%i==rPos:%u", (int)(inr-istart), cur); /* Fix current position with one literal if cheaper */ - { U32 const litlen = (opt[cur-1].mlen == 0) ?
opt[cur-1].litlen + 1 : 1; + { U32 const litlen = opt[cur-1].litlen + 1; int const price = opt[cur-1].price - + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel) - + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel) - - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel); + + LIT_PRICE(ip+cur-1) + + LL_INCPRICE(litlen); assert(price < 1000000000); /* overflow check */ if (price <= opt[cur].price) { - DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)", - inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen, + ZSTD_optimal_t const prevMatch = opt[cur]; + DEBUGLOG(7, "cPos:%i==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)", + (int)(inr-istart), cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen, opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]); - opt[cur].mlen = 0; - opt[cur].off = 0; + opt[cur] = opt[cur-1]; opt[cur].litlen = litlen; opt[cur].price = price; - memcpy(opt[cur].rep, opt[cur-1].rep, sizeof(opt[cur].rep)); + if ( (optLevel >= 1) /* additional check only for higher modes */ + && (prevMatch.litlen == 0) /* replace a match */ + && (LL_INCPRICE(1) < 0) /* ll1 is cheaper than ll0 */ + && LIKELY(ip + cur < iend) + ) { + /* check next position, in case it would be cheaper */ + int with1literal = prevMatch.price + LIT_PRICE(ip+cur) + LL_INCPRICE(1); + int withMoreLiterals = price + LIT_PRICE(ip+cur) + LL_INCPRICE(litlen+1); + DEBUGLOG(7, "then at next rPos %u : match+1lit %.2f vs %ulits %.2f", + cur+1, ZSTD_fCost(with1literal), litlen+1, ZSTD_fCost(withMoreLiterals)); + if ( (with1literal < withMoreLiterals) + && (with1literal < opt[cur+1].price) ) { + /* update offset history - before it disappears */ + U32 const prev = cur - prevMatch.mlen; + Repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, prevMatch.off, opt[prev].litlen==0); + assert(cur >= prevMatch.mlen); + DEBUGLOG(7, "==> match+1lit is cheaper (%.2f < %.2f) (hist:%u,%u,%u) !", + ZSTD_fCost(with1literal), ZSTD_fCost(withMoreLiterals), + newReps.rep[0], newReps.rep[1], newReps.rep[2] ); + opt[cur+1] = prevMatch; /* mlen & offbase */ + ZSTD_memcpy(opt[cur+1].rep, &newReps, sizeof(Repcodes_t)); + opt[cur+1].litlen = 1; + opt[cur+1].price = with1literal; + if (last_pos < cur+1) last_pos = cur+1; + } + } } else { - DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)", - inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), - opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]); + DEBUGLOG(7, "cPos:%i==rPos:%u : literal would cost more (%.2f>%.2f)", + (int)(inr-istart), cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price)); } } + /* Offset history is not updated during match comparison. + * Do it here, now that the match is selected and confirmed. 
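For reference, the update that ZSTD_newRep() performs can be sketched as follows. This restates the removed ZSTD_updateRep() visible earlier in this diff, translated to the new offBase convention (offBase 1..3 designates a repcode, larger values encode offset+3); the real helper in zstd's internal headers may differ in detail:

    typedef struct { U32 rep[3]; } Reps_sketch;
    static Reps_sketch newRep_sketch(Reps_sketch hist, U32 offBase, U32 ll0)
    {
        Reps_sketch r = hist;
        if (offBase > 3) {                         /* real offset : shift the history */
            r.rep[2] = hist.rep[1];
            r.rep[1] = hist.rep[0];
            r.rep[0] = offBase - 3;                /* undo OFFSET_TO_OFFBASE() */
        } else {                                   /* repcode 1..3 */
            U32 const repCode = offBase - 1 + ll0; /* ll0 shifts the repcode index */
            if (repCode > 0) {                     /* repCode==0 : rep[0] reused, no change */
                U32 const off = (repCode==3) ? hist.rep[0] - 1 : hist.rep[repCode];
                if (repCode >= 2) r.rep[2] = hist.rep[1];
                r.rep[1] = hist.rep[0];
                r.rep[0] = off;
            }
        }
        return r;
    }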
+ */ + ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(Repcodes_t)); + assert(cur >= opt[cur].mlen); + if (opt[cur].litlen == 0) { + /* just finished a match => alter offset history */ + U32 const prev = cur - opt[cur].mlen; + Repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[prev].litlen==0); + ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(Repcodes_t)); + } + /* last match must start at a minimum distance of 8 from oend */ if (inr > ilimit) continue; @@ -971,105 +1267,148 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, if ( (optLevel==0) /*static_test*/ && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) { - DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1); + DEBUGLOG(7, "skip current position : next rPos(%u) price is cheaper", cur+1); continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */ } - { U32 const ll0 = (opt[cur].mlen != 0); - U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0; - U32 const previousPrice = opt[cur].price; - U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel); - U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch); + assert(opt[cur].price >= 0); + { U32 const ll0 = (opt[cur].litlen == 0); + int const previousPrice = opt[cur].price; + int const basePrice = previousPrice + LL_PRICE(0); + U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch); U32 matchNb; + + ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches, + (U32)(inr-istart), (U32)(iend-inr), + minMatch); + if (!nbMatches) { DEBUGLOG(7, "rPos:%u : no match found", cur); continue; } - { U32 const maxML = matches[nbMatches-1].len; - DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u", - inr-istart, cur, nbMatches, maxML); - - if ( (maxML > sufficient_len) - || (cur + maxML >= ZSTD_OPT_NUM) ) { - lastSequence.mlen = maxML; - lastSequence.off = matches[nbMatches-1].off; - lastSequence.litlen = litlen; - cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0; /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */ - last_pos = cur + ZSTD_totalLen(lastSequence); - if (cur > ZSTD_OPT_NUM) cur = 0; /* underflow => first match */ + { U32 const longestML = matches[nbMatches-1].len; + DEBUGLOG(7, "cPos:%i==rPos:%u, found %u matches, of longest ML=%u", + (int)(inr-istart), cur, nbMatches, longestML); + + if ( (longestML > sufficient_len) + || (cur + longestML >= ZSTD_OPT_NUM) + || (ip + cur + longestML >= iend) ) { + lastStretch.mlen = longestML; + lastStretch.off = matches[nbMatches-1].off; + lastStretch.litlen = 0; + last_pos = cur + longestML; goto _shortestPath; } } /* set prices using matches found at position == cur */ for (matchNb = 0; matchNb < nbMatches; matchNb++) { U32 const offset = matches[matchNb].off; - repcodes_t const repHistory = ZSTD_updateRep(opt[cur].rep, offset, ll0); U32 const lastML = matches[matchNb].len; U32 const startML = (matchNb>0) ? 
matches[matchNb-1].len+1 : minMatch; U32 mlen; - DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u", - matchNb, matches[matchNb].off, lastML, litlen); + DEBUGLOG(7, "testing match %u => offBase=%4u, mlen=%2u, llen=%2u", + matchNb, matches[matchNb].off, lastML, opt[cur].litlen); for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */ U32 const pos = cur + mlen; - int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); + int const price = basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); if ((pos > last_pos) || (price < opt[pos].price)) { DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)", pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price)); - while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } /* fill empty positions */ + while (last_pos < pos) { + /* fill empty positions, for future comparisons */ + last_pos++; + opt[last_pos].price = ZSTD_MAX_PRICE; + opt[last_pos].litlen = !0; /* just needs to be != 0, to mean "not an end of match" */ + } opt[pos].mlen = mlen; opt[pos].off = offset; - opt[pos].litlen = litlen; + opt[pos].litlen = 0; opt[pos].price = price; - ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory)); - memcpy(opt[pos].rep, &repHistory, sizeof(repHistory)); } else { DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)", pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price)); if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */ } } } } + opt[last_pos+1].price = ZSTD_MAX_PRICE; } /* for (cur = 1; cur <= last_pos; cur++) */ - lastSequence = opt[last_pos]; - cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0; /* single sequence, and it starts before `ip` */ - assert(cur < ZSTD_OPT_NUM); /* control overflow*/ + lastStretch = opt[last_pos]; + assert(cur >= lastStretch.mlen); + cur = last_pos - lastStretch.mlen; _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */ assert(opt[0].mlen == 0); + assert(last_pos >= lastStretch.mlen); + assert(cur == last_pos - lastStretch.mlen); - { U32 const storeEnd = cur + 1; + if (lastStretch.mlen==0) { + /* no solution : all matches have been converted into literals */ + assert(lastStretch.litlen == (ip - anchor) + last_pos); + ip += last_pos; + continue; + } + assert(lastStretch.off > 0); + + /* Update offset history */ + if (lastStretch.litlen == 0) { + /* finishing on a match : update offset history */ + Repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastStretch.off, opt[cur].litlen==0); + ZSTD_memcpy(rep, &reps, sizeof(Repcodes_t)); + } else { + ZSTD_memcpy(rep, lastStretch.rep, sizeof(Repcodes_t)); + assert(cur >= lastStretch.litlen); + cur -= lastStretch.litlen; + } + + /* Let's write the shortest path solution. + * It is stored in @opt in reverse order, + * starting from @storeEnd (==cur+2), + * effectively partially overwriting @opt.
+ * Content is changed too: + * - So far, @opt stored stretches, aka a match followed by literals + * - Now, it will store sequences, aka literals followed by a match + */ + { U32 const storeEnd = cur + 2; U32 storeStart = storeEnd; - U32 seqPos = cur; + U32 stretchPos = cur; DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)", last_pos, cur); (void)last_pos; - assert(storeEnd < ZSTD_OPT_NUM); - DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)", - storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off); - opt[storeEnd] = lastSequence; - while (seqPos > 0) { - U32 const backDist = ZSTD_totalLen(opt[seqPos]); + assert(storeEnd < ZSTD_OPT_SIZE); + DEBUGLOG(6, "last stretch copied into pos=%u (llen=%u,mlen=%u,ofc=%u)", + storeEnd, lastStretch.litlen, lastStretch.mlen, lastStretch.off); + opt[storeEnd] = lastStretch; /* note: litlen will be fixed */ + storeStart = storeEnd; + while (1) { + ZSTD_optimal_t nextStretch = opt[stretchPos]; + opt[storeStart].litlen = nextStretch.litlen; + DEBUGLOG(6, "selected sequence (llen=%u,mlen=%u,ofc=%u)", + opt[storeStart].litlen, opt[storeStart].mlen, opt[storeStart].off); + if (nextStretch.mlen == 0) { + /* reaching beginning of segment */ + break; + } storeStart--; - DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)", - seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off); - opt[storeStart] = opt[seqPos]; - seqPos = (seqPos > backDist) ? seqPos - backDist : 0; + opt[storeStart] = nextStretch; /* note: litlen will be fixed */ + assert(nextStretch.litlen + nextStretch.mlen <= stretchPos); + stretchPos -= nextStretch.litlen + nextStretch.mlen; } /* save sequences */ - DEBUGLOG(6, "sending selected sequences into seqStore") + DEBUGLOG(6, "sending selected sequences into seqStore"); { U32 storePos; for (storePos=storeStart; storePos <= storeEnd; storePos++) { U32 const llen = opt[storePos].litlen; U32 const mlen = opt[storePos].mlen; - U32 const offCode = opt[storePos].off; + U32 const offBase = opt[storePos].off; U32 const advance = llen + mlen; - DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u", - anchor - istart, (unsigned)llen, (unsigned)mlen); + DEBUGLOG(6, "considering seq starting at %i, llen=%u, mlen=%u", + (int)(anchor - istart), (unsigned)llen, (unsigned)mlen); if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */ assert(storePos == storeEnd); /* must be last sequence */ @@ -1077,81 +1416,70 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, continue; /* will finish */ } - /* repcodes update : like ZSTD_updateRep(), but update in place */ - if (offCode >= ZSTD_REP_NUM) { /* full offset */ - rep[2] = rep[1]; - rep[1] = rep[0]; - rep[0] = offCode - ZSTD_REP_MOVE; - } else { /* repcode */ - U32 const repCode = offCode + (llen==0); - if (repCode) { /* note : if repCode==0, no change */ - U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? 
(rep[0] - 1) : rep[repCode]; - if (repCode >= 2) rep[2] = rep[1]; - rep[1] = rep[0]; - rep[0] = currentOffset; - } } - assert(anchor + llen <= iend); - ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen); - ZSTD_storeSeq(seqStore, llen, anchor, offCode, mlen-MINMATCH); + ZSTD_updateStats(optStatePtr, llen, anchor, offBase, mlen); + ZSTD_storeSeq(seqStore, llen, anchor, iend, offBase, mlen); anchor += advance; ip = anchor; } } + DEBUGLOG(7, "new offset history : %u, %u, %u", rep[0], rep[1], rep[2]); + + /* update all costs */ ZSTD_setBasePrices(optStatePtr, optLevel); } - } /* while (ip < ilimit) */ /* Return the last literals size */ return (size_t)(iend - anchor); } +#endif /* build exclusions */ +#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR +static size_t ZSTD_compressBlock_opt0( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode) +{ + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode); +} +#endif + +#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR +static size_t ZSTD_compressBlock_opt2( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode) +{ + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode); +} +#endif +#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_btopt( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressBlock_btopt"); - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict); + return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict); } +#endif -/* used in 2-pass strategy */ -static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus) -{ - U32 s, sum=0; - assert(ZSTD_FREQ_DIV+bonus >= 0); - for (s=0; s<lastEltIndex+1; s++) { - table[s] <<= ZSTD_FREQ_DIV+bonus; - table[s]--; - sum += table[s]; - } - return sum; -} - -/* used in 2-pass strategy */ -MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr) -{ - optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0); - optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0); - optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0); - optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0); -} +#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR /* ZSTD_initStats_ultra(): * make a first compression pass, just to seed stats with more accurate starting values. * only works on first block, with no dictionary and no ldm. - * this function cannot error, hence its contract must be respected. + * this function cannot error out; its narrow contract must be respected.
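The "narrow contract" amounts to being able to rewind the match-finder window after the scouting pass. In sketch form, the two-pass flow implemented by ZSTD_initStats_ultra() and ZSTD_compressBlock_btultra2() below is (simplified, using the patch's own field names):

    /* pass 1 : compress into a scratch rep history, only to populate ms->opt stats */
    ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));
    ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict);
    /* forget everything except the entropy statistics */
    ZSTD_resetSeqStore(seqStore);
    ms->window.base -= srcSize;          /* rewind : indices point before the data again */
    ms->window.dictLimit += (U32)srcSize;
    ms->window.lowLimit = ms->window.dictLimit;
    ms->nextToUpdate = ms->window.dictLimit;
    /* pass 2 : compress for real; prices now start from the measured statistics */
    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);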
*/ -static void -ZSTD_initStats_ultra(ZSTD_matchState_t* ms, - seqStore_t* seqStore, - U32 rep[ZSTD_REP_NUM], - const void* src, size_t srcSize) +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_initStats_ultra(ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize) { U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */ - memcpy(tmpRep, rep, sizeof(tmpRep)); + ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep)); DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize); assert(ms->opt.litLengthSum == 0); /* first block */ @@ -1159,38 +1487,36 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms, assert(ms->window.dictLimit == ms->window.lowLimit); /* no dictionary */ assert(ms->window.dictLimit - ms->nextToUpdate <= 1); /* no prefix (note: intentional overflow, defined as 2's-complement) */ - ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); /* generate stats into ms->opt*/ + ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict); /* generate stats into ms->opt*/ - /* invalidate first scan from history */ + /* invalidate first scan from history, only keep entropy stats */ ZSTD_resetSeqStore(seqStore); ms->window.base -= srcSize; ms->window.dictLimit += (U32)srcSize; ms->window.lowLimit = ms->window.dictLimit; ms->nextToUpdate = ms->window.dictLimit; - /* re-inforce weight of collected statistics */ - ZSTD_upscaleStats(&ms->opt); } size_t ZSTD_compressBlock_btultra( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize); - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); + return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict); } size_t ZSTD_compressBlock_btultra2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { - U32 const current = (U32)((const BYTE*)src - ms->window.base); + U32 const curr = (U32)((const BYTE*)src - ms->window.base); DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize); - /* 2-pass strategy: + /* 2-pass strategy: * this strategy makes a first pass over first block to collect statistics - * and seed next round's statistics with it. - * After 1st pass, function forgets everything, and starts a new block. + * in order to seed next round's statistics with it. + * After the 1st pass, the function forgets its history, and starts a new block. * Consequently, this can only work if no data has been previously loaded in tables, * aka, no dictionary, no prefix, no ldm preprocessing.
* The compression ratio gain is generally small (~0.5% on first block), @@ -1199,42 +1525,47 @@ size_t ZSTD_compressBlock_btultra2( if ( (ms->opt.litLengthSum==0) /* first block */ && (seqStore->sequences == seqStore->sequencesStart) /* no ldm */ && (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */ - && (current == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */ - && (srcSize > ZSTD_PREDEF_THRESHOLD) + && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */ + && (srcSize > ZSTD_PREDEF_THRESHOLD) /* input large enough to not employ default stats */ ) { ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize); } - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); + return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict); } +#endif +#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_btopt_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState); + return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState); } -size_t ZSTD_compressBlock_btultra_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_btopt_extDict( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState); + return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict); } +#endif -size_t ZSTD_compressBlock_btopt_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_btultra_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict); + return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState); } size_t ZSTD_compressBlock_btultra_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict); + return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict); } +#endif /* note : no btultra2 variant for extDict nor dictMatchState, * because btultra2 is not meant to work with dictionaries diff --git a/lib/compress/zstd_opt.h b/lib/compress/zstd_opt.h index 094f7476650..756c7b1d0c5 100644 --- a/lib/compress/zstd_opt.h +++ b/lib/compress/zstd_opt.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -11,46 +11,62 @@ #ifndef ZSTD_OPT_H #define ZSTD_OPT_H -#if defined (__cplusplus) -extern "C" { -#endif - #include "zstd_compress_internal.h" +#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR) /* used in ZSTD_loadDictionaryContent() */ -void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend); +void ZSTD_updateTree(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend); +#endif +#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_btopt( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_btultra( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_btopt_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_btultra2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_btopt_extDict( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); +#define ZSTD_COMPRESSBLOCK_BTOPT ZSTD_compressBlock_btopt +#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE ZSTD_compressBlock_btopt_dictMatchState +#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT ZSTD_compressBlock_btopt_extDict +#else +#define ZSTD_COMPRESSBLOCK_BTOPT NULL +#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE NULL +#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT NULL +#endif -size_t ZSTD_compressBlock_btopt_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_btultra( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btultra_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); - -size_t ZSTD_compressBlock_btopt_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btultra_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); /* note : no btultra2 variant for extDict nor dictMatchState, * because btultra2 is not meant to work with dictionaries * and is only specific for the first block (no prefix) */ +size_t ZSTD_compressBlock_btultra2( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); -#if defined (__cplusplus) -} +#define ZSTD_COMPRESSBLOCK_BTULTRA ZSTD_compressBlock_btultra +#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE ZSTD_compressBlock_btultra_dictMatchState +#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT ZSTD_compressBlock_btultra_extDict +#define ZSTD_COMPRESSBLOCK_BTULTRA2 ZSTD_compressBlock_btultra2 +#else +#define ZSTD_COMPRESSBLOCK_BTULTRA NULL +#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE NULL +#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT NULL +#define ZSTD_COMPRESSBLOCK_BTULTRA2 NULL #endif #endif /* ZSTD_OPT_H */ diff --git a/lib/compress/zstd_preSplit.c 
b/lib/compress/zstd_preSplit.c new file mode 100644 index 00000000000..d820c20ac24 --- /dev/null +++ b/lib/compress/zstd_preSplit.c @@ -0,0 +1,238 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include "../common/compiler.h" /* ZSTD_ALIGNOF */ +#include "../common/mem.h" /* S64 */ +#include "../common/zstd_deps.h" /* ZSTD_memset */ +#include "../common/zstd_internal.h" /* ZSTD_STATIC_ASSERT */ +#include "hist.h" /* HIST_add */ +#include "zstd_preSplit.h" + + +#define BLOCKSIZE_MIN 3500 +#define THRESHOLD_PENALTY_RATE 16 +#define THRESHOLD_BASE (THRESHOLD_PENALTY_RATE - 2) +#define THRESHOLD_PENALTY 3 + +#define HASHLENGTH 2 +#define HASHLOG_MAX 10 +#define HASHTABLESIZE (1 << HASHLOG_MAX) +#define HASHMASK (HASHTABLESIZE - 1) +#define KNUTH 0x9e3779b9 + +/* for hashLog > 8, hash 2 bytes. + * for hashLog == 8, just take the byte, no hashing. + * The speed of this method relies on compile-time constant propagation */ +FORCE_INLINE_TEMPLATE unsigned hash2(const void *p, unsigned hashLog) +{ + assert(hashLog >= 8); + if (hashLog == 8) return (U32)((const BYTE*)p)[0]; + assert(hashLog <= HASHLOG_MAX); + return (U32)(MEM_read16(p)) * KNUTH >> (32 - hashLog); +} + + +typedef struct { + unsigned events[HASHTABLESIZE]; + size_t nbEvents; +} Fingerprint; +typedef struct { + Fingerprint pastEvents; + Fingerprint newEvents; +} FPStats; + +static void initStats(FPStats* fpstats) +{ + ZSTD_memset(fpstats, 0, sizeof(FPStats)); +} + +FORCE_INLINE_TEMPLATE void +addEvents_generic(Fingerprint* fp, const void* src, size_t srcSize, size_t samplingRate, unsigned hashLog) +{ + const char* p = (const char*)src; + size_t limit = srcSize - HASHLENGTH + 1; + size_t n; + assert(srcSize >= HASHLENGTH); + for (n = 0; n < limit; n+=samplingRate) { + fp->events[hash2(p+n, hashLog)]++; + } + fp->nbEvents += limit/samplingRate; +} + +FORCE_INLINE_TEMPLATE void +recordFingerprint_generic(Fingerprint* fp, const void* src, size_t srcSize, size_t samplingRate, unsigned hashLog) +{ + ZSTD_memset(fp, 0, sizeof(unsigned) * ((size_t)1 << hashLog)); + fp->nbEvents = 0; + addEvents_generic(fp, src, srcSize, samplingRate, hashLog); +} + +typedef void (*RecordEvents_f)(Fingerprint* fp, const void* src, size_t srcSize); + +#define FP_RECORD(_rate) ZSTD_recordFingerprint_##_rate + +#define ZSTD_GEN_RECORD_FINGERPRINT(_rate, _hSize) \ + static void FP_RECORD(_rate)(Fingerprint* fp, const void* src, size_t srcSize) \ + { \ + recordFingerprint_generic(fp, src, srcSize, _rate, _hSize); \ + } + +ZSTD_GEN_RECORD_FINGERPRINT(1, 10) +ZSTD_GEN_RECORD_FINGERPRINT(5, 10) +ZSTD_GEN_RECORD_FINGERPRINT(11, 9) +ZSTD_GEN_RECORD_FINGERPRINT(43, 8) + + +static U64 abs64(S64 s64) { return (U64)((s64 < 0) ? 
-s64 : s64); } + +static U64 fpDistance(const Fingerprint* fp1, const Fingerprint* fp2, unsigned hashLog) +{ + U64 distance = 0; + size_t n; + assert(hashLog <= HASHLOG_MAX); + for (n = 0; n < ((size_t)1 << hashLog); n++) { + distance += + abs64((S64)fp1->events[n] * (S64)fp2->nbEvents - (S64)fp2->events[n] * (S64)fp1->nbEvents); + } + return distance; +} + +/* Compare newEvents with pastEvents + * return 1 when considered "too different" + */ +static int compareFingerprints(const Fingerprint* ref, + const Fingerprint* newfp, + int penalty, + unsigned hashLog) +{ + assert(ref->nbEvents > 0); + assert(newfp->nbEvents > 0); + { U64 p50 = (U64)ref->nbEvents * (U64)newfp->nbEvents; + U64 deviation = fpDistance(ref, newfp, hashLog); + U64 threshold = p50 * (U64)(THRESHOLD_BASE + penalty) / THRESHOLD_PENALTY_RATE; + return deviation >= threshold; + } +} + +static void mergeEvents(Fingerprint* acc, const Fingerprint* newfp) +{ + size_t n; + for (n = 0; n < HASHTABLESIZE; n++) { + acc->events[n] += newfp->events[n]; + } + acc->nbEvents += newfp->nbEvents; +} + +static void flushEvents(FPStats* fpstats) +{ + size_t n; + for (n = 0; n < HASHTABLESIZE; n++) { + fpstats->pastEvents.events[n] = fpstats->newEvents.events[n]; + } + fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents; + ZSTD_memset(&fpstats->newEvents, 0, sizeof(fpstats->newEvents)); +} + +static void removeEvents(Fingerprint* acc, const Fingerprint* slice) +{ + size_t n; + for (n = 0; n < HASHTABLESIZE; n++) { + assert(acc->events[n] >= slice->events[n]); + acc->events[n] -= slice->events[n]; + } + acc->nbEvents -= slice->nbEvents; +} + +#define CHUNKSIZE (8 << 10) +static size_t ZSTD_splitBlock_byChunks(const void* blockStart, size_t blockSize, + int level, + void* workspace, size_t wkspSize) +{ + static const RecordEvents_f records_fs[] = { + FP_RECORD(43), FP_RECORD(11), FP_RECORD(5), FP_RECORD(1) + }; + static const unsigned hashParams[] = { 8, 9, 10, 10 }; + const RecordEvents_f record_f = (assert(0<=level && level<=3), records_fs[level]); + FPStats* const fpstats = (FPStats*)workspace; + const char* p = (const char*)blockStart; + int penalty = THRESHOLD_PENALTY; + size_t pos = 0; + assert(blockSize == (128 << 10)); + assert(workspace != NULL); + assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0); + ZSTD_STATIC_ASSERT(ZSTD_SLIPBLOCK_WORKSPACESIZE >= sizeof(FPStats)); + assert(wkspSize >= sizeof(FPStats)); (void)wkspSize; + + initStats(fpstats); + record_f(&fpstats->pastEvents, p, CHUNKSIZE); + for (pos = CHUNKSIZE; pos <= blockSize - CHUNKSIZE; pos += CHUNKSIZE) { + record_f(&fpstats->newEvents, p + pos, CHUNKSIZE); + if (compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, penalty, hashParams[level])) { + return pos; + } else { + mergeEvents(&fpstats->pastEvents, &fpstats->newEvents); + if (penalty > 0) penalty--; + } + } + assert(pos == blockSize); + return blockSize; + (void)flushEvents; (void)removeEvents; +} + +/* ZSTD_splitBlock_fromBorders(): very fast strategy: + * compare fingerprint from beginning and end of the block, + * derive from their difference whether it's preferable to split in the middle, + * repeat the process a second time, for a finer-grained decision. + * A third pass did not bring improvements, so I stopped at 2. + * Benefits are good enough for a cheap heuristic. + * More accurate splitting saves more, but speed impact is also more perceptible. + * For better accuracy, use more elaborate variant *_byChunks.
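The "too different" test above reduces to an L1 distance between normalized histograms, computed with cross-multiplication so everything stays in integers. A standalone restatement (constants are the patch's THRESHOLD_BASE==14 and THRESHOLD_PENALTY_RATE==16):

    /* split when  sum_n |e1[n]*N2 - e2[n]*N1|  >=  N1*N2*(14+penalty)/16 ;
     * dividing through by N1*N2 shows this bounds the normalized distance
     * sum_n |e1[n]/N1 - e2[n]/N2| by the fraction (14+penalty)/16. */
    static int tooDifferent_sketch(const unsigned* e1, unsigned long long N1,
                                   const unsigned* e2, unsigned long long N2,
                                   size_t nbBuckets, int penalty)
    {
        unsigned long long dist = 0;
        size_t n;
        for (n = 0; n < nbBuckets; n++) {
            long long const d = (long long)(e1[n]*N2) - (long long)(e2[n]*N1);
            dist += (unsigned long long)(d < 0 ? -d : d);
        }
        return dist >= N1 * N2 * (unsigned long long)(14 + penalty) / 16;
    }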
+ */ +static size_t ZSTD_splitBlock_fromBorders(const void* blockStart, size_t blockSize, + void* workspace, size_t wkspSize) +{ +#define SEGMENT_SIZE 512 + FPStats* const fpstats = (FPStats*)workspace; + Fingerprint* middleEvents = (Fingerprint*)(void*)((char*)workspace + 512 * sizeof(unsigned)); + assert(blockSize == (128 << 10)); + assert(workspace != NULL); + assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0); + ZSTD_STATIC_ASSERT(ZSTD_SLIPBLOCK_WORKSPACESIZE >= sizeof(FPStats)); + assert(wkspSize >= sizeof(FPStats)); (void)wkspSize; + + initStats(fpstats); + HIST_add(fpstats->pastEvents.events, blockStart, SEGMENT_SIZE); + HIST_add(fpstats->newEvents.events, (const char*)blockStart + blockSize - SEGMENT_SIZE, SEGMENT_SIZE); + fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents = SEGMENT_SIZE; + if (!compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, 0, 8)) + return blockSize; + + HIST_add(middleEvents->events, (const char*)blockStart + blockSize/2 - SEGMENT_SIZE/2, SEGMENT_SIZE); + middleEvents->nbEvents = SEGMENT_SIZE; + { U64 const distFromBegin = fpDistance(&fpstats->pastEvents, middleEvents, 8); + U64 const distFromEnd = fpDistance(&fpstats->newEvents, middleEvents, 8); + U64 const minDistance = SEGMENT_SIZE * SEGMENT_SIZE / 3; + if (abs64((S64)distFromBegin - (S64)distFromEnd) < minDistance) + return 64 KB; + return (distFromBegin > distFromEnd) ? 32 KB : 96 KB; + } +} + +size_t ZSTD_splitBlock(const void* blockStart, size_t blockSize, + int level, + void* workspace, size_t wkspSize) +{ + DEBUGLOG(6, "ZSTD_splitBlock (level=%i)", level); + assert(0<=level && level<=4); + if (level == 0) + return ZSTD_splitBlock_fromBorders(blockStart, blockSize, workspace, wkspSize); + /* level >= 1 */ + return ZSTD_splitBlock_byChunks(blockStart, blockSize, level-1, workspace, wkspSize); +} diff --git a/lib/compress/zstd_preSplit.h b/lib/compress/zstd_preSplit.h new file mode 100644 index 00000000000..b89a200dccd --- /dev/null +++ b/lib/compress/zstd_preSplit.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_PRESPLIT_H +#define ZSTD_PRESPLIT_H + +#include <stddef.h> /* size_t */ + +#define ZSTD_SLIPBLOCK_WORKSPACESIZE 8208 + +/* ZSTD_splitBlock(): + * @level must be a value between 0 and 4. + * higher levels spend more energy to detect block boundaries. + * @workspace must be aligned for size_t. + * @wkspSize must be >= ZSTD_SLIPBLOCK_WORKSPACESIZE + * note: + * For the time being, this function only accepts full 128 KB blocks. + * Therefore, @blockSize must be == 128 KB. + * While this could be extended to smaller sizes in the future, + * it is not yet clear if this would be useful. TBD. + */ +size_t ZSTD_splitBlock(const void* blockStart, size_t blockSize, + int level, + void* workspace, size_t wkspSize); + +#endif /* ZSTD_PRESPLIT_H */ diff --git a/lib/compress/zstdmt_compress.c b/lib/compress/zstdmt_compress.c index 38fbb907685..cb710475c2a 100644 --- a/lib/compress/zstdmt_compress.c +++ b/lib/compress/zstdmt_compress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved.
* * This source code is licensed under both the BSD-style license (found in the @@ -15,17 +15,13 @@ #endif -/* ====== Constants ====== */ -#define ZSTDMT_OVERLAPLOG_DEFAULT 0 - - /* ====== Dependencies ====== */ -#include <string.h> /* memcpy, memset */ -#include <limits.h> /* INT_MAX, UINT_MAX */ -#include "mem.h" /* MEM_STATIC */ -#include "pool.h" /* threadpool */ -#include "threading.h" /* mutex */ -#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */ +#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */ +#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset, INT_MAX, UINT_MAX */ +#include "../common/mem.h" /* MEM_STATIC */ +#include "../common/pool.h" /* threadpool */ +#include "../common/threading.h" /* mutex */ +#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */ #include "zstd_ldm.h" #include "zstdmt_compress.h" @@ -44,12 +40,13 @@ # include <unistd.h> # include <sys/times.h> -# define DEBUG_PRINTHEX(l,p,n) { \ - unsigned debug_u; \ - for (debug_u=0; debug_u<(n); debug_u++) \ - RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \ - RAWLOG(l, " \n"); \ -} +# define DEBUG_PRINTHEX(l,p,n) \ + do { \ + unsigned debug_u; \ + for (debug_u=0; debug_u<(n); debug_u++) \ + RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \ + RAWLOG(l, " \n"); \ + } while (0) static unsigned long long GetCurrentClockTimeMicroseconds(void) { @@ -61,25 +58,28 @@ static unsigned long long GetCurrentClockTimeMicroseconds(void) } } #define MUTEX_WAIT_TIME_DLEVEL 6 -#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) { \ - if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) { \ - unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \ - ZSTD_pthread_mutex_lock(mutex); \ - { unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \ - unsigned long long const elapsedTime = (afterTime-beforeTime); \ - if (elapsedTime > 1000) { /* or whatever threshold you like; I'm using 1 millisecond here */ \ - DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \ - elapsedTime, #mutex); \ - } } \ - } else { \ - ZSTD_pthread_mutex_lock(mutex); \ - } \ -} +#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) \ + do { \ + if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) { \ + unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \ + ZSTD_pthread_mutex_lock(mutex); \ + { unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \ + unsigned long long const elapsedTime = (afterTime-beforeTime); \ + if (elapsedTime > 1000) { \ + /* or whatever threshold you like; I'm using 1 millisecond here */ \ + DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, \ + "Thread took %llu microseconds to acquire mutex %s \n", \ + elapsedTime, #mutex); \ + } } \ + } else { \ + ZSTD_pthread_mutex_lock(mutex); \ + } \ + } while (0) #else # define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m) -# define DEBUG_PRINTHEX(l,p,n) {} +# define DEBUG_PRINTHEX(l,p,n) do { } while (0) #endif @@ -90,9 +90,9 @@ static unsigned long long GetCurrentClockTimeMicroseconds(void) typedef struct buffer_s { void* start; size_t capacity; -} buffer_t; +} Buffer; -static const buffer_t g_nullBuffer = { NULL, 0 }; +static const Buffer g_nullBuffer = { NULL, 0 }; typedef struct ZSTDMT_bufferPool_s { ZSTD_pthread_mutex_t poolMutex; @@ -100,17 +100,37 @@ typedef struct ZSTDMT_bufferPool_s { unsigned totalBuffers; unsigned nbBuffers; ZSTD_customMem cMem; - buffer_t bTable[1]; /* variable size */ + Buffer* buffers; } ZSTDMT_bufferPool; -static ZSTDMT_bufferPool*
ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem) +static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool) +{ + DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool); + if (!bufPool) return; /* compatibility with free on NULL */ + if (bufPool->buffers) { + unsigned u; + for (u=0; u<bufPool->totalBuffers; u++) { + DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->buffers[u].start); + ZSTD_customFree(bufPool->buffers[u].start, bufPool->cMem); + } + ZSTD_customFree(bufPool->buffers, bufPool->cMem); + } + ZSTD_pthread_mutex_destroy(&bufPool->poolMutex); + ZSTD_customFree(bufPool, bufPool->cMem); +} + +static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_customMem cMem) { - unsigned const maxNbBuffers = 2*nbWorkers + 3; - ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc( - sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem); + ZSTDMT_bufferPool* const bufPool = + (ZSTDMT_bufferPool*)ZSTD_customCalloc(sizeof(ZSTDMT_bufferPool), cMem); if (bufPool==NULL) return NULL; if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) { - ZSTD_free(bufPool, cMem); + ZSTD_customFree(bufPool, cMem); + return NULL; + } + bufPool->buffers = (Buffer*)ZSTD_customCalloc(maxNbBuffers * sizeof(Buffer), cMem); + if (bufPool->buffers==NULL) { + ZSTDMT_freeBufferPool(bufPool); return NULL; } bufPool->bufferSize = 64 KB; @@ -120,32 +140,19 @@ static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_custo return bufPool; } -static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool) -{ - unsigned u; - DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool); - if (!bufPool) return; /* compatibility with free on NULL */ - for (u=0; u<bufPool->totalBuffers; u++) { - DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start); - ZSTD_free(bufPool->bTable[u].start, bufPool->cMem); - } - ZSTD_pthread_mutex_destroy(&bufPool->poolMutex); - ZSTD_free(bufPool, bufPool->cMem); -} - /* only works at initialization, not during compression */ static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool) { - size_t const poolSize = sizeof(*bufPool) - + (bufPool->totalBuffers - 1) * sizeof(buffer_t); + size_t const poolSize = sizeof(*bufPool); + size_t const arraySize = bufPool->totalBuffers * sizeof(Buffer); unsigned u; size_t totalBufferSize = 0; ZSTD_pthread_mutex_lock(&bufPool->poolMutex); for (u=0; u<bufPool->totalBuffers; u++) - totalBufferSize += bufPool->bTable[u].capacity; + totalBufferSize += bufPool->buffers[u].capacity; ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); - return poolSize + totalBufferSize; + return poolSize + arraySize + totalBufferSize; } /* ZSTDMT_setBufferSize() : @@ -161,9 +168,8 @@ static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const } -static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers) +static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, unsigned maxNbBuffers) { - unsigned const maxNbBuffers = 2*nbWorkers + 3; if (srcBufPool==NULL) return NULL; if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */ return srcBufPool; @@ -172,7 +178,7 @@ static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, size_t const bSize = srcBufPool->bufferSize; /* forward parameters */ ZSTDMT_bufferPool* newBufPool; ZSTDMT_freeBufferPool(srcBufPool); - newBufPool =
ZSTDMT_createBufferPool(maxNbBuffers, cMem); if (newBufPool==NULL) return newBufPool; ZSTDMT_setBufferSize(newBufPool, bSize); return newBufPool; @@ -183,15 +189,15 @@ static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, * assumption : bufPool must be valid * @return : a buffer, with start pointer and size * note: allocation may fail, in this case, start==NULL and size==0 */ -static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool) +static Buffer ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool) { size_t const bSize = bufPool->bufferSize; DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize); ZSTD_pthread_mutex_lock(&bufPool->poolMutex); if (bufPool->nbBuffers) { /* try to use an existing buffer */ - buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)]; + Buffer const buf = bufPool->buffers[--(bufPool->nbBuffers)]; size_t const availBufferSize = buf.capacity; - bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer; + bufPool->buffers[bufPool->nbBuffers] = g_nullBuffer; if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) { /* large enough, but not too much */ DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u", @@ -201,13 +207,13 @@ static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool) } /* size conditions not respected : scratch this buffer, create new one */ DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing"); - ZSTD_free(buf.start, bufPool->cMem); + ZSTD_customFree(buf.start, bufPool->cMem); } ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); /* create new buffer */ DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer"); - { buffer_t buffer; - void* const start = ZSTD_malloc(bSize, bufPool->cMem); + { Buffer buffer; + void* const start = ZSTD_customMalloc(bSize, bufPool->cMem); buffer.start = start; /* note : start can be NULL if malloc fails ! */ buffer.capacity = (start==NULL) ? 0 : bSize; if (start==NULL) { @@ -225,17 +231,17 @@ static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool) * @return : a buffer that is at least the buffer pool buffer size. * If a reallocation happens, the data in the input buffer is copied. */ -static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer) +static Buffer ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, Buffer buffer) { size_t const bSize = bufPool->bufferSize; if (buffer.capacity < bSize) { - void* const start = ZSTD_malloc(bSize, bufPool->cMem); - buffer_t newBuffer; + void* const start = ZSTD_customMalloc(bSize, bufPool->cMem); + Buffer newBuffer; newBuffer.start = start; newBuffer.capacity = start == NULL ? 
0 : bSize; if (start != NULL) { assert(newBuffer.capacity >= buffer.capacity); - memcpy(newBuffer.start, buffer.start, buffer.capacity); + ZSTD_memcpy(newBuffer.start, buffer.start, buffer.capacity); DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize); return newBuffer; } @@ -246,28 +252,36 @@ static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer) #endif /* store buffer for later re-use, up to pool capacity */ -static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf) +static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, Buffer buf) { DEBUGLOG(5, "ZSTDMT_releaseBuffer"); if (buf.start == NULL) return; /* compatible with release on NULL */ ZSTD_pthread_mutex_lock(&bufPool->poolMutex); if (bufPool->nbBuffers < bufPool->totalBuffers) { - bufPool->bTable[bufPool->nbBuffers++] = buf; /* stored for later use */ + bufPool->buffers[bufPool->nbBuffers++] = buf; /* stored for later use */ DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u", (U32)buf.capacity, (U32)(bufPool->nbBuffers-1)); ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); return; } ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); - /* Reached bufferPool capacity (should not happen) */ + /* Reached bufferPool capacity (note: should not happen) */ DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing "); - ZSTD_free(buf.start, bufPool->cMem); + ZSTD_customFree(buf.start, bufPool->cMem); } +/* We need 2 output buffers per worker since each dstBuff must be flushed after it is released. + * The 3 additional buffers are as follows: + * 1 buffer for input loading + * 1 buffer for "next input" when submitting current one + * 1 buffer stuck in queue */ +#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) (2*(nbWorkers) + 3) -/* ===== Seq Pool Wrapper ====== */ +/* After a worker releases its rawSeqStore, it is immediately ready for reuse. + * So we only need one seq buffer per worker. 
*/ +#define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) (nbWorkers) -static rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0}; +/* ===== Seq Pool Wrapper ====== */ typedef ZSTDMT_bufferPool ZSTDMT_seqPool; @@ -276,23 +290,23 @@ static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool) return ZSTDMT_sizeof_bufferPool(seqPool); } -static rawSeqStore_t bufferToSeq(buffer_t buffer) +static RawSeqStore_t bufferToSeq(Buffer buffer) { - rawSeqStore_t seq = {NULL, 0, 0, 0}; + RawSeqStore_t seq = kNullRawSeqStore; seq.seq = (rawSeq*)buffer.start; seq.capacity = buffer.capacity / sizeof(rawSeq); return seq; } -static buffer_t seqToBuffer(rawSeqStore_t seq) +static Buffer seqToBuffer(RawSeqStore_t seq) { - buffer_t buffer; + Buffer buffer; buffer.start = seq.seq; buffer.capacity = seq.capacity * sizeof(rawSeq); return buffer; } -static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool) +static RawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool) { if (seqPool->bufferSize == 0) { return kNullRawSeqStore; @@ -301,13 +315,13 @@ static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool) } #if ZSTD_RESIZE_SEQPOOL -static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq) +static RawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, RawSeqStore_t seq) { return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq))); } #endif -static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq) +static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, RawSeqStore_t seq) { ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq)); } @@ -319,7 +333,7 @@ static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq) static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem) { - ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem); + ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(SEQ_POOL_MAX_NB_BUFFERS(nbWorkers), cMem); if (seqPool == NULL) return NULL; ZSTDMT_setNbSeq(seqPool, 0); return seqPool; @@ -332,7 +346,7 @@ static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool) static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers) { - return ZSTDMT_expandBufferPool(pool, nbWorkers); + return ZSTDMT_expandBufferPool(pool, SEQ_POOL_MAX_NB_BUFFERS(nbWorkers)); } @@ -344,17 +358,21 @@ typedef struct { int totalCCtx; int availCCtx; ZSTD_customMem cMem; - ZSTD_CCtx* cctx[1]; /* variable size */ + ZSTD_CCtx** cctxs; } ZSTDMT_CCtxPool; -/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */ +/* note : all CCtx borrowed from the pool must be reverted back to the pool _before_ freeing the pool */ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) { - int cid; - for (cid=0; cid<pool->totalCCtx; cid++) - ZSTD_freeCCtx(pool->cctx[cid]); /* note : compatible with free on NULL */ + if (!pool) return; ZSTD_pthread_mutex_destroy(&pool->poolMutex); - ZSTD_free(pool, pool->cMem); + if (pool->cctxs) { + int cid; + for (cid=0; cid<pool->totalCCtx; cid++) + ZSTD_freeCCtx(pool->cctxs[cid]); /* free compatible with NULL */ + ZSTD_customFree(pool->cctxs, pool->cMem); + } + ZSTD_customFree(pool, pool->cMem); }
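An aside on the buffer pool above: ZSTDMT_getBuffer hands back a cached buffer only when it is large enough for the request yet not more than 8x larger, so the pool never pins one huge allocation to serve a stream of small requests. Below is a minimal stand-alone rendering of that predicate; it is an illustrative sketch, not code from the diff, and the helper name is hypothetical.

#include <stddef.h>

/* Accept a cached buffer of capacity avail for a request of want
 * bytes only if it is large enough, but not more than 8x too large. */
static int pooledBufferReusable(size_t avail, size_t want)
{
    return (avail >= want) && ((avail >> 3) <= want);
}

For instance, a cached 1 MB buffer is reused for any request between 128 KB and 1 MB; outside that window the pool frees it and allocates fresh, which is exactly the "large enough, but not too much" condition in ZSTDMT_getBuffer.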
/* ZSTDMT_createCCtxPool() : @@ -362,19 +380,24 @@ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers, ZSTD_customMem cMem) { - ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc( - sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem); + ZSTDMT_CCtxPool* const cctxPool = + (ZSTDMT_CCtxPool*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtxPool), cMem); assert(nbWorkers > 0); if (!cctxPool) return NULL; if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) { - ZSTD_free(cctxPool, cMem); + ZSTD_customFree(cctxPool, cMem); return NULL; } - cctxPool->cMem = cMem; cctxPool->totalCCtx = nbWorkers; + cctxPool->cctxs = (ZSTD_CCtx**)ZSTD_customCalloc(nbWorkers * sizeof(ZSTD_CCtx*), cMem); + if (!cctxPool->cctxs) { + ZSTDMT_freeCCtxPool(cctxPool); + return NULL; + } + cctxPool->cMem = cMem; + cctxPool->cctxs[0] = ZSTD_createCCtx_advanced(cMem); + if (!cctxPool->cctxs[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; } cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */ - cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem); - if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; } DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers); return cctxPool; } @@ -396,16 +419,16 @@ static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool) { ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); { unsigned const nbWorkers = cctxPool->totalCCtx; - size_t const poolSize = sizeof(*cctxPool) - + (nbWorkers-1) * sizeof(ZSTD_CCtx*); - unsigned u; + size_t const poolSize = sizeof(*cctxPool); + size_t const arraySize = cctxPool->totalCCtx * sizeof(ZSTD_CCtx*); size_t totalCCtxSize = 0; + unsigned u; for (u=0; u<nbWorkers; u++) { - totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]); + totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctxs[u]); } ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); assert(nbWorkers > 0); - return poolSize + totalCCtxSize; + return poolSize + arraySize + totalCCtxSize; } } @@ -415,7 +438,7 @@ static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool) ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); if (cctxPool->availCCtx) { cctxPool->availCCtx--; - { ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx]; + { ZSTD_CCtx* const cctx = cctxPool->cctxs[cctxPool->availCCtx]; ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); return cctx; } } @@ -429,7 +452,7 @@ static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx) if (cctx==NULL) return; /* compatibility with release on NULL */ ZSTD_pthread_mutex_lock(&pool->poolMutex); if (pool->availCCtx < pool->totalCCtx) - pool->cctx[pool->availCCtx++] = cctx; + pool->cctxs[pool->availCCtx++] = cctx; else { /* pool overflow : should not happen, since totalCCtx==nbWorkers */ DEBUGLOG(4, "CCtx pool overflow : free cctx"); @@ -443,7 +466,7 @@ static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx) typedef struct { void const* start; size_t size; -} range_t; +} Range;
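Both the buffer pool and the CCtx pool share one protocol: a mutex-protected stack of parked objects, where an empty pool tells the caller to allocate a fresh object outside the lock, and release either parks the object or destroys it on overflow. A minimal sketch of that protocol, with hypothetical names and a fixed capacity standing in for the nbWorkers sizing:

#include <pthread.h>
#include <stddef.h>

#define MINI_POOL_CAP 8

typedef struct {
    pthread_mutex_t lock;
    void* slots[MINI_POOL_CAP];  /* parked objects */
    int   avail;                 /* number of parked objects */
} MiniPool;

static void* miniPoolGet(MiniPool* p)
{
    void* obj = NULL;
    pthread_mutex_lock(&p->lock);
    if (p->avail > 0) obj = p->slots[--p->avail];
    pthread_mutex_unlock(&p->lock);
    return obj;  /* NULL => caller allocates a fresh object */
}

/* Returns 1 if the object was parked, 0 if the caller must destroy it. */
static int miniPoolRelease(MiniPool* p, void* obj)
{
    int parked = 0;
    pthread_mutex_lock(&p->lock);
    if (p->avail < MINI_POOL_CAP) { p->slots[p->avail++] = obj; parked = 1; }
    pthread_mutex_unlock(&p->lock);
    return parked;
}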
typedef struct { /* All variables in the struct are protected by mutex. */ @@ -459,63 +482,83 @@ typedef struct { ZSTD_pthread_mutex_t ldmWindowMutex; ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is updated */ ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */ -} serialState_t; +} SerialState; -static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params, size_t jobSize) +static int +ZSTDMT_serialState_reset(SerialState* serialState, + ZSTDMT_seqPool* seqPool, + ZSTD_CCtx_params params, + size_t jobSize, + const void* dict, size_t const dictSize, + ZSTD_dictContentType_e dictContentType) { /* Adjust parameters */ - if (params.ldmParams.enableLdm) { + if (params.ldmParams.enableLdm == ZSTD_ps_enable) { DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10); ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams); assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog); assert(params.ldmParams.hashRateLog < 32); - serialState->ldmState.hashPower = - ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength); } else { - memset(&params.ldmParams, 0, sizeof(params.ldmParams)); + ZSTD_memset(&params.ldmParams, 0, sizeof(params.ldmParams)); } serialState->nextJobID = 0; if (params.fParams.checksumFlag) XXH64_reset(&serialState->xxhState, 0); - if (params.ldmParams.enableLdm) { + if (params.ldmParams.enableLdm == ZSTD_ps_enable) { ZSTD_customMem cMem = params.customMem; unsigned const hashLog = params.ldmParams.hashLog; size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t); unsigned const bucketLog = params.ldmParams.hashLog - params.ldmParams.bucketSizeLog; - size_t const bucketSize = (size_t)1 << bucketLog; unsigned const prevBucketLog = serialState->params.ldmParams.hashLog - serialState->params.ldmParams.bucketSizeLog; + size_t const numBuckets = (size_t)1 << bucketLog; /* Size the seq pool tables */ ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize)); /* Reset the window */ - ZSTD_window_clear(&serialState->ldmState.window); - serialState->ldmWindow = serialState->ldmState.window; + ZSTD_window_init(&serialState->ldmState.window);
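The LDM tables handled just below follow a grow-only policy: they are freed and reallocated only when the new hashLog or bucketLog exceeds what was previously allocated, so resetting a stream with equal or smaller parameters performs no allocation at all. Schematically (an illustrative sketch under that reading, with hypothetical names):

#include <stdlib.h>

/* Keep the old block when it already fits; otherwise free it and
 * allocate the larger size. Returns NULL on allocation failure. */
static void* growOnlyAlloc(void* oldPtr, size_t oldCapacity, size_t newCapacity)
{
    if (oldPtr != NULL && oldCapacity >= newCapacity) return oldPtr;
    free(oldPtr);
    return malloc(newCapacity);
}

Note that the real code still zeroes the tables on every reset, since a reused table must not leak match state from a previous frame.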
/* Resize tables and output space if necessary. */ if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) { - ZSTD_free(serialState->ldmState.hashTable, cMem); - serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_malloc(hashSize, cMem); + ZSTD_customFree(serialState->ldmState.hashTable, cMem); + serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc(hashSize, cMem); } if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) { - ZSTD_free(serialState->ldmState.bucketOffsets, cMem); - serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_malloc(bucketSize, cMem); + ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); + serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_customMalloc(numBuckets, cMem); } if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets) return 1; /* Zero the tables */ - memset(serialState->ldmState.hashTable, 0, hashSize); - memset(serialState->ldmState.bucketOffsets, 0, bucketSize); + ZSTD_memset(serialState->ldmState.hashTable, 0, hashSize); + ZSTD_memset(serialState->ldmState.bucketOffsets, 0, numBuckets); + + /* Update window state and fill hash table with dict */ + serialState->ldmState.loadedDictEnd = 0; + if (dictSize > 0) { + if (dictContentType == ZSTD_dct_rawContent) { + BYTE const* const dictEnd = (const BYTE*)dict + dictSize; + ZSTD_window_update(&serialState->ldmState.window, dict, dictSize, /* forceNonContiguous */ 0); + ZSTD_ldm_fillHashTable(&serialState->ldmState, (const BYTE*)dict, dictEnd, &params.ldmParams); + serialState->ldmState.loadedDictEnd = params.forceWindow ? 0 : (U32)(dictEnd - serialState->ldmState.window.base); + } else { + /* don't even load anything */ + } + } + + /* Initialize serialState's copy of ldmWindow. */ + serialState->ldmWindow = serialState->ldmState.window; } + serialState->params = params; serialState->params.jobSize = (U32)jobSize; return 0; } -static int ZSTDMT_serialState_init(serialState_t* serialState) +static int ZSTDMT_serialState_init(SerialState* serialState) { int initError = 0; - memset(serialState, 0, sizeof(*serialState)); + ZSTD_memset(serialState, 0, sizeof(*serialState)); initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL); initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL); initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL); @@ -523,20 +566,21 @@ static int ZSTDMT_serialState_init(serialState_t* serialState) return initError; } -static void ZSTDMT_serialState_free(serialState_t* serialState) +static void ZSTDMT_serialState_free(SerialState* serialState) { ZSTD_customMem cMem = serialState->params.customMem; ZSTD_pthread_mutex_destroy(&serialState->mutex); ZSTD_pthread_cond_destroy(&serialState->cond); ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex); ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond); - ZSTD_free(serialState->ldmState.hashTable, cMem); - ZSTD_free(serialState->ldmState.bucketOffsets, cMem); + ZSTD_customFree(serialState->ldmState.hashTable, cMem); + ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); } -static void ZSTDMT_serialState_update(serialState_t* serialState, - ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore, - range_t src, unsigned jobID) +static void +ZSTDMT_serialState_genSequences(SerialState* serialState, + RawSeqStore_t* seqStore, + Range src, unsigned jobID) { /* Wait for our turn */ ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex); @@ -547,14 +591,15 @@ static void ZSTDMT_serialState_update(serialState_t* serialState, /* A future job may error and skip our job */ if 
(serialState->nextJobID == jobID) { /* It is now our turn, do any processing necessary */ - if (serialState->params.ldmParams.enableLdm) { + if (serialState->params.ldmParams.enableLdm == ZSTD_ps_enable) { size_t error; - assert(seqStore.seq != NULL && seqStore.pos == 0 && - seqStore.size == 0 && seqStore.capacity > 0); + DEBUGLOG(6, "ZSTDMT_serialState_genSequences: LDM update"); + assert(seqStore->seq != NULL && seqStore->pos == 0 && + seqStore->size == 0 && seqStore->capacity > 0); assert(src.size <= serialState->params.jobSize); - ZSTD_window_update(&serialState->ldmState.window, src.start, src.size); + ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, /* forceNonContiguous */ 0); error = ZSTD_ldm_generateSequences( - &serialState->ldmState, &seqStore, + &serialState->ldmState, seqStore, &serialState->params.ldmParams, src.start, src.size); /* We provide a large enough buffer to never fail. */ assert(!ZSTD_isError(error)); (void)error; @@ -573,17 +618,22 @@ static void ZSTDMT_serialState_update(serialState_t* serialState, serialState->nextJobID++; ZSTD_pthread_cond_broadcast(&serialState->cond); ZSTD_pthread_mutex_unlock(&serialState->mutex); +} - if (seqStore.size > 0) { - size_t const err = ZSTD_referenceExternalSequences( - jobCCtx, seqStore.seq, seqStore.size); - assert(serialState->params.ldmParams.enableLdm); - assert(!ZSTD_isError(err)); - (void)err; +static void +ZSTDMT_serialState_applySequences(const SerialState* serialState, /* just for an assert() check */ + ZSTD_CCtx* jobCCtx, + const RawSeqStore_t* seqStore) +{ + if (seqStore->size > 0) { + DEBUGLOG(5, "ZSTDMT_serialState_applySequences: uploading %u external sequences", (unsigned)seqStore->size); + assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable); (void)serialState; + assert(jobCCtx); + ZSTD_referenceExternalSequences(jobCCtx, seqStore->seq, seqStore->size); } } -static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState, +static void ZSTDMT_serialState_ensureFinished(SerialState* serialState, unsigned jobID, size_t cSize) { ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex); @@ -607,36 +657,37 @@ static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState, /* ===== Worker thread ===== */ /* ------------------------------------------ */ -static const range_t kNullRange = { NULL, 0 }; +static const Range kNullRange = { NULL, 0 }; typedef struct { - size_t consumed; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */ - size_t cSize; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */ - ZSTD_pthread_mutex_t job_mutex; /* Thread-safe - used by mtctx and worker */ - ZSTD_pthread_cond_t job_cond; /* Thread-safe - used by mtctx and worker */ - ZSTDMT_CCtxPool* cctxPool; /* Thread-safe - used by mtctx and (all) workers */ - ZSTDMT_bufferPool* bufPool; /* Thread-safe - used by mtctx and (all) workers */ - ZSTDMT_seqPool* seqPool; /* Thread-safe - used by mtctx and (all) workers */ - serialState_t* serial; /* Thread-safe - used by mtctx and (all) workers */ - buffer_t dstBuff; /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */ - range_t prefix; /* set by mtctx, then read by worker & mtctx => no barrier */ - range_t src; /* set by mtctx, then read by worker & mtctx => no barrier */ - unsigned jobID; /* set by mtctx, then read by worker => no barrier */ - unsigned firstJob; /* set by mtctx, then read by worker => no barrier */ - unsigned lastJob; /* set by mtctx, then 
read by worker => no barrier */ - ZSTD_CCtx_params params; /* set by mtctx, then read by worker => no barrier */ - const ZSTD_CDict* cdict; /* set by mtctx, then read by worker => no barrier */ - unsigned long long fullFrameSize; /* set by mtctx, then read by worker => no barrier */ - size_t dstFlushed; /* used only by mtctx */ - unsigned frameChecksumNeeded; /* used only by mtctx */ + size_t consumed; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */ + size_t cSize; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */ + ZSTD_pthread_mutex_t job_mutex; /* Thread-safe - used by mtctx and worker */ + ZSTD_pthread_cond_t job_cond; /* Thread-safe - used by mtctx and worker */ + ZSTDMT_CCtxPool* cctxPool; /* Thread-safe - used by mtctx and (all) workers */ + ZSTDMT_bufferPool* bufPool; /* Thread-safe - used by mtctx and (all) workers */ + ZSTDMT_seqPool* seqPool; /* Thread-safe - used by mtctx and (all) workers */ + SerialState* serial; /* Thread-safe - used by mtctx and (all) workers */ + Buffer dstBuff; /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */ + Range prefix; /* set by mtctx, then read by worker & mtctx => no barrier */ + Range src; /* set by mtctx, then read by worker & mtctx => no barrier */ + unsigned jobID; /* set by mtctx, then read by worker => no barrier */ + unsigned firstJob; /* set by mtctx, then read by worker => no barrier */ + unsigned lastJob; /* set by mtctx, then read by worker => no barrier */ + ZSTD_CCtx_params params; /* set by mtctx, then read by worker => no barrier */ + const ZSTD_CDict* cdict; /* set by mtctx, then read by worker => no barrier */ + unsigned long long fullFrameSize; /* set by mtctx, then read by worker => no barrier */ + size_t dstFlushed; /* used only by mtctx */ + unsigned frameChecksumNeeded; /* used only by mtctx */ } ZSTDMT_jobDescription; -#define JOB_ERROR(e) { \ - ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); \ - job->cSize = e; \ - ZSTD_pthread_mutex_unlock(&job->job_mutex); \ - goto _endJob; \ -} +#define JOB_ERROR(e) \ + do { \ + ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); \ + job->cSize = e; \ + ZSTD_pthread_mutex_unlock(&job->job_mutex); \ + goto _endJob; \ + } while (0) /* ZSTDMT_compressionJob() is a POOL_function type */ static void ZSTDMT_compressionJob(void* jobDescription) @@ -644,10 +695,11 @@ static void ZSTDMT_compressionJob(void* jobDescription) ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription; ZSTD_CCtx_params jobParams = job->params; /* do not modify job->params ! 
copy it, modify the copy */ ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool); - rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool); - buffer_t dstBuff = job->dstBuff; + RawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool); + Buffer dstBuff = job->dstBuff; size_t lastCBlockSize = 0; + DEBUGLOG(5, "ZSTDMT_compressionJob: job %u", job->jobID); /* resources */ if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation)); if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */ @@ -655,7 +707,7 @@ static void ZSTDMT_compressionJob(void* jobDescription) if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation)); job->dstBuff = dstBuff; /* this value can be read in ZSTDMT_flush, when it copies the whole job */ } - if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL) + if (jobParams.ldmParams.enableLdm == ZSTD_ps_enable && rawSeqStore.seq == NULL) JOB_ERROR(ERROR(memory_allocation)); /* Don't compute the checksum for chunks, since we compute it externally, @@ -663,38 +715,49 @@ static void ZSTDMT_compressionJob(void* jobDescription) */ if (job->jobID != 0) jobParams.fParams.checksumFlag = 0; /* Don't run LDM for the chunks, since we handle it externally */ - jobParams.ldmParams.enableLdm = 0; + jobParams.ldmParams.enableLdm = ZSTD_ps_disable; + /* Correct nbWorkers to 0. */ + jobParams.nbWorkers = 0; /* init */ + + /* Perform serial step as early as possible */ + ZSTDMT_serialState_genSequences(job->serial, &rawSeqStore, job->src, job->jobID); + if (job->cdict) { - size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, jobParams, job->fullFrameSize); + size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize); assert(job->firstJob); /* only allowed for first job */ if (ZSTD_isError(initError)) JOB_ERROR(initError); - } else { /* srcStart points at reloaded section */ + } else { U64 const pledgedSrcSize = job->firstJob ? 
job->fullFrameSize : job->src.size; { size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob); if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError); } + if (!job->firstJob) { + size_t const err = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_deterministicRefPrefix, 0); + if (ZSTD_isError(err)) JOB_ERROR(err); + } + DEBUGLOG(6, "ZSTDMT_compressionJob: job %u: loading prefix of size %zu", job->jobID, job->prefix.size); { size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, - job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */ + job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, ZSTD_dtlm_fast, NULL, /*cdict*/ - jobParams, pledgedSrcSize); + &jobParams, pledgedSrcSize); if (ZSTD_isError(initError)) JOB_ERROR(initError); } } - /* Perform serial step as early as possible, but after CCtx initialization */ - ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID); + /* External Sequences can only be applied after CCtx initialization */ + ZSTDMT_serialState_applySequences(job->serial, cctx, &rawSeqStore); if (!job->firstJob) { /* flush and overwrite frame header when it's not first job */ - size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0); + size_t const hSize = ZSTD_compressContinue_public(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0); if (ZSTD_isError(hSize)) JOB_ERROR(hSize); DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize); ZSTD_invalidateRepCodes(cctx); } - /* compress */ + /* compress the entire job by smaller chunks, for better granularity */ { size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX; int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize); const BYTE* ip = (const BYTE*) job->src.start; @@ -706,7 +769,7 @@ static void ZSTDMT_compressionJob(void* jobDescription) DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks); assert(job->cSize == 0); for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) { - size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize); + size_t const cSize = ZSTD_compressContinue_public(cctx, op, oend-op, ip, chunkSize); if (ZSTD_isError(cSize)) JOB_ERROR(cSize); ip += chunkSize; op += cSize; assert(op < oend); @@ -726,11 +789,18 @@ static void ZSTDMT_compressionJob(void* jobDescription) size_t const lastBlockSize1 = job->src.size & (chunkSize-1); size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1; size_t const cSize = (job->lastJob) ? - ZSTD_compressEnd (cctx, op, oend-op, ip, lastBlockSize) : - ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize); + ZSTD_compressEnd_public(cctx, op, oend-op, ip, lastBlockSize) : + ZSTD_compressContinue_public(cctx, op, oend-op, ip, lastBlockSize); if (ZSTD_isError(cSize)) JOB_ERROR(cSize); lastCBlockSize = cSize; } } + if (!job->firstJob) { + /* Double check that we don't have an ext-dict, because then our + * repcode invalidation doesn't work. 
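Stepping back from the compression loop above: a job's payload is deliberately not compressed in a single call. It is cut into chunks of 4*ZSTD_BLOCKSIZE_MAX so that job->cSize grows incrementally and the consumer thread can begin flushing compressed output while the worker is still running. The skeleton of that loop, as an illustrative sketch where the callback stands in for ZSTD_compressContinue_public and all names are hypothetical:

#include <stddef.h>

typedef size_t (*CompressChunkFn)(void* cctx, void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize);

static size_t compressInChunks(CompressChunkFn doChunk, void* cctx,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               size_t chunkSize)
{
    const char* ip = (const char*)src;
    char* op = (char*)dst;
    char* const oend = op + dstCapacity;
    size_t produced = 0;
    while (srcSize > 0) {
        size_t const thisChunk = (srcSize < chunkSize) ? srcSize : chunkSize;
        size_t const cSize = doChunk(cctx, op, (size_t)(oend - op), ip, thisChunk);
        /* the real code checks cSize for an error here, and publishes
         * progress (job->cSize, job->consumed) under the job mutex */
        ip += thisChunk; op += cSize; produced += cSize; srcSize -= thisChunk;
    }
    return produced;
}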
+ */ + assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window)); + } + ZSTD_CCtx_trace(cctx, 0); _endJob: ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize); @@ -755,10 +825,10 @@ static void ZSTDMT_compressionJob(void* jobDescription) /* ------------------------------------------ */ typedef struct { - range_t prefix; /* read-only non-owned prefix buffer */ - buffer_t buffer; + Range prefix; /* read-only non-owned prefix buffer */ + Buffer buffer; size_t filled; -} inBuff_t; +} InBuff_t; typedef struct { BYTE* buffer; /* The round input buffer. All jobs get references @@ -772,17 +842,26 @@ typedef struct { * the inBuff is sent to the worker thread. * pos <= capacity. */ -} roundBuff_t; +} RoundBuff_t; -static const roundBuff_t kNullRoundBuff = {NULL, 0, 0}; +static const RoundBuff_t kNullRoundBuff = {NULL, 0, 0}; #define RSYNC_LENGTH 32 +/* Don't create chunks smaller than the zstd block size. + * This stops us from regressing compression ratio too much, + * and ensures our output fits in ZSTD_compressBound(). + * + * If this is shrunk < ZSTD_BLOCKSIZELOG_MIN then + * ZSTD_COMPRESSBOUND() will need to be updated. + */ +#define RSYNC_MIN_BLOCK_LOG ZSTD_BLOCKSIZELOG_MAX +#define RSYNC_MIN_BLOCK_SIZE (1<<RSYNC_MIN_BLOCK_LOG) typedef struct { U64 hash; U64 hitMask; U64 primePower; -} rsyncState_t; +} RSyncState_t; struct ZSTDMT_CCtx_s { POOL_ctx* factory; ZSTDMT_jobDescription* jobs; ZSTDMT_bufferPool* bufPool; ZSTDMT_CCtxPool* cctxPool; ZSTDMT_seqPool* seqPool; ZSTD_CCtx_params params; size_t targetSectionSize; size_t targetPrefixSize; int jobReady; /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */ - inBuff_t inBuff; - roundBuff_t roundBuff; - serialState_t serial; - rsyncState_t rsync; - unsigned singleBlockingThread; + InBuff_t inBuff; + RoundBuff_t roundBuff; + SerialState serial; + RSyncState_t rsync; unsigned jobIDMask; unsigned doneJobID; unsigned nextJobID; @@ -810,6 +888,7 @@ struct ZSTDMT_CCtx_s { ZSTD_customMem cMem; ZSTD_CDict* cdictLocal; const ZSTD_CDict* cdict; + unsigned providedFactory: 1; }; static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem) @@ -820,7 +899,7 @@ static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZS ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex); ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond); } - ZSTD_free(jobTable, cMem); + ZSTD_customFree(jobTable, cMem); } /* ZSTDMT_allocJobsTable() @@ -832,7 +911,7 @@ static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_custom U32 const nbJobs = 1 << nbJobsLog2; U32 jobNb; ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*) - ZSTD_calloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem); + ZSTD_customCalloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem); int initError = 0; if (jobTable==NULL) return NULL; *nbJobsPtr = nbJobs; @@ -863,12 +942,12 @@ static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) { /* ZSTDMT_CCtxParam_setNbWorkers(): * Internal use only */ -size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers) +static size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers) { return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers); } -MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem) +MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool) { ZSTDMT_CCtx* mtctx; U32 nbJobs = nbWorkers + 2; @@ -881,16 +960,23 @@ MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, /* invalid custom allocator */ return NULL; - mtctx = (ZSTDMT_CCtx*) ZSTD_calloc(sizeof(ZSTDMT_CCtx), cMem); + mtctx = (ZSTDMT_CCtx*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtx), cMem); if 
(!mtctx) return NULL; ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers); mtctx->cMem = cMem; mtctx->allJobsCompleted = 1; - mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem); + if (pool != NULL) { + mtctx->factory = pool; + mtctx->providedFactory = 1; + } + else { + mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem); + mtctx->providedFactory = 0; + } mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem); assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0); /* ensure nbJobs is a power of 2 */ mtctx->jobIDMask = nbJobs - 1; - mtctx->bufPool = ZSTDMT_createBufferPool(nbWorkers, cMem); + mtctx->bufPool = ZSTDMT_createBufferPool(BUF_POOL_MAX_NB_BUFFERS(nbWorkers), cMem); mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem); mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem); initError = ZSTDMT_serialState_init(&mtctx->serial); @@ -903,22 +989,18 @@ MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, return mtctx; } -ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem) +ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool) { #ifdef ZSTD_MULTITHREAD - return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem); + return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem, pool); #else (void)nbWorkers; (void)cMem; + (void)pool; return NULL; #endif } -ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers) -{ - return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem); -} - /* ZSTDMT_releaseAllJobResources() : * note : ensure all workers are killed first ! */ @@ -926,13 +1008,21 @@ static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx) { unsigned jobID; DEBUGLOG(3, "ZSTDMT_releaseAllJobResources"); - for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) { - DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start); - ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); - mtctx->jobs[jobID].dstBuff = g_nullBuffer; - mtctx->jobs[jobID].cSize = 0; + if (mtctx->jobs) { + for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) { + /* Copy the mutex/cond out */ + ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex; + ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond; + + DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start); + ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); + + /* Clear the job description, but keep the mutex/cond */ + ZSTD_memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID])); + mtctx->jobs[jobID].job_mutex = mutex; + mtctx->jobs[jobID].job_cond = cond; + } } - memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription)); mtctx->inBuff.buffer = g_nullBuffer; mtctx->inBuff.filled = 0; mtctx->allJobsCompleted = 1; @@ -956,7 +1046,8 @@ static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx) size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx) { if (mtctx==NULL) return 0; /* compatible with free on NULL */ - POOL_free(mtctx->factory); /* stop and free worker threads */ + if (!mtctx->providedFactory) + POOL_free(mtctx->factory); /* stop and free worker threads */ ZSTDMT_releaseAllJobResources(mtctx); /* release job resources into pools first */ ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem); ZSTDMT_freeBufferPool(mtctx->bufPool); @@ -965,8 +1056,8 @@ size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx) ZSTDMT_serialState_free(&mtctx->serial); ZSTD_freeCDict(mtctx->cdictLocal); if 
(mtctx->roundBuff.buffer) - ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem); - ZSTD_free(mtctx, mtctx->cMem); + ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem); + ZSTD_customFree(mtctx, mtctx->cMem); return 0; } @@ -983,73 +1074,14 @@ size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx) + mtctx->roundBuff.capacity; } -/* Internal only */ -size_t -ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, - ZSTDMT_parameter parameter, - int value) -{ - DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter"); - switch(parameter) - { - case ZSTDMT_p_jobSize : - DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i", value); - return ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, value); - case ZSTDMT_p_overlapLog : - DEBUGLOG(4, "ZSTDMT_p_overlapLog : %i", value); - return ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, value); - case ZSTDMT_p_rsyncable : - DEBUGLOG(4, "ZSTD_p_rsyncable : %i", value); - return ZSTD_CCtxParams_setParameter(params, ZSTD_c_rsyncable, value); - default : - return ERROR(parameter_unsupported); - } -} - -size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value) -{ - DEBUGLOG(4, "ZSTDMT_setMTCtxParameter"); - return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value); -} - -size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value) -{ - switch (parameter) { - case ZSTDMT_p_jobSize: - return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_jobSize, value); - case ZSTDMT_p_overlapLog: - return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_overlapLog, value); - case ZSTDMT_p_rsyncable: - return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_rsyncable, value); - default: - return ERROR(parameter_unsupported); - } -} - -/* Sets parameters relevant to the compression job, - * initializing others to default values. 
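The helper being deleted here had one duty that survives, relocated into ZSTDMT_compressionJob itself: every worker runs with a copy of the caller's parameters in which all multithreading-related fields are cleared, so a job compresses exactly like a single-threaded context and can never fan out workers of its own. The shape of that derivation, as an illustrative sketch over a toy struct rather than the real ZSTD_CCtx_params:

typedef struct {
    int nbWorkers;
    int jobSize;
    int overlapLog;
    int rsyncable;
    int compressionLevel;
} ToyParams;

static ToyParams deriveJobParams(ToyParams shared)
{
    ToyParams job = shared;  /* keep the compression-relevant fields */
    job.nbWorkers  = 0;      /* a job never spawns its own workers */
    job.jobSize    = 0;      /* splitting was already decided by the caller */
    job.overlapLog = 0;
    job.rsyncable  = 0;
    return job;
}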
*/ -static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params) -{ - ZSTD_CCtx_params jobParams = params; - /* Clear parameters related to multithreading */ - jobParams.forceWindow = 0; - jobParams.nbWorkers = 0; - jobParams.jobSize = 0; - jobParams.overlapLog = 0; - jobParams.rsyncable = 0; - memset(&jobParams.ldmParams, 0, sizeof(ldmParams_t)); - memset(&jobParams.customMem, 0, sizeof(ZSTD_customMem)); - return jobParams; -} - /* ZSTDMT_resize() : * @return : error code if fails, 0 on success */ static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers) { if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation); - FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) ); - mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers); + FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) , ""); + mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, BUF_POOL_MAX_NB_BUFFERS(nbWorkers)); if (mtctx->bufPool == NULL) return ERROR(memory_allocation); mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers); if (mtctx->cctxPool == NULL) return ERROR(memory_allocation); @@ -1070,7 +1102,7 @@ void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_p DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)", compressionLevel); mtctx->params.compressionLevel = compressionLevel; - { ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, 0, 0); + { ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); cParams.windowLog = saved_wlog; mtctx->params.cParams = cParams; } @@ -1092,7 +1124,7 @@ ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx) { unsigned jobNb; unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1); DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)", - mtctx->doneJobID, lastJobNb, mtctx->jobReady) + mtctx->doneJobID, lastJobNb, mtctx->jobReady); for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) { unsigned const wJobID = jobNb & mtctx->jobIDMask; ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID]; @@ -1129,9 +1161,14 @@ size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx) size_t const produced = ZSTD_isError(cResult) ? 0 : cResult; size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed; assert(flushed <= produced); + assert(jobPtr->consumed <= jobPtr->src.size); toFlush = produced - flushed; - if (toFlush==0 && (jobPtr->consumed >= jobPtr->src.size)) { - /* doneJobID is not-fully-flushed, but toFlush==0 : doneJobID should be compressing some more data */ + /* if toFlush==0, nothing is available to flush. + * However, jobID is expected to still be active: + * if jobID was already completed and fully flushed, + * ZSTDMT_flushProduced() should have already moved onto next job. + * Therefore, some input has not yet been consumed. */ + if (toFlush==0) { assert(jobPtr->consumed < jobPtr->src.size); } } @@ -1146,14 +1183,18 @@ size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx) /* ===== Multi-threaded compression ===== */ /* ------------------------------------------ */ -static unsigned ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params const params) +static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params) { - if (params.ldmParams.enableLdm) + unsigned jobLog; + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* In Long Range Mode, the windowLog is typically oversized. 
* In which case, it's preferable to determine the jobSize - * based on chainLog instead. */ - return MAX(21, params.cParams.chainLog + 4); - return MAX(20, params.cParams.windowLog + 2); + * based on cycleLog instead. */ + jobLog = MAX(21, ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy) + 3); + } else { + jobLog = MAX(20, params->cParams.windowLog + 2); + } + return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX); } static int ZSTDMT_overlapLog_default(ZSTD_strategy strat) @@ -1184,191 +1225,25 @@ static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat) return ovlog; } -static size_t ZSTDMT_computeOverlapSize(ZSTD_CCtx_params const params) +static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params) { - int const overlapRLog = 9 - ZSTDMT_overlapLog(params.overlapLog, params.cParams.strategy); - int ovLog = (overlapRLog >= 8) ? 0 : (params.cParams.windowLog - overlapRLog); + int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy); + int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog); assert(0 <= overlapRLog && overlapRLog <= 8); - if (params.ldmParams.enableLdm) { + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* In Long Range Mode, the windowLog is typically oversized. * In which case, it's preferable to determine the jobSize * based on chainLog instead. * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */ - ovLog = MIN(params.cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2) + ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2) - overlapRLog; } - assert(0 <= ovLog && ovLog <= 30); - DEBUGLOG(4, "overlapLog : %i", params.overlapLog); + assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX); + DEBUGLOG(4, "overlapLog : %i", params->overlapLog); DEBUGLOG(4, "overlap size : %i", 1 << ovLog); return (ovLog==0) ? 0 : (size_t)1 << ovLog; } -static unsigned -ZSTDMT_computeNbJobs(ZSTD_CCtx_params params, size_t srcSize, unsigned nbWorkers) -{ - assert(nbWorkers>0); - { size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params); - size_t const jobMaxSize = jobSizeTarget << 2; - size_t const passSizeMax = jobMaxSize * nbWorkers; - unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1; - unsigned const nbJobsLarge = multiplier * nbWorkers; - unsigned const nbJobsMax = (unsigned)(srcSize / jobSizeTarget) + 1; - unsigned const nbJobsSmall = MIN(nbJobsMax, nbWorkers); - return (multiplier>1) ? nbJobsLarge : nbJobsSmall; -} } - -/* ZSTDMT_compress_advanced_internal() : - * This is a blocking function : it will only give back control to caller after finishing its compression job. - */ -static size_t ZSTDMT_compress_advanced_internal( - ZSTDMT_CCtx* mtctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const ZSTD_CDict* cdict, - ZSTD_CCtx_params params) -{ - ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(params); - size_t const overlapSize = ZSTDMT_computeOverlapSize(params); - unsigned const nbJobs = ZSTDMT_computeNbJobs(params, srcSize, params.nbWorkers); - size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs; - size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize; /* avoid too small last block */ - const char* const srcStart = (const char*)src; - size_t remainingSrcSize = srcSize; - unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? 
nbJobs : (unsigned)(dstCapacity / ZSTD_compressBound(avgJobSize)); /* presumes avgJobSize >= 256 KB, which should be the case */ - size_t frameStartPos = 0, dstBufferPos = 0; - assert(jobParams.nbWorkers == 0); - assert(mtctx->cctxPool->totalCCtx == params.nbWorkers); - - params.jobSize = (U32)avgJobSize; - DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: nbJobs=%2u (rawSize=%u bytes; fixedSize=%u) ", - nbJobs, (U32)proposedJobSize, (U32)avgJobSize); - - if ((nbJobs==1) | (params.nbWorkers<=1)) { /* fallback to single-thread mode : this is a blocking invocation anyway */ - ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0]; - DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: fallback to single-thread mode"); - if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams); - return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, jobParams); - } - - assert(avgJobSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */ - ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) ); - if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize)) - return ERROR(memory_allocation); - - FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) ); /* only expands if necessary */ - - { unsigned u; - for (u=0; u<nbJobs; u++) { - size_t const jobSize = MIN(remainingSrcSize, avgJobSize); - size_t const dstBufferCapacity = ZSTD_compressBound(jobSize); - buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity }; - buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : g_nullBuffer; - size_t const dictSize = u ? overlapSize : 0; - - mtctx->jobs[u].prefix.start = srcStart + frameStartPos - dictSize; - mtctx->jobs[u].prefix.size = dictSize; - mtctx->jobs[u].src.start = srcStart + frameStartPos; - mtctx->jobs[u].src.size = jobSize; assert(jobSize > 0); /* avoid job.src.size == 0 */ - mtctx->jobs[u].consumed = 0; - mtctx->jobs[u].cSize = 0; - mtctx->jobs[u].cdict = (u==0) ? 
cdict : NULL; - mtctx->jobs[u].fullFrameSize = srcSize; - mtctx->jobs[u].params = jobParams; - /* do not calculate checksum within sections, but write it in header for first section */ - mtctx->jobs[u].dstBuff = dstBuffer; - mtctx->jobs[u].cctxPool = mtctx->cctxPool; - mtctx->jobs[u].bufPool = mtctx->bufPool; - mtctx->jobs[u].seqPool = mtctx->seqPool; - mtctx->jobs[u].serial = &mtctx->serial; - mtctx->jobs[u].jobID = u; - mtctx->jobs[u].firstJob = (u==0); - mtctx->jobs[u].lastJob = (u==nbJobs-1); - - DEBUGLOG(5, "ZSTDMT_compress_advanced_internal: posting job %u (%u bytes)", u, (U32)jobSize); - DEBUG_PRINTHEX(6, mtctx->jobs[u].prefix.start, 12); - POOL_add(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[u]); - - frameStartPos += jobSize; - dstBufferPos += dstBufferCapacity; - remainingSrcSize -= jobSize; - } } - - /* collect result */ - { size_t error = 0, dstPos = 0; - unsigned jobID; - for (jobID=0; jobID<nbJobs; jobID++) { - DEBUGLOG(5, "waiting for job %u ", jobID); - ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex); - while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) { - DEBUGLOG(5, "waiting for jobCompleted signal from job %u", jobID); - ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex); - } - ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex); - DEBUGLOG(5, "ready to write job %u ", jobID); - - { size_t const cSize = mtctx->jobs[jobID].cSize; - if (ZSTD_isError(cSize)) error = cSize; - if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall); - if (jobID) { /* note : job 0 is written directly at dst, which is correct position */ - if (!error) - memmove((char*)dst + dstPos, mtctx->jobs[jobID].dstBuff.start, cSize); /* may overlap when job compressed within dst */ - if (jobID >= compressWithinDst) { /* job compressed into its own buffer, which must be released */ - DEBUGLOG(5, "releasing buffer %u>=%u", jobID, compressWithinDst); - ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); - } } - mtctx->jobs[jobID].dstBuff = g_nullBuffer; - mtctx->jobs[jobID].cSize = 0; - dstPos += cSize ; - } - } /* for (jobID=0; jobID<nbJobs; jobID++) */ - - if (params.fParams.checksumFlag) { - U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState); - if (dstPos + 4 > dstCapacity) { - error = ERROR(dstSize_tooSmall); - } else { - DEBUGLOG(4, "writing checksum : %08X \n", checksum); - MEM_writeLE32((char*)dst + dstPos, checksum); - dstPos += 4; - } } - - if (!error) DEBUGLOG(4, "compressed size : %u ", (U32)dstPos); - return error ? 
error : dstPos; - } -} - -size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const ZSTD_CDict* cdict, - ZSTD_parameters params, - int overlapLog) -{ - ZSTD_CCtx_params cctxParams = mtctx->params; - cctxParams.cParams = params.cParams; - cctxParams.fParams = params.fParams; - assert(ZSTD_OVERLAPLOG_MIN <= overlapLog && overlapLog <= ZSTD_OVERLAPLOG_MAX); - cctxParams.overlapLog = overlapLog; - return ZSTDMT_compress_advanced_internal(mtctx, - dst, dstCapacity, - src, srcSize, - cdict, cctxParams); -} - - -size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - int compressionLevel) -{ - ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0); - int const overlapLog = ZSTDMT_overlapLog_default(params.cParams.strategy); - params.fParams.contentSizeFlag = 1; - return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog); -} - - /* ====================================== */ /* ======= Streaming API ======= */ /* ====================================== */ @@ -1388,55 +1263,45 @@ size_t ZSTDMT_initCStream_internal( /* init */ if (params.nbWorkers != mtctx->params.nbWorkers) - FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) ); + FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, (unsigned)params.nbWorkers) , ""); if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN; - if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX; - - mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN); /* do not trigger multi-threading when srcSize is too small */ - if (mtctx->singleBlockingThread) { - ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(params); - DEBUGLOG(5, "ZSTDMT_initCStream_internal: switch to single blocking thread mode"); - assert(singleThreadParams.nbWorkers == 0); - return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0], - dict, dictSize, cdict, - singleThreadParams, pledgedSrcSize); - } - - DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers); + if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX; if (mtctx->allJobsCompleted == 0) { /* previous compression not correctly finished */ ZSTDMT_waitForAllJobsCompleted(mtctx); - ZSTDMT_releaseAllJobResources(mtctx); - mtctx->allJobsCompleted = 1; + ZSTDMT_releaseAllJobResources(mtctx); /* Will set allJobsCompleted to 1 */ } mtctx->params = params; mtctx->frameContentSize = pledgedSrcSize; + ZSTD_freeCDict(mtctx->cdictLocal); if (dict) { - ZSTD_freeCDict(mtctx->cdictLocal); mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */ params.cParams, mtctx->cMem); mtctx->cdict = mtctx->cdictLocal; if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation); } else { - ZSTD_freeCDict(mtctx->cdictLocal); mtctx->cdictLocal = NULL; mtctx->cdict = cdict; } - mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(params); + mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(¶ms); DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10)); mtctx->targetSectionSize = params.jobSize; if (mtctx->targetSectionSize == 0) { - mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(params); + mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(¶ms); } + assert(mtctx->targetSectionSize 
<= (size_t)ZSTDMT_JOBSIZE_MAX); + if (params.rsyncable) { /* Aim for the targetsectionSize as the average job size. */ - U32 const jobSizeMB = (U32)(mtctx->targetSectionSize >> 20); - U32 const rsyncBits = ZSTD_highbit32(jobSizeMB) + 20; - assert(jobSizeMB >= 1); + U32 const jobSizeKB = (U32)(mtctx->targetSectionSize >> 10); + U32 const rsyncBits = (assert(jobSizeKB >= 1), ZSTD_highbit32(jobSizeKB) + 10); + /* We refuse to create jobs < RSYNC_MIN_BLOCK_SIZE bytes, so make sure our + * expected job size is at least 4x larger. */ + assert(rsyncBits >= RSYNC_MIN_BLOCK_LOG + 2); DEBUGLOG(4, "rsyncLog = %u", rsyncBits); mtctx->rsync.hash = 0; mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1; @@ -1448,7 +1313,7 @@ size_t ZSTDMT_initCStream_internal( ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize)); { /* If ldm is enabled we need windowSize space. */ - size_t const windowSize = mtctx->params.ldmParams.enableLdm ? (1U << mtctx->params.cParams.windowLog) : 0; + size_t const windowSize = mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable ? (1U << mtctx->params.cParams.windowLog) : 0; /* Two buffers of slack, plus extra space for the overlap * This is the minimum slack that LDM works with. One extra because * flush might waste up to targetSectionSize-1 bytes. Another extra @@ -1463,8 +1328,8 @@ size_t ZSTDMT_initCStream_internal( size_t const capacity = MAX(windowSize, sectionsSize) + slackSize; if (mtctx->roundBuff.capacity < capacity) { if (mtctx->roundBuff.buffer) - ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem); - mtctx->roundBuff.buffer = (BYTE*)ZSTD_malloc(capacity, mtctx->cMem); + ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem); + mtctx->roundBuff.buffer = (BYTE*)ZSTD_customMalloc(capacity, mtctx->cMem); if (mtctx->roundBuff.buffer == NULL) { mtctx->roundBuff.capacity = 0; return ERROR(memory_allocation); @@ -1483,56 +1348,33 @@ size_t ZSTDMT_initCStream_internal( mtctx->allJobsCompleted = 0; mtctx->consumed = 0; mtctx->produced = 0; - if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize)) - return ERROR(memory_allocation); - return 0; -} -size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx, - const void* dict, size_t dictSize, - ZSTD_parameters params, - unsigned long long pledgedSrcSize) -{ - ZSTD_CCtx_params cctxParams = mtctx->params; /* retrieve sticky params */ - DEBUGLOG(4, "ZSTDMT_initCStream_advanced (pledgedSrcSize=%u)", (U32)pledgedSrcSize); - cctxParams.cParams = params.cParams; - cctxParams.fParams = params.fParams; - return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dct_auto, NULL, - cctxParams, pledgedSrcSize); -} - -size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx, - const ZSTD_CDict* cdict, - ZSTD_frameParameters fParams, - unsigned long long pledgedSrcSize) -{ - ZSTD_CCtx_params cctxParams = mtctx->params; - if (cdict==NULL) return ERROR(dictionary_wrong); /* method incompatible with NULL cdict */ - cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict); - cctxParams.fParams = fParams; - return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dct_auto, cdict, - cctxParams, pledgedSrcSize); -} + /* update dictionary */ + ZSTD_freeCDict(mtctx->cdictLocal); + mtctx->cdictLocal = NULL; + mtctx->cdict = NULL; + if (dict) { + if (dictContentType == ZSTD_dct_rawContent) { + mtctx->inBuff.prefix.start = (const BYTE*)dict; + mtctx->inBuff.prefix.size = dictSize; + } else { + /* note : a loadPrefix becomes an internal CDict */ + mtctx->cdictLocal = 
ZSTD_createCDict_advanced(dict, dictSize, + ZSTD_dlm_byRef, dictContentType, + params.cParams, mtctx->cMem); + mtctx->cdict = mtctx->cdictLocal; + if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation); + } + } else { + mtctx->cdict = cdict; + } + if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize, + dict, dictSize, dictContentType)) + return ERROR(memory_allocation); -/* ZSTDMT_resetCStream() : - * pledgedSrcSize can be zero == unknown (for the time being) - * prefer using ZSTD_CONTENTSIZE_UNKNOWN, - * as `0` might mean "empty" in the future */ -size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize) -{ - if (!pledgedSrcSize) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; - return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, 0, mtctx->params, - pledgedSrcSize); -} -size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) { - ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0); - ZSTD_CCtx_params cctxParams = mtctx->params; /* retrieve sticky params */ - DEBUGLOG(4, "ZSTDMT_initCStream (cLevel=%i)", compressionLevel); - cctxParams.cParams = params.cParams; - cctxParams.fParams = params.fParams; - return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN); + return 0; } @@ -1598,7 +1440,7 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZS mtctx->roundBuff.pos += srcSize; mtctx->inBuff.buffer = g_nullBuffer; mtctx->inBuff.filled = 0; - /* Set the prefix */ + /* Set the prefix for next job */ if (!endFrame) { size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize); mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize; @@ -1697,9 +1539,11 @@ static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, u assert(mtctx->doneJobID < mtctx->nextJobID); assert(cSize >= mtctx->jobs[wJobID].dstFlushed); assert(mtctx->jobs[wJobID].dstBuff.start != NULL); - memcpy((char*)output->dst + output->pos, - (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed, - toFlush); + if (toFlush > 0) { + ZSTD_memcpy((char*)output->dst + output->pos, + (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed, + toFlush); + } output->pos += toFlush; mtctx->jobs[wJobID].dstFlushed += toFlush; /* can write : this value is only used by mtctx */ @@ -1733,12 +1577,17 @@ static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, u * If the data of the first job is broken up into two segments, we cover both * sections. 
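The overlap tests that follow reduce to the textbook half-open interval check: [a, a+n) and [b, b+m) intersect exactly when a < b+m and b < a+n, and an empty interval overlaps nothing. A stand-alone rendering (illustrative sketch; as in the real code, ordering pointers into unrelated allocations is formally undefined in ISO C, which both tolerate for this internal bookkeeping):

#include <stddef.h>

static int rangesOverlap(const char* aStart, size_t aLen,
                         const char* bStart, size_t bLen)
{
    if (aStart == NULL || bStart == NULL) return 0;
    if (aLen == 0 || bLen == 0) return 0;  /* empty never overlaps */
    return (aStart < bStart + bLen) && (bStart < aStart + aLen);
}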
*/ -static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx) +static Range ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx) { unsigned const firstJobID = mtctx->doneJobID; unsigned const lastJobID = mtctx->nextJobID; unsigned jobID; + /* no need to check during first round */ + size_t roundBuffCapacity = mtctx->roundBuff.capacity; + size_t nbJobs1stRoundMin = roundBuffCapacity / mtctx->targetSectionSize; + if (lastJobID < nbJobs1stRoundMin) return kNullRange; + for (jobID = firstJobID; jobID < lastJobID; ++jobID) { unsigned const wJobID = jobID & mtctx->jobIDMask; size_t consumed; @@ -1748,7 +1597,7 @@ static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx) ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); if (consumed < mtctx->jobs[wJobID].src.size) { - range_t range = mtctx->jobs[wJobID].prefix; + Range range = mtctx->jobs[wJobID].prefix; if (range.size == 0) { /* Empty prefix */ range = mtctx->jobs[wJobID].src; @@ -1764,26 +1613,30 @@ static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx) /** * Returns non-zero iff buffer and range overlap. */ -static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range) +static int ZSTDMT_isOverlapped(Buffer buffer, Range range) { BYTE const* const bufferStart = (BYTE const*)buffer.start; - BYTE const* const bufferEnd = bufferStart + buffer.capacity; BYTE const* const rangeStart = (BYTE const*)range.start; - BYTE const* const rangeEnd = rangeStart + range.size; if (rangeStart == NULL || bufferStart == NULL) return 0; - /* Empty ranges cannot overlap */ - if (bufferStart == bufferEnd || rangeStart == rangeEnd) - return 0; - return bufferStart < rangeEnd && rangeStart < bufferEnd; + { + BYTE const* const bufferEnd = bufferStart + buffer.capacity; + BYTE const* const rangeEnd = rangeStart + range.size; + + /* Empty ranges cannot overlap */ + if (bufferStart == bufferEnd || rangeStart == rangeEnd) + return 0; + + return bufferStart < rangeEnd && rangeStart < bufferEnd; + } } -static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window) +static int ZSTDMT_doesOverlapWindow(Buffer buffer, ZSTD_window_t window) { - range_t extDict; - range_t prefix; + Range extDict; + Range prefix; DEBUGLOG(5, "ZSTDMT_doesOverlapWindow"); extDict.start = window.dictBase + window.lowLimit; @@ -1802,9 +1655,9 @@ static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window) || ZSTDMT_isOverlapped(buffer, prefix); } -static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer) +static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, Buffer buffer) { - if (mtctx->params.ldmParams.enableLdm) { + if (mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable) { ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex; DEBUGLOG(5, "ZSTDMT_waitForLdmComplete"); DEBUGLOG(5, "source [0x%zx, 0x%zx)", @@ -1827,16 +1680,16 @@ static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer) */ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx) { - range_t const inUse = ZSTDMT_getInputDataInUse(mtctx); + Range const inUse = ZSTDMT_getInputDataInUse(mtctx); size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos; - size_t const target = mtctx->targetSectionSize; - buffer_t buffer; + size_t const spaceNeeded = mtctx->targetSectionSize; + Buffer buffer; DEBUGLOG(5, "ZSTDMT_tryGetInputRange"); assert(mtctx->inBuff.buffer.start == NULL); - assert(mtctx->roundBuff.capacity >= target); + assert(mtctx->roundBuff.capacity >= spaceNeeded); - if (spaceLeft < target) { + if (spaceLeft < spaceNeeded) { /* 
ZSTD_invalidateRepCodes() doesn't work for extDict variants. * Simply copy the prefix to the beginning in that case. */ @@ -1850,12 +1703,12 @@ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx) return 0; } ZSTDMT_waitForLdmComplete(mtctx, buffer); - memmove(start, mtctx->inBuff.prefix.start, prefixSize); + ZSTD_memmove(start, mtctx->inBuff.prefix.start, prefixSize); mtctx->inBuff.prefix.start = start; mtctx->roundBuff.pos = prefixSize; } buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos; - buffer.capacity = target; + buffer.capacity = spaceNeeded; if (ZSTDMT_isOverlapped(buffer, inUse)) { DEBUGLOG(5, "Waiting for buffer..."); @@ -1882,7 +1735,7 @@ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx) typedef struct { size_t toLoad; /* The number of bytes to load from the input. */ int flush; /* Boolean declaring if we must flush because we found a synchronization point. */ -} syncPoint_t; +} SyncPoint; /** * Searches through the input for a synchronization point. If one is found, we @@ -1890,14 +1743,14 @@ typedef struct { * Otherwise, we will load as many bytes as possible and instruct the caller * to continue as normal. */ -static syncPoint_t +static SyncPoint findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) { BYTE const* const istart = (BYTE const*)input.src + input.pos; U64 const primePower = mtctx->rsync.primePower; U64 const hitMask = mtctx->rsync.hitMask; - syncPoint_t syncPoint; + SyncPoint syncPoint; U64 hash; BYTE const* prev; size_t pos; @@ -1907,6 +1760,11 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) if (!mtctx->params.rsyncable) /* Rsync is disabled. */ return syncPoint; + if (mtctx->inBuff.filled + input.size - input.pos < RSYNC_MIN_BLOCK_SIZE) + /* We don't emit synchronization points if it would produce too small blocks. + * We don't have enough input to find a synchronization point, so don't look. + */ + return syncPoint; if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH) /* Not enough to compute the hash. * We will miss any synchronization points in this RSYNC_LENGTH byte @@ -1917,23 +1775,41 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) */ return syncPoint; /* Initialize the loop variables. */ - if (mtctx->inBuff.filled >= RSYNC_LENGTH) { - /* We have enough bytes buffered to initialize the hash. + if (mtctx->inBuff.filled < RSYNC_MIN_BLOCK_SIZE) { + /* We don't need to scan the first RSYNC_MIN_BLOCK_SIZE positions + * because they can't possibly be a sync point. So we can start + * part way through the input buffer. + */ + pos = RSYNC_MIN_BLOCK_SIZE - mtctx->inBuff.filled; + if (pos >= RSYNC_LENGTH) { + prev = istart + pos - RSYNC_LENGTH; + hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH); + } else { + assert(mtctx->inBuff.filled >= RSYNC_LENGTH); + prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH; + hash = ZSTD_rollingHash_compute(prev + pos, (RSYNC_LENGTH - pos)); + hash = ZSTD_rollingHash_append(hash, istart, pos); + } + } else { + /* We have enough bytes buffered to initialize the hash, + * and have processed enough bytes to find a sync point. * Start scanning at the beginning of the input. 
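+ * The hash is seeded from the last RSYNC_LENGTH bytes already buffered, and the loop below then rolls it one byte at a time across the new input.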
*/ + assert(mtctx->inBuff.filled >= RSYNC_MIN_BLOCK_SIZE); + assert(RSYNC_MIN_BLOCK_SIZE >= RSYNC_LENGTH); pos = 0; prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH; hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH); - } else { - /* We don't have enough bytes buffered to initialize the hash, but - * we know we have at least RSYNC_LENGTH bytes total. - * Start scanning after the first RSYNC_LENGTH bytes less the bytes - * already buffered. - */ - pos = RSYNC_LENGTH - mtctx->inBuff.filled; - prev = (BYTE const*)mtctx->inBuff.buffer.start - pos; - hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled); - hash = ZSTD_rollingHash_append(hash, istart, pos); + if ((hash & hitMask) == hitMask) { + /* We're already at a sync point so don't load any more until + * we're able to flush this sync point. + * This likely happened because the job table was full so we + * couldn't add our job. + */ + syncPoint.toLoad = 0; + syncPoint.flush = 1; + return syncPoint; + } } /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll * through the input. If we hit a synchronization point, then cut the @@ -1943,16 +1819,24 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) * then a block will be emitted anyways, but this is okay, since if we * are already synchronized we will remain synchronized. */ + assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); for (; pos < syncPoint.toLoad; ++pos) { BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH]; - /* if (pos >= RSYNC_LENGTH) assert(ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); */ + /* This assert is very expensive, and Debian compiles with asserts enabled. + * So disable it for now. We can get similar coverage by checking it at the + * beginning & end of the loop. + * assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); + */ hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower); + assert(mtctx->inBuff.filled + pos >= RSYNC_MIN_BLOCK_SIZE); if ((hash & hitMask) == hitMask) { syncPoint.toLoad = pos + 1; syncPoint.flush = 1; + ++pos; /* for assert */ break; } } + assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); return syncPoint; } @@ -1978,34 +1862,11 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, assert(output->pos <= output->size); assert(input->pos <= input->size); - if (mtctx->singleBlockingThread) { /* delegate to single-thread (synchronous) */ - return ZSTD_compressStream2(mtctx->cctxPool->cctx[0], output, input, endOp); - } - if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) { /* current frame being ended. 
Only flush/end are allowed */ return ERROR(stage_wrong); } - /* single-pass shortcut (note : synchronous-mode) */ - if ( (!mtctx->params.rsyncable) /* rsyncable mode is disabled */ - && (mtctx->nextJobID == 0) /* just started */ - && (mtctx->inBuff.filled == 0) /* nothing buffered */ - && (!mtctx->jobReady) /* no job already created */ - && (endOp == ZSTD_e_end) /* end order */ - && (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) { /* enough space in dst */ - size_t const cSize = ZSTDMT_compress_advanced_internal(mtctx, - (char*)output->dst + output->pos, output->size - output->pos, - (const char*)input->src + input->pos, input->size - input->pos, - mtctx->cdict, mtctx->params); - if (ZSTD_isError(cSize)) return cSize; - input->pos = input->size; - output->pos += cSize; - mtctx->allJobsCompleted = 1; - mtctx->frameEnded = 1; - return 0; - } - /* fill input buffer */ if ( (!mtctx->jobReady) && (input->size > input->pos) ) { /* support NULL input */ @@ -2021,20 +1882,28 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start); } if (mtctx->inBuff.buffer.start != NULL) { - syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input); + SyncPoint const syncPoint = findSynchronizationPoint(mtctx, *input); if (syncPoint.flush && endOp == ZSTD_e_continue) { endOp = ZSTD_e_flush; } assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize); DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u", (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize); - memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad); + ZSTD_memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad); input->pos += syncPoint.toLoad; mtctx->inBuff.filled += syncPoint.toLoad; forwardInputProgress = syncPoint.toLoad>0; } - if ((input->pos < input->size) && (endOp == ZSTD_e_end)) - endOp = ZSTD_e_flush; /* can't end now : not all input consumed */ + } + if ((input->pos < input->size) && (endOp == ZSTD_e_end)) { + /* Can't end yet because the input is not fully consumed. + * We are in one of these cases: + * - mtctx->inBuff is NULL & empty: we couldn't get an input buffer so don't create a new job. + * - We filled the input buffer: flush this job but don't end the frame. + * - We hit a synchronization point: flush this job but don't end the frame. 
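+ * In all three cases we flush the current job ; the caller is expected to call again with ZSTD_e_end until the frame is actually terminated.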
+ */ + assert(mtctx->inBuff.filled == 0 || mtctx->inBuff.filled == mtctx->targetSectionSize || mtctx->params.rsyncable); + endOp = ZSTD_e_flush; } if ( (mtctx->jobReady) @@ -2043,7 +1912,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) { /* must finish the frame with a zero-size block */ size_t const jobSize = mtctx->inBuff.filled; assert(mtctx->inBuff.filled <= mtctx->targetSectionSize); - FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) ); + FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) , ""); } /* check for potential compressed data ready to be flushed */ @@ -2053,47 +1922,3 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, return remainingToFlush; } } - - -size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input) -{ - FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) ); - - /* recommended next input size : fill current input buffer */ - return mtctx->targetSectionSize - mtctx->inBuff.filled; /* note : could be zero when input buffer is fully filled and no more availability to create new job */ -} - - -static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_EndDirective endFrame) -{ - size_t const srcSize = mtctx->inBuff.filled; - DEBUGLOG(5, "ZSTDMT_flushStream_internal"); - - if ( mtctx->jobReady /* one job ready for a worker to pick up */ - || (srcSize > 0) /* still some data within input buffer */ - || ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) { /* need a last 0-size block to end frame */ - DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)", - (U32)srcSize, (U32)endFrame); - FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) ); - } - - /* check if there is any data available to flush */ - return ZSTDMT_flushProduced(mtctx, output, 1 /* blockToFlush */, endFrame); -} - - -size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output) -{ - DEBUGLOG(5, "ZSTDMT_flushStream"); - if (mtctx->singleBlockingThread) - return ZSTD_flushStream(mtctx->cctxPool->cctx[0], output); - return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_flush); -} - -size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output) -{ - DEBUGLOG(4, "ZSTDMT_endStream"); - if (mtctx->singleBlockingThread) - return ZSTD_endStream(mtctx->cctxPool->cctx[0], output); - return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_end); -} diff --git a/lib/compress/zstdmt_compress.h b/lib/compress/zstdmt_compress.h index 12e6bcb3a33..91b489b9cb4 100644 --- a/lib/compress/zstdmt_compress.h +++ b/lib/compress/zstdmt_compress.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,120 +11,62 @@ #ifndef ZSTDMT_COMPRESS_H #define ZSTDMT_COMPRESS_H - #if defined (__cplusplus) - extern "C" { - #endif - +/* === Dependencies === */ +#include "../common/zstd_deps.h" /* size_t */ +#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */ +#include "../zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */ /* Note : This is an internal API. * These APIs used to be exposed with ZSTDLIB_API, * because it used to be the only way to invoke MT compression. - * Now, it's recommended to use ZSTD_compress2 and ZSTD_compressStream2() - * instead. 
- * - * If you depend on these APIs and can't switch, then define - * ZSTD_LEGACY_MULTITHREADED_API when making the dynamic library. - * However, we may completely remove these functions in a future - * release, so please switch soon. + * Now, you must use ZSTD_compress2 and ZSTD_compressStream2() instead. * * This API requires ZSTD_MULTITHREAD to be defined during compilation, * otherwise ZSTDMT_createCCtx*() will fail. */ -#ifdef ZSTD_LEGACY_MULTITHREADED_API -# define ZSTDMT_API ZSTDLIB_API -#else -# define ZSTDMT_API -#endif - -/* === Dependencies === */ -#include <stddef.h> /* size_t */ -#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */ -#include "zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */ - - /* === Constants === */ -#ifndef ZSTDMT_NBWORKERS_MAX -# define ZSTDMT_NBWORKERS_MAX 200 +#ifndef ZSTDMT_NBWORKERS_MAX /* a different value can be selected at compile time */ +# define ZSTDMT_NBWORKERS_MAX ((sizeof(void*)==4) /*32-bit*/ ? 64 : 256) #endif -#ifndef ZSTDMT_JOBSIZE_MIN -# define ZSTDMT_JOBSIZE_MIN (1 MB) +#ifndef ZSTDMT_JOBSIZE_MIN /* a different value can be selected at compile time */ +# define ZSTDMT_JOBSIZE_MIN (512 KB) #endif +#define ZSTDMT_JOBLOG_MAX (MEM_32bits() ? 29 : 30) #define ZSTDMT_JOBSIZE_MAX (MEM_32bits() ? (512 MB) : (1024 MB)) +/* ======================================================== + * === Private interface, for use by ZSTD_compress.c === + * === Not exposed in libzstd. Never invoke directly === + * ======================================================== */ + /* === Memory management === */ typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx; /* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */ -ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers); -/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */ -ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, - ZSTD_customMem cMem); -ZSTDMT_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx); - -ZSTDMT_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx); - - -/* === Simple one-pass compression function === */ - -ZSTDMT_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - int compressionLevel); - +ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, + ZSTD_customMem cMem, + ZSTD_threadPool *pool); +size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx); +size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx); /* === Streaming functions === */ -ZSTDMT_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel); -ZSTDMT_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize); /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. 
Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */ - -ZSTDMT_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx); -ZSTDMT_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input); - -ZSTDMT_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */ -ZSTDMT_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */ - - -/* === Advanced functions and parameters === */ - -ZSTDMT_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const ZSTD_CDict* cdict, - ZSTD_parameters params, - int overlapLog); - -ZSTDMT_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx, - const void* dict, size_t dictSize, /* dict can be released after init, a local copy is preserved within zcs */ - ZSTD_parameters params, - unsigned long long pledgedSrcSize); /* pledgedSrcSize is optional and can be zero == unknown */ - -ZSTDMT_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx, - const ZSTD_CDict* cdict, - ZSTD_frameParameters fparams, - unsigned long long pledgedSrcSize); /* note : zero means empty */ - -/* ZSTDMT_parameter : - * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */ -typedef enum { - ZSTDMT_p_jobSize, /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */ - ZSTDMT_p_overlapLog, /* Each job may reload a part of previous job to enhance compression ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */ - ZSTDMT_p_rsyncable /* Enables rsyncable mode. */ -} ZSTDMT_parameter; - -/* ZSTDMT_setMTCtxParameter() : - * allow setting individual parameters, one at a time, among a list of enums defined in ZSTDMT_parameter. - * The function must be called typically after ZSTD_createCCtx() but __before ZSTDMT_init*() !__ - * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions. - * @return : 0, or an error code (which can be tested using ZSTD_isError()) */ -ZSTDMT_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value); - -/* ZSTDMT_getMTCtxParameter() : - * Query the ZSTDMT_CCtx for a parameter value. - * @return : 0, or an error code (which can be tested using ZSTD_isError()) */ -ZSTDMT_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value); +size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx); +/*! ZSTDMT_initCStream_internal() : + * Private use only. Init streaming operation. + * expects params to be valid. + * must receive dict, or cdict, or none, but not both. + * mtctx can be freshly constructed or reused from a prior compression. + * If mtctx is reused, memory allocations from the prior compression may not be freed, + * even if they are not needed for the current compression. 
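+ * (For example, a round buffer sized for an earlier large-window compression is kept as-is, even when the next compression would fit in less.)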
+ * @return : 0, or an error code */ +size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* mtctx, + const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, + const ZSTD_CDict* cdict, + ZSTD_CCtx_params params, unsigned long long pledgedSrcSize); /*! ZSTDMT_compressStream_generic() : * Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream() @@ -133,16 +75,10 @@ ZSTDMT_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter * 0 if fully flushed * or an error code * note : needs to be init using any ZSTD_initCStream*() variant */ -ZSTDMT_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, - ZSTD_outBuffer* output, - ZSTD_inBuffer* input, - ZSTD_EndDirective endOp); - - -/* ======================================================== - * === Private interface, for use by ZSTD_compress.c === - * === Not exposed in libzstd. Never invoke directly === - * ======================================================== */ +size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, + ZSTD_outBuffer* output, + ZSTD_inBuffer* input, + ZSTD_EndDirective endOp); /*! ZSTDMT_toFlushNow() * Tell how many bytes are ready to be flushed immediately. @@ -152,15 +88,6 @@ ZSTDMT_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, * therefore flushing is limited by speed of oldest job. */ size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx); -/*! ZSTDMT_CCtxParam_setMTCtxParameter() - * like ZSTDMT_setMTCtxParameter(), but into a ZSTD_CCtx_Params */ -size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, int value); - -/*! ZSTDMT_CCtxParam_setNbWorkers() - * Set nbWorkers, and clamp it. - * Also reset jobSize and overlapLog */ -size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers); - /*! ZSTDMT_updateCParams_whileCompressing() : * Updates only a selected set of compression parameters, to remain compatible with current frame. * New parameters will be applied to next compression job. */ @@ -172,20 +99,4 @@ void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_p */ ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx); - -/*! ZSTDMT_initCStream_internal() : - * Private use only. Init streaming operation. - * expects params to be valid. - * must receive dict, or cdict, or none, but not both. - * @return : 0, or an error code */ -size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs, - const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, - const ZSTD_CDict* cdict, - ZSTD_CCtx_params params, unsigned long long pledgedSrcSize); - - -#if defined (__cplusplus) -} -#endif - #endif /* ZSTDMT_COMPRESS_H */ diff --git a/lib/decompress/huf_decompress.c b/lib/decompress/huf_decompress.c index 3f8bd297320..92020617824 100644 --- a/lib/decompress/huf_decompress.c +++ b/lib/decompress/huf_decompress.c @@ -1,52 +1,46 @@ /* ****************************************************************** - huff0 huffman decoder, - part of Finite State Entropy library - Copyright (C) 2013-present, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + * huff0 huffman decoder, + * part of Finite State Entropy library + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * You can contact the author at : + * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ /* ************************************************************** * Dependencies ****************************************************************/ -#include <string.h> /* memcpy, memset */ -#include "compiler.h" -#include "bitstream.h" /* BIT_* */ -#include "fse.h" /* to compress headers */ -#define HUF_STATIC_LINKING_ONLY -#include "huf.h" -#include "error_private.h" +#include <stddef.h> /* size_t */ +#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */ +#include "../common/compiler.h" +#include "../common/bitstream.h" /* BIT_* */ +#include "../common/fse.h" /* to compress headers */ +#include "../common/huf.h" +#include "../common/error_private.h" +#include "../common/zstd_internal.h" +#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_countTrailingZeros64 */ + +/* ************************************************************** +* Constants +****************************************************************/ + +#define HUF_DECODER_FAST_TABLELOG 11 /* ************************************************************** * Macros ****************************************************************/ +#ifdef HUF_DISABLE_FAST_DECODE +# define HUF_ENABLE_FAST_DECODE 0 +#else +# define HUF_ENABLE_FAST_DECODE 1 +#endif + /* These two optional macros force the use one way or another of the two * Huffman decompression implementations. You can't force in both directions * at the same time. @@ -56,12 +50,33 @@ #error "Cannot force the use of the X1 and X2 decoders at the same time!" #endif +/* When DYNAMIC_BMI2 is enabled, fast decoders are only called when bmi2 is + * supported at runtime, so we can add the BMI2 target attribute. + * When it is disabled, we will still get BMI2 if it is enabled statically. 
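+ * (For example, building with -mbmi2 or -march=haswell defines __BMI2__, so BMI2 code is generated unconditionally.)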
+ */ +#if DYNAMIC_BMI2 +# define HUF_FAST_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE +#else +# define HUF_FAST_BMI2_ATTRS +#endif + +#ifdef __cplusplus +# define HUF_EXTERN_C extern "C" +#else +# define HUF_EXTERN_C +#endif +#define HUF_ASM_DECL HUF_EXTERN_C + +#if DYNAMIC_BMI2 +# define HUF_NEED_BMI2_FUNCTION 1 +#else +# define HUF_NEED_BMI2_FUNCTION 0 +#endif /* ************************************************************** * Error Management ****************************************************************/ #define HUF_isError ERR_isError -#define CHECK_F(f) { size_t const err_ = (f); if (HUF_isError(err_)) return err_; } /* ************************************************************** @@ -74,6 +89,11 @@ /* ************************************************************** * BMI2 Variant Wrappers ****************************************************************/ +typedef size_t (*HUF_DecompressUsingDTableFn)(void *dst, size_t dstSize, + const void *cSrc, + size_t cSrcSize, + const HUF_DTable *DTable); + #if DYNAMIC_BMI2 #define HUF_DGEN(fn) \ @@ -86,7 +106,7 @@ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ } \ \ - static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2( \ + static BMI2_TARGET_ATTRIBUTE size_t fn##_bmi2( \ void* dst, size_t dstSize, \ const void* cSrc, size_t cSrcSize, \ const HUF_DTable* DTable) \ @@ -95,9 +115,9 @@ } \ \ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ - size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ + size_t cSrcSize, HUF_DTable const* DTable, int flags) \ { \ - if (bmi2) { \ + if (flags & HUF_flags_bmi2) { \ return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \ } \ return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \ @@ -107,9 +127,9 @@ #define HUF_DGEN(fn) \ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ - size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ + size_t cSrcSize, HUF_DTable const* DTable, int flags) \ { \ - (void)bmi2; \ + (void)flags; \ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ } @@ -124,82 +144,381 @@ typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; static DTableDesc HUF_getDTableDesc(const HUF_DTable* table) { DTableDesc dtd; - memcpy(&dtd, table, sizeof(dtd)); + ZSTD_memcpy(&dtd, table, sizeof(dtd)); return dtd; } +static size_t HUF_initFastDStream(BYTE const* ip) { + BYTE const lastByte = ip[7]; + size_t const bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0; + size_t const value = MEM_readLEST(ip) | 1; + assert(bitsConsumed <= 8); + assert(sizeof(size_t) == 8); + return value << bitsConsumed; +} + + +/** + * The input/output arguments to the Huffman fast decoding loop: + * + * ip [in/out] - The input pointers, must be updated to reflect what is consumed. + * op [in/out] - The output pointers, must be updated to reflect what is written. + * bits [in/out] - The bitstream containers, must be updated to reflect the current state. + * dt [in] - The decoding table. + * ilowest [in] - The beginning of the valid range of the input. Decoders may read + * down to this pointer. It may be below iend[0]. + * oend [in] - The end of the output stream. op[3] must not cross oend. + * iend [in] - The end of each input stream. ip[i] may cross iend[i], + * as long as it is above ilowest, but that indicates corruption. 
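+ * For illustration, the four streams write interleaved quarters of the output : op[1..3] start at successive (dstSize+3)/4 boundaries, mirroring the 4-stream split produced by the encoder.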
+ */ +typedef struct { + BYTE const* ip[4]; + BYTE* op[4]; + U64 bits[4]; + void const* dt; + BYTE const* ilowest; + BYTE* oend; + BYTE const* iend[4]; +} HUF_DecompressFastArgs; + +typedef void (*HUF_DecompressFastLoopFn)(HUF_DecompressFastArgs*); + +/** + * Initializes args for the fast decoding loop. + * @returns 1 on success + * 0 if the fallback implementation should be used. + * Or an error code on failure. + */ +static size_t HUF_DecompressFastArgs_init(HUF_DecompressFastArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable) +{ + void const* dt = DTable + 1; + U32 const dtLog = HUF_getDTableDesc(DTable).tableLog; + + const BYTE* const istart = (const BYTE*)src; + + BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(dst, (ptrdiff_t)dstSize); + + /* The fast decoding loop assumes 64-bit little-endian. + * This condition is false on x32. + */ + if (!MEM_isLittleEndian() || MEM_32bits()) + return 0; + + /* Avoid nullptr addition */ + if (dstSize == 0) + return 0; + assert(dst != NULL); + + /* strict minimum : jump table + 1 byte per stream */ + if (srcSize < 10) + return ERROR(corruption_detected); + + /* Must have at least 8 bytes per stream because we don't handle initializing smaller bit containers. + * If table log is not correct at this point, fallback to the old decoder. + * On small inputs we don't have enough data to trigger the fast loop, so use the old decoder. + */ + if (dtLog != HUF_DECODER_FAST_TABLELOG) + return 0; + + /* Read the jump table. */ + { + size_t const length1 = MEM_readLE16(istart); + size_t const length2 = MEM_readLE16(istart+2); + size_t const length3 = MEM_readLE16(istart+4); + size_t const length4 = srcSize - (length1 + length2 + length3 + 6); + args->iend[0] = istart + 6; /* jumpTable */ + args->iend[1] = args->iend[0] + length1; + args->iend[2] = args->iend[1] + length2; + args->iend[3] = args->iend[2] + length3; + + /* HUF_initFastDStream() requires this, and this small of an input + * won't benefit from the ASM loop anyways. + */ + if (length1 < 8 || length2 < 8 || length3 < 8 || length4 < 8) + return 0; + if (length4 > srcSize) return ERROR(corruption_detected); /* overflow */ + } + /* ip[] contains the position that is currently loaded into bits[]. */ + args->ip[0] = args->iend[1] - sizeof(U64); + args->ip[1] = args->iend[2] - sizeof(U64); + args->ip[2] = args->iend[3] - sizeof(U64); + args->ip[3] = (BYTE const*)src + srcSize - sizeof(U64); + + /* op[] contains the output pointers. */ + args->op[0] = (BYTE*)dst; + args->op[1] = args->op[0] + (dstSize+3)/4; + args->op[2] = args->op[1] + (dstSize+3)/4; + args->op[3] = args->op[2] + (dstSize+3)/4; + + /* No point to call the ASM loop for tiny outputs. */ + if (args->op[3] >= oend) + return 0; + + /* bits[] is the bit container. + * It is read from the MSB down to the LSB. + * It is shifted left as it is read, and zeros are + * shifted in. After the lowest valid bit a 1 is + * set, so that CountTrailingZeros(bits[]) can be used + * to count how many bits we've consumed. + */ + args->bits[0] = HUF_initFastDStream(args->ip[0]); + args->bits[1] = HUF_initFastDStream(args->ip[1]); + args->bits[2] = HUF_initFastDStream(args->ip[2]); + args->bits[3] = HUF_initFastDStream(args->ip[3]); + + /* The decoders must be sure to never read beyond ilowest. + * This is lower than iend[0], but allowing decoders to read + * down to ilowest can allow an extra iteration or two in the + * fast loop. 
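+ * (Here ilowest is istart, the first byte of the 6-byte jump table, so reads may legally undershoot iend[0] by up to 6 bytes.)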
+ */ + args->ilowest = istart; + + args->oend = oend; + args->dt = dt; + + return 1; +} + +static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressFastArgs const* args, int stream, BYTE* segmentEnd) +{ + /* Validate that we haven't overwritten. */ + if (args->op[stream] > segmentEnd) + return ERROR(corruption_detected); + /* Validate that we haven't read beyond iend[]. + * Note that ip[] may be < iend[] because the MSB is + * the next bit to read, and we may have consumed 100% + * of the stream, so down to iend[i] - 8 is valid. + */ + if (args->ip[stream] < args->iend[stream] - 8) + return ERROR(corruption_detected); + + /* Construct the BIT_DStream_t. */ + assert(sizeof(size_t) == 8); + bit->bitContainer = MEM_readLEST(args->ip[stream]); + bit->bitsConsumed = ZSTD_countTrailingZeros64(args->bits[stream]); + bit->start = (const char*)args->ilowest; + bit->limitPtr = bit->start + sizeof(size_t); + bit->ptr = (const char*)args->ip[stream]; + + return 0; +} + +/* Calls X(N) for each stream 0, 1, 2, 3. */ +#define HUF_4X_FOR_EACH_STREAM(X) \ + do { \ + X(0); \ + X(1); \ + X(2); \ + X(3); \ + } while (0) + +/* Calls X(N, var) for each stream 0, 1, 2, 3. */ +#define HUF_4X_FOR_EACH_STREAM_WITH_VAR(X, var) \ + do { \ + X(0, (var)); \ + X(1, (var)); \ + X(2, (var)); \ + X(3, (var)); \ + } while (0) + #ifndef HUF_FORCE_DECOMPRESS_X2 /*-***************************/ /* single-symbol decoding */ /*-***************************/ -typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1; /* single-symbol decoding */ +typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1; /* single-symbol decoding */ + +/** + * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at + * a time. + */ +static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) { + U64 D4; + if (MEM_isLittleEndian()) { + D4 = (U64)((symbol << 8) + nbBits); + } else { + D4 = (U64)(symbol + (nbBits << 8)); + } + assert(D4 < (1U << 16)); + D4 *= 0x0001000100010001ULL; + return D4; +} + +/** + * Increase the tableLog to targetTableLog and rescales the stats. + * If tableLog > targetTableLog this is a no-op. + * @returns New tableLog + */ +static U32 HUF_rescaleStats(BYTE* huffWeight, U32* rankVal, U32 nbSymbols, U32 tableLog, U32 targetTableLog) +{ + if (tableLog > targetTableLog) + return tableLog; + if (tableLog < targetTableLog) { + U32 const scale = targetTableLog - tableLog; + U32 s; + /* Increase the weight for all non-zero probability symbols by scale. */ + for (s = 0; s < nbSymbols; ++s) { + huffWeight[s] += (BYTE)((huffWeight[s] == 0) ? 0 : scale); + } + /* Update rankVal to reflect the new weights. + * All weights except 0 get moved to weight + scale. + * Weights [1, scale] are empty. 
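+ * Worked example : rescaling tableLog=6 to targetTableLog=11 gives scale=5 ; a weight-3 symbol becomes weight 8, rankVal[8] takes the old rankVal[3], and rankVal[1..5] drop to 0.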
+ */ + for (s = targetTableLog; s > scale; --s) { + rankVal[s] = rankVal[s - scale]; + } + for (s = scale; s > 0; --s) { + rankVal[s] = 0; + } + } + return targetTableLog; +} -size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize) +typedef struct { + U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; + U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1]; + U32 statsWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32]; + BYTE symbols[HUF_SYMBOLVALUE_MAX + 1]; + BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; +} HUF_ReadDTableX1_Workspace; + +size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags) { U32 tableLog = 0; U32 nbSymbols = 0; size_t iSize; void* const dtPtr = DTable + 1; HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr; + HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace; - U32* rankVal; - BYTE* huffWeight; - size_t spaceUsed32 = 0; - - rankVal = (U32 *)workSpace + spaceUsed32; - spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1; - huffWeight = (BYTE *)((U32 *)workSpace + spaceUsed32); - spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; - - if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge); + DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp)); + if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge); DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable)); - /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */ + /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */ - iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); + iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), flags); if (HUF_isError(iSize)) return iSize; + /* Table header */ { DTableDesc dtd = HUF_getDTableDesc(DTable); + U32 const maxTableLog = dtd.maxTableLog + 1; + U32 const targetTableLog = MIN(maxTableLog, HUF_DECODER_FAST_TABLELOG); + tableLog = HUF_rescaleStats(wksp->huffWeight, wksp->rankVal, nbSymbols, tableLog, targetTableLog); if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */ dtd.tableType = 0; dtd.tableLog = (BYTE)tableLog; - memcpy(DTable, &dtd, sizeof(dtd)); + ZSTD_memcpy(DTable, &dtd, sizeof(dtd)); } - /* Calculate starting value for each rank */ - { U32 n, nextRankStart = 0; - for (n=1; n<tableLog+1; n++) { - U32 const current = nextRankStart; - nextRankStart += (rankVal[n] << (n-1)); - rankVal[n] = current; - } } - - /* fill DTable */ - { U32 n; - for (n=0; n<nbSymbols; n++) { - U32 const w = huffWeight[n]; - U32 const length = (1 << w) >> 1; - U32 u; - HUF_DEltX1 D; - D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w); - for (u = rankVal[w]; u < rankVal[w] + length; u++) - dt[u] = D; - rankVal[w] += length; - } } + /* Compute symbols and rankStart given rankVal: + * + * rankVal already contains the number of values of each weight. + * + * symbols contains the symbols ordered by weight. First are the rankVal[0] + * weight 0 symbols, followed by the rankVal[1] weight 1 symbols, and so on. + * symbols[0] is filled (but unused) to avoid a branch. + * + * rankStart contains the offset where each rank belongs in the DTable. + * rankStart[0] is not filled because there are no entries in the table for + * weight 0. 
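+ * For illustration, with rankVal = {1, 2, 1} : symbols[0] holds the weight-0 symbol, symbols[1..2] the two weight-1 symbols, symbols[3] the weight-2 symbol, and rankStart[1]=1, rankStart[2]=3.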
+ */ + { int n; + U32 nextRankStart = 0; + int const unroll = 4; + int const nLimit = (int)nbSymbols - unroll + 1; + for (n=0; n<(int)tableLog+1; n++) { + U32 const curr = nextRankStart; + nextRankStart += wksp->rankVal[n]; + wksp->rankStart[n] = curr; + } + for (n=0; n < nLimit; n += unroll) { + int u; + for (u=0; u < unroll; ++u) { + size_t const w = wksp->huffWeight[n+u]; + wksp->symbols[wksp->rankStart[w]++] = (BYTE)(n+u); + } + } + for (; n < (int)nbSymbols; ++n) { + size_t const w = wksp->huffWeight[n]; + wksp->symbols[wksp->rankStart[w]++] = (BYTE)n; + } + } + /* fill DTable + * We fill all entries of each weight in order. + * That way length is a constant for each iteration of the outer loop. + * We can switch based on the length to a different inner loop which is + * optimized for that particular case. + */ + { U32 w; + int symbol = wksp->rankVal[0]; + int rankStart = 0; + for (w=1; w<tableLog+1; ++w) { + int const symbolCount = wksp->rankVal[w]; + int const length = (1 << w) >> 1; + int uStart = rankStart; + BYTE const nbBits = (BYTE)(tableLog + 1 - w); + int s; + int u; + switch (length) { + case 1: + for (s=0; s<symbolCount; ++s) { + HUF_DEltX1 D; + D.byte = wksp->symbols[symbol + s]; + D.nbBits = nbBits; + dt[uStart] = D; + uStart += 1; + } + break; + case 2: + for (s=0; s<symbolCount; ++s) { + HUF_DEltX1 D; + D.byte = wksp->symbols[symbol + s]; + D.nbBits = nbBits; + dt[uStart+0] = D; + dt[uStart+1] = D; + uStart += 2; + } + break; + case 4: + for (s=0; s<symbolCount; ++s) { + U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits); + MEM_write64(dt + uStart, D4); + uStart += 4; + } + break; + case 8: + for (s=0; s<symbolCount; ++s) { + U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits); + MEM_write64(dt + uStart, D4); + MEM_write64(dt + uStart + 4, D4); + uStart += 8; + } + break; + default: + for (s=0; s<symbolCount; ++s) { + U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits); + for (u=0; u < length; u += 16) { + MEM_write64(dt + uStart + u + 0, D4); + MEM_write64(dt + uStart + u + 4, D4); + MEM_write64(dt + uStart + u + 8, D4); + MEM_write64(dt + uStart + u + 12, D4); + } + assert(u == length); + uStart += length; + } + break; + } + symbol += symbolCount; + rankStart += symbolCount * length; + } + } return iSize; } -size_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_readDTableX1_wksp(DTable, src, srcSize, - workSpace, sizeof(workSpace)); -} - FORCE_INLINE_TEMPLATE BYTE HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog) { @@ -210,15 +529,19 @@ HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog } #define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \ - *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog) + do { *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog); } while (0) -#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \ - if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ - HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) +#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \ + do { \ + if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ + HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr); \ + } while (0) -#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \ - if (MEM_64bits()) \ - HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) +#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \ + do { \ + if (MEM_64bits()) \ + HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr); \ + } while (0) HINT_INLINE size_t HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog) @@ -226,11 +549,15 @@ HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, cons BYTE* const pStart = p; /* up to 4 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) { - HUF_DECODE_SYMBOLX1_2(p, 
bitDPtr); - HUF_DECODE_SYMBOLX1_1(p, bitDPtr); - HUF_DECODE_SYMBOLX1_2(p, bitDPtr); - HUF_DECODE_SYMBOLX1_0(p, bitDPtr); + if ((pEnd - p) > 3) { + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) { + HUF_DECODE_SYMBOLX1_2(p, bitDPtr); + HUF_DECODE_SYMBOLX1_1(p, bitDPtr); + HUF_DECODE_SYMBOLX1_2(p, bitDPtr); + HUF_DECODE_SYMBOLX1_0(p, bitDPtr); + } + } else { + BIT_reloadDStream(bitDPtr); } /* [0-3] symbols remaining */ @@ -242,7 +569,7 @@ HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, cons while (p < pEnd) HUF_DECODE_SYMBOLX1_0(p, bitDPtr); - return pEnd-pStart; + return (size_t)(pEnd-pStart); } FORCE_INLINE_TEMPLATE size_t @@ -252,7 +579,7 @@ HUF_decompress1X1_usingDTable_internal_body( const HUF_DTable* DTable) { BYTE* op = (BYTE*)dst; - BYTE* const oend = op + dstSize; + BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(op, (ptrdiff_t)dstSize); const void* dtPtr = DTable + 1; const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr; BIT_DStream_t bitD; @@ -268,6 +595,10 @@ HUF_decompress1X1_usingDTable_internal_body( return dstSize; } +/* HUF_decompress4X1_usingDTable_internal_body(): + * Conditions : + * @dstSize >= 6 + */ FORCE_INLINE_TEMPLATE size_t HUF_decompress4X1_usingDTable_internal_body( void* dst, size_t dstSize, @@ -276,10 +607,12 @@ HUF_decompress4X1_usingDTable_internal_body( { /* Check */ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; + BYTE* const olimit = oend - 3; const void* const dtPtr = DTable + 1; const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr; @@ -304,39 +637,42 @@ HUF_decompress4X1_usingDTable_internal_body( BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; - U32 endSignal = BIT_DStream_unfinished; DTableDesc const dtd = HUF_getDTableDesc(DTable); U32 const dtLog = dtd.tableLog; + U32 endSignal = 1; if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ + assert(dstSize >= 6); /* validated above */ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */ - endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); - while ( (endSignal==BIT_DStream_unfinished) && (op4<(oend-3)) ) { - HUF_DECODE_SYMBOLX1_2(op1, &bitD1); - HUF_DECODE_SYMBOLX1_2(op2, &bitD2); - HUF_DECODE_SYMBOLX1_2(op3, &bitD3); - HUF_DECODE_SYMBOLX1_2(op4, &bitD4); - HUF_DECODE_SYMBOLX1_1(op1, &bitD1); - HUF_DECODE_SYMBOLX1_1(op2, &bitD2); - HUF_DECODE_SYMBOLX1_1(op3, &bitD3); - HUF_DECODE_SYMBOLX1_1(op4, &bitD4); - HUF_DECODE_SYMBOLX1_2(op1, &bitD1); - HUF_DECODE_SYMBOLX1_2(op2, &bitD2); - HUF_DECODE_SYMBOLX1_2(op3, &bitD3); - HUF_DECODE_SYMBOLX1_2(op4, &bitD4); - HUF_DECODE_SYMBOLX1_0(op1, &bitD1); - HUF_DECODE_SYMBOLX1_0(op2, &bitD2); - HUF_DECODE_SYMBOLX1_0(op3, &bitD3); - HUF_DECODE_SYMBOLX1_0(op4, &bitD4); - BIT_reloadDStream(&bitD1); - BIT_reloadDStream(&bitD2); - BIT_reloadDStream(&bitD3); - BIT_reloadDStream(&bitD4); + if ((size_t)(oend - op4) >= sizeof(size_t)) { + for ( ; 
(endSignal) & (op4 < olimit) ; ) { + HUF_DECODE_SYMBOLX1_2(op1, &bitD1); + HUF_DECODE_SYMBOLX1_2(op2, &bitD2); + HUF_DECODE_SYMBOLX1_2(op3, &bitD3); + HUF_DECODE_SYMBOLX1_2(op4, &bitD4); + HUF_DECODE_SYMBOLX1_1(op1, &bitD1); + HUF_DECODE_SYMBOLX1_1(op2, &bitD2); + HUF_DECODE_SYMBOLX1_1(op3, &bitD3); + HUF_DECODE_SYMBOLX1_1(op4, &bitD4); + HUF_DECODE_SYMBOLX1_2(op1, &bitD1); + HUF_DECODE_SYMBOLX1_2(op2, &bitD2); + HUF_DECODE_SYMBOLX1_2(op3, &bitD3); + HUF_DECODE_SYMBOLX1_2(op4, &bitD4); + HUF_DECODE_SYMBOLX1_0(op1, &bitD1); + HUF_DECODE_SYMBOLX1_0(op2, &bitD2); + HUF_DECODE_SYMBOLX1_0(op3, &bitD3); + HUF_DECODE_SYMBOLX1_0(op4, &bitD4); + endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; + } } /* check corruption */ @@ -362,99 +698,248 @@ HUF_decompress4X1_usingDTable_internal_body( } } +#if HUF_NEED_BMI2_FUNCTION +static BMI2_TARGET_ATTRIBUTE +size_t HUF_decompress4X1_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable) { + return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); +} +#endif -typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize, - const void *cSrc, - size_t cSrcSize, - const HUF_DTable *DTable); +static +size_t HUF_decompress4X1_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable) { + return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); +} -HUF_DGEN(HUF_decompress1X1_usingDTable_internal) -HUF_DGEN(HUF_decompress4X1_usingDTable_internal) +#if ZSTD_ENABLE_ASM_X86_64_BMI2 +HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN; +#endif -size_t HUF_decompress1X1_usingDTable( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) +static HUF_FAST_BMI2_ATTRS +void HUF_decompress4X1_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args) { - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 0) return ERROR(GENERIC); - return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); + U64 bits[4]; + BYTE const* ip[4]; + BYTE* op[4]; + U16 const* const dtable = (U16 const*)args->dt; + BYTE* const oend = args->oend; + BYTE const* const ilowest = args->ilowest; + + /* Copy the arguments to local variables */ + ZSTD_memcpy(&bits, &args->bits, sizeof(bits)); + ZSTD_memcpy((void*)(&ip), &args->ip, sizeof(ip)); + ZSTD_memcpy(&op, &args->op, sizeof(op)); + + assert(MEM_isLittleEndian()); + assert(!MEM_32bits()); + + for (;;) { + BYTE* olimit; + int stream; + + /* Assert loop preconditions */ +#ifndef NDEBUG + for (stream = 0; stream < 4; ++stream) { + assert(op[stream] <= (stream == 3 ? oend : op[stream + 1])); + assert(ip[stream] >= ilowest); + } +#endif + /* Compute olimit */ + { + /* Each iteration produces 5 output symbols per stream */ + size_t const oiters = (size_t)(oend - op[3]) / 5; + /* Each iteration consumes up to 11 bits * 5 = 55 bits < 7 bytes + * per stream. 
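+ * (11 bits is the worst case per symbol, since the fast loop only runs when tableLog == HUF_DECODER_FAST_TABLELOG == 11.)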
+ */ + size_t const iiters = (size_t)(ip[0] - ilowest) / 7; + /* We can safely run iters iterations before running bounds checks */ + size_t const iters = MIN(oiters, iiters); + size_t const symbols = iters * 5; + + /* We can simply check that op[3] < olimit, instead of checking all + * of our bounds, since we can't hit the other bounds until we've run + * iters iterations, which only happens when op[3] == olimit. + */ + olimit = op[3] + symbols; + + /* Exit fast decoding loop once we reach the end. */ + if (op[3] == olimit) + break; + + /* Exit the decoding loop if any input pointer has crossed the + * previous one. This indicates corruption, and a precondition + * to our loop is that ip[i] >= ip[0]. + */ + for (stream = 1; stream < 4; ++stream) { + if (ip[stream] < ip[stream - 1]) + goto _out; + } + } + +#ifndef NDEBUG + for (stream = 1; stream < 4; ++stream) { + assert(ip[stream] >= ip[stream - 1]); + } +#endif + +#define HUF_4X1_DECODE_SYMBOL(_stream, _symbol) \ + do { \ + U64 const index = bits[(_stream)] >> 53; \ + U16 const entry = dtable[index]; \ + bits[(_stream)] <<= entry & 0x3F; \ + op[(_stream)][(_symbol)] = (BYTE)(entry >> 8); \ + } while (0) + +#define HUF_4X1_RELOAD_STREAM(_stream) \ + do { \ + U64 const ctz = ZSTD_countTrailingZeros64(bits[(_stream)]); \ + U64 const nbBits = ctz & 7; \ + U64 const nbBytes = ctz >> 3; \ + op[(_stream)] += 5; \ + ip[(_stream)] -= nbBytes; \ + bits[(_stream)] = MEM_read64(ip[(_stream)]) | 1; \ + bits[(_stream)] <<= nbBits; \ + } while (0) + + /* Manually unroll the loop because compilers don't consistently + * unroll the inner loops, which destroys performance. + */ + do { + /* Decode 5 symbols in each of the 4 streams */ + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 0); + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 1); + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 2); + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 3); + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 4); + + /* Reload each of the 4 bitstreams */ + HUF_4X_FOR_EACH_STREAM(HUF_4X1_RELOAD_STREAM); + } while (op[3] < olimit); + +#undef HUF_4X1_DECODE_SYMBOL +#undef HUF_4X1_RELOAD_STREAM + } + +_out: + + /* Save the final values of each of the state variables back to args. 
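+ * The caller then finishes each stream with the scalar decoder, using these values to rebuild a BIT_DStream_t per stream.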
*/ + ZSTD_memcpy(&args->bits, &bits, sizeof(bits)); + ZSTD_memcpy((void*)(&args->ip), &ip, sizeof(ip)); + ZSTD_memcpy(&args->op, &op, sizeof(op)); } -size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) +/** + * @returns @p dstSize on success (>= 6) + * 0 if the fallback implementation should be used + * An error if an error occurred + */ +static HUF_FAST_BMI2_ATTRS +size_t +HUF_decompress4X1_usingDTable_internal_fast( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable, + HUF_DecompressFastLoopFn loopFn) { - const BYTE* ip = (const BYTE*) cSrc; + void const* dt = DTable + 1; + BYTE const* const ilowest = (BYTE const*)cSrc; + BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(dst, (ptrdiff_t)dstSize); + HUF_DecompressFastArgs args; + { size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); + FORWARD_IF_ERROR(ret, "Failed to init fast loop args"); + if (ret == 0) + return 0; + } - size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize); - if (HUF_isError(hSize)) return hSize; - if (hSize >= cSrcSize) return ERROR(srcSize_wrong); - ip += hSize; cSrcSize -= hSize; + assert(args.ip[0] >= args.ilowest); + loopFn(&args); + + /* Our loop guarantees that ip[] >= ilowest and that we haven't + * overwritten any op[]. + */ + assert(args.ip[0] >= ilowest); + assert(args.ip[0] >= ilowest); + assert(args.ip[1] >= ilowest); + assert(args.ip[2] >= ilowest); + assert(args.ip[3] >= ilowest); + assert(args.op[3] <= oend); + + assert(ilowest == args.ilowest); + assert(ilowest + 6 == args.iend[0]); + (void)ilowest; + + /* finish bit streams one by one. */ + { size_t const segmentSize = (dstSize+3) / 4; + BYTE* segmentEnd = (BYTE*)dst; + int i; + for (i = 0; i < 4; ++i) { + BIT_DStream_t bit; + if (segmentSize <= (size_t)(oend - segmentEnd)) + segmentEnd += segmentSize; + else + segmentEnd = oend; + FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption"); + /* Decompress and validate that we've produced exactly the expected length. 
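+ * Every segment is segmentSize bytes except possibly the last, so op[i] must land exactly on segmentEnd ; anything else indicates corrupted streams.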
*/ + args.op[i] += HUF_decodeStreamX1(args.op[i], &bit, segmentEnd, (HUF_DEltX1 const*)dt, HUF_DECODER_FAST_TABLELOG); + if (args.op[i] != segmentEnd) return ERROR(corruption_detected); + } + } - return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); + /* decoded size */ + assert(dstSize != 0); + return dstSize; } +HUF_DGEN(HUF_decompress1X1_usingDTable_internal) -size_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize) +static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable, int flags) { - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} + HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X1_usingDTable_internal_default; + HUF_DecompressFastLoopFn loopFn = HUF_decompress4X1_usingDTable_internal_fast_c_loop; -size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX); - return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize); -} +#if DYNAMIC_BMI2 + if (flags & HUF_flags_bmi2) { + fallbackFn = HUF_decompress4X1_usingDTable_internal_bmi2; +# if ZSTD_ENABLE_ASM_X86_64_BMI2 + if (!(flags & HUF_flags_disableAsm)) { + loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop; + } +# endif + } else { + return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); + } +#endif -size_t HUF_decompress4X1_usingDTable( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 0) return ERROR(GENERIC); - return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__) + if (!(flags & HUF_flags_disableAsm)) { + loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop; + } +#endif + + if (HUF_ENABLE_FAST_DECODE && !(flags & HUF_flags_disableFast)) { + size_t const ret = HUF_decompress4X1_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn); + if (ret != 0) + return ret; + } + return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); } -static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, +static size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize, int bmi2) + void* workSpace, size_t wkspSize, int flags) { const BYTE* ip = (const BYTE*) cSrc; - size_t const hSize = HUF_readDTableX1_wksp (dctx, cSrc, cSrcSize, - workSpace, wkspSize); + size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); -} - -size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) -{ - return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0); -} - - -size_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - 
return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} -size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX); - return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); + return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); } #endif /* HUF_FORCE_DECOMPRESS_X2 */ @@ -467,209 +952,322 @@ size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cS /* *************************/ typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; /* double-symbols decoding */ -typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; +typedef struct { BYTE symbol; } sortedSymbol_t; typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1]; typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX]; +/** + * Constructs a HUF_DEltX2 in a U32. + */ +static U32 HUF_buildDEltX2U32(U32 symbol, U32 nbBits, U32 baseSeq, int level) +{ + U32 seq; + DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, sequence) == 0); + DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, nbBits) == 2); + DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, length) == 3); + DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U32)); + if (MEM_isLittleEndian()) { + seq = level == 1 ? symbol : (baseSeq + (symbol << 8)); + return seq + (nbBits << 16) + ((U32)level << 24); + } else { + seq = level == 1 ? (symbol << 8) : ((baseSeq << 8) + symbol); + return (seq << 16) + (nbBits << 8) + (U32)level; + } +} -/* HUF_fillDTableX2Level2() : - * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ -static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed, - const U32* rankValOrigin, const int minWeight, - const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, - U32 nbBitsBaseline, U16 baseSeq) +/** + * Constructs a HUF_DEltX2. + */ +static HUF_DEltX2 HUF_buildDEltX2(U32 symbol, U32 nbBits, U32 baseSeq, int level) { HUF_DEltX2 DElt; - U32 rankVal[HUF_TABLELOG_MAX + 1]; + U32 const val = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level); + DEBUG_STATIC_ASSERT(sizeof(DElt) == sizeof(val)); + ZSTD_memcpy(&DElt, &val, sizeof(val)); + return DElt; +} - /* get pre-calculated rankVal */ - memcpy(rankVal, rankValOrigin, sizeof(rankVal)); +/** + * Constructs 2 HUF_DEltX2s and packs them into a U64. + */ +static U64 HUF_buildDEltX2U64(U32 symbol, U32 nbBits, U16 baseSeq, int level) +{ + U32 DElt = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level); + return (U64)DElt + ((U64)DElt << 32); +} - /* fill skipped values */ +/** + * Fills the DTable rank with all the symbols from [begin, end) that are each + * nbBits long. + * + * @param DTableRank The start of the rank in the DTable. + * @param begin The first symbol to fill (inclusive). + * @param end The last symbol to fill (exclusive). + * @param nbBits Each symbol is nbBits long. + * @param tableLog The table log. + * @param baseSeq If level == 1 { 0 } else { the first level symbol } + * @param level The level in the table. Must be 1 or 2. 
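+ *
+ * For example, with tableLog == 12 and nbBits == 9, each symbol of this weight
+ * owns a run of 1 << (12 - 9) == 8 consecutive cells; run lengths are always
+ * powers of two, which is why the switch below unrolls the small cases.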
+ */ +static void HUF_fillDTableX2ForWeight( + HUF_DEltX2* DTableRank, + sortedSymbol_t const* begin, sortedSymbol_t const* end, + U32 nbBits, U32 tableLog, + U16 baseSeq, int const level) +{ + U32 const length = 1U << ((tableLog - nbBits) & 0x1F /* quiet static-analyzer */); + const sortedSymbol_t* ptr; + assert(level >= 1 && level <= 2); + switch (length) { + case 1: + for (ptr = begin; ptr != end; ++ptr) { + HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level); + *DTableRank++ = DElt; + } + break; + case 2: + for (ptr = begin; ptr != end; ++ptr) { + HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level); + DTableRank[0] = DElt; + DTableRank[1] = DElt; + DTableRank += 2; + } + break; + case 4: + for (ptr = begin; ptr != end; ++ptr) { + U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); + ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2)); + DTableRank += 4; + } + break; + case 8: + for (ptr = begin; ptr != end; ++ptr) { + U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); + ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2)); + DTableRank += 8; + } + break; + default: + for (ptr = begin; ptr != end; ++ptr) { + U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); + HUF_DEltX2* const DTableRankEnd = DTableRank + length; + for (; DTableRank != DTableRankEnd; DTableRank += 8) { + ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2)); + } + } + break; + } +} + +/* HUF_fillDTableX2Level2() : + * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ +static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32 consumedBits, + const U32* rankVal, const int minWeight, const int maxWeight1, + const sortedSymbol_t* sortedSymbols, U32 const* rankStart, + U32 nbBitsBaseline, U16 baseSeq) +{ + /* Fill skipped values (all positions up to rankVal[minWeight]). + * These positions only get a single symbol because the combined weight + * is too large. 
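+ * For example, with targetLog == 11 and consumedBits == 6, this sub-table
+ * spans 1 << (11 - 6) == 32 cells, and the first rankVal[minWeight] of them
+ * keep a level-1 entry carrying baseSeq alone.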
+ */ if (minWeight>1) { - U32 i, skipSize = rankVal[minWeight]; - MEM_writeLE16(&(DElt.sequence), baseSeq); - DElt.nbBits = (BYTE)(consumed); - DElt.length = 1; - for (i = 0; i < skipSize; i++) - DTable[i] = DElt; + U32 const length = 1U << ((targetLog - consumedBits) & 0x1F /* quiet static-analyzer */); + U64 const DEltX2 = HUF_buildDEltX2U64(baseSeq, consumedBits, /* baseSeq */ 0, /* level */ 1); + int const skipSize = rankVal[minWeight]; + assert(length > 1); + assert((U32)skipSize < length); + switch (length) { + case 2: + assert(skipSize == 1); + ZSTD_memcpy(DTable, &DEltX2, sizeof(DEltX2)); + break; + case 4: + assert(skipSize <= 4); + ZSTD_memcpy(DTable + 0, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTable + 2, &DEltX2, sizeof(DEltX2)); + break; + default: + { + int i; + for (i = 0; i < skipSize; i += 8) { + ZSTD_memcpy(DTable + i + 0, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTable + i + 2, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTable + i + 4, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTable + i + 6, &DEltX2, sizeof(DEltX2)); + } + } + } } - /* fill DTable */ - { U32 s; for (s=0; s= 1 */ - - rankVal[weight] += length; - } } + /* Fill each of the second level symbols by weight. */ + { + int w; + for (w = minWeight; w < maxWeight1; ++w) { + int const begin = rankStart[w]; + int const end = rankStart[w+1]; + U32 const nbBits = nbBitsBaseline - w; + U32 const totalBits = nbBits + consumedBits; + HUF_fillDTableX2ForWeight( + DTable + rankVal[w], + sortedSymbols + begin, sortedSymbols + end, + totalBits, targetLog, + baseSeq, /* level */ 2); + } + } } - static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog, - const sortedSymbol_t* sortedList, const U32 sortedListSize, - const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, + const sortedSymbol_t* sortedList, + const U32* rankStart, rankValCol_t* rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) { - U32 rankVal[HUF_TABLELOG_MAX + 1]; + U32* const rankVal = rankValOrigin[0]; const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ const U32 minBits = nbBitsBaseline - maxWeight; - U32 s; - - memcpy(rankVal, rankValOrigin, sizeof(rankVal)); - - /* fill DTable */ - for (s=0; s= minBits) { /* enough room for a second symbol */ - U32 sortedRank; + int w; + int const wEnd = (int)maxWeight + 1; + + /* Fill DTable in order of weight. */ + for (w = 1; w < wEnd; ++w) { + int const begin = (int)rankStart[w]; + int const end = (int)rankStart[w+1]; + U32 const nbBits = nbBitsBaseline - w; + + if (targetLog-nbBits >= minBits) { + /* Enough room for a second symbol. */ + int start = rankVal[w]; + U32 const length = 1U << ((targetLog - nbBits) & 0x1F /* quiet static-analyzer */); int minWeight = nbBits + scaleLog; + int s; if (minWeight < 1) minWeight = 1; - sortedRank = rankStart[minWeight]; - HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits, - rankValOrigin[nbBits], minWeight, - sortedList+sortedRank, sortedListSize-sortedRank, - nbBitsBaseline, symbol); + /* Fill the DTable for every symbol of weight w. + * These symbols get at least 1 second symbol. 
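+ * For example, with targetLog == 12, a symbol coded in nbBits == 8 leaves
+ * 12 - 8 == 4 bits of room, so each of its 1 << 4 == 16 cells can also pack
+ * a second symbol of up to 4 bits.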
+ */ + for (s = begin; s != end; ++s) { + HUF_fillDTableX2Level2( + DTable + start, targetLog, nbBits, + rankValOrigin[nbBits], minWeight, wEnd, + sortedList, rankStart, + nbBitsBaseline, sortedList[s].symbol); + start += length; + } + } else { - HUF_DEltX2 DElt; - MEM_writeLE16(&(DElt.sequence), symbol); - DElt.nbBits = (BYTE)(nbBits); - DElt.length = 1; - { U32 const end = start + length; - U32 u; - for (u = start; u < end; u++) DTable[u] = DElt; - } } - rankVal[weight] += length; + /* Only a single symbol. */ + HUF_fillDTableX2ForWeight( + DTable + rankVal[w], + sortedList + begin, sortedList + end, + nbBits, targetLog, + /* baseSeq */ 0, /* level */ 1); + } } } +typedef struct { + rankValCol_t rankVal[HUF_TABLELOG_MAX]; + U32 rankStats[HUF_TABLELOG_MAX + 1]; + U32 rankStart0[HUF_TABLELOG_MAX + 3]; + sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1]; + BYTE weightList[HUF_SYMBOLVALUE_MAX + 1]; + U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32]; +} HUF_ReadDTableX2_Workspace; + size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, - void* workSpace, size_t wkspSize) + void* workSpace, size_t wkspSize, int flags) { - U32 tableLog, maxW, sizeOfSort, nbSymbols; + U32 tableLog, maxW, nbSymbols; DTableDesc dtd = HUF_getDTableDesc(DTable); - U32 const maxTableLog = dtd.maxTableLog; + U32 maxTableLog = dtd.maxTableLog; size_t iSize; void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */ HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr; U32 *rankStart; - rankValCol_t* rankVal; - U32* rankStats; - U32* rankStart0; - sortedSymbol_t* sortedSymbol; - BYTE* weightList; - size_t spaceUsed32 = 0; - - rankVal = (rankValCol_t *)((U32 *)workSpace + spaceUsed32); - spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2; - rankStats = (U32 *)workSpace + spaceUsed32; - spaceUsed32 += HUF_TABLELOG_MAX + 1; - rankStart0 = (U32 *)workSpace + spaceUsed32; - spaceUsed32 += HUF_TABLELOG_MAX + 2; - sortedSymbol = (sortedSymbol_t *)workSpace + (spaceUsed32 * sizeof(U32)) / sizeof(sortedSymbol_t); - spaceUsed32 += HUF_ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2; - weightList = (BYTE *)((U32 *)workSpace + spaceUsed32); - spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; - - if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge); - - rankStart = rankStart0 + 1; - memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1)); + HUF_ReadDTableX2_Workspace* const wksp = (HUF_ReadDTableX2_Workspace*)workSpace; + + if (sizeof(*wksp) > wkspSize) return ERROR(GENERIC); + + rankStart = wksp->rankStart0 + 1; + ZSTD_memset(wksp->rankStats, 0, sizeof(wksp->rankStats)); + ZSTD_memset(wksp->rankStart0, 0, sizeof(wksp->rankStart0)); DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */ if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); - /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */ + /* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzers complain ... 
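+ * (the HUF_readStats_wksp() call below is expected to write
+ * weightList[0..nbSymbols-1] before any entry is read back)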
*/ - iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); + iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), flags); if (HUF_isError(iSize)) return iSize; /* check result */ if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ + if (tableLog <= HUF_DECODER_FAST_TABLELOG && maxTableLog > HUF_DECODER_FAST_TABLELOG) maxTableLog = HUF_DECODER_FAST_TABLELOG; /* find maxWeight */ - for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */ + for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */ /* Get start index of each weight */ { U32 w, nextRankStart = 0; for (w=1; wrankStats[w]; + rankStart[w] = curr; } rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ - sizeOfSort = nextRankStart; + rankStart[maxW+1] = nextRankStart; } /* sort symbols by weight */ { U32 s; for (s=0; sweightList[s]; U32 const r = rankStart[w]++; - sortedSymbol[r].symbol = (BYTE)s; - sortedSymbol[r].weight = (BYTE)w; + wksp->sortedSymbol[r].symbol = (BYTE)s; } rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */ } /* Build rankVal */ - { U32* const rankVal0 = rankVal[0]; + { U32* const rankVal0 = wksp->rankVal[0]; { int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */ U32 nextRankVal = 0; U32 w; for (w=1; wrankStats[w] << (w+rescale); + rankVal0[w] = curr; } } { U32 const minBits = tableLog+1 - maxW; U32 consumed; for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) { - U32* const rankValPtr = rankVal[consumed]; + U32* const rankValPtr = wksp->rankVal[consumed]; U32 w; for (w = 1; w < maxW+1; w++) { rankValPtr[w] = rankVal0[w] >> consumed; } } } } HUF_fillDTableX2(dt, maxTableLog, - sortedSymbol, sizeOfSort, - rankStart0, rankVal, maxW, + wksp->sortedSymbol, + wksp->rankStart0, wksp->rankVal, maxW, tableLog+1); dtd.tableLog = (BYTE)maxTableLog; dtd.tableType = 1; - memcpy(DTable, &dtd, sizeof(dtd)); + ZSTD_memcpy(DTable, &dtd, sizeof(dtd)); return iSize; } -size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_readDTableX2_wksp(DTable, src, srcSize, - workSpace, sizeof(workSpace)); -} - FORCE_INLINE_TEMPLATE U32 HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog) { size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ - memcpy(op, dt+val, 2); + ZSTD_memcpy(op, &dt[val].sequence, 2); BIT_skipBits(DStream, dt[val].nbBits); return dt[val].length; } @@ -678,28 +1276,34 @@ FORCE_INLINE_TEMPLATE U32 HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog) { size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ - memcpy(op, dt+val, 1); - if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); - else { + ZSTD_memcpy(op, &dt[val].sequence, 1); + if (dt[val].length==1) { + BIT_skipBits(DStream, dt[val].nbBits); + } else { if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { BIT_skipBits(DStream, dt[val].nbBits); if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) /* ugly hack; works only because it's the last symbol. 
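   Overshoot is possible because a double-symbol entry records only the pair's combined nbBits, yet just its first byte is emitted here.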
Note : can't easily extract nbBits from just this symbol */ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); - } } + } + } return 1; } #define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ - ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) + do { ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); } while (0) -#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ - if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ - ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) +#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ + do { \ + if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ + ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); \ + } while (0) -#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ - if (MEM_64bits()) \ - ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) +#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ + do { \ + if (MEM_64bits()) \ + ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); \ + } while (0) HINT_INLINE size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, @@ -708,19 +1312,37 @@ HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, BYTE* const pStart = p; /* up to 8 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) { - HUF_DECODE_SYMBOLX2_2(p, bitDPtr); - HUF_DECODE_SYMBOLX2_1(p, bitDPtr); - HUF_DECODE_SYMBOLX2_2(p, bitDPtr); - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + if ((size_t)(pEnd - p) >= sizeof(bitDPtr->bitContainer)) { + if (dtLog <= 11 && MEM_64bits()) { + /* up to 10 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-9)) { + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + } + } else { + /* up to 8 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) { + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_1(p, bitDPtr); + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + } + } + } else { + BIT_reloadDStream(bitDPtr); } /* closer to end : up to 2 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2)) - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + if ((size_t)(pEnd - p) >= 2) { + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2)) + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - while (p <= pEnd-2) - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ + while (p <= pEnd-2) + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ + } if (p < pEnd) p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog); @@ -741,7 +1363,7 @@ HUF_decompress1X2_usingDTable_internal_body( /* decode */ { BYTE* const ostart = (BYTE*) dst; - BYTE* const oend = ostart + dstSize; + BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(ostart, (ptrdiff_t)dstSize); const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; DTableDesc const dtd = HUF_getDTableDesc(DTable); @@ -755,7 +1377,10 @@ HUF_decompress1X2_usingDTable_internal_body( return dstSize; } - +/* HUF_decompress4X2_usingDTable_internal_body(): + * Conditions: + * @dstSize >= 6 + */ FORCE_INLINE_TEMPLATE size_t HUF_decompress4X2_usingDTable_internal_body( void* dst, size_t dstSize, @@ -763,10 +1388,12 @@ 
HUF_decompress4X2_usingDTable_internal_body( const HUF_DTable* DTable) { if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; + BYTE* const olimit = oend - (sizeof(size_t)-1); const void* const dtPtr = DTable+1; const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; @@ -791,37 +1418,66 @@ HUF_decompress4X2_usingDTable_internal_body( BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; - U32 endSignal; + U32 endSignal = 1; DTableDesc const dtd = HUF_getDTableDesc(DTable); U32 const dtLog = dtd.tableLog; - if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ + assert(dstSize >= 6 /* validated above */); CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); /* 16-32 symbols per loop (4-8 symbols per stream) */ - endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); - for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) { - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_1(op1, &bitD1); - HUF_DECODE_SYMBOLX2_1(op2, &bitD2); - HUF_DECODE_SYMBOLX2_1(op3, &bitD3); - HUF_DECODE_SYMBOLX2_1(op4, &bitD4); - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_0(op1, &bitD1); - HUF_DECODE_SYMBOLX2_0(op2, &bitD2); - HUF_DECODE_SYMBOLX2_0(op3, &bitD3); - HUF_DECODE_SYMBOLX2_0(op4, &bitD4); - - endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + if ((size_t)(oend - op4) >= sizeof(size_t)) { + for ( ; (endSignal) & (op4 < olimit); ) { +#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__)) + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_1(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_0(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_1(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_0(op2, &bitD2); + endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_1(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_0(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_1(op4, &bitD4); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_0(op4, &bitD4); + endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; +#else + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_1(op1, &bitD1); + 
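+                    /* (On 64-bit targets a full reload leaves at least 57 valid
+                     * bits (bitsConsumed <= 7), so the four lookups per stream in
+                     * this unrolled body, at most 4 * HUF_TABLELOG_MAX == 48 bits,
+                     * are safe between reloads; interleaving the four streams
+                     * keeps their loads in flight at once.) */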
HUF_DECODE_SYMBOLX2_1(op2, &bitD2); + HUF_DECODE_SYMBOLX2_1(op3, &bitD3); + HUF_DECODE_SYMBOLX2_1(op4, &bitD4); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_0(op1, &bitD1); + HUF_DECODE_SYMBOLX2_0(op2, &bitD2); + HUF_DECODE_SYMBOLX2_0(op3, &bitD3); + HUF_DECODE_SYMBOLX2_0(op4, &bitD4); + endSignal = (U32)LIKELY((U32) + (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished) + & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished) + & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished) + & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished)); +#endif + } } /* check corruption */ @@ -845,94 +1501,293 @@ HUF_decompress4X2_usingDTable_internal_body( } } -HUF_DGEN(HUF_decompress1X2_usingDTable_internal) -HUF_DGEN(HUF_decompress4X2_usingDTable_internal) +#if HUF_NEED_BMI2_FUNCTION +static BMI2_TARGET_ATTRIBUTE +size_t HUF_decompress4X2_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable) { + return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); +} +#endif -size_t HUF_decompress1X2_usingDTable( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 1) return ERROR(GENERIC); - return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +static +size_t HUF_decompress4X2_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable) { + return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); } -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) +#if ZSTD_ENABLE_ASM_X86_64_BMI2 + +HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN; + +#endif + +static HUF_FAST_BMI2_ATTRS +void HUF_decompress4X2_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args) { - const BYTE* ip = (const BYTE*) cSrc; + U64 bits[4]; + BYTE const* ip[4]; + BYTE* op[4]; + BYTE* oend[4]; + HUF_DEltX2 const* const dtable = (HUF_DEltX2 const*)args->dt; + BYTE const* const ilowest = args->ilowest; + + /* Copy the arguments to local registers. */ + ZSTD_memcpy(&bits, &args->bits, sizeof(bits)); + ZSTD_memcpy((void*)(&ip), &args->ip, sizeof(ip)); + ZSTD_memcpy(&op, &args->op, sizeof(op)); + + oend[0] = op[1]; + oend[1] = op[2]; + oend[2] = op[3]; + oend[3] = args->oend; + + assert(MEM_isLittleEndian()); + assert(!MEM_32bits()); + + for (;;) { + BYTE* olimit; + int stream; + + /* Assert loop preconditions */ +#ifndef NDEBUG + for (stream = 0; stream < 4; ++stream) { + assert(op[stream] <= oend[stream]); + assert(ip[stream] >= ilowest); + } +#endif + /* Compute olimit */ + { + /* Each loop does 5 table lookups for each of the 4 streams. + * Each table lookup consumes up to 11 bits of input, and produces + * up to 2 bytes of output. + */ + /* We can consume up to 7 bytes of input per iteration per stream. + * We also know that each input pointer is >= ip[0]. So we can run + * iters loops before running out of input. + */ + size_t iters = (size_t)(ip[0] - ilowest) / 7; + /* Each iteration can produce up to 10 bytes of output per stream. + * Each output stream may advance at different rates. 
So take the + * minimum number of safe iterations among all the output streams. + */ + for (stream = 0; stream < 4; ++stream) { + size_t const oiters = (size_t)(oend[stream] - op[stream]) / 10; + iters = MIN(iters, oiters); + } + + /* Each iteration produces at least 5 output symbols. So until + * op[3] crosses olimit, we know we haven't executed iters + * iterations yet. This saves us maintaining an iters counter, + * at the expense of computing the remaining # of iterations + * more frequently. + */ + olimit = op[3] + (iters * 5); + + /* Exit the fast decoding loop once we reach the end. */ + if (op[3] == olimit) + break; + + /* Exit the decoding loop if any input pointer has crossed the + * previous one. This indicates corruption, and a precondition + * to our loop is that ip[i] >= ip[0]. + */ + for (stream = 1; stream < 4; ++stream) { + if (ip[stream] < ip[stream - 1]) + goto _out; + } + } - size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, - workSpace, wkspSize); - if (HUF_isError(hSize)) return hSize; - if (hSize >= cSrcSize) return ERROR(srcSize_wrong); - ip += hSize; cSrcSize -= hSize; +#ifndef NDEBUG + for (stream = 1; stream < 4; ++stream) { + assert(ip[stream] >= ip[stream - 1]); + } +#endif - return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); -} +#define HUF_4X2_DECODE_SYMBOL(_stream, _decode3) \ + do { \ + if ((_decode3) || (_stream) != 3) { \ + U64 const index = bits[(_stream)] >> 53; \ + size_t const entry = MEM_readLE32(&dtable[index]); \ + MEM_write16(op[(_stream)], (U16)entry); \ + bits[(_stream)] <<= (entry >> 16) & 0x3F; \ + op[(_stream)] += entry >> 24; \ + } \ + } while (0) + +#define HUF_5X2_RELOAD_STREAM(_stream, _decode3) \ + do { \ + if (_decode3) HUF_4X2_DECODE_SYMBOL(3, 1); \ + { \ + U64 const ctz = ZSTD_countTrailingZeros64(bits[(_stream)]); \ + U64 const nbBits = ctz & 7; \ + U64 const nbBytes = ctz >> 3; \ + ip[(_stream)] -= nbBytes; \ + bits[(_stream)] = MEM_read64(ip[(_stream)]) | 1; \ + bits[(_stream)] <<= nbBits; \ + } \ + } while (0) + +#if defined(__aarch64__) +# define HUF_4X2_4WAY 1 +#else +# define HUF_4X2_4WAY 0 +#endif +#define HUF_4X2_3WAY !HUF_4X2_4WAY + + /* Manually unroll the loop because compilers don't consistently + * unroll the inner loops, which destroys performance. + */ + do { + /* Decode 5 symbols from each of the first 3 or 4 streams. + * In the 3-way case the final stream will be decoded during + * the reload phase to reduce register pressure. + */ + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, HUF_4X2_4WAY); + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, HUF_4X2_4WAY); + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, HUF_4X2_4WAY); + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, HUF_4X2_4WAY); + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, HUF_4X2_4WAY); + + /* In the 3-way case decode one symbol from the final stream. */ + HUF_4X2_DECODE_SYMBOL(3, HUF_4X2_3WAY); + + /* In the 3-way case decode 4 symbols from the final stream & + * reload bitstreams. The final stream is reloaded last, meaning + * that all 5 symbols are decoded from the final stream before + * it is reloaded. 
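+ * (The reload macro relies on the sentinel bit set by MEM_read64(ip) | 1:
+ * it moves up one position per bit consumed, so ZSTD_countTrailingZeros64
+ * equals the bits consumed since the last byte-aligned read; e.g. ctz == 19
+ * rewinds 19 >> 3 == 2 whole bytes and re-shifts 19 & 7 == 3 bits.)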
+ */ + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_5X2_RELOAD_STREAM, HUF_4X2_3WAY); + } while (op[3] < olimit); + } +#undef HUF_4X2_DECODE_SYMBOL +#undef HUF_5X2_RELOAD_STREAM -size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} +_out: -size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); - return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); + /* Save the final values of each of the state variables back to args. */ + ZSTD_memcpy(&args->bits, &bits, sizeof(bits)); + ZSTD_memcpy((void*)(&args->ip), &ip, sizeof(ip)); + ZSTD_memcpy(&args->op, &op, sizeof(op)); } -size_t HUF_decompress4X2_usingDTable( + +static HUF_FAST_BMI2_ATTRS size_t +HUF_decompress4X2_usingDTable_internal_fast( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) + const HUF_DTable* DTable, + HUF_DecompressFastLoopFn loopFn) { + void const* dt = DTable + 1; + const BYTE* const ilowest = (const BYTE*)cSrc; + BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(dst, (ptrdiff_t)dstSize); + HUF_DecompressFastArgs args; + { + size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); + FORWARD_IF_ERROR(ret, "Failed to init asm args"); + if (ret == 0) + return 0; + } + + assert(args.ip[0] >= args.ilowest); + loopFn(&args); + + /* note : op4 already verified within main loop */ + assert(args.ip[0] >= ilowest); + assert(args.ip[1] >= ilowest); + assert(args.ip[2] >= ilowest); + assert(args.ip[3] >= ilowest); + assert(args.op[3] <= oend); + + assert(ilowest == args.ilowest); + assert(ilowest + 6 == args.iend[0]); + (void)ilowest; + + /* finish bitStreams one by one */ + { + size_t const segmentSize = (dstSize+3) / 4; + BYTE* segmentEnd = (BYTE*)dst; + int i; + for (i = 0; i < 4; ++i) { + BIT_DStream_t bit; + if (segmentSize <= (size_t)(oend - segmentEnd)) + segmentEnd += segmentSize; + else + segmentEnd = oend; + FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption"); + args.op[i] += HUF_decodeStreamX2(args.op[i], &bit, segmentEnd, (HUF_DEltX2 const*)dt, HUF_DECODER_FAST_TABLELOG); + if (args.op[i] != segmentEnd) + return ERROR(corruption_detected); + } + } + + /* decoded size */ + return dstSize; +} + +static size_t HUF_decompress4X2_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable, int flags) { - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 1) return ERROR(GENERIC); - return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); + HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X2_usingDTable_internal_default; + HUF_DecompressFastLoopFn loopFn = HUF_decompress4X2_usingDTable_internal_fast_c_loop; + +#if DYNAMIC_BMI2 + if (flags & HUF_flags_bmi2) { + fallbackFn = HUF_decompress4X2_usingDTable_internal_bmi2; +# if ZSTD_ENABLE_ASM_X86_64_BMI2 + if (!(flags & HUF_flags_disableAsm)) { + loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop; + } +# endif + } else { + return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); + } +#endif + +#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__) + if (!(flags & HUF_flags_disableAsm)) { + loopFn = 
HUF_decompress4X2_usingDTable_internal_fast_asm_loop; + } +#endif + + if (HUF_ENABLE_FAST_DECODE && !(flags & HUF_flags_disableFast)) { + size_t const ret = HUF_decompress4X2_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn); + if (ret != 0) + return ret; + } + return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); } -static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, +HUF_DGEN(HUF_decompress1X2_usingDTable_internal) + +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize, int bmi2) + void* workSpace, size_t wkspSize, int flags) { const BYTE* ip = (const BYTE*) cSrc; - size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, - workSpace, wkspSize); + size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, + workSpace, wkspSize, flags); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); + return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, flags); } -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, +static size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) + void* workSpace, size_t wkspSize, int flags) { - return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0); -} - + const BYTE* ip = (const BYTE*) cSrc; -size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} + size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, + workSpace, wkspSize, flags); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; -size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); - return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); + return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); } #endif /* HUF_FORCE_DECOMPRESS_X1 */ @@ -942,66 +1797,28 @@ size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cS /* Universal decompression selectors */ /* ***********************************/ -size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc const dtd = HUF_getDTableDesc(DTable); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)dtd; - assert(dtd.tableType == 0); - return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)dtd; - assert(dtd.tableType == 1); - return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#else - return dtd.tableType ? 
HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : - HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#endif -} - -size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc const dtd = HUF_getDTableDesc(DTable); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)dtd; - assert(dtd.tableType == 0); - return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)dtd; - assert(dtd.tableType == 1); - return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#else - return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : - HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#endif -} - #if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; -static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = +static const algo_time_t algoTime[16 /* Quantization */][2 /* single, double */] = { /* single, double, quad */ - {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */ - {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */ - {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ - {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ - {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ - {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ - {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ - {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ - {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ - {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ - {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ - {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ - {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ - {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */ - {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */ - {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ + {{0,0}, {1,1}}, /* Q==0 : impossible */ + {{0,0}, {1,1}}, /* Q==1 : impossible */ + {{ 150,216}, { 381,119}}, /* Q == 2 : 12-18% */ + {{ 170,205}, { 514,112}}, /* Q == 3 : 18-25% */ + {{ 177,199}, { 539,110}}, /* Q == 4 : 25-32% */ + {{ 197,194}, { 644,107}}, /* Q == 5 : 32-38% */ + {{ 221,192}, { 735,107}}, /* Q == 6 : 38-44% */ + {{ 256,189}, { 881,106}}, /* Q == 7 : 44-50% */ + {{ 359,188}, {1167,109}}, /* Q == 8 : 50-56% */ + {{ 582,187}, {1570,114}}, /* Q == 9 : 56-62% */ + {{ 688,187}, {1712,122}}, /* Q ==10 : 62-69% */ + {{ 825,186}, {1965,136}}, /* Q ==11 : 69-75% */ + {{ 976,185}, {2131,150}}, /* Q ==12 : 75-81% */ + {{1180,186}, {2070,175}}, /* Q ==13 : 81-87% */ + {{1377,185}, {1731,202}}, /* Q ==14 : 87-93% */ + {{1412,185}, {1695,202}}, /* Q ==15 : 93-99% */ }; #endif @@ -1028,188 +1845,92 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize) U32 const D256 = (U32)(dstSize >> 8); U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256); U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256); - DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, to reduce cache eviction */ + DTime1 += DTime1 >> 5; /* small advantage to algorithm using less 
memory, to reduce cache eviction */ return DTime1 < DTime0; } #endif } - -typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); - -size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ -#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) - static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 }; -#endif - - /* validation checks */ - if (dstSize == 0) return ERROR(dstSize_tooSmall); - if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ - if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ - if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ - - { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)algoNb; - assert(algoNb == 0); - return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)algoNb; - assert(algoNb == 1); - return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); -#else - return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); -#endif - } -} - -size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - /* validation checks */ - if (dstSize == 0) return ERROR(dstSize_tooSmall); - if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ - if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ - if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ - - { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)algoNb; - assert(algoNb == 0); - return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)algoNb; - assert(algoNb == 1); - return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize); -#else - return algoNb ? HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : - HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; -#endif - } -} - -size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} - - -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, - size_t dstSize, const void* cSrc, - size_t cSrcSize, void* workSpace, - size_t wkspSize) -{ - /* validation checks */ - if (dstSize == 0) return ERROR(dstSize_tooSmall); - if (cSrcSize == 0) return ERROR(corruption_detected); - - { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)algoNb; - assert(algoNb == 0); - return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)algoNb; - assert(algoNb == 1); - return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); -#else - return algoNb ? 
HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize): - HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); -#endif - } -} - size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) + void* workSpace, size_t wkspSize, int flags) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ - if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ - if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ + if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ + if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)algoNb; assert(algoNb == 0); return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize); + cSrcSize, workSpace, wkspSize, flags); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize); + cSrcSize, workSpace, wkspSize, flags); #else return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize): + cSrcSize, workSpace, wkspSize, flags): HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize); + cSrcSize, workSpace, wkspSize, flags); #endif } } -size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2) +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags) { DTableDesc const dtd = HUF_getDTableDesc(DTable); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dtd; assert(dtd.tableType == 0); - return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dtd; assert(dtd.tableType == 1); - return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #else - return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) : - HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return dtd.tableType ? 
HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) : + HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #endif } #ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags) { const BYTE* ip = (const BYTE*) cSrc; - size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize); + size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); + return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); } #endif -size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2) +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags) { DTableDesc const dtd = HUF_getDTableDesc(DTable); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dtd; assert(dtd.tableType == 0); - return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dtd; assert(dtd.tableType == 1); - return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #else - return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) : - HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) : + HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #endif } -size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); @@ -1219,14 +1940,14 @@ size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t ds #if defined(HUF_FORCE_DECOMPRESS_X1) (void)algoNb; assert(algoNb == 0); - return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); + return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); - return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); + return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); #else - return algoNb ? 
HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) : - HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); + return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags) : + HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); #endif } } diff --git a/lib/decompress/huf_decompress_amd64.S b/lib/decompress/huf_decompress_amd64.S new file mode 100644 index 00000000000..dc1f3d92113 --- /dev/null +++ b/lib/decompress/huf_decompress_amd64.S @@ -0,0 +1,766 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include "../common/portability_macros.h" + +#if defined(__ELF__) && defined(__GNUC__) +/* Stack marking + * ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart + */ +.section .note.GNU-stack,"",%progbits + +#if defined(__aarch64__) +/* Mark that this assembly supports BTI & PAC, because it is empty for aarch64. + * See: https://github.com/facebook/zstd/issues/3841 + * See: https://gcc.godbolt.org/z/sqr5T4ffK + * See: https://lore.kernel.org/linux-arm-kernel/20200429211641.9279-8-broonie@kernel.org/ + * See: https://reviews.llvm.org/D62609 + */ +.pushsection .note.gnu.property, "a" +.p2align 3 +.long 4 /* size of the name - "GNU\0" */ +.long 0x10 /* size of descriptor */ +.long 0x5 /* NT_GNU_PROPERTY_TYPE_0 */ +.asciz "GNU" +.long 0xc0000000 /* pr_type - GNU_PROPERTY_AARCH64_FEATURE_1_AND */ +.long 4 /* pr_datasz - 4 bytes */ +.long 3 /* pr_data - GNU_PROPERTY_AARCH64_FEATURE_1_BTI | GNU_PROPERTY_AARCH64_FEATURE_1_PAC */ +.p2align 3 /* pr_padding - bring everything to 8 byte alignment */ +.popsection +#endif + +#endif + +// There appears to be an unreconcilable syntax difference between Linux and Darwin assemblers. +// Name of a private label (i.e. not exported to symbol table) on Darwin has to start with "L", +// on Linux has to start with ".". There's no way to have a name start with both "." and "L", so +// we have to use a macro. +#if defined(__APPLE__) +#define LOCAL_LABEL(label) L_ ## label +#else +#define LOCAL_LABEL(label) .L_ ## label +#endif + +#if ZSTD_ENABLE_ASM_X86_64_BMI2 + +/* Calling convention: + * + * %rdi (or %rcx on Windows) contains the first argument: HUF_DecompressAsmArgs*. + * %rbp isn't maintained (no frame pointer). + * %rsp contains the stack pointer that grows down. + * No red-zone is assumed, only addresses >= %rsp are used. + * All register contents are preserved. + */ + +ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_fast_asm_loop) +ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_fast_asm_loop) +ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_fast_asm_loop) +ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_fast_asm_loop) +.global HUF_decompress4X1_usingDTable_internal_fast_asm_loop +.global HUF_decompress4X2_usingDTable_internal_fast_asm_loop +.global _HUF_decompress4X1_usingDTable_internal_fast_asm_loop +.global _HUF_decompress4X2_usingDTable_internal_fast_asm_loop +.text + +/* Sets up register mappings for clarity. 
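+ * (The prologue below pushes all fifteen general-purpose registers other than
+ * %rsp, so every live value can stay in a register through the hot loop.)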
+ * op[], bits[], dtable & ip[0] each get their own register. + * ip[1,2,3] & olimit alias var[]. + * %rax is a scratch register. + */ + +#define op0 rsi +#define op1 rbx +#define op2 rcx +#define op3 rdi + +#define ip0 r8 +#define ip1 r9 +#define ip2 r10 +#define ip3 r11 + +#define bits0 rbp +#define bits1 rdx +#define bits2 r12 +#define bits3 r13 +#define dtable r14 +#define olimit r15 + +/* var[] aliases ip[1,2,3] & olimit + * ip[1,2,3] are saved every iteration. + * olimit is only used in compute_olimit. + */ +#define var0 r15 +#define var1 r9 +#define var2 r10 +#define var3 r11 + +/* 32-bit var registers */ +#define vard0 r15d +#define vard1 r9d +#define vard2 r10d +#define vard3 r11d + +/* Calls X(N) for each stream 0, 1, 2, 3. */ +#define FOR_EACH_STREAM(X) \ + X(0); \ + X(1); \ + X(2); \ + X(3) + +/* Calls X(N, idx) for each stream 0, 1, 2, 3. */ +#define FOR_EACH_STREAM_WITH_INDEX(X, idx) \ + X(0, idx); \ + X(1, idx); \ + X(2, idx); \ + X(3, idx) + +/* Define both _HUF_* & HUF_* symbols because MacOS + * C symbols are prefixed with '_' & Linux symbols aren't. + */ +_HUF_decompress4X1_usingDTable_internal_fast_asm_loop: +HUF_decompress4X1_usingDTable_internal_fast_asm_loop: + ZSTD_CET_ENDBRANCH + .cfi_startproc + .cfi_def_cfa_offset 8 + .cfi_offset %rip, -8 + /* Save all registers - even if they are callee saved for simplicity. */ + push %rax + .cfi_def_cfa_offset 16 + .cfi_offset rax, -16 + push %rbx + .cfi_def_cfa_offset 24 + .cfi_offset rbx, -24 + push %rcx + .cfi_def_cfa_offset 32 + .cfi_offset rcx, -32 + push %rdx + .cfi_def_cfa_offset 40 + .cfi_offset rdx, -40 + push %rbp + .cfi_def_cfa_offset 48 + .cfi_offset rbp, -48 + push %rsi + .cfi_def_cfa_offset 56 + .cfi_offset rsi, -56 + push %rdi + .cfi_def_cfa_offset 64 + .cfi_offset rdi, -64 + push %r8 + .cfi_def_cfa_offset 72 + .cfi_offset r8, -72 + push %r9 + .cfi_def_cfa_offset 80 + .cfi_offset r9, -80 + push %r10 + .cfi_def_cfa_offset 88 + .cfi_offset r10, -88 + push %r11 + .cfi_def_cfa_offset 96 + .cfi_offset r11, -96 + push %r12 + .cfi_def_cfa_offset 104 + .cfi_offset r12, -104 + push %r13 + .cfi_def_cfa_offset 112 + .cfi_offset r13, -112 + push %r14 + .cfi_def_cfa_offset 120 + .cfi_offset r14, -120 + push %r15 + .cfi_def_cfa_offset 128 + .cfi_offset r15, -128 + + /* Read HUF_DecompressAsmArgs* args from %rax */ +#if defined(_WIN32) + movq %rcx, %rax +#else + movq %rdi, %rax +#endif + movq 0(%rax), %ip0 + movq 8(%rax), %ip1 + movq 16(%rax), %ip2 + movq 24(%rax), %ip3 + movq 32(%rax), %op0 + movq 40(%rax), %op1 + movq 48(%rax), %op2 + movq 56(%rax), %op3 + movq 64(%rax), %bits0 + movq 72(%rax), %bits1 + movq 80(%rax), %bits2 + movq 88(%rax), %bits3 + movq 96(%rax), %dtable + push %rax /* argument */ + .cfi_def_cfa_offset 136 + push 104(%rax) /* ilowest */ + .cfi_def_cfa_offset 144 + push 112(%rax) /* oend */ + .cfi_def_cfa_offset 152 + push %olimit /* olimit space */ + .cfi_def_cfa_offset 160 + + subq $24, %rsp + .cfi_def_cfa_offset 184 + +LOCAL_LABEL(4X1_compute_olimit): + /* Computes how many iterations we can do safely + * %r15, %rax may be clobbered + * rbx, rdx must be saved + * op3 & ip0 mustn't be clobbered + */ + movq %rbx, 0(%rsp) + movq %rdx, 8(%rsp) + + movq 32(%rsp), %rax /* rax = oend */ + subq %op3, %rax /* rax = oend - op3 */ + + /* r15 = (oend - op3) / 5 */ + movabsq $-3689348814741910323, %rdx + mulq %rdx + movq %rdx, %r15 + shrq $2, %r15 + + movq %ip0, %rax /* rax = ip0 */ + movq 40(%rsp), %rdx /* rdx = ilowest */ + subq %rdx, %rax /* rax = ip0 - ilowest */ + movq %rax, %rbx /* rbx = ip0 - ilowest */ + + /* 
rdx = (ip0 - ilowest) / 7 */ + movabsq $2635249153387078803, %rdx + mulq %rdx + subq %rdx, %rbx + shrq %rbx + addq %rbx, %rdx + shrq $2, %rdx + + /* r15 = min(%rdx, %r15) */ + cmpq %rdx, %r15 + cmova %rdx, %r15 + + /* r15 = r15 * 5 */ + leaq (%r15, %r15, 4), %r15 + + /* olimit = op3 + r15 */ + addq %op3, %olimit + + movq 8(%rsp), %rdx + movq 0(%rsp), %rbx + + /* If (op3 + 20 > olimit) */ + movq %op3, %rax /* rax = op3 */ + cmpq %rax, %olimit /* op3 == olimit */ + je LOCAL_LABEL(4X1_exit) + + /* If (ip1 < ip0) go to exit */ + cmpq %ip0, %ip1 + jb LOCAL_LABEL(4X1_exit) + + /* If (ip2 < ip1) go to exit */ + cmpq %ip1, %ip2 + jb LOCAL_LABEL(4X1_exit) + + /* If (ip3 < ip2) go to exit */ + cmpq %ip2, %ip3 + jb LOCAL_LABEL(4X1_exit) + +/* Reads top 11 bits from bits[n] + * Loads dt[bits[n]] into var[n] + */ +#define GET_NEXT_DELT(n) \ + movq $53, %var##n; \ + shrxq %var##n, %bits##n, %var##n; \ + movzwl (%dtable,%var##n,2),%vard##n + +/* var[n] must contain the DTable entry computed with GET_NEXT_DELT + * Moves var[n] to %rax + * bits[n] <<= var[n] & 63 + * op[n][idx] = %rax >> 8 + * %ah is a way to access bits [8, 16) of %rax + */ +#define DECODE_FROM_DELT(n, idx) \ + movq %var##n, %rax; \ + shlxq %var##n, %bits##n, %bits##n; \ + movb %ah, idx(%op##n) + +/* Assumes GET_NEXT_DELT has been called. + * Calls DECODE_FROM_DELT then GET_NEXT_DELT + */ +#define DECODE_AND_GET_NEXT(n, idx) \ + DECODE_FROM_DELT(n, idx); \ + GET_NEXT_DELT(n) \ + +/* // ctz & nbBytes is stored in bits[n] + * // nbBits is stored in %rax + * ctz = CTZ[bits[n]] + * nbBits = ctz & 7 + * nbBytes = ctz >> 3 + * op[n] += 5 + * ip[n] -= nbBytes + * // Note: x86-64 is little-endian ==> no bswap + * bits[n] = MEM_readST(ip[n]) | 1 + * bits[n] <<= nbBits + */ +#define RELOAD_BITS(n) \ + bsfq %bits##n, %bits##n; \ + movq %bits##n, %rax; \ + andq $7, %rax; \ + shrq $3, %bits##n; \ + leaq 5(%op##n), %op##n; \ + subq %bits##n, %ip##n; \ + movq (%ip##n), %bits##n; \ + orq $1, %bits##n; \ + shlx %rax, %bits##n, %bits##n + + /* Store clobbered variables on the stack */ + movq %olimit, 24(%rsp) + movq %ip1, 0(%rsp) + movq %ip2, 8(%rsp) + movq %ip3, 16(%rsp) + + /* Call GET_NEXT_DELT for each stream */ + FOR_EACH_STREAM(GET_NEXT_DELT) + + .p2align 6 + +LOCAL_LABEL(4X1_loop_body): + /* Decode 5 symbols in each of the 4 streams (20 total) + * Must have called GET_NEXT_DELT for each stream + */ + FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 0) + FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 1) + FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 2) + FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 3) + FOR_EACH_STREAM_WITH_INDEX(DECODE_FROM_DELT, 4) + + /* Load ip[1,2,3] from stack (var[] aliases them) + * ip[] is needed for RELOAD_BITS + * Each will be stored back to the stack after RELOAD + */ + movq 0(%rsp), %ip1 + movq 8(%rsp), %ip2 + movq 16(%rsp), %ip3 + + /* Reload each stream & fetch the next table entry + * to prepare for the next iteration + */ + RELOAD_BITS(0) + GET_NEXT_DELT(0) + + RELOAD_BITS(1) + movq %ip1, 0(%rsp) + GET_NEXT_DELT(1) + + RELOAD_BITS(2) + movq %ip2, 8(%rsp) + GET_NEXT_DELT(2) + + RELOAD_BITS(3) + movq %ip3, 16(%rsp) + GET_NEXT_DELT(3) + + /* If op3 < olimit: continue the loop */ + cmp %op3, 24(%rsp) + ja LOCAL_LABEL(4X1_loop_body) + + /* Reload ip[1,2,3] from stack */ + movq 0(%rsp), %ip1 + movq 8(%rsp), %ip2 + movq 16(%rsp), %ip3 + + /* Re-compute olimit */ + jmp LOCAL_LABEL(4X1_compute_olimit) + +#undef GET_NEXT_DELT +#undef DECODE_FROM_DELT +#undef DECODE +#undef RELOAD_BITS +LOCAL_LABEL(4X1_exit): + addq $24, 
%rsp + .cfi_def_cfa_offset 160 + + /* Restore stack (oend & olimit) */ + pop %rax /* olimit */ + .cfi_def_cfa_offset 152 + pop %rax /* oend */ + .cfi_def_cfa_offset 144 + pop %rax /* ilowest */ + .cfi_def_cfa_offset 136 + pop %rax /* arg */ + .cfi_def_cfa_offset 128 + + /* Save ip / op / bits */ + movq %ip0, 0(%rax) + movq %ip1, 8(%rax) + movq %ip2, 16(%rax) + movq %ip3, 24(%rax) + movq %op0, 32(%rax) + movq %op1, 40(%rax) + movq %op2, 48(%rax) + movq %op3, 56(%rax) + movq %bits0, 64(%rax) + movq %bits1, 72(%rax) + movq %bits2, 80(%rax) + movq %bits3, 88(%rax) + + /* Restore registers */ + pop %r15 + .cfi_restore r15 + .cfi_def_cfa_offset 120 + pop %r14 + .cfi_restore r14 + .cfi_def_cfa_offset 112 + pop %r13 + .cfi_restore r13 + .cfi_def_cfa_offset 104 + pop %r12 + .cfi_restore r12 + .cfi_def_cfa_offset 96 + pop %r11 + .cfi_restore r11 + .cfi_def_cfa_offset 88 + pop %r10 + .cfi_restore r10 + .cfi_def_cfa_offset 80 + pop %r9 + .cfi_restore r9 + .cfi_def_cfa_offset 72 + pop %r8 + .cfi_restore r8 + .cfi_def_cfa_offset 64 + pop %rdi + .cfi_restore rdi + .cfi_def_cfa_offset 56 + pop %rsi + .cfi_restore rsi + .cfi_def_cfa_offset 48 + pop %rbp + .cfi_restore rbp + .cfi_def_cfa_offset 40 + pop %rdx + .cfi_restore rdx + .cfi_def_cfa_offset 32 + pop %rcx + .cfi_restore rcx + .cfi_def_cfa_offset 24 + pop %rbx + .cfi_restore rbx + .cfi_def_cfa_offset 16 + pop %rax + .cfi_restore rax + .cfi_def_cfa_offset 8 + ret + .cfi_endproc + +_HUF_decompress4X2_usingDTable_internal_fast_asm_loop: +HUF_decompress4X2_usingDTable_internal_fast_asm_loop: + ZSTD_CET_ENDBRANCH + .cfi_startproc + .cfi_def_cfa_offset 8 + .cfi_offset %rip, -8 + /* Save all registers - even if they are callee saved for simplicity. */ + push %rax + .cfi_def_cfa_offset 16 + .cfi_offset rax, -16 + push %rbx + .cfi_def_cfa_offset 24 + .cfi_offset rbx, -24 + push %rcx + .cfi_def_cfa_offset 32 + .cfi_offset rcx, -32 + push %rdx + .cfi_def_cfa_offset 40 + .cfi_offset rdx, -40 + push %rbp + .cfi_def_cfa_offset 48 + .cfi_offset rbp, -48 + push %rsi + .cfi_def_cfa_offset 56 + .cfi_offset rsi, -56 + push %rdi + .cfi_def_cfa_offset 64 + .cfi_offset rdi, -64 + push %r8 + .cfi_def_cfa_offset 72 + .cfi_offset r8, -72 + push %r9 + .cfi_def_cfa_offset 80 + .cfi_offset r9, -80 + push %r10 + .cfi_def_cfa_offset 88 + .cfi_offset r10, -88 + push %r11 + .cfi_def_cfa_offset 96 + .cfi_offset r11, -96 + push %r12 + .cfi_def_cfa_offset 104 + .cfi_offset r12, -104 + push %r13 + .cfi_def_cfa_offset 112 + .cfi_offset r13, -112 + push %r14 + .cfi_def_cfa_offset 120 + .cfi_offset r14, -120 + push %r15 + .cfi_def_cfa_offset 128 + .cfi_offset r15, -128 + + /* Read HUF_DecompressAsmArgs* args from %rax */ +#if defined(_WIN32) + movq %rcx, %rax +#else + movq %rdi, %rax +#endif + movq 0(%rax), %ip0 + movq 8(%rax), %ip1 + movq 16(%rax), %ip2 + movq 24(%rax), %ip3 + movq 32(%rax), %op0 + movq 40(%rax), %op1 + movq 48(%rax), %op2 + movq 56(%rax), %op3 + movq 64(%rax), %bits0 + movq 72(%rax), %bits1 + movq 80(%rax), %bits2 + movq 88(%rax), %bits3 + movq 96(%rax), %dtable + push %rax /* argument */ + .cfi_def_cfa_offset 136 + push %rax /* olimit */ + .cfi_def_cfa_offset 144 + push 104(%rax) /* ilowest */ + .cfi_def_cfa_offset 152 + + movq 112(%rax), %rax + push %rax /* oend3 */ + .cfi_def_cfa_offset 160 + + movq %op3, %rax + push %rax /* oend2 */ + .cfi_def_cfa_offset 168 + + movq %op2, %rax + push %rax /* oend1 */ + .cfi_def_cfa_offset 176 + + movq %op1, %rax + push %rax /* oend0 */ + .cfi_def_cfa_offset 184 + + /* Scratch space */ + subq $8, %rsp + .cfi_def_cfa_offset 192 + 
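Both compute_olimit blocks (4X1 above, 4X2 below) replace integer division with multiply-high sequences so the hot loop never executes a div. The constants are the usual round-up reciprocals: 2635249153387078803 is 0x2492492492492493 = ceil(2^64 / 7), and -3689348814741910323 reads, as unsigned, 0xCCCCCCCCCCCCCCCD = ceil(2^67 / 10). A minimal standalone C sketch of the two sequences, assuming a compiler with unsigned __int128 (GCC/Clang); the helper names are illustrative, not from the source:

    #include <assert.h>
    #include <stdint.h>

    /* what mulq leaves in %rdx: the high 64 bits of the product */
    static uint64_t umulhi(uint64_t a, uint64_t b)
    {
        return (uint64_t)(((unsigned __int128)a * b) >> 64);
    }

    static uint64_t div7(uint64_t n)   /* the "/ 7" sequence */
    {
        uint64_t const hi = umulhi(n, 0x2492492492492493ULL); /* movabsq ; mulq */
        uint64_t const t  = (n - hi) >> 1;                    /* subq ; shrq    */
        return (hi + t) >> 2;                                 /* addq ; shrq $2 */
    }

    static uint64_t div10(uint64_t n)  /* the 4X2 "/ 10" sequence */
    {
        return umulhi(n, 0xCCCCCCCCCCCCCCCDULL) >> 3;         /* mulq ; shrq $3 */
    }

    int main(void)
    {
        uint64_t n;
        for (n = 0; n < (1u << 22); n++) {
            assert(div7(n)  == n / 7);
            assert(div10(n) == n / 10);
        }
        return 0;
    }

The divisors bound the per-iteration worst case: each loop turn reads at most 7 input bytes per stream, and the 4X2 loop writes at most 10 output bytes per stream, so olimit caps how many iterations can run without bounds checks.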
+LOCAL_LABEL(4X2_compute_olimit): + /* Computes how many iterations we can do safely + * %r15, %rax may be clobbered + * rdx must be saved + * op[1,2,3,4] & ip0 mustn't be clobbered + */ + movq %rdx, 0(%rsp) + + /* We can consume up to 7 input bytes each iteration. */ + movq %ip0, %rax /* rax = ip0 */ + movq 40(%rsp), %rdx /* rdx = ilowest */ + subq %rdx, %rax /* rax = ip0 - ilowest */ + movq %rax, %r15 /* r15 = ip0 - ilowest */ + + /* rdx = rax / 7 */ + movabsq $2635249153387078803, %rdx + mulq %rdx + subq %rdx, %r15 + shrq %r15 + addq %r15, %rdx + shrq $2, %rdx + + /* r15 = (ip0 - ilowest) / 7 */ + movq %rdx, %r15 + + /* r15 = min(r15, min(oend0 - op0, oend1 - op1, oend2 - op2, oend3 - op3) / 10) */ + movq 8(%rsp), %rax /* rax = oend0 */ + subq %op0, %rax /* rax = oend0 - op0 */ + movq 16(%rsp), %rdx /* rdx = oend1 */ + subq %op1, %rdx /* rdx = oend1 - op1 */ + + cmpq %rax, %rdx + cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ + + movq 24(%rsp), %rax /* rax = oend2 */ + subq %op2, %rax /* rax = oend2 - op2 */ + + cmpq %rax, %rdx + cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ + + movq 32(%rsp), %rax /* rax = oend3 */ + subq %op3, %rax /* rax = oend3 - op3 */ + + cmpq %rax, %rdx + cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ + + movabsq $-3689348814741910323, %rax + mulq %rdx + shrq $3, %rdx /* rdx = rdx / 10 */ + + /* r15 = min(%rdx, %r15) */ + cmpq %rdx, %r15 + cmova %rdx, %r15 + + /* olimit = op3 + 5 * r15 */ + movq %r15, %rax + leaq (%op3, %rax, 4), %olimit + addq %rax, %olimit + + movq 0(%rsp), %rdx + + /* If (op3 + 10 > olimit) */ + movq %op3, %rax /* rax = op3 */ + cmpq %rax, %olimit /* op3 == olimit */ + je LOCAL_LABEL(4X2_exit) + + /* If (ip1 < ip0) go to exit */ + cmpq %ip0, %ip1 + jb LOCAL_LABEL(4X2_exit) + + /* If (ip2 < ip1) go to exit */ + cmpq %ip1, %ip2 + jb LOCAL_LABEL(4X2_exit) + + /* If (ip3 < ip2) go to exit */ + cmpq %ip2, %ip3 + jb LOCAL_LABEL(4X2_exit) + +#define DECODE(n, idx) \ + movq %bits##n, %rax; \ + shrq $53, %rax; \ + movzwl 0(%dtable,%rax,4),%r8d; \ + movzbl 2(%dtable,%rax,4),%r15d; \ + movzbl 3(%dtable,%rax,4),%eax; \ + movw %r8w, (%op##n); \ + shlxq %r15, %bits##n, %bits##n; \ + addq %rax, %op##n + +#define RELOAD_BITS(n) \ + bsfq %bits##n, %bits##n; \ + movq %bits##n, %rax; \ + shrq $3, %bits##n; \ + andq $7, %rax; \ + subq %bits##n, %ip##n; \ + movq (%ip##n), %bits##n; \ + orq $1, %bits##n; \ + shlxq %rax, %bits##n, %bits##n + + + movq %olimit, 48(%rsp) + + .p2align 6 + +LOCAL_LABEL(4X2_loop_body): + /* We clobber r8, so store it on the stack */ + movq %r8, 0(%rsp) + + /* Decode 5 symbols from each of the 4 streams (20 symbols total). 
*/ + FOR_EACH_STREAM_WITH_INDEX(DECODE, 0) + FOR_EACH_STREAM_WITH_INDEX(DECODE, 1) + FOR_EACH_STREAM_WITH_INDEX(DECODE, 2) + FOR_EACH_STREAM_WITH_INDEX(DECODE, 3) + FOR_EACH_STREAM_WITH_INDEX(DECODE, 4) + + /* Reload r8 */ + movq 0(%rsp), %r8 + + FOR_EACH_STREAM(RELOAD_BITS) + + cmp %op3, 48(%rsp) + ja LOCAL_LABEL(4X2_loop_body) + jmp LOCAL_LABEL(4X2_compute_olimit) + +#undef DECODE +#undef RELOAD_BITS +LOCAL_LABEL(4X2_exit): + addq $8, %rsp + .cfi_def_cfa_offset 184 + /* Restore stack (oend & olimit) */ + pop %rax /* oend0 */ + .cfi_def_cfa_offset 176 + pop %rax /* oend1 */ + .cfi_def_cfa_offset 168 + pop %rax /* oend2 */ + .cfi_def_cfa_offset 160 + pop %rax /* oend3 */ + .cfi_def_cfa_offset 152 + pop %rax /* ilowest */ + .cfi_def_cfa_offset 144 + pop %rax /* olimit */ + .cfi_def_cfa_offset 136 + pop %rax /* arg */ + .cfi_def_cfa_offset 128 + + /* Save ip / op / bits */ + movq %ip0, 0(%rax) + movq %ip1, 8(%rax) + movq %ip2, 16(%rax) + movq %ip3, 24(%rax) + movq %op0, 32(%rax) + movq %op1, 40(%rax) + movq %op2, 48(%rax) + movq %op3, 56(%rax) + movq %bits0, 64(%rax) + movq %bits1, 72(%rax) + movq %bits2, 80(%rax) + movq %bits3, 88(%rax) + + /* Restore registers */ + pop %r15 + .cfi_restore r15 + .cfi_def_cfa_offset 120 + pop %r14 + .cfi_restore r14 + .cfi_def_cfa_offset 112 + pop %r13 + .cfi_restore r13 + .cfi_def_cfa_offset 104 + pop %r12 + .cfi_restore r12 + .cfi_def_cfa_offset 96 + pop %r11 + .cfi_restore r11 + .cfi_def_cfa_offset 88 + pop %r10 + .cfi_restore r10 + .cfi_def_cfa_offset 80 + pop %r9 + .cfi_restore r9 + .cfi_def_cfa_offset 72 + pop %r8 + .cfi_restore r8 + .cfi_def_cfa_offset 64 + pop %rdi + .cfi_restore rdi + .cfi_def_cfa_offset 56 + pop %rsi + .cfi_restore rsi + .cfi_def_cfa_offset 48 + pop %rbp + .cfi_restore rbp + .cfi_def_cfa_offset 40 + pop %rdx + .cfi_restore rdx + .cfi_def_cfa_offset 32 + pop %rcx + .cfi_restore rcx + .cfi_def_cfa_offset 24 + pop %rbx + .cfi_restore rbx + .cfi_def_cfa_offset 16 + pop %rax + .cfi_restore rax + .cfi_def_cfa_offset 8 + ret + .cfi_endproc + +#endif diff --git a/lib/decompress/zstd_ddict.c b/lib/decompress/zstd_ddict.c index 0af3d23bfe2..309ec0d0364 100644 --- a/lib/decompress/zstd_ddict.c +++ b/lib/decompress/zstd_ddict.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
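For reference, one 4X2 DECODE step rendered in C. The struct mirrors the 4-byte double-symbol table entry implied by the 0/2/3 byte offsets the macro loads from (in the library this corresponds to HUF's X2 DElt layout); the names are illustrative, and x86-64 little-endian byte order is assumed:

    #include <stdint.h>
    #include <string.h>

    typedef struct {
        uint16_t sequence; /* movzwl 0(%dtable,%rax,4): 1 or 2 decoded bytes  */
        uint8_t  nbBits;   /* movzbl 2(...): bits consumed from the stream    */
        uint8_t  length;   /* movzbl 3(...): bytes actually produced (1 or 2) */
    } DEltX2;

    static uint8_t* decode_one(uint8_t* op, uint64_t* bits, const DEltX2* dtable)
    {
        DEltX2 const e = dtable[*bits >> 53]; /* top 11 bits index the table  */
        memcpy(op, &e.sequence, 2);           /* movw always stores 2 bytes   */
        *bits <<= e.nbBits;                   /* shlxq: consume the code      */
        return op + e.length;                 /* but op advances by length    */
    }

Storing two bytes unconditionally while advancing by only 1 or 2 is what makes the olimit bound of 10 output bytes per stream per iteration (5 symbols of up to 2 bytes) necessary.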
* * This source code is licensed under both the BSD-style license (found in the @@ -14,18 +14,18 @@ /*-******************************************************* * Dependencies *********************************************************/ -#include /* memcpy, memmove, memset */ -#include "cpu.h" /* bmi2 */ -#include "mem.h" /* low level memory routines */ +#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customFree */ +#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */ +#include "../common/cpu.h" /* bmi2 */ +#include "../common/mem.h" /* low level memory routines */ #define FSE_STATIC_LINKING_ONLY -#include "fse.h" -#define HUF_STATIC_LINKING_ONLY -#include "huf.h" +#include "../common/fse.h" +#include "../common/huf.h" #include "zstd_decompress_internal.h" #include "zstd_ddict.h" #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) -# include "zstd_legacy.h" +# include "../legacy/zstd_legacy.h" #endif @@ -65,6 +65,10 @@ void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) dctx->virtualStart = ddict->dictContent; dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize; dctx->previousDstEnd = dctx->dictEnd; +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + dctx->dictContentBeginForFuzzing = dctx->prefixStart; + dctx->dictContentEndForFuzzing = dctx->previousDstEnd; +#endif if (ddict->entropyPresent) { dctx->litEntropy = 1; dctx->fseEntropy = 1; @@ -107,7 +111,7 @@ ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict, /* load entropy tables */ RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy( &ddict->entropy, ddict->dictContent, ddict->dictSize)), - dictionary_corrupted); + dictionary_corrupted, ""); ddict->entropyPresent = 1; return 0; } @@ -123,17 +127,17 @@ static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict, ddict->dictContent = dict; if (!dict) dictSize = 0; } else { - void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem); + void* const internalBuffer = ZSTD_customMalloc(dictSize, ddict->cMem); ddict->dictBuffer = internalBuffer; ddict->dictContent = internalBuffer; if (!internalBuffer) return ERROR(memory_allocation); - memcpy(internalBuffer, dict, dictSize); + ZSTD_memcpy(internalBuffer, dict, dictSize); } ddict->dictSize = dictSize; - ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ + ddict->entropy.hufTable[0] = (HUF_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001); /* cover both little and big endian */ /* parse dictionary content */ - FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) ); + FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , ""); return 0; } @@ -143,9 +147,9 @@ ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_customMem customMem) { - if (!customMem.customAlloc ^ !customMem.customFree) return NULL; + if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; - { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem); + { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_customMalloc(sizeof(ZSTD_DDict), customMem); if (ddict == NULL) return NULL; ddict->cMem = customMem; { size_t const initResult = ZSTD_initDDict_internal(ddict, @@ -194,7 +198,7 @@ const ZSTD_DDict* ZSTD_initStaticDDict( if ((size_t)sBuffer & 7) return NULL; /* 8-aligned */ if (sBufferSize < neededSpace) return NULL; if (dictLoadMethod == ZSTD_dlm_byCopy) { - memcpy(ddict+1, dict, dictSize); /* local copy */ + ZSTD_memcpy(ddict+1, dict, 
dictSize); /* local copy */ dict = ddict+1; } if (ZSTD_isError( ZSTD_initDDict_internal(ddict, @@ -209,8 +213,8 @@ size_t ZSTD_freeDDict(ZSTD_DDict* ddict) { if (ddict==NULL) return 0; /* support free on NULL */ { ZSTD_customMem const cMem = ddict->cMem; - ZSTD_free(ddict->dictBuffer, cMem); - ZSTD_free(ddict, cMem); + ZSTD_customFree(ddict->dictBuffer, cMem); + ZSTD_customFree(ddict, cMem); return 0; } } @@ -236,5 +240,5 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict) unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict) { if (ddict==NULL) return 0; - return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize); + return ddict->dictID; } diff --git a/lib/decompress/zstd_ddict.h b/lib/decompress/zstd_ddict.h index 0479d11bb03..c4ca8877a07 100644 --- a/lib/decompress/zstd_ddict.h +++ b/lib/decompress/zstd_ddict.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -15,8 +15,8 @@ /*-******************************************************* * Dependencies *********************************************************/ -#include /* size_t */ -#include "zstd.h" /* ZSTD_DDict, and several public functions */ +#include "../common/zstd_deps.h" /* size_t */ +#include "../zstd.h" /* ZSTD_DDict, and several public functions */ /*-******************************************************* diff --git a/lib/decompress/zstd_decompress.c b/lib/decompress/zstd_decompress.c index e42872ad96a..9eb98327ef3 100644 --- a/lib/decompress/zstd_decompress.c +++ b/lib/decompress/zstd_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -55,23 +55,166 @@ /*-******************************************************* * Dependencies *********************************************************/ -#include /* memcpy, memmove, memset */ -#include "cpu.h" /* bmi2 */ -#include "mem.h" /* low level memory routines */ +#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */ +#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */ +#include "../common/error_private.h" +#include "../common/zstd_internal.h" /* blockProperties_t */ +#include "../common/mem.h" /* low level memory routines */ +#include "../common/bits.h" /* ZSTD_highbit32 */ #define FSE_STATIC_LINKING_ONLY -#include "fse.h" -#define HUF_STATIC_LINKING_ONLY -#include "huf.h" -#include "zstd_internal.h" /* blockProperties_t */ +#include "../common/fse.h" +#include "../common/huf.h" +#include "../common/xxhash.h" /* XXH64_reset, XXH64_update, XXH64_digest, XXH64 */ #include "zstd_decompress_internal.h" /* ZSTD_DCtx */ #include "zstd_ddict.h" /* ZSTD_DDictDictContent */ #include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */ #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) -# include "zstd_legacy.h" +# include "../legacy/zstd_legacy.h" #endif + +/************************************* + * Multiple DDicts Hashset internals * + *************************************/ + +#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4 +#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float. + * Currently, that means a 0.75 load factor. 
+ * So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded + * the load factor of the ddict hash set. + */ + +#define DDICT_HASHSET_TABLE_BASE_SIZE 64 +#define DDICT_HASHSET_RESIZE_FACTOR 2 + +/* Hash function to determine starting position of dict insertion within the table + * Returns an index between [0, hashSet->ddictPtrTableSize] + */ +static size_t ZSTD_DDictHashSet_getIndex(const ZSTD_DDictHashSet* hashSet, U32 dictID) { + const U64 hash = XXH64(&dictID, sizeof(U32), 0); + /* DDict ptr table size is a multiple of 2, use size - 1 as mask to get index within [0, hashSet->ddictPtrTableSize) */ + return hash & (hashSet->ddictPtrTableSize - 1); +} + +/* Adds DDict to a hashset without resizing it. + * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set. + * Returns 0 if successful, or a zstd error code if something went wrong. + */ +static size_t ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict) { + const U32 dictID = ZSTD_getDictID_fromDDict(ddict); + size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID); + const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1; + RETURN_ERROR_IF(hashSet->ddictPtrCount == hashSet->ddictPtrTableSize, GENERIC, "Hash set is full!"); + DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx); + while (hashSet->ddictPtrTable[idx] != NULL) { + /* Replace existing ddict if inserting ddict with same dictID */ + if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) { + DEBUGLOG(4, "DictID already exists, replacing rather than adding"); + hashSet->ddictPtrTable[idx] = ddict; + return 0; + } + idx &= idxRangeMask; + idx++; + } + DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx); + hashSet->ddictPtrTable[idx] = ddict; + hashSet->ddictPtrCount++; + return 0; +} + +/* Expands hash table by factor of DDICT_HASHSET_RESIZE_FACTOR and + * rehashes all values, allocates new table, frees old table. + * Returns 0 on success, otherwise a zstd error code. + */ +static size_t ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) { + size_t newTableSize = hashSet->ddictPtrTableSize * DDICT_HASHSET_RESIZE_FACTOR; + const ZSTD_DDict** newTable = (const ZSTD_DDict**)ZSTD_customCalloc(sizeof(ZSTD_DDict*) * newTableSize, customMem); + const ZSTD_DDict** oldTable = hashSet->ddictPtrTable; + size_t oldTableSize = hashSet->ddictPtrTableSize; + size_t i; + + DEBUGLOG(4, "Expanding DDict hash table! Old size: %zu new size: %zu", oldTableSize, newTableSize); + RETURN_ERROR_IF(!newTable, memory_allocation, "Expanded hashset allocation failed!"); + hashSet->ddictPtrTable = newTable; + hashSet->ddictPtrTableSize = newTableSize; + hashSet->ddictPtrCount = 0; + for (i = 0; i < oldTableSize; ++i) { + if (oldTable[i] != NULL) { + FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]), ""); + } + } + ZSTD_customFree((void*)oldTable, customMem); + DEBUGLOG(4, "Finished re-hash"); + return 0; +} + +/* Fetches a DDict with the given dictID + * Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL. 
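A compact sketch of the open-addressing scheme these functions implement: a power-of-two table, linear probing, and dictID 0 standing in for an empty slot. The helper names are illustrative; note the sketch wraps the probe by incrementing first and masking second, the order that keeps the index inside the table:

    #include <stddef.h>

    static size_t probe_next(size_t idx, size_t idxRangeMask)
    {
        /* increment, then mask: the probe wraps to slot 0 rather than
         * stepping one past the last slot */
        return (idx + 1) & idxRangeMask;
    }

    /* 3/4 load factor without floats: expand once count/size >= 3/4 */
    static int should_expand(size_t count, size_t size)
    {
        return count * 4 >= size * 3;
    }

One observation on the load-factor test as committed: C precedence groups count * COUNT_MULT / size * SIZE_MULT as ((count * 4) / size) * 3, which becomes non-zero once count * 4 >= size. In effect the table grows from a 1/4 load factor rather than 3/4: earlier than the comment suggests, but conservative, so correctness is unaffected.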
+ */ +static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, U32 dictID) { + size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID); + const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1; + DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx); + for (;;) { + size_t currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]); + if (currDictID == dictID || currDictID == 0) { + /* currDictID == 0 implies a NULL ddict entry */ + break; + } else { + idx &= idxRangeMask; /* Goes to start of table when we reach the end */ + idx++; + } + } + DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx); + return hashSet->ddictPtrTable[idx]; +} + +/* Allocates space for and returns a ddict hash set + * The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with. + * Returns NULL if allocation failed. + */ +static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) { + ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem); + DEBUGLOG(4, "Allocating new hash set"); + if (!ret) + return NULL; + ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem); + if (!ret->ddictPtrTable) { + ZSTD_customFree(ret, customMem); + return NULL; + } + ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE; + ret->ddictPtrCount = 0; + return ret; +} + +/* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself. + * Note: The ZSTD_DDict* within the table are NOT freed. + */ +static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) { + DEBUGLOG(4, "Freeing ddict hash set"); + if (hashSet && hashSet->ddictPtrTable) { + ZSTD_customFree((void*)hashSet->ddictPtrTable, customMem); + } + if (hashSet) { + ZSTD_customFree(hashSet, customMem); + } +} + +/* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set. + * Returns 0 on success, or a ZSTD error. + */ +static size_t ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict, ZSTD_customMem customMem) { + DEBUGLOG(4, "Adding dict ID: %u to hashset with - Count: %zu Tablesize: %zu", ZSTD_getDictID_fromDDict(ddict), hashSet->ddictPtrCount, hashSet->ddictPtrTableSize); + if (hashSet->ddictPtrCount * DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT / hashSet->ddictPtrTableSize * DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT != 0) { + FORWARD_IF_ERROR(ZSTD_DDictHashSet_expand(hashSet, customMem), ""); + } + FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict), ""); + return 0; +} + /*-************************************************************* * Context management ***************************************************************/ @@ -88,20 +231,27 @@ size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); } static size_t ZSTD_startingInputLength(ZSTD_format_e format) { - size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ? 
- ZSTD_FRAMEHEADERSIZE_PREFIX - ZSTD_FRAMEIDSIZE : - ZSTD_FRAMEHEADERSIZE_PREFIX; - ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE); + size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format); /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */ assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) ); return startingInputLength; } +static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx) +{ + assert(dctx->streamStage == zdss_init); + dctx->format = ZSTD_f_zstd1; + dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; + dctx->outBufferMode = ZSTD_bm_buffered; + dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum; + dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict; + dctx->disableHufAsm = 0; + dctx->maxBlockSizeParam = 0; +} + static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx) { - dctx->format = ZSTD_f_zstd1; /* ZSTD_decompressBegin() invokes ZSTD_startingInputLength() with argument dctx->format */ dctx->staticSize = 0; - dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; dctx->ddict = NULL; dctx->ddictLocal = NULL; dctx->dictEnd = NULL; @@ -111,10 +261,21 @@ static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx) dctx->inBuffSize = 0; dctx->outBuffSize = 0; dctx->streamStage = zdss_init; +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) dctx->legacyContext = NULL; dctx->previousLegacyVersion = 0; +#endif dctx->noForwardProgress = 0; - dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); + dctx->oversizedDuration = 0; + dctx->isFrameDecompression = 1; +#if DYNAMIC_BMI2 + dctx->bmi2 = ZSTD_cpuSupportsBmi2(); +#endif + dctx->ddictSet = NULL; + ZSTD_DCtx_resetParameters(dctx); +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + dctx->dictContentEndForFuzzing = NULL; +#endif } ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize) @@ -130,11 +291,10 @@ ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize) return dctx; } -ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) -{ - if (!customMem.customAlloc ^ !customMem.customFree) return NULL; +static ZSTD_DCtx* ZSTD_createDCtx_internal(ZSTD_customMem customMem) { + if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; - { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(*dctx), customMem); + { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem); if (!dctx) return NULL; dctx->customMem = customMem; ZSTD_initDCtx_internal(dctx); @@ -142,10 +302,15 @@ ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) } } +ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) +{ + return ZSTD_createDCtx_internal(customMem); +} + ZSTD_DCtx* ZSTD_createDCtx(void) { DEBUGLOG(3, "ZSTD_createDCtx"); - return ZSTD_createDCtx_advanced(ZSTD_defaultCMem); + return ZSTD_createDCtx_internal(ZSTD_defaultCMem); } static void ZSTD_clearDict(ZSTD_DCtx* dctx) @@ -162,13 +327,17 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx) RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx"); { ZSTD_customMem const cMem = dctx->customMem; ZSTD_clearDict(dctx); - ZSTD_free(dctx->inBuff, cMem); + ZSTD_customFree(dctx->inBuff, cMem); dctx->inBuff = NULL; #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) if (dctx->legacyContext) ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion); #endif - ZSTD_free(dctx, cMem); + if (dctx->ddictSet) { + ZSTD_freeDDictHashSet(dctx->ddictSet, cMem); + dctx->ddictSet = NULL; + } + ZSTD_customFree(dctx, cMem); return 0; } } @@ -177,7 
+346,30 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx) void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx) { size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx); - memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */ + ZSTD_memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */ +} + +/* Given a dctx with a digested frame params, re-selects the correct ZSTD_DDict based on + * the requested dict ID from the frame. If there exists a reference to the correct ZSTD_DDict, then + * accordingly sets the ddict to be used to decompress the frame. + * + * If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is. + * + * ZSTD_d_refMultipleDDicts must be enabled for this function to be called. + */ +static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx* dctx) { + assert(dctx->refMultipleDDicts && dctx->ddictSet); + DEBUGLOG(4, "Adjusting DDict based on requested dict ID from frame"); + if (dctx->ddict) { + const ZSTD_DDict* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID); + if (frameDDict) { + DEBUGLOG(4, "DDict found!"); + ZSTD_clearDict(dctx); + dctx->dictID = dctx->fParams.dictID; + dctx->ddict = frameDDict; + dctx->dictUses = ZSTD_use_indefinitely; + } + } } @@ -203,6 +395,19 @@ unsigned ZSTD_isFrame(const void* buffer, size_t size) return 0; } +/*! ZSTD_isSkippableFrame() : + * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame. + * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. + */ +unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size) +{ + if (size < ZSTD_FRAMEIDSIZE) return 0; + { U32 const magic = MEM_readLE32(buffer); + if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1; + } + return 0; +} + /** ZSTD_frameHeaderSize_internal() : * srcSize must be large enough to reach header size fields. * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless. 
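ZSTD_DCtx_selectFrameDDict() above is the consumer of the hash set: when a frame header announces a dictID, the matching DDict is pulled from the set. A hedged usage sketch of the public path that exercises it (ZSTD_d_refMultipleDDicts is an experimental parameter, hence ZSTD_STATIC_LINKING_ONLY; error handling elided, function name illustrative):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    size_t decompress_with_dict_set(ZSTD_DCtx* dctx,
                                    const ZSTD_DDict* ddicts[], size_t nbDDicts,
                                    void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
    {
        size_t i;
        /* opt in: each refDDict call now adds to a set instead of replacing */
        ZSTD_DCtx_setParameter(dctx, ZSTD_d_refMultipleDDicts,
                               ZSTD_rmd_refMultipleDDicts);
        for (i = 0; i < nbDDicts; i++)
            ZSTD_DCtx_refDDict(dctx, ddicts[i]);
        /* frame header decoding selects the DDict whose dictID matches */
        return ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
    }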
@@ -211,7 +416,7 @@ unsigned ZSTD_isFrame(const void* buffer, size_t size) static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format) { size_t const minInputSize = ZSTD_startingInputLength(format); - RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong); + RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong, ""); { BYTE const fhd = ((const BYTE*)src)[minInputSize-1]; U32 const dictID= fhd & 3; @@ -238,28 +443,54 @@ size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize) * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless * @return : 0, `zfhPtr` is correctly filled, * >0, `srcSize` is too small, value is wanted `srcSize` amount, - * or an error code, which can be tested using ZSTD_isError() */ -size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format) +** or an error code, which can be tested using ZSTD_isError() */ +size_t ZSTD_getFrameHeader_advanced(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format) { const BYTE* ip = (const BYTE*)src; size_t const minInputSize = ZSTD_startingInputLength(format); - memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzer do not understand that zfhPtr is only going to be read only if return value is zero, since they are 2 different signals */ - if (srcSize < minInputSize) return minInputSize; - RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter"); + DEBUGLOG(5, "ZSTD_getFrameHeader_advanced: minInputSize = %zu, srcSize = %zu", minInputSize, srcSize); + + if (srcSize > 0) { + /* note : technically could be considered an assert(), since it's an invalid entry */ + RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter : src==NULL, but srcSize>0"); + } + if (srcSize < minInputSize) { + if (srcSize > 0 && format != ZSTD_f_zstd1_magicless) { + /* when receiving less than @minInputSize bytes, + * control these bytes at least correspond to a supported magic number + * in order to error out early if they don't. 
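The hunk below validates a truncated header by prefilling a 4-byte scratch buffer with the expected magic number, overlaying however many bytes were actually received, and comparing: any mismatch within the known prefix fails, while the missing bytes trivially compare equal. The same trick as a standalone sketch (little-endian host assumed for brevity; names are illustrative):

    #include <stdint.h>
    #include <string.h>

    static int prefix_may_be_magic(const void* src, size_t srcSize, uint32_t magicLE)
    {
        unsigned char hbuf[4];
        size_t const toCopy = srcSize < 4 ? srcSize : 4;
        memcpy(hbuf, &magicLE, 4); /* start from the expected magic      */
        memcpy(hbuf, src, toCopy); /* overlay the bytes we actually have */
        return 0 == memcmp(hbuf, &magicLE, 4);
    }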
+ **/ + size_t const toCopy = MIN(4, srcSize); + unsigned char hbuf[4]; MEM_writeLE32(hbuf, ZSTD_MAGICNUMBER); + assert(src != NULL); + ZSTD_memcpy(hbuf, src, toCopy); + if ( MEM_readLE32(hbuf) != ZSTD_MAGICNUMBER ) { + /* not a zstd frame : let's check if it's a skippable frame */ + MEM_writeLE32(hbuf, ZSTD_MAGIC_SKIPPABLE_START); + ZSTD_memcpy(hbuf, src, toCopy); + if ((MEM_readLE32(hbuf) & ZSTD_MAGIC_SKIPPABLE_MASK) != ZSTD_MAGIC_SKIPPABLE_START) { + RETURN_ERROR(prefix_unknown, + "first bytes don't correspond to any supported magic number"); + } } } + return minInputSize; + } + ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzers may not understand that zfhPtr will be read only if return value is zero, since they are 2 different signals */ if ( (format != ZSTD_f_zstd1_magicless) && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) { if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ if (srcSize < ZSTD_SKIPPABLEHEADERSIZE) return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */ - memset(zfhPtr, 0, sizeof(*zfhPtr)); - zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE); + ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); zfhPtr->frameType = ZSTD_skippableFrame; + zfhPtr->dictID = MEM_readLE32(src) - ZSTD_MAGIC_SKIPPABLE_START; + zfhPtr->headerSize = ZSTD_SKIPPABLEHEADERSIZE; + zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE); return 0; } - RETURN_ERROR(prefix_unknown); + RETURN_ERROR(prefix_unknown, ""); } /* ensure there is enough `srcSize` to fully read/decode frame header */ @@ -283,13 +514,15 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s if (!singleSegment) { BYTE const wlByte = ip[pos++]; U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN; - RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge); + RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge, ""); windowSize = (1ULL << windowLog); windowSize += (windowSize >> 3) * (wlByte&7); } switch(dictIDSizeCode) { - default: assert(0); /* impossible */ + default: + assert(0); /* impossible */ + ZSTD_FALLTHROUGH; case 0 : break; case 1 : dictID = ip[pos]; pos++; break; case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break; @@ -297,7 +530,9 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s } switch(fcsID) { - default: assert(0); /* impossible */ + default: + assert(0); /* impossible */ + ZSTD_FALLTHROUGH; case 0 : if (singleSegment) frameContentSize = ip[pos]; break; case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break; case 2 : frameContentSize = MEM_readLE32(ip+pos); break; @@ -321,12 +556,11 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s * @return : 0, `zfhPtr` is correctly filled, * >0, `srcSize` is too small, value is wanted `srcSize` amount, * or an error code, which can be tested using ZSTD_isError() */ -size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize) +size_t ZSTD_getFrameHeader(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize) { return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1); } - /** ZSTD_getFrameContentSize() : * compatible with legacy mode * @return : decompressed size of the single frame pointed to be `src` if known, otherwise @@ -340,7 +574,7 @@ unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize) return ret == 0 ? 
ZSTD_CONTENTSIZE_UNKNOWN : ret; } #endif - { ZSTD_frameHeader zfh; + { ZSTD_FrameHeader zfh; if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) return ZSTD_CONTENTSIZE_ERROR; if (zfh.frameType == ZSTD_skippableFrame) { @@ -355,35 +589,67 @@ static size_t readSkippableFrameSize(void const* src, size_t srcSize) size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE; U32 sizeU32; - RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong); + RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, ""); sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE); RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32, - frameParameter_unsupported); - { - size_t const skippableSize = skippableHeaderSize + sizeU32; - RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong); + frameParameter_unsupported, ""); + { size_t const skippableSize = skippableHeaderSize + sizeU32; + RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, ""); return skippableSize; } } +/*! ZSTD_readSkippableFrame() : + * Retrieves content of a skippable frame, and writes it to dst buffer. + * + * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written, + * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested + * in the magicVariant. + * + * Returns an error if destination buffer is not large enough, or if this is not a valid skippable frame. + * + * @return : number of bytes written or a ZSTD error. + */ +size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, + unsigned* magicVariant, /* optional, can be NULL */ + const void* src, size_t srcSize) +{ + RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, ""); + + { U32 const magicNumber = MEM_readLE32(src); + size_t skippableFrameSize = readSkippableFrameSize(src, srcSize); + size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE; + + /* check input validity */ + RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, ""); + RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, ""); + RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, ""); + + /* deliver payload */ + if (skippableContentSize > 0 && dst != NULL) + ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize); + if (magicVariant != NULL) + *magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START; + return skippableContentSize; + } +} + /** ZSTD_findDecompressedSize() : - * compatible with legacy mode * `srcSize` must be the exact length of some number of ZSTD compressed and/or * skippable frames - * @return : decompressed size of the frames contained */ + * note: compatible with legacy mode + * @return : decompressed size of the frames contained */ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize) { unsigned long long totalDstSize = 0; - while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) { + while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) { U32 const magicNumber = MEM_readLE32(src); if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { size_t const skippableSize = readSkippableFrameSize(src, srcSize); - if (ZSTD_isError(skippableSize)) { - return ZSTD_CONTENTSIZE_ERROR; - } + if (ZSTD_isError(skippableSize)) return ZSTD_CONTENTSIZE_ERROR; assert(skippableSize <= srcSize); src = (const BYTE *)src + skippableSize; @@ -391,17 +657,17 @@ unsigned long long 
ZSTD_findDecompressedSize(const void* src, size_t srcSize) continue; } - { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); - if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret; + { unsigned long long const fcs = ZSTD_getFrameContentSize(src, srcSize); + if (fcs >= ZSTD_CONTENTSIZE_ERROR) return fcs; - /* check for overflow */ - if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR; - totalDstSize += ret; + if (totalDstSize + fcs < totalDstSize) + return ZSTD_CONTENTSIZE_ERROR; /* check for overflow */ + totalDstSize += fcs; } + /* skip to next frame */ { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); - if (ZSTD_isError(frameSrcSize)) { - return ZSTD_CONTENTSIZE_ERROR; - } + if (ZSTD_isError(frameSrcSize)) return ZSTD_CONTENTSIZE_ERROR; + assert(frameSrcSize <= srcSize); src = (const BYTE *)src + frameSrcSize; srcSize -= frameSrcSize; @@ -431,20 +697,29 @@ unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize) /** ZSTD_decodeFrameHeader() : * `headerSize` must be the size provided by ZSTD_frameHeaderSize(). + * If multiple DDict references are enabled, also will choose the correct DDict to use. * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize) { size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format); if (ZSTD_isError(result)) return result; /* invalid header */ RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small"); + + /* Reference DDict requested by frame if dctx references multiple ddicts */ + if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts && dctx->ddictSet) { + ZSTD_DCtx_selectFrameDDict(dctx); + } + #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION /* Skip the dictID check in fuzzing mode, because it makes the search * harder. */ RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID), - dictionary_wrong); + dictionary_wrong, ""); #endif - if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0); + dctx->validateChecksum = (dctx->fParams.checksumFlag && !dctx->forceIgnoreChecksum) ? 
1 : 0; + if (dctx->validateChecksum) XXH64_reset(&dctx->xxhState, 0); + dctx->processedCSize += headerSize; return 0; } @@ -456,17 +731,17 @@ static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret) return frameSizeInfo; } -static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize) +static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize, ZSTD_format_e format) { ZSTD_frameSizeInfo frameSizeInfo; - memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo)); + ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo)); #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) - if (ZSTD_isLegacy(src, srcSize)) + if (format == ZSTD_f_zstd1 && ZSTD_isLegacy(src, srcSize)) return ZSTD_findFrameSizeInfoLegacy(src, srcSize); #endif - if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE) + if (format == ZSTD_f_zstd1 && (srcSize >= ZSTD_SKIPPABLEHEADERSIZE) && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize); assert(ZSTD_isError(frameSizeInfo.compressedSize) || @@ -477,10 +752,10 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize const BYTE* const ipstart = ip; size_t remainingSize = srcSize; size_t nbBlocks = 0; - ZSTD_frameHeader zfh; + ZSTD_FrameHeader zfh; /* Extract Frame Header */ - { size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize); + { size_t const ret = ZSTD_getFrameHeader_advanced(&zfh, src, srcSize, format); if (ZSTD_isError(ret)) return ZSTD_errorFrameSizeInfo(ret); if (ret > 0) @@ -514,28 +789,31 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize ip += 4; } - frameSizeInfo.compressedSize = ip - ipstart; + frameSizeInfo.nbBlocks = nbBlocks; + frameSizeInfo.compressedSize = (size_t)(ip - ipstart); frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) ? 
zfh.frameContentSize - : nbBlocks * zfh.blockSizeMax; + : (unsigned long long)nbBlocks * zfh.blockSizeMax; return frameSizeInfo; } } +static size_t ZSTD_findFrameCompressedSize_advanced(const void *src, size_t srcSize, ZSTD_format_e format) { + ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, format); + return frameSizeInfo.compressedSize; +} + /** ZSTD_findFrameCompressedSize() : - * compatible with legacy mode - * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame - * `srcSize` must be at least as large as the frame contained - * @return : the compressed size of the frame starting at `src` */ + * See docs in zstd.h + * Note: compatible with legacy mode */ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize) { - ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize); - return frameSizeInfo.compressedSize; + return ZSTD_findFrameCompressedSize_advanced(src, srcSize, ZSTD_f_zstd1); } /** ZSTD_decompressBound() : * compatible with legacy mode - * `src` must point to the start of a ZSTD frame or a skippeable frame + * `src` must point to the start of a ZSTD frame or a skippable frame * `srcSize` must be at least as large as the frame contained * @return : the maximum decompressed size of the compressed source */ @@ -544,7 +822,7 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize) unsigned long long bound = 0; /* Iterate over each frame */ while (srcSize > 0) { - ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize); + ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_f_zstd1); size_t const compressedSize = frameSizeInfo.compressedSize; unsigned long long const decompressedBound = frameSizeInfo.decompressedBound; if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR) @@ -557,27 +835,59 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize) return bound; } +size_t ZSTD_decompressionMargin(void const* src, size_t srcSize) +{ + size_t margin = 0; + unsigned maxBlockSize = 0; -/*-************************************************************* - * Frame decoding - ***************************************************************/ + /* Iterate over each frame */ + while (srcSize > 0) { + ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_f_zstd1); + size_t const compressedSize = frameSizeInfo.compressedSize; + unsigned long long const decompressedBound = frameSizeInfo.decompressedBound; + ZSTD_FrameHeader zfh; + FORWARD_IF_ERROR(ZSTD_getFrameHeader(&zfh, src, srcSize), ""); + if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR) + return ERROR(corruption_detected); + + if (zfh.frameType == ZSTD_frame) { + /* Add the frame header to our margin */ + margin += zfh.headerSize; + /* Add the checksum to our margin */ + margin += zfh.checksumFlag ? 4 : 0; + /* Add 3 bytes per block */ + margin += 3 * frameSizeInfo.nbBlocks; + + /* Compute the max block size */ + maxBlockSize = MAX(maxBlockSize, zfh.blockSizeMax); + } else { + assert(zfh.frameType == ZSTD_skippableFrame); + /* Add the entire skippable frame size to our margin. 
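ZSTD_decompressionMargin() above totals the input bytes that never become output (frame headers, block headers, checksums, whole skippable frames) plus one maximum block size; that cushion is what makes overlapping in-place decompression safe. A hedged usage sketch following the layout documented in zstd.h (experimental API; the compressed input must sit at the very end of the output buffer, which must hold decompressedSize + margin bytes; error handling abbreviated):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <assert.h>
    #include <zstd.h>

    size_t decompress_in_place(void* buf, size_t bufSize,
                               size_t srcSize, size_t dstSize)
    {
        const char* const src = (const char*)buf + bufSize - srcSize;
        size_t const margin = ZSTD_decompressionMargin(src, srcSize);
        if (ZSTD_isError(margin)) return margin;
        assert(bufSize >= dstSize + margin); /* required for in-place safety */
        return ZSTD_decompress(buf, dstSize, src, srcSize); /* dst overlaps src */
    }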
*/ + margin += compressedSize; + } -void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst) -{ - if (dst != dctx->previousDstEnd) { /* not contiguous */ - dctx->dictEnd = dctx->previousDstEnd; - dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart)); - dctx->prefixStart = dst; - dctx->previousDstEnd = dst; + assert(srcSize >= compressedSize); + src = (const BYTE*)src + compressedSize; + srcSize -= compressedSize; } + + /* Add the max block size back to the margin. */ + margin += maxBlockSize; + + return margin; } +/*-************************************************************* + * Frame decoding + ***************************************************************/ + /** ZSTD_insertBlock() : - insert `src` block into `dctx` history. Useful to track uncompressed blocks. */ + * insert `src` block into `dctx` history. Useful to track uncompressed blocks. */ size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize) { - ZSTD_checkContinuity(dctx, blockStart); + DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize); + ZSTD_checkContinuity(dctx, blockStart, blockSize); dctx->previousDstEnd = (const char*)blockStart + blockSize; return blockSize; } @@ -587,12 +897,12 @@ static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_copyRawBlock"); + RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, ""); if (dst == NULL) { if (srcSize == 0) return 0; - RETURN_ERROR(dstBuffer_null); + RETURN_ERROR(dstBuffer_null, ""); } - RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall); - memcpy(dst, src, srcSize); + ZSTD_memmove(dst, src, srcSize); return srcSize; } @@ -600,15 +910,41 @@ static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity, BYTE b, size_t regenSize) { + RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, ""); if (dst == NULL) { if (regenSize == 0) return 0; - RETURN_ERROR(dstBuffer_null); + RETURN_ERROR(dstBuffer_null, ""); } - RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall); - memset(dst, b, regenSize); + ZSTD_memset(dst, b, regenSize); return regenSize; } +static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, int streaming) +{ +#if ZSTD_TRACE + if (dctx->traceCtx && ZSTD_trace_decompress_end != NULL) { + ZSTD_Trace trace; + ZSTD_memset(&trace, 0, sizeof(trace)); + trace.version = ZSTD_VERSION_NUMBER; + trace.streaming = streaming; + if (dctx->ddict) { + trace.dictionaryID = ZSTD_getDictID_fromDDict(dctx->ddict); + trace.dictionarySize = ZSTD_DDict_dictSize(dctx->ddict); + trace.dictionaryIsCold = dctx->ddictIsCold; + } + trace.uncompressedSize = (size_t)uncompressedSize; + trace.compressedSize = (size_t)compressedSize; + trace.dctx = dctx; + ZSTD_trace_decompress_end(dctx->traceCtx, &trace); + } +#else + (void)dctx; + (void)uncompressedSize; + (void)compressedSize; + (void)streaming; +#endif +} + /*! ZSTD_decompressFrame() : * @dctx must be properly initialized @@ -618,9 +954,10 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void** srcPtr, size_t *srcSizePtr) { - const BYTE* ip = (const BYTE*)(*srcPtr); - BYTE* const ostart = (BYTE* const)dst; - BYTE* const oend = ostart + dstCapacity; + const BYTE* const istart = (const BYTE*)(*srcPtr); + const BYTE* ip = istart; + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = dstCapacity != 0 ? 
ostart + dstCapacity : ostart; BYTE* op = ostart; size_t remainingSrcSize = *srcSizePtr; @@ -628,20 +965,26 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, /* check */ RETURN_ERROR_IF( - remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN+ZSTD_blockHeaderSize, - srcSize_wrong); + remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize, + srcSize_wrong, ""); /* Frame Header */ - { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_FRAMEHEADERSIZE_PREFIX); + { size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal( + ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format); if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize; RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize, - srcSize_wrong); - FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) ); + srcSize_wrong, ""); + FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) , ""); ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize; } + /* Shrink the blockSizeMax if enabled */ + if (dctx->maxBlockSizeParam != 0) + dctx->fParams.blockSizeMax = MIN(dctx->fParams.blockSizeMax, (unsigned)dctx->maxBlockSizeParam); + /* Loop on each block */ while (1) { + BYTE* oBlockEnd = oend; size_t decodedSize; blockProperties_t blockProperties; size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties); @@ -649,28 +992,51 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, ip += ZSTD_blockHeaderSize; remainingSrcSize -= ZSTD_blockHeaderSize; - RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong); + RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, ""); + + if (ip >= op && ip < oBlockEnd) { + /* We are decompressing in-place. Limit the output pointer so that we + * don't overwrite the block that we are currently reading. This will + * fail decompression if the input & output pointers aren't spaced + * far enough apart. + * + * This is important to set, even when the pointers are far enough + * apart, because ZSTD_decompressBlock_internal() can decide to store + * literals in the output buffer, after the block it is decompressing. + * Since we don't want anything to overwrite our input, we have to tell + * ZSTD_decompressBlock_internal to never write past ip. + * + * See ZSTD_allocateLiteralsBuffer() for reference. + */ + oBlockEnd = op + (ip - op); + } switch(blockProperties.blockType) { case bt_compressed: - decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize, /* frame */ 1); + assert(dctx->isFrameDecompression == 1); + decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, not_streaming); break; case bt_raw : - decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize); + /* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. 
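The distinction this hunk draws between oend (raw blocks) and oBlockEnd (all other block types) exists because raw-block copies may overlap once decompression runs in place, which is also why ZSTD_copyRawBlock was switched to ZSTD_memmove earlier in this diff. A minimal standalone reminder of why memmove rather than memcpy:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[16] = "....ABCDEFGH";
        /* copy 8 bytes from buf+4 to buf: the ranges overlap by 4 bytes */
        memmove(buf, buf + 4, 8);  /* well-defined: front becomes "ABCDEFGH" */
        /* memcpy(buf, buf + 4, 8); would be undefined behavior here */
        printf("%.8s\n", buf);
        return 0;
    }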
*/ + decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize); break; case bt_rle : - decodedSize = ZSTD_setRleBlock(op, oend-op, *ip, blockProperties.origSize); + decodedSize = ZSTD_setRleBlock(op, (size_t)(oBlockEnd-op), *ip, blockProperties.origSize); break; case bt_reserved : default: - RETURN_ERROR(corruption_detected); + RETURN_ERROR(corruption_detected, "invalid block type"); } - - if (ZSTD_isError(decodedSize)) return decodedSize; - if (dctx->fParams.checksumFlag) + FORWARD_IF_ERROR(decodedSize, "Block decompression failure"); + DEBUGLOG(5, "Decompressed block of dSize = %u", (unsigned)decodedSize); + if (dctx->validateChecksum) { XXH64_update(&dctx->xxhState, op, decodedSize); - op += decodedSize; + } + if (decodedSize) /* support dst = NULL,0 */ { + op += decodedSize; + } + assert(ip != NULL); ip += cBlockSize; remainingSrcSize -= cBlockSize; if (blockProperties.lastBlock) break; @@ -678,25 +1044,30 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) { RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize, - corruption_detected); + corruption_detected, ""); } if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */ - U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState); - U32 checkRead; - RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong); - checkRead = MEM_readLE32(ip); - RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong); + RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, ""); + if (!dctx->forceIgnoreChecksum) { + U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState); + U32 checkRead; + checkRead = MEM_readLE32(ip); + RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, ""); + } ip += 4; remainingSrcSize -= 4; } - + ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0); /* Allow caller to get size read */ + DEBUGLOG(4, "ZSTD_decompressFrame: decompressed frame of size %i, consuming %i bytes of input", (int)(op-ostart), (int)(ip - (const BYTE*)*srcPtr)); *srcPtr = ip; *srcSizePtr = remainingSrcSize; - return op-ostart; + return (size_t)(op-ostart); } -static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, @@ -713,10 +1084,10 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, dictSize = ZSTD_DDict_dictSize(ddict); } - while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) { + while (srcSize >= ZSTD_startingInputLength(dctx->format)) { #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) - if (ZSTD_isLegacy(src, srcSize)) { + if (dctx->format == ZSTD_f_zstd1 && ZSTD_isLegacy(src, srcSize)) { size_t decodedSize; size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize); if (ZSTD_isError(frameSize)) return frameSize; @@ -726,7 +1097,16 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize); if (ZSTD_isError(decodedSize)) return decodedSize; - assert(decodedSize <=- dstCapacity); + { + unsigned long long const expectedSize = ZSTD_getFrameContentSize(src, srcSize); + RETURN_ERROR_IF(expectedSize == ZSTD_CONTENTSIZE_ERROR, corruption_detected, "Corrupted frame header!"); + if (expectedSize != ZSTD_CONTENTSIZE_UNKNOWN) { + RETURN_ERROR_IF(expectedSize != decodedSize, corruption_detected, + "Frame header size does not match 
decoded size!"); + } + } + + assert(decodedSize <= dstCapacity); dst = (BYTE*)dst + decodedSize; dstCapacity -= decodedSize; @@ -737,28 +1117,29 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, } #endif - { U32 const magicNumber = MEM_readLE32(src); - DEBUGLOG(4, "reading magic number %08X (expecting %08X)", - (unsigned)magicNumber, ZSTD_MAGICNUMBER); + if (dctx->format == ZSTD_f_zstd1 && srcSize >= 4) { + U32 const magicNumber = MEM_readLE32(src); + DEBUGLOG(5, "reading magic number %08X", (unsigned)magicNumber); if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { + /* skippable frame detected : skip it */ size_t const skippableSize = readSkippableFrameSize(src, srcSize); - FORWARD_IF_ERROR(skippableSize); + FORWARD_IF_ERROR(skippableSize, "invalid skippable frame"); assert(skippableSize <= srcSize); src = (const BYTE *)src + skippableSize; srcSize -= skippableSize; - continue; + continue; /* check next frame */ } } if (ddict) { /* we were called from ZSTD_decompress_usingDDict */ - FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict)); + FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict), ""); } else { /* this will initialize correctly with no dict if dict == NULL, so * use this in all cases but ddict */ - FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize)); + FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), ""); } - ZSTD_checkContinuity(dctx, dst); + ZSTD_checkContinuity(dctx, dst, dstCapacity); { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize); @@ -766,18 +1147,17 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown) && (moreThan1Frame==1), srcSize_wrong, - "at least one frame successfully completed, but following " - "bytes are garbage: it's more likely to be a srcSize error, " - "specifying more bytes than compressed size of frame(s). This " - "error message replaces ERROR(prefix_unknown), which would be " - "confusing, as the first header is actually correct. Note that " - "one could be unlucky, it might be a corruption error instead, " - "happening right at the place where we expect zstd magic " - "bytes. But this is _much_ less likely than a srcSize field " - "error."); + "At least one frame successfully completed, " + "but following bytes are garbage: " + "it's more likely to be a srcSize error, " + "specifying more input bytes than size of frame(s). " + "Note: one could be unlucky, it might be a corruption error instead, " + "happening right at the place where we expect zstd magic bytes. 
" + "But this is _much_ less likely than a srcSize field error."); if (ZSTD_isError(res)) return res; assert(res <= dstCapacity); - dst = (BYTE*)dst + res; + if (res != 0) + dst = (BYTE*)dst + res; dstCapacity -= res; } moreThan1Frame = 1; @@ -785,7 +1165,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed"); - return (BYTE*)dst - (BYTE*)dststart; + return (size_t)((BYTE*)dst - (BYTE*)dststart); } size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, @@ -802,7 +1182,7 @@ static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx) switch (dctx->dictUses) { default: assert(0 /* Impossible */); - /* fall-through */ + ZSTD_FALLTHROUGH; case ZSTD_dont_use: ZSTD_clearDict(dctx); return NULL; @@ -824,8 +1204,8 @@ size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t sr { #if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1) size_t regenSize; - ZSTD_DCtx* const dctx = ZSTD_createDCtx(); - RETURN_ERROR_IF(dctx==NULL, memory_allocation); + ZSTD_DCtx* const dctx = ZSTD_createDCtx_internal(ZSTD_defaultCMem); + RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!"); regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); ZSTD_freeDCtx(dctx); return regenSize; @@ -843,12 +1223,32 @@ size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t sr ****************************************/ size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; } +/** + * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed, we + * allow taking a partial block as the input. Currently only raw uncompressed blocks can + * be streamed. + * + * For blocks that can be streamed, this allows us to reduce the latency until we produce + * output, and avoid copying the input. + * + * @param inputSize - The total amount of input that the caller currently has. 
+ */ +static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t inputSize) { + if (!(dctx->stage == ZSTDds_decompressBlock || dctx->stage == ZSTDds_decompressLastBlock)) + return dctx->expected; + if (dctx->bType != bt_raw) + return dctx->expected; + return BOUNDED(1, inputSize, dctx->expected); +} + ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { switch(dctx->stage) { default: /* should not happen */ assert(0); + ZSTD_FALLTHROUGH; case ZSTDds_getFrameHeaderSize: + ZSTD_FALLTHROUGH; case ZSTDds_decodeFrameHeader: return ZSTDnit_frameHeader; case ZSTDds_decodeBlockHeader: @@ -860,6 +1260,7 @@ ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { case ZSTDds_checkChecksum: return ZSTDnit_checksum; case ZSTDds_decodeSkippableHeader: + ZSTD_FALLTHROUGH; case ZSTDds_skipFrame: return ZSTDnit_skippableFrame; } @@ -875,8 +1276,10 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c { DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize); /* Sanity check */ - RETURN_ERROR_IF(srcSize != dctx->expected, srcSize_wrong, "not allowed"); - if (dstCapacity) ZSTD_checkContinuity(dctx, dst); + RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize), srcSize_wrong, "not allowed"); + ZSTD_checkContinuity(dctx, dst, dstCapacity); + + dctx->processedCSize += srcSize; switch (dctx->stage) { @@ -885,22 +1288,22 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c if (dctx->format == ZSTD_f_zstd1) { /* allows header */ assert(srcSize >= ZSTD_FRAMEIDSIZE); /* to read skippable magic number */ if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ - memcpy(dctx->headerBuffer, src, srcSize); + ZSTD_memcpy(dctx->headerBuffer, src, srcSize); dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize; /* remaining to load to get full skippable frame header */ dctx->stage = ZSTDds_decodeSkippableHeader; return 0; } } dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format); if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize; - memcpy(dctx->headerBuffer, src, srcSize); + ZSTD_memcpy(dctx->headerBuffer, src, srcSize); dctx->expected = dctx->headerSize - srcSize; dctx->stage = ZSTDds_decodeFrameHeader; return 0; case ZSTDds_decodeFrameHeader: assert(src != NULL); - memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize); - FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize)); + ZSTD_memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize); + FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), ""); dctx->expected = ZSTD_blockHeaderSize; dctx->stage = ZSTDds_decodeBlockHeader; return 0; @@ -909,6 +1312,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c { blockProperties_t bp; size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); if (ZSTD_isError(cBlockSize)) return cBlockSize; + RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, "Block Size Exceeds Maximum"); dctx->expected = cBlockSize; dctx->bType = bp.blockType; dctx->rleSize = bp.origSize; @@ -940,50 +1344,68 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c { case bt_compressed: DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed"); - rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1); + 
assert(dctx->isFrameDecompression == 1); + rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, is_streaming); + dctx->expected = 0; /* Streaming not supported */ break; case bt_raw : + assert(srcSize <= dctx->expected); rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); + FORWARD_IF_ERROR(rSize, "ZSTD_copyRawBlock failed"); + assert(rSize == srcSize); + dctx->expected -= rSize; break; case bt_rle : rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize); + dctx->expected = 0; /* Streaming not supported */ break; case bt_reserved : /* should never happen */ default: - RETURN_ERROR(corruption_detected); + RETURN_ERROR(corruption_detected, "invalid block type"); } - if (ZSTD_isError(rSize)) return rSize; + FORWARD_IF_ERROR(rSize, ""); + RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum"); DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize); dctx->decodedSize += rSize; - if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize); + if (dctx->validateChecksum) XXH64_update(&dctx->xxhState, dst, rSize); + dctx->previousDstEnd = (char*)dst + rSize; + + /* Stay on the same stage until we are finished streaming the block. */ + if (dctx->expected > 0) { + return rSize; + } if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */ DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize); RETURN_ERROR_IF( dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN && dctx->decodedSize != dctx->fParams.frameContentSize, - corruption_detected); + corruption_detected, ""); if (dctx->fParams.checksumFlag) { /* another round for frame checksum */ dctx->expected = 4; dctx->stage = ZSTDds_checkChecksum; } else { + ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1); dctx->expected = 0; /* ends here */ dctx->stage = ZSTDds_getFrameHeaderSize; } } else { dctx->stage = ZSTDds_decodeBlockHeader; dctx->expected = ZSTD_blockHeaderSize; - dctx->previousDstEnd = (char*)dst + rSize; } return rSize; } case ZSTDds_checkChecksum: assert(srcSize == 4); /* guaranteed by dctx->expected */ - { U32 const h32 = (U32)XXH64_digest(&dctx->xxhState); - U32 const check32 = MEM_readLE32(src); - DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32); - RETURN_ERROR_IF(check32 != h32, checksum_wrong); + { + if (dctx->validateChecksum) { + U32 const h32 = (U32)XXH64_digest(&dctx->xxhState); + U32 const check32 = MEM_readLE32(src); + DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32); + RETURN_ERROR_IF(check32 != h32, checksum_wrong, ""); + } + ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1); dctx->expected = 0; dctx->stage = ZSTDds_getFrameHeaderSize; return 0; @@ -992,7 +1414,8 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c case ZSTDds_decodeSkippableHeader: assert(src != NULL); assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE); - memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */ + assert(dctx->format != ZSTD_f_zstd1_magicless); + ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */ dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected 
can grow seriously large, beyond local buffer size */ dctx->stage = ZSTDds_skipFrame; return 0; @@ -1004,7 +1427,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c default: assert(0); /* impossible */ - RETURN_ERROR(GENERIC); /* some compiler require default to do something */ + RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */ } } @@ -1015,6 +1438,10 @@ static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dict dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart)); dctx->prefixStart = dict; dctx->previousDstEnd = (const char*)dict + dictSize; +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + dctx->dictContentBeginForFuzzing = dctx->prefixStart; + dctx->dictContentEndForFuzzing = dctx->previousDstEnd; +#endif return 0; } @@ -1028,7 +1455,7 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, const BYTE* dictPtr = (const BYTE*)dict; const BYTE* const dictEnd = dictPtr + dictSize; - RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted); + RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted, "dict is too small"); assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY); /* dict must be valid */ dictPtr += 8; /* skip header = magic + dictID */ @@ -1041,66 +1468,72 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, /* in minimal huffman, we always use X1 variants */ size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, - workspace, workspaceSize); + workspace, workspaceSize, /* flags */ 0); #else size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable, - dictPtr, dictEnd - dictPtr, - workspace, workspaceSize); + dictPtr, (size_t)(dictEnd - dictPtr), + workspace, workspaceSize, /* flags */ 0); #endif - RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted); + RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, ""); dictPtr += hSize; } { short offcodeNCount[MaxOff+1]; unsigned offcodeMaxValue = MaxOff, offcodeLog; - size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); - RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted); - RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted); - RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted); + size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr)); + RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, ""); + RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, ""); + RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, ""); ZSTD_buildFSETable( entropy->OFTable, offcodeNCount, offcodeMaxValue, OF_base, OF_bits, - offcodeLog); + offcodeLog, + entropy->workspace, sizeof(entropy->workspace), + /* bmi2 */0); dictPtr += offcodeHeaderSize; } { short matchlengthNCount[MaxML+1]; unsigned matchlengthMaxValue = MaxML, matchlengthLog; - size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); - RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted); - RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted); - RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted); + size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr)); + 
RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, ""); + RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, ""); + RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, ""); ZSTD_buildFSETable( entropy->MLTable, matchlengthNCount, matchlengthMaxValue, ML_base, ML_bits, - matchlengthLog); + matchlengthLog, + entropy->workspace, sizeof(entropy->workspace), + /* bmi2 */ 0); dictPtr += matchlengthHeaderSize; } { short litlengthNCount[MaxLL+1]; unsigned litlengthMaxValue = MaxLL, litlengthLog; - size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); - RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted); - RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted); - RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted); + size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr)); + RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, ""); + RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, ""); + RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, ""); ZSTD_buildFSETable( entropy->LLTable, litlengthNCount, litlengthMaxValue, LL_base, LL_bits, - litlengthLog); + litlengthLog, + entropy->workspace, sizeof(entropy->workspace), + /* bmi2 */ 0); dictPtr += litlengthHeaderSize; } - RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted); + RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, ""); { int i; size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12)); for (i=0; i<3; i++) { U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4; - RETURN_ERROR_IF(rep==0 || rep >= dictContentSize, - dictionary_corrupted); + RETURN_ERROR_IF(rep==0 || rep > dictContentSize, + dictionary_corrupted, ""); entropy->rep[i] = rep; } } - return dictPtr - (const BYTE*)dict; + return (size_t)(dictPtr - (const BYTE*)dict); } static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) @@ -1114,7 +1547,7 @@ static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict /* load entropy tables */ { size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize); - RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted); + RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted, ""); dict = (const char*)dict + eSize; dictSize -= eSize; } @@ -1127,18 +1560,24 @@ static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx) { assert(dctx != NULL); +#if ZSTD_TRACE + dctx->traceCtx = (ZSTD_trace_decompress_begin != NULL) ? 
ZSTD_trace_decompress_begin(dctx) : 0; +#endif dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */ dctx->stage = ZSTDds_getFrameHeaderSize; + dctx->processedCSize = 0; dctx->decodedSize = 0; dctx->previousDstEnd = NULL; dctx->prefixStart = NULL; dctx->virtualStart = NULL; dctx->dictEnd = NULL; - dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ + dctx->entropy.hufTable[0] = (HUF_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001); /* cover both little and big endian */ dctx->litEntropy = dctx->fseEntropy = 0; dctx->dictID = 0; + dctx->bType = bt_reserved; + dctx->isFrameDecompression = 1; ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue)); - memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ + ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ dctx->LLTptr = dctx->entropy.LLTable; dctx->MLTptr = dctx->entropy.MLTable; dctx->OFTptr = dctx->entropy.OFTable; @@ -1148,11 +1587,11 @@ size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx) size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { - FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) ); + FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , ""); if (dict && dictSize) RETURN_ERROR_IF( ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)), - dictionary_corrupted); + dictionary_corrupted, ""); return 0; } @@ -1171,7 +1610,7 @@ size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) DEBUGLOG(4, "DDict is %s", dctx->ddictIsCold ? "~cold~" : "hot!"); } - FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) ); + FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , ""); if (ddict) { /* NULL ddict is equivalent to no dictionary */ ZSTD_copyDDictParameters(dctx, ddict); } @@ -1195,7 +1634,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) * This could for one of the following reasons : * - The frame does not require a dictionary (most common case). * - The frame was built with dictID intentionally removed. - * Needed dictionary is a hidden information. + * Needed dictionary is a hidden piece of information. * Note : this use case also happens when using a non-conformant dictionary. * - `srcSize` is too small, and as a result, frame header could not be decoded. * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`. @@ -1204,7 +1643,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) * ZSTD_getFrameHeader(), which will provide a more precise error code. 
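The dictID reported by this function is typically used to route an incoming frame to the right decompression dictionary. A short sketch of that pattern; app_lookup_ddict() is a hypothetical application-side registry of DDicts built earlier with ZSTD_createDDict():

#include <zstd.h>

extern const ZSTD_DDict* app_lookup_ddict(unsigned dictID);   /* hypothetical registry */

static size_t decompress_with_routed_dict(ZSTD_DCtx* dctx,
                                          void* dst, size_t dstCapacity,
                                          const void* src, size_t srcSize)
{
    unsigned const dictID = ZSTD_getDictID_fromFrame(src, srcSize);
    /* dictID == 0 covers the cases listed above : no dictionary required,
     * dictID intentionally removed, or header too small to decode */
    const ZSTD_DDict* const ddict = dictID ? app_lookup_ddict(dictID) : NULL;
    return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ddict);
}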
*/ unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize) { - ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 }; + ZSTD_FrameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0, 0, 0 }; size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize); if (ZSTD_isError(hError)) return 0; return zfp.dictID; @@ -1233,7 +1672,7 @@ size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, ZSTD_DStream* ZSTD_createDStream(void) { DEBUGLOG(3, "ZSTD_createDStream"); - return ZSTD_createDStream_advanced(ZSTD_defaultCMem); + return ZSTD_createDCtx_internal(ZSTD_defaultCMem); } ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize) @@ -1243,7 +1682,7 @@ ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize) ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem) { - return ZSTD_createDCtx_advanced(customMem); + return ZSTD_createDCtx_internal(customMem); } size_t ZSTD_freeDStream(ZSTD_DStream* zds) @@ -1262,11 +1701,11 @@ size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) { - RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong); + RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); ZSTD_clearDict(dctx); - if (dict && dictSize >= 8) { + if (dict && dictSize != 0) { dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem); - RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation); + RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!"); dctx->ddict = dctx->ddictLocal; dctx->dictUses = ZSTD_use_indefinitely; } @@ -1285,7 +1724,7 @@ size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSi size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType) { - FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType)); + FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType), ""); dctx->dictUses = ZSTD_use_once; return 0; } @@ -1297,21 +1736,23 @@ size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSiz /* ZSTD_initDStream_usingDict() : - * return : expected size, aka ZSTD_FRAMEHEADERSIZE_PREFIX. + * return : expected size, aka ZSTD_startingInputLength(). 
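A sketch of the streaming-decode loop these initializers feed, treating the returned value strictly as a hint for sizing the first read; it assumes a single frame and plain stdio I/O, with error handling abbreviated:

#include <stdio.h>
#include <zstd.h>

static int sketch_stream_decode(FILE* fin, FILE* fout)
{
    char inBuf[1 << 14];
    char outBuf[1 << 14];
    ZSTD_DStream* const zds = ZSTD_createDStream();
    size_t toRead, readSz;
    if (zds == NULL) return -1;
    toRead = ZSTD_initDStream(zds);   /* a size hint, not a hard requirement */
    while ((readSz = fread(inBuf, 1,
                           toRead < sizeof(inBuf) ? toRead : sizeof(inBuf),
                           fin)) != 0) {
        ZSTD_inBuffer input = { inBuf, readSz, 0 };
        while (input.pos < input.size) {
            ZSTD_outBuffer output = { outBuf, sizeof(outBuf), 0 };
            toRead = ZSTD_decompressStream(zds, &output, &input);
            if (ZSTD_isError(toRead)) { ZSTD_freeDStream(zds); return -1; }
            fwrite(outBuf, 1, output.pos, fout);
        }
        if (toRead == 0) break;   /* a frame ended exactly at this boundary */
    }
    ZSTD_freeDStream(zds);
    return 0;
}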
* this function cannot fail */ size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize) { DEBUGLOG(4, "ZSTD_initDStream_usingDict"); - FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) ); - FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) ); - return ZSTD_FRAMEHEADERSIZE_PREFIX; + FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , ""); + return ZSTD_startingInputLength(zds->format); } /* note : this variant can't fail */ size_t ZSTD_initDStream(ZSTD_DStream* zds) { DEBUGLOG(4, "ZSTD_initDStream"); - return ZSTD_initDStream_usingDDict(zds, NULL); + FORWARD_IF_ERROR(ZSTD_DCtx_reset(zds, ZSTD_reset_session_only), ""); + FORWARD_IF_ERROR(ZSTD_DCtx_refDDict(zds, NULL), ""); + return ZSTD_startingInputLength(zds->format); } /* ZSTD_initDStream_usingDDict() : @@ -1319,28 +1760,40 @@ size_t ZSTD_initDStream(ZSTD_DStream* zds) * this function cannot fail */ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict) { - FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) ); - FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) ); - return ZSTD_FRAMEHEADERSIZE_PREFIX; + DEBUGLOG(4, "ZSTD_initDStream_usingDDict"); + FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , ""); + return ZSTD_startingInputLength(dctx->format); } /* ZSTD_resetDStream() : - * return : expected size, aka ZSTD_FRAMEHEADERSIZE_PREFIX. + * return : expected size, aka ZSTD_startingInputLength(). * this function cannot fail */ size_t ZSTD_resetDStream(ZSTD_DStream* dctx) { - FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only)); - return ZSTD_FRAMEHEADERSIZE_PREFIX; + DEBUGLOG(4, "ZSTD_resetDStream"); + FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), ""); + return ZSTD_startingInputLength(dctx->format); } size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) { - RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong); + RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); ZSTD_clearDict(dctx); if (ddict) { dctx->ddict = ddict; dctx->dictUses = ZSTD_use_indefinitely; + if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts) { + if (dctx->ddictSet == NULL) { + dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem); + if (!dctx->ddictSet) { + RETURN_ERROR(memory_allocation, "Failed to allocate memory for hash set!"); + } + } + assert(!dctx->staticSize); /* Impossible: ddictSet cannot have been allocated if static dctx */ + FORWARD_IF_ERROR(ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem), ""); + } } return 0; } @@ -1353,16 +1806,16 @@ size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize) ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax); size_t const min = (size_t)1 << bounds.lowerBound; size_t const max = (size_t)1 << bounds.upperBound; - RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong); - RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound); - RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound); + RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); + RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound, ""); + RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound, ""); dctx->maxWindowSize = maxWindowSize; return 0; } size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format) { - return 
ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, format); + return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, (int)format); } ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) @@ -1378,6 +1831,27 @@ ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) bounds.upperBound = (int)ZSTD_f_zstd1_magicless; ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless); return bounds; + case ZSTD_d_stableOutBuffer: + bounds.lowerBound = (int)ZSTD_bm_buffered; + bounds.upperBound = (int)ZSTD_bm_stable; + return bounds; + case ZSTD_d_forceIgnoreChecksum: + bounds.lowerBound = (int)ZSTD_d_validateChecksum; + bounds.upperBound = (int)ZSTD_d_ignoreChecksum; + return bounds; + case ZSTD_d_refMultipleDDicts: + bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict; + bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts; + return bounds; + case ZSTD_d_disableHuffmanAssembly: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_d_maxBlockSize: + bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN; + bounds.upperBound = ZSTD_BLOCKSIZE_MAX; + return bounds; + default:; } bounds.error = ERROR(parameter_unsupported); @@ -1397,12 +1871,41 @@ static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value) } #define CHECK_DBOUNDS(p,v) { \ - RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound); \ + RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \ +} + +size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value) +{ + switch (param) { + case ZSTD_d_windowLogMax: + *value = (int)ZSTD_highbit32((U32)dctx->maxWindowSize); + return 0; + case ZSTD_d_format: + *value = (int)dctx->format; + return 0; + case ZSTD_d_stableOutBuffer: + *value = (int)dctx->outBufferMode; + return 0; + case ZSTD_d_forceIgnoreChecksum: + *value = (int)dctx->forceIgnoreChecksum; + return 0; + case ZSTD_d_refMultipleDDicts: + *value = (int)dctx->refMultipleDDicts; + return 0; + case ZSTD_d_disableHuffmanAssembly: + *value = (int)dctx->disableHufAsm; + return 0; + case ZSTD_d_maxBlockSize: + *value = dctx->maxBlockSizeParam; + return 0; + default:; + } + RETURN_ERROR(parameter_unsupported, ""); } size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value) { - RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong); + RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); switch(dParam) { case ZSTD_d_windowLogMax: if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT; @@ -1413,9 +1916,32 @@ size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value CHECK_DBOUNDS(ZSTD_d_format, value); dctx->format = (ZSTD_format_e)value; return 0; + case ZSTD_d_stableOutBuffer: + CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value); + dctx->outBufferMode = (ZSTD_bufferMode_e)value; + return 0; + case ZSTD_d_forceIgnoreChecksum: + CHECK_DBOUNDS(ZSTD_d_forceIgnoreChecksum, value); + dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value; + return 0; + case ZSTD_d_refMultipleDDicts: + CHECK_DBOUNDS(ZSTD_d_refMultipleDDicts, value); + if (dctx->staticSize != 0) { + RETURN_ERROR(parameter_unsupported, "Static dctx does not support multiple DDicts!"); + } + dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value; + return 0; + case ZSTD_d_disableHuffmanAssembly: + CHECK_DBOUNDS(ZSTD_d_disableHuffmanAssembly, value); + dctx->disableHufAsm = value != 0; + return 0; + case ZSTD_d_maxBlockSize: + if (value != 0) CHECK_DBOUNDS(ZSTD_d_maxBlockSize, value); + dctx->maxBlockSizeParam = value; + return 0; default:; } - 
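A tiny usage sketch for this parameter API; it assumes ZSTD_STATIC_LINKING_ONLY, since ZSTD_DCtx_getParameter() is experimental. Capping the window log bounds worst-case memory when decoding untrusted input:

#define ZSTD_STATIC_LINKING_ONLY
#include <assert.h>
#include <zstd.h>

static void sketch_limit_window(ZSTD_DCtx* dctx)
{
    int wlog = 0;
    size_t err = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);   /* 128 MiB window cap */
    assert(!ZSTD_isError(err));
    err = ZSTD_DCtx_getParameter(dctx, ZSTD_d_windowLogMax, &wlog);
    assert(!ZSTD_isError(err));
    assert(wlog == 27);   /* the getter reports ZSTD_highbit32(maxWindowSize) */
    (void)err; (void)wlog;
}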
RETURN_ERROR(parameter_unsupported); + RETURN_ERROR(parameter_unsupported, ""); } size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset) @@ -1424,13 +1950,13 @@ size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset) || (reset == ZSTD_reset_session_and_parameters) ) { dctx->streamStage = zdss_init; dctx->noForwardProgress = 0; + dctx->isFrameDecompression = 1; } if ( (reset == ZSTD_reset_parameters) || (reset == ZSTD_reset_session_and_parameters) ) { - RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong); + RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); ZSTD_clearDict(dctx); - dctx->format = ZSTD_f_zstd1; - dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; + ZSTD_DCtx_resetParameters(dctx); } return 0; } @@ -1441,17 +1967,29 @@ size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx) return ZSTD_sizeof_DCtx(dctx); } -size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize) +static size_t ZSTD_decodingBufferSize_internal(unsigned long long windowSize, unsigned long long frameContentSize, size_t blockSizeMax) { - size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX); - unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2); + size_t const blockSize = MIN((size_t)MIN(windowSize, ZSTD_BLOCKSIZE_MAX), blockSizeMax); + /* We need blockSize + WILDCOPY_OVERLENGTH worth of buffer so that if a block + * ends at windowSize + WILDCOPY_OVERLENGTH + 1 bytes, we can start writing + * the block at the beginning of the output buffer, and maintain a full window. + * + * We need another blockSize worth of buffer so that we can store split + * literals at the end of the block without overwriting the extDict window. + */ + unsigned long long const neededRBSize = windowSize + (blockSize * 2) + (WILDCOPY_OVERLENGTH * 2); unsigned long long const neededSize = MIN(frameContentSize, neededRBSize); size_t const minRBSize = (size_t) neededSize; RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize, - frameParameter_windowTooLarge); + frameParameter_windowTooLarge, ""); return minRBSize; } +size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize) +{ + return ZSTD_decodingBufferSize_internal(windowSize, frameContentSize, ZSTD_BLOCKSIZE_MAX); +} + size_t ZSTD_estimateDStreamSize(size_t windowSize) { size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX); @@ -1463,37 +2001,102 @@ size_t ZSTD_estimateDStreamSize(size_t windowSize) size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize) { U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */ - ZSTD_frameHeader zfh; + ZSTD_FrameHeader zfh; size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize); if (ZSTD_isError(err)) return err; - RETURN_ERROR_IF(err>0, srcSize_wrong); + RETURN_ERROR_IF(err>0, srcSize_wrong, ""); RETURN_ERROR_IF(zfh.windowSize > windowSizeMax, - frameParameter_windowTooLarge); + frameParameter_windowTooLarge, ""); return ZSTD_estimateDStreamSize((size_t)zfh.windowSize); } /* ***** Decompression ***** */ -MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) +static int ZSTD_DCtx_isOverflow(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize) { - size_t const length = MIN(dstCapacity, srcSize); - memcpy(dst, src, length); - return length; + return (zds->inBuffSize + 
zds->outBuffSize) >= (neededInBuffSize + neededOutBuffSize) * ZSTD_WORKSPACETOOLARGE_FACTOR; } +static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize) +{ + if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize)) + zds->oversizedDuration++; + else + zds->oversizedDuration = 0; +} + +static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DStream* zds) +{ + return zds->oversizedDuration >= ZSTD_WORKSPACETOOLARGE_MAXDURATION; +} + +/* Checks that the output buffer hasn't changed if ZSTD_obm_stable is used. */ +static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const* output) +{ + ZSTD_outBuffer const expect = zds->expectedOutBuffer; + /* No requirement when ZSTD_obm_stable is not enabled. */ + if (zds->outBufferMode != ZSTD_bm_stable) + return 0; + /* Any buffer is allowed in zdss_init, this must be the same for every other call until + * the context is reset. + */ + if (zds->streamStage == zdss_init) + return 0; + /* The buffer must match our expectation exactly. */ + if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size) + return 0; + RETURN_ERROR(dstBuffer_wrong, "ZSTD_d_stableOutBuffer enabled but output differs!"); +} + +/* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream() + * and updates the stage and the output buffer state. This call is extracted so it can be + * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode. + * NOTE: You must break after calling this function since the streamStage is modified. + */ +static size_t ZSTD_decompressContinueStream( + ZSTD_DStream* zds, char** op, char* oend, + void const* src, size_t srcSize) { + int const isSkipFrame = ZSTD_isSkipFrame(zds); + if (zds->outBufferMode == ZSTD_bm_buffered) { + size_t const dstSize = isSkipFrame ? 0 : zds->outBuffSize - zds->outStart; + size_t const decodedSize = ZSTD_decompressContinue(zds, + zds->outBuff + zds->outStart, dstSize, src, srcSize); + FORWARD_IF_ERROR(decodedSize, ""); + if (!decodedSize && !isSkipFrame) { + zds->streamStage = zdss_read; + } else { + zds->outEnd = zds->outStart + decodedSize; + zds->streamStage = zdss_flush; + } + } else { + /* Write directly into the output buffer */ + size_t const dstSize = isSkipFrame ? 0 : (size_t)(oend - *op); + size_t const decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize); + FORWARD_IF_ERROR(decodedSize, ""); + *op += decodedSize; + /* Flushing is not needed. */ + zds->streamStage = zdss_read; + assert(*op <= oend); + assert(zds->outBufferMode == ZSTD_bm_stable); + } + return 0; +} size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { - const char* const istart = (const char*)(input->src) + input->pos; - const char* const iend = (const char*)(input->src) + input->size; + const char* const src = (const char*)input->src; + const char* const istart = input->pos != 0 ? src + input->pos : src; + const char* const iend = input->size != 0 ? src + input->size : src; const char* ip = istart; - char* const ostart = (char*)(output->dst) + output->pos; - char* const oend = (char*)(output->dst) + output->size; + char* const dst = (char*)output->dst; + char* const ostart = output->pos != 0 ? dst + output->pos : dst; + char* const oend = output->size != 0 ? 
dst + output->size : dst; char* op = ostart; U32 someMoreWork = 1; DEBUGLOG(5, "ZSTD_decompressStream"); + assert(zds != NULL); RETURN_ERROR_IF( input->pos > input->size, srcSize_wrong, @@ -1505,6 +2108,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB "forbidden. out: pos: %u vs size: %u", (U32)output->pos, (U32)output->size); DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos)); + FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), ""); while (someMoreWork) { switch(zds->streamStage) @@ -1513,9 +2117,12 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB DEBUGLOG(5, "stage zdss_init => transparent reset "); zds->streamStage = zdss_loadHeader; zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) zds->legacyVersion = 0; +#endif zds->hostageByte = 0; - /* fall-through */ + zds->expectedOutBuffer = *output; + ZSTD_FALLTHROUGH; case zdss_loadHeader : DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip)); @@ -1529,7 +2136,9 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB } } #endif { size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format); - DEBUGLOG(5, "header size : %u", (U32)hSize); + if (zds->refMultipleDDicts && zds->ddictSet) { + ZSTD_DCtx_selectFrameDDict(zds); + } if (ZSTD_isError(hSize)) { #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart); @@ -1542,7 +2151,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB "legacy support is incompatible with static dctx"); FORWARD_IF_ERROR(ZSTD_initLegacyStream(&zds->legacyContext, zds->previousLegacyVersion, legacyVersion, - dict, dictSize)); + dict, dictSize), ""); zds->legacyVersion = zds->previousLegacyVersion = legacyVersion; { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input); if (hint==0) zds->streamStage = zdss_init; /* or stay in stage zdss_loadHeader */ @@ -1557,43 +2166,59 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB assert(iend >= ip); if (toLoad > remainingInput) { /* not enough input to load full header */ if (remainingInput > 0) { - memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput); + ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput); zds->lhSize += remainingInput; } input->pos = input->size; - return (MAX(ZSTD_FRAMEHEADERSIZE_MIN, hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ + /* check first few bytes */ + FORWARD_IF_ERROR( + ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format), + "First few bytes detected incorrect" ); + /* return hint input size */ + return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ } assert(ip != NULL); - memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad; + ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad; break; } } /* check for single-pass mode opportunity */ - if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */ + if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN + && zds->fParams.frameType != ZSTD_skippableFrame && 
(U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) { - size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart); + size_t const cSize = ZSTD_findFrameCompressedSize_advanced(istart, (size_t)(iend-istart), zds->format); if (cSize <= (size_t)(iend-istart)) { /* shortcut : using single-pass mode */ - size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, ZSTD_getDDict(zds)); + size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds)); if (ZSTD_isError(decompressedSize)) return decompressedSize; - DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()") + DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()"); + assert(istart != NULL); ip = istart + cSize; - op += decompressedSize; + op = op ? op + decompressedSize : op; /* can occur if frameContentSize = 0 (empty frame) */ zds->expected = 0; zds->streamStage = zdss_init; someMoreWork = 0; break; } } + /* Check output buffer is large enough for ZSTD_odm_stable. */ + if (zds->outBufferMode == ZSTD_bm_stable + && zds->fParams.frameType != ZSTD_skippableFrame + && zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN + && (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) { + RETURN_ERROR(dstSize_tooSmall, "ZSTD_obm_stable passed but ZSTD_outBuffer is too small"); + } + /* Consume header (see ZSTDds_decodeFrameHeader) */ DEBUGLOG(4, "Consume header"); - FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds))); + FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), ""); - if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ + if (zds->format == ZSTD_f_zstd1 + && (MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE); zds->stage = ZSTDds_skipFrame; } else { - FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize)); + FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), ""); zds->expected = ZSTD_blockHeaderSize; zds->stage = ZSTDds_decodeBlockHeader; } @@ -1604,40 +2229,50 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB (U32)(zds->maxWindowSize >> 10) ); zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN); RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize, - frameParameter_windowTooLarge); + frameParameter_windowTooLarge, ""); + if (zds->maxBlockSizeParam != 0) + zds->fParams.blockSizeMax = MIN(zds->fParams.blockSizeMax, (unsigned)zds->maxBlockSizeParam); /* Adapt buffer sizes to frame header instructions */ { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */); - size_t const neededOutBuffSize = ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize); - if ((zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize)) { - size_t const bufferSize = neededInBuffSize + neededOutBuffSize; - DEBUGLOG(4, "inBuff : from %u to %u", - (U32)zds->inBuffSize, (U32)neededInBuffSize); - DEBUGLOG(4, "outBuff : from %u to %u", - (U32)zds->outBuffSize, (U32)neededOutBuffSize); - if (zds->staticSize) { /* static DCtx */ - DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize); - assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */ - RETURN_ERROR_IF( - 
bufferSize > zds->staticSize - sizeof(ZSTD_DCtx), - memory_allocation); - } else { - ZSTD_free(zds->inBuff, zds->customMem); - zds->inBuffSize = 0; - zds->outBuffSize = 0; - zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem); - RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation); - } - zds->inBuffSize = neededInBuffSize; - zds->outBuff = zds->inBuff + zds->inBuffSize; - zds->outBuffSize = neededOutBuffSize; - } } + size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered + ? ZSTD_decodingBufferSize_internal(zds->fParams.windowSize, zds->fParams.frameContentSize, zds->fParams.blockSizeMax) + : 0; + + ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize); + + { int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize); + int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds); + + if (tooSmall || tooLarge) { + size_t const bufferSize = neededInBuffSize + neededOutBuffSize; + DEBUGLOG(4, "inBuff : from %u to %u", + (U32)zds->inBuffSize, (U32)neededInBuffSize); + DEBUGLOG(4, "outBuff : from %u to %u", + (U32)zds->outBuffSize, (U32)neededOutBuffSize); + if (zds->staticSize) { /* static DCtx */ + DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize); + assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */ + RETURN_ERROR_IF( + bufferSize > zds->staticSize - sizeof(ZSTD_DCtx), + memory_allocation, ""); + } else { + ZSTD_customFree(zds->inBuff, zds->customMem); + zds->inBuffSize = 0; + zds->outBuffSize = 0; + zds->inBuff = (char*)ZSTD_customMalloc(bufferSize, zds->customMem); + RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, ""); + } + zds->inBuffSize = neededInBuffSize; + zds->outBuff = zds->inBuff + zds->inBuffSize; + zds->outBuffSize = neededOutBuffSize; + } } } zds->streamStage = zdss_read; - /* fall-through */ + ZSTD_FALLTHROUGH; case zdss_read: DEBUGLOG(5, "stage zdss_read"); - { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds); + { size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip)); DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize); if (neededInSize==0) { /* end of frame */ zds->streamStage = zdss_init; @@ -1645,59 +2280,56 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB break; } if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ - int const isSkipFrame = ZSTD_isSkipFrame(zds); - size_t const decodedSize = ZSTD_decompressContinue(zds, - zds->outBuff + zds->outStart, (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart), - ip, neededInSize); - if (ZSTD_isError(decodedSize)) return decodedSize; + FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), ""); + assert(ip != NULL); ip += neededInSize; - if (!decodedSize && !isSkipFrame) break; /* this was just a header */ - zds->outEnd = zds->outStart + decodedSize; - zds->streamStage = zdss_flush; + /* Function modifies the stage so we must break */ break; } } if (ip==iend) { someMoreWork = 0; break; } /* no more input */ zds->streamStage = zdss_load; - /* fall-through */ + ZSTD_FALLTHROUGH; case zdss_load: { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds); size_t const toLoad = neededInSize - zds->inPos; int const isSkipFrame = ZSTD_isSkipFrame(zds); size_t loadedSize; + /* At this point we shouldn't be decompressing a block that we can stream. 
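Callers that want to rule out this reallocation path entirely can pre-size a static DStream instead. A sketch, using an 8 MiB window as an arbitrary example; frames requiring larger buffers then fail with a memory_allocation error rather than growing the workspace:

#define ZSTD_STATIC_LINKING_ONLY   /* static-allocation API is experimental */
#include <zstd.h>

static ZSTD_DStream* sketch_static_dstream(void* workspace, size_t workspaceSize)
{
    size_t const needed = ZSTD_estimateDStreamSize((size_t)1 << 23);   /* 8 MiB window */
    if (ZSTD_isError(needed) || workspaceSize < needed) return NULL;
    return ZSTD_initStaticDStream(workspace, workspaceSize);
}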
*/ + assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip))); if (isSkipFrame) { loadedSize = MIN(toLoad, (size_t)(iend-ip)); } else { RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos, corruption_detected, "should never happen"); - loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip); + loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip)); + } + if (loadedSize != 0) { + /* ip may be NULL */ + ip += loadedSize; + zds->inPos += loadedSize; } - ip += loadedSize; - zds->inPos += loadedSize; if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */ /* decode loaded input */ - { size_t const decodedSize = ZSTD_decompressContinue(zds, - zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart, - zds->inBuff, neededInSize); - if (ZSTD_isError(decodedSize)) return decodedSize; - zds->inPos = 0; /* input is consumed */ - if (!decodedSize && !isSkipFrame) { zds->streamStage = zdss_read; break; } /* this was just a header */ - zds->outEnd = zds->outStart + decodedSize; - } } - zds->streamStage = zdss_flush; - /* fall-through */ - + zds->inPos = 0; /* input is consumed */ + FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize), ""); + /* Function modifies the stage so we must break */ + break; + } case zdss_flush: - { size_t const toFlushSize = zds->outEnd - zds->outStart; - size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize); - op += flushedSize; + { + size_t const toFlushSize = zds->outEnd - zds->outStart; + size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize); + + op = op ? op + flushedSize : op; + zds->outStart += flushedSize; if (flushedSize == toFlushSize) { /* flush completed */ zds->streamStage = zdss_read; if ( (zds->outBuffSize < zds->fParams.frameContentSize) - && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) { + && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) { DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)", (int)(zds->outBuffSize - zds->outStart), (U32)zds->fParams.blockSizeMax); @@ -1711,17 +2343,21 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB default: assert(0); /* impossible */ - RETURN_ERROR(GENERIC); /* some compiler require default to do something */ + RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */ } } /* result */ input->pos = (size_t)(ip - (const char*)(input->src)); output->pos = (size_t)(op - (char*)(output->dst)); + + /* Update the expected output buffer for ZSTD_obm_stable. 
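A sketch of the usage this bookkeeping enables: with ZSTD_d_stableOutBuffer set, the caller must hand back the exact same output buffer on every call, and its capacity must cover the whole frame up front. Names and error handling are abbreviated:

#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_d_stableOutBuffer is experimental */
#include <zstd.h>

static size_t sketch_stable_out(ZSTD_DCtx* dctx,
                                void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize)
{
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };   /* must stay identical across calls */
    ZSTD_inBuffer in = { src, srcSize, 0 };
    size_t const pErr = ZSTD_DCtx_setParameter(dctx, ZSTD_d_stableOutBuffer, 1);
    if (ZSTD_isError(pErr)) return pErr;
    while (in.pos < in.size) {
        size_t const ret = ZSTD_decompressStream(dctx, &out, &in);
        if (ZSTD_isError(ret)) return ret;
        if (ret == 0) break;   /* frame complete */
    }
    return out.pos;   /* bytes decoded directly into dst */
}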
*/ + zds->expectedOutBuffer = *output; + if ((ip==istart) && (op==ostart)) { /* no forward progress */ zds->noForwardProgress ++; if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) { - RETURN_ERROR_IF(op==oend, dstSize_tooSmall); - RETURN_ERROR_IF(ip==iend, srcSize_wrong); + RETURN_ERROR_IF(op==oend, noForwardProgress_destFull, ""); + RETURN_ERROR_IF(ip==iend, noForwardProgress_inputEmpty, ""); assert(0); } } else { @@ -1758,11 +2394,17 @@ size_t ZSTD_decompressStream_simpleArgs ( void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos) { - ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; - ZSTD_inBuffer input = { src, srcSize, *srcPos }; - /* ZSTD_compress_generic() will check validity of dstPos and srcPos */ - size_t const cErr = ZSTD_decompressStream(dctx, &output, &input); - *dstPos = output.pos; - *srcPos = input.pos; - return cErr; + ZSTD_outBuffer output; + ZSTD_inBuffer input; + output.dst = dst; + output.size = dstCapacity; + output.pos = *dstPos; + input.src = src; + input.size = srcSize; + input.pos = *srcPos; + { size_t const cErr = ZSTD_decompressStream(dctx, &output, &input); + *dstPos = output.pos; + *srcPos = input.pos; + return cErr; + } } diff --git a/lib/decompress/zstd_decompress_block.c b/lib/decompress/zstd_decompress_block.c index 7707c28bb08..56e1f9eda28 100644 --- a/lib/decompress/zstd_decompress_block.c +++ b/lib/decompress/zstd_decompress_block.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -14,18 +14,17 @@ /*-******************************************************* * Dependencies *********************************************************/ -#include <string.h> /* memcpy, memmove, memset */ -#include "compiler.h" /* prefetch */ -#include "cpu.h" /* bmi2 */ -#include "mem.h" /* low level memory routines */ +#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */ +#include "../common/compiler.h" /* prefetch */ +#include "../common/mem.h" /* low level memory routines */ +#include #define FSE_STATIC_LINKING_ONLY -#include "fse.h" -#define HUF_STATIC_LINKING_ONLY -#include "huf.h" -#include "zstd_internal.h" +#include "../common/fse.h" +#include "../common/huf.h" +#include "../common/zstd_internal.h" #include "zstd_decompress_internal.h" /* ZSTD_DCtx */ -#include "zstd_ddict.h" /* ZSTD_DDictDictContent */ #include "zstd_decompress_block.h" +#include "../common/bits.h" /* ZSTD_highbit32 */ /*_******************************************************* * Macros @@ -44,19 +43,26 @@ /*_******************************************************* * Memory operations **********************************************************/ -static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); } +static void ZSTD_copy4(void* dst, const void* src) { ZSTD_memcpy(dst, src, 4); } /*-************************************************************* * Block decoding ***************************************************************/ +static size_t ZSTD_blockSizeMax(ZSTD_DCtx const* dctx) +{ + size_t const blockSizeMax = dctx->isFrameDecompression ? dctx->fParams.blockSizeMax : ZSTD_BLOCKSIZE_MAX; + assert(blockSizeMax <= ZSTD_BLOCKSIZE_MAX); + return blockSizeMax; +} + /*! 
ZSTD_getcBlockSize() : * Provides the size of compressed block from block header `src` */ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) { - RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong); + RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, ""); { U32 const cBlockHeader = MEM_readLE24(src); U32 const cSize = cBlockHeader >> 3; @@ -64,39 +70,95 @@ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3); bpPtr->origSize = cSize; /* only useful for RLE */ if (bpPtr->blockType == bt_rle) return 1; - RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected); + RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, ""); return cSize; } } +/* Allocate buffer for literals, either overlapping current dst, or split between dst and litExtraBuffer, or stored entirely within litExtraBuffer */ +static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx* dctx, void* const dst, const size_t dstCapacity, const size_t litSize, + const streaming_operation streaming, const size_t expectedWriteSize, const unsigned splitImmediately) +{ + size_t const blockSizeMax = ZSTD_blockSizeMax(dctx); + assert(litSize <= blockSizeMax); + assert(dctx->isFrameDecompression || streaming == not_streaming); + assert(expectedWriteSize <= blockSizeMax); + if (streaming == not_streaming && dstCapacity > blockSizeMax + WILDCOPY_OVERLENGTH + litSize + WILDCOPY_OVERLENGTH) { + /* If we aren't streaming, we can just put the literals after the output + * of the current block. We don't need to worry about overwriting the + * extDict of our window, because it doesn't exist. + * So if we have space after the end of the block, just put it there. + */ + dctx->litBuffer = (BYTE*)dst + blockSizeMax + WILDCOPY_OVERLENGTH; + dctx->litBufferEnd = dctx->litBuffer + litSize; + dctx->litBufferLocation = ZSTD_in_dst; + } else if (litSize <= ZSTD_LITBUFFEREXTRASIZE) { + /* Literals fit entirely within the extra buffer, put them there to avoid + * having to split the literals. + */ + dctx->litBuffer = dctx->litExtraBuffer; + dctx->litBufferEnd = dctx->litBuffer + litSize; + dctx->litBufferLocation = ZSTD_not_in_dst; + } else { + assert(blockSizeMax > ZSTD_LITBUFFEREXTRASIZE); + /* Literals must be split between the output block and the extra lit + * buffer. We fill the extra lit buffer with the tail of the literals, + * and put the rest of the literals at the end of the block, with + * WILDCOPY_OVERLENGTH of buffer room to allow for overreads. + * This MUST not write more than our maxBlockSize beyond dst, because in + * streaming mode, that could overwrite part of our extDict window. + */ + if (splitImmediately) { + /* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */ + dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH; + dctx->litBufferEnd = dctx->litBuffer + litSize - ZSTD_LITBUFFEREXTRASIZE; + } else { + /* initially this will be stored entirely in dst during huffman decoding, it will partially be shifted to litExtraBuffer after */ + dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize; + dctx->litBufferEnd = (BYTE*)dst + expectedWriteSize; + } + dctx->litBufferLocation = ZSTD_split; + assert(dctx->litBufferEnd <= (BYTE*)dst + expectedWriteSize); + } +} -/* Hidden declaration for fullbench */ -size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, - const void* src, size_t srcSize); /*! 
ZSTD_decodeLiteralsBlock() : + * Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored + * in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current + * block will be output. Otherwise it will be stored at the end of the current dst blockspace, with a small portion being + * stored in dctx->litExtraBuffer to help keep it "ahead" of the current output write. + * * @return : nb of bytes read from src (< srcSize ) * note : symbol not declared but exposed for fullbench */ -size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, - const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ +static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, + const void* src, size_t srcSize, /* note : srcSize < BLOCKSIZE */ + void* dst, size_t dstCapacity, const streaming_operation streaming) { - RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected); + DEBUGLOG(5, "ZSTD_decodeLiteralsBlock"); + RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, ""); { const BYTE* const istart = (const BYTE*) src; - symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); + SymbolEncodingType_e const litEncType = (SymbolEncodingType_e)(istart[0] & 3); + size_t const blockSizeMax = ZSTD_blockSizeMax(dctx); switch(litEncType) { case set_repeat: - RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted); - /* fall-through */ + DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block"); + RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, ""); + ZSTD_FALLTHROUGH; case set_compressed: - RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3"); + RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need up to 5 for case 3"); { size_t lhSize, litSize, litCSize; U32 singleStream=0; U32 const lhlCode = (istart[0] >> 2) & 3; U32 const lhc = MEM_readLE32(istart); size_t hufSuccess; + size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity); + int const flags = 0 + | (ZSTD_DCtx_get_bmi2(dctx) ? HUF_flags_bmi2 : 0) + | (dctx->disableHufAsm ? 
HUF_flags_disableAsm : 0); switch(lhlCode) { case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */ @@ -116,11 +178,18 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, /* 2 - 2 - 18 - 18 */ lhSize = 5; litSize = (lhc >> 4) & 0x3FFFF; - litCSize = (lhc >> 22) + (istart[4] << 10); + litCSize = (lhc >> 22) + ((size_t)istart[4] << 10); break; } - RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected); - RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected); + RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); + RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, ""); + if (!singleStream) + RETURN_ERROR_IF(litSize < MIN_LITERALS_FOR_4_STREAMS, literals_headerWrong, + "Not enough literals (%zu) for the 4-streams mode (min %u)", + litSize, MIN_LITERALS_FOR_4_STREAMS); + RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, ""); + RETURN_ERROR_IF(expectedWriteSize < litSize , dstSize_tooSmall, ""); + ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 0); /* prefetch huffman table if cold */ if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) { @@ -129,13 +198,14 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, if (litEncType==set_repeat) { if (singleStream) { - hufSuccess = HUF_decompress1X_usingDTable_bmi2( + hufSuccess = HUF_decompress1X_usingDTable( dctx->litBuffer, litSize, istart+lhSize, litCSize, - dctx->HUFptr, dctx->bmi2); + dctx->HUFptr, flags); } else { - hufSuccess = HUF_decompress4X_usingDTable_bmi2( + assert(litSize >= MIN_LITERALS_FOR_4_STREAMS); + hufSuccess = HUF_decompress4X_usingDTable( dctx->litBuffer, litSize, istart+lhSize, litCSize, - dctx->HUFptr, dctx->bmi2); + dctx->HUFptr, flags); } } else { if (singleStream) { @@ -143,34 +213,43 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, hufSuccess = HUF_decompress1X_DCtx_wksp( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace)); + sizeof(dctx->workspace), flags); #else - hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2( + hufSuccess = HUF_decompress1X1_DCtx_wksp( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace), dctx->bmi2); + sizeof(dctx->workspace), flags); #endif } else { - hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2( + hufSuccess = HUF_decompress4X_hufOnly_wksp( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace), dctx->bmi2); + sizeof(dctx->workspace), flags); } } + if (dctx->litBufferLocation == ZSTD_split) + { + assert(litSize > ZSTD_LITBUFFEREXTRASIZE); + ZSTD_memcpy(dctx->litExtraBuffer, dctx->litBufferEnd - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE); + ZSTD_memmove(dctx->litBuffer + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH, dctx->litBuffer, litSize - ZSTD_LITBUFFEREXTRASIZE); + dctx->litBuffer += ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH; + dctx->litBufferEnd -= WILDCOPY_OVERLENGTH; + assert(dctx->litBufferEnd <= (BYTE*)dst + blockSizeMax); + } - RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected); + RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, ""); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; dctx->litEntropy = 1; if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable; - memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return litCSize + lhSize; } case set_basic: { size_t 
litSize, lhSize; U32 const lhlCode = ((istart[0]) >> 2) & 3; + size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity); switch(lhlCode) { case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ @@ -183,27 +262,42 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, break; case 3: lhSize = 3; + RETURN_ERROR_IF(srcSize<3, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize = 3"); litSize = MEM_readLE24(istart) >> 4; break; } + RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); + RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, ""); + RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, ""); + ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1); if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ - RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected); - memcpy(dctx->litBuffer, istart+lhSize, litSize); + RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, ""); + if (dctx->litBufferLocation == ZSTD_split) + { + ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize - ZSTD_LITBUFFEREXTRASIZE); + ZSTD_memcpy(dctx->litExtraBuffer, istart + lhSize + litSize - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE); + } + else + { + ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize); + } dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; - memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return lhSize+litSize; } /* direct reference into compressed stream */ dctx->litPtr = istart+lhSize; dctx->litSize = litSize; + dctx->litBufferEnd = dctx->litPtr + litSize; + dctx->litBufferLocation = ZSTD_not_in_dst; return lhSize+litSize; } case set_rle: { U32 const lhlCode = ((istart[0]) >> 2) & 3; size_t litSize, lhSize; + size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity); switch(lhlCode) { case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ @@ -212,16 +306,28 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, break; case 1: lhSize = 2; + RETURN_ERROR_IF(srcSize<3, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize+1 = 3"); litSize = MEM_readLE16(istart) >> 4; break; case 3: lhSize = 3; + RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize+1 = 4"); litSize = MEM_readLE24(istart) >> 4; - RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4"); break; } - RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected); - memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH); + RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); + RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, ""); + RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, ""); + ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1); + if (dctx->litBufferLocation == ZSTD_split) + { + ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize - ZSTD_LITBUFFEREXTRASIZE); + ZSTD_memset(dctx->litExtraBuffer, istart[lhSize], ZSTD_LITBUFFEREXTRASIZE); + } + else + { + ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize); + } dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; return lhSize+1; @@ -232,14 +338,26 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, } } +/* Hidden declaration for fullbench */ +size_t 
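Illustration: the set_basic and set_rle cases share the three raw-literals header layouts dispatched on lhlCode above. The 2- and 3-byte forms mirror the MEM_readLE16()/MEM_readLE24() reads shown; the 1-byte form (5-bit size) is stated per the format specification, since that arm of the switch falls outside this hunk:

#include <stddef.h>
#include <stdint.h>

/* returns lhSize (1, 2 or 3) and writes the regenerated literals size */
static size_t readRawLitHeader(const uint8_t* istart, size_t* litSize)
{
    switch ((istart[0] >> 2) & 3) {
    case 1:
        *litSize = (size_t)((istart[0] | ((uint32_t)istart[1] << 8)) >> 4);   /* 12 bits */
        return 2;
    case 3:
        *litSize = (size_t)((istart[0] | ((uint32_t)istart[1] << 8)
                                       | ((uint32_t)istart[2] << 16)) >> 4);  /* 20 bits */
        return 3;
    default:   /* lhlCode 0 or 2 : single-byte header */
        *litSize = istart[0] >> 3;                                            /* 5 bits */
        return 1;
    }
}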
ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx, + const void* src, size_t srcSize, + void* dst, size_t dstCapacity); +size_t ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx, + const void* src, size_t srcSize, + void* dst, size_t dstCapacity) +{ + dctx->isFrameDecompression = 0; + return ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, not_streaming); +} + /* Default FSE distribution tables. * These are pre-calculated FSE decoding tables using default distributions as defined in specification : - * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions + * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions * They were generated programmatically with following method : * - start from default distributions, present in /lib/common/zstd_internal.h * - generate tables normally, using ZSTD_buildFSETable() * - printout the content of tables - * - pretify output, report below, test with fuzzer to ensure it's correct */ + * - prettify output, report below, test with fuzzer to ensure it's correct */ /* Default FSE distribution table for Literal Lengths */ static const ZSTD_seqSymbol LL_defaultDTable[(1<nbBits = 0; cell->nextState = 0; assert(nbAddBits < 255); - cell->nbAdditionalBits = (BYTE)nbAddBits; + cell->nbAdditionalBits = nbAddBits; cell->baseValue = baseValue; } @@ -362,23 +480,26 @@ static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddB * generate FSE decoding table for one symbol (ll, ml or off) * cannot fail if input is valid => * all inputs are presumed validated at this stage */ -void -ZSTD_buildFSETable(ZSTD_seqSymbol* dt, +FORCE_INLINE_TEMPLATE +void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt, const short* normalizedCounter, unsigned maxSymbolValue, - const U32* baseValue, const U32* nbAdditionalBits, - unsigned tableLog) + const U32* baseValue, const U8* nbAdditionalBits, + unsigned tableLog, void* wksp, size_t wkspSize) { ZSTD_seqSymbol* const tableDecode = dt+1; - U16 symbolNext[MaxSeq+1]; - U32 const maxSV1 = maxSymbolValue + 1; U32 const tableSize = 1 << tableLog; - U32 highThreshold = tableSize-1; + + U16* symbolNext = (U16*)wksp; + BYTE* spread = (BYTE*)(symbolNext + MaxSeq + 1); + U32 highThreshold = tableSize - 1; + /* Sanity Checks */ assert(maxSymbolValue <= MaxSeq); assert(tableLog <= MaxFSELog); - + assert(wkspSize >= ZSTD_BUILD_FSE_TABLE_WKSP_SIZE); + (void)wkspSize; /* Init, lay down lowprob symbols */ { ZSTD_seqSymbol_header DTableH; DTableH.tableLog = tableLog; @@ -391,36 +512,131 @@ ZSTD_buildFSETable(ZSTD_seqSymbol* dt, symbolNext[s] = 1; } else { if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0; - symbolNext[s] = normalizedCounter[s]; + assert(normalizedCounter[s]>=0); + symbolNext[s] = (U16)normalizedCounter[s]; } } } - memcpy(dt, &DTableH, sizeof(DTableH)); + ZSTD_memcpy(dt, &DTableH, sizeof(DTableH)); } /* Spread symbols */ - { U32 const tableMask = tableSize-1; + assert(tableSize <= 512); + /* Specialized symbol spreading for the case when there are + * no low probability (-1 count) symbols. When compressing + * small blocks we avoid low probability symbols to hit this + * case, since header decoding speed matters more. + */ + if (highThreshold == tableSize - 1) { + size_t const tableMask = tableSize-1; + size_t const step = FSE_TABLESTEP(tableSize); + /* First lay down the symbols in order. + * We use a uint64_t to lay down 8 bytes at a time. 
This reduces branch + * misses since small blocks generally have small table logs, so nearly + * all symbols have counts <= 8. We ensure we have 8 bytes at the end of + * our buffer to handle the over-write. + */ + { + U64 const add = 0x0101010101010101ull; + size_t pos = 0; + U64 sv = 0; + U32 s; + for (s=0; s<maxSV1; ++s, sv += add) { + int i; + int const n = normalizedCounter[s]; + MEM_write64(spread + pos, sv); + for (i = 8; i < n; i += 8) { + MEM_write64(spread + pos + i, sv); + } + assert(n>=0); + pos += (size_t)n; + } + } + /* Now we spread those positions across the table. + * The benefit of doing it in two stages is that we avoid the + * variable size inner loop, which caused lots of branch misses. + * Now we can run through all the positions without any branch misses. + * We unroll the loop twice, since that is what empirically worked best. + */ + { + size_t position = 0; + size_t s; + size_t const unroll = 2; + assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */ + for (s = 0; s < (size_t)tableSize; s += unroll) { + size_t u; + for (u = 0; u < unroll; ++u) { + size_t const uPosition = (position + (u * step)) & tableMask; + tableDecode[uPosition].baseValue = spread[s + u]; + } + position = (position + (unroll * step)) & tableMask; + } + assert(position == 0); + } + } else { + U32 const tableMask = tableSize-1; U32 const step = FSE_TABLESTEP(tableSize); U32 s, position = 0; for (s=0; s<maxSV1; s++) { int i; for (i=0; i<normalizedCounter[s]; i++) { tableDecode[position].baseValue = s; position = (position + step) & tableMask; - while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */ + while (UNLIKELY(position > highThreshold)) position = (position + step) & tableMask; /* lowprob area */ } } assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ } /* Build Decoding table */ - { U32 u; + { + U32 u; for (u=0; u max, corruption_detected); + RETURN_ERROR_IF(!srcSize, srcSize_wrong, ""); + RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected, ""); { U32 const symbol = *(const BYTE*)src; U32 const baseline = baseValue[symbol]; - U32 const nbBits = nbAdditionalBits[symbol]; + U8 const nbBits = nbAdditionalBits[symbol]; ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits); } *DTablePtr = DTableSpace; @@ -450,7 +667,7 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb *DTablePtr = defaultTable; return 0; case set_repeat: - RETURN_ERROR_IF(!flagRepeatTable, corruption_detected); + RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, ""); /* prefetch FSE table if used */ if (ddictIsCold && (nbSeq > 24 /* heuristic */)) { const void* const pStart = *DTablePtr; @@ -462,9 +679,9 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb { unsigned tableLog; S16 norm[MaxSeq+1]; size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); - RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected); - RETURN_ERROR_IF(tableLog > maxLog, corruption_detected); - ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog); + RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, ""); + RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, ""); + ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize, bmi2); *DTablePtr = DTableSpace; return headerSize; } @@ -477,73 +694,86 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, const void* src, size_t srcSize) { - const BYTE* const istart = (const BYTE* const)src; + const BYTE* const istart = (const BYTE*)src; const BYTE* const iend = istart + srcSize; const BYTE* ip = istart; int nbSeq; DEBUGLOG(5, "ZSTD_decodeSeqHeaders"); /* check */ -
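Illustration: the first stage of ZSTD_buildFSETable_body() above relies on 0x0101010101010101 acting as an all-lanes increment: after s additions, every byte of sv equals s, so a single 64-bit store lays down eight copies of the symbol. A runnable demonstration of that property (memcpy stands in for MEM_write64()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint64_t const add = 0x0101010101010101ull;
    uint64_t sv = 0;
    unsigned s;
    for (s = 0; s < 4; ++s, sv += add) {
        unsigned char eight[8];
        memcpy(eight, &sv, sizeof sv);   /* one 64-bit store, like MEM_write64() */
        printf("symbol %u -> lanes %u .. %u\n", s, eight[0], eight[7]);
    }
    return 0;   /* prints 0..0, 1..1, 2..2, 3..3 : all 8 lanes hold the symbol */
}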
RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong); + RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, ""); /* SeqHead */ nbSeq = *ip++; - if (!nbSeq) { - *nbSeqPtr=0; - RETURN_ERROR_IF(srcSize != 1, srcSize_wrong); - return 1; - } if (nbSeq > 0x7F) { if (nbSeq == 0xFF) { - RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong); - nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2; + RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, ""); + nbSeq = MEM_readLE16(ip) + LONGNBSEQ; + ip+=2; } else { - RETURN_ERROR_IF(ip >= iend, srcSize_wrong); + RETURN_ERROR_IF(ip >= iend, srcSize_wrong, ""); nbSeq = ((nbSeq-0x80)<<8) + *ip++; } } *nbSeqPtr = nbSeq; + if (nbSeq == 0) { + /* No sequence : section ends immediately */ + RETURN_ERROR_IF(ip != iend, corruption_detected, + "extraneous data present in the Sequences section"); + return (size_t)(ip - istart); + } + /* FSE table descriptors */ - RETURN_ERROR_IF(ip+4 > iend, srcSize_wrong); /* minimum possible size */ - { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); - symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); - symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); + RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */ + RETURN_ERROR_IF(*ip & 3, corruption_detected, ""); /* The last field, Reserved, must be all-zeroes. */ + { SymbolEncodingType_e const LLtype = (SymbolEncodingType_e)(*ip >> 6); + SymbolEncodingType_e const OFtype = (SymbolEncodingType_e)((*ip >> 4) & 3); + SymbolEncodingType_e const MLtype = (SymbolEncodingType_e)((*ip >> 2) & 3); ip++; /* Build DTables */ + assert(ip <= iend); { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, LLtype, MaxLL, LLFSELog, - ip, iend-ip, + ip, (size_t)(iend-ip), LL_base, LL_bits, LL_defaultDTable, dctx->fseEntropy, - dctx->ddictIsCold, nbSeq); - RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected); + dctx->ddictIsCold, nbSeq, + dctx->workspace, sizeof(dctx->workspace), + ZSTD_DCtx_get_bmi2(dctx)); + RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed"); ip += llhSize; } + assert(ip <= iend); { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, OFtype, MaxOff, OffFSELog, - ip, iend-ip, + ip, (size_t)(iend-ip), OF_base, OF_bits, OF_defaultDTable, dctx->fseEntropy, - dctx->ddictIsCold, nbSeq); - RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected); + dctx->ddictIsCold, nbSeq, + dctx->workspace, sizeof(dctx->workspace), + ZSTD_DCtx_get_bmi2(dctx)); + RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed"); ip += ofhSize; } + assert(ip <= iend); { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, MLtype, MaxML, MLFSELog, - ip, iend-ip, + ip, (size_t)(iend-ip), ML_base, ML_bits, ML_defaultDTable, dctx->fseEntropy, - dctx->ddictIsCold, nbSeq); - RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected); + dctx->ddictIsCold, nbSeq, + dctx->workspace, sizeof(dctx->workspace), + ZSTD_DCtx_get_bmi2(dctx)); + RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed"); ip += mlhSize; } } - return ip-istart; + return (size_t)(ip-istart); } @@ -551,7 +781,6 @@ typedef struct { size_t litLength; size_t matchLength; size_t offset; - const BYTE* match; } seq_t; typedef struct { @@ -565,218 +794,407 @@ typedef struct { ZSTD_fseState stateOffb; ZSTD_fseState stateML; size_t 
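Illustration: the Number_of_Sequences field decoded above is a 1-3 byte varint: one byte for values up to 127, two bytes when the first byte is 0x80-0xFE, and three bytes (a little-endian 16-bit value plus LONGNBSEQ) when it is 0xFF. A standalone sketch; 0x7F00 is LONGNBSEQ's value in zstd_internal.h, stated here as an assumption:

#include <stdint.h>

/* returns 0 on success, -1 on truncated input */
static int readNbSeq(const uint8_t** ipPtr, const uint8_t* iend, int* nbSeqPtr)
{
    const uint8_t* ip = *ipPtr;
    int nbSeq;
    if (ip >= iend) return -1;
    nbSeq = *ip++;
    if (nbSeq > 0x7F) {
        if (nbSeq == 0xFF) {             /* 3-byte form */
            if (ip + 2 > iend) return -1;
            nbSeq = (ip[0] | (ip[1] << 8)) + 0x7F00;   /* LONGNBSEQ */
            ip += 2;
        } else {                         /* 2-byte form */
            if (ip >= iend) return -1;
            nbSeq = ((nbSeq - 0x80) << 8) + *ip++;
        }
    }
    *nbSeqPtr = nbSeq;
    *ipPtr = ip;
    return 0;
}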
prevOffset[ZSTD_REP_NUM]; - const BYTE* prefixStart; - const BYTE* dictEnd; - size_t pos; } seqState_t; +/*! ZSTD_overlapCopy8() : + * Copies 8 bytes from ip to op and updates op and ip where ip <= op. + * If the offset is < 8 then the offset is spread to at least 8 bytes. + * + * Precondition: *ip <= *op + * Postcondition: *op - *ip >= 8 + */ +HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) +{ + assert(*ip <= *op); + if (offset < 8) { + /* close range match, overlap */ + static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ + static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ + int const sub2 = dec64table[offset]; + (*op)[0] = (*ip)[0]; + (*op)[1] = (*ip)[1]; + (*op)[2] = (*ip)[2]; + (*op)[3] = (*ip)[3]; + *ip += dec32table[offset]; + ZSTD_copy4(*op+4, *ip); + *ip -= sub2; + } else { + ZSTD_copy8(*op, *ip); + } + *ip += 8; + *op += 8; + assert(*op - *ip >= 8); +} + +/*! ZSTD_safecopy() : + * Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer + * and write up to 16 bytes past oend_w (op >= oend_w is allowed). + * This function is only called in the uncommon case where the sequence is near the end of the block. It + * should be fast for a single long sequence, but can be slow for several short sequences. + * + * @param ovtype controls the overlap detection + * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart. + * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart. + * The src buffer must be before the dst buffer. + */ +static void +ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, size_t length, ZSTD_overlap_e ovtype) +{ + ptrdiff_t const diff = op - ip; + BYTE* const oend = op + length; + + assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) || + (ovtype == ZSTD_overlap_src_before_dst && diff >= 0)); + + if (length < 8) { + /* Handle short lengths. */ + while (op < oend) *op++ = *ip++; + return; + } + if (ovtype == ZSTD_overlap_src_before_dst) { + /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */ + assert(length >= 8); + assert(diff > 0); + ZSTD_overlapCopy8(&op, &ip, (size_t)diff); + length -= 8; + assert(op - ip >= 8); + assert(op <= oend); + } + + if (oend <= oend_w) { + /* No risk of overwrite. */ + ZSTD_wildcopy(op, ip, length, ovtype); + return; + } + if (op <= oend_w) { + /* Wildcopy until we get close to the end. */ + assert(oend > oend_w); + ZSTD_wildcopy(op, ip, (size_t)(oend_w - op), ovtype); + ip += oend_w - op; + op += oend_w - op; + } + /* Handle the leftovers. */ + while (op < oend) *op++ = *ip++; +} + +/* ZSTD_safecopyDstBeforeSrc(): + * This version allows overlap with dst before src, or handles the non-overlap case with dst after src + * Kept separate from more common ZSTD_safecopy case to avoid performance impact to the safecopy common case */ +static void ZSTD_safecopyDstBeforeSrc(BYTE* op, const BYTE* ip, size_t length) +{ + ptrdiff_t const diff = op - ip; + BYTE* const oend = op + length; + + if (length < 8 || diff > -8) { + /* Handle short lengths, close overlaps, and dst not before src.
*/ + while (op < oend) *op++ = *ip++; + return; + } + + if (op <= oend - WILDCOPY_OVERLENGTH && diff < -WILDCOPY_VECLEN) { + ZSTD_wildcopy(op, ip, (size_t)(oend - WILDCOPY_OVERLENGTH - op), ZSTD_no_overlap); + ip += oend - WILDCOPY_OVERLENGTH - op; + op += oend - WILDCOPY_OVERLENGTH - op; + } + + /* Handle the leftovers. */ + while (op < oend) *op++ = *ip++; +} -/* ZSTD_execSequenceLast7(): - * exceptional case : decompress a match starting within last 7 bytes of output buffer. - * requires more careful checks, to ensure there is no overflow. - * performance does not matter though. - * note : this case is supposed to be never generated "naturally" by reference encoder, - * since in most cases it needs at least 8 bytes to look for a match. - * but it's allowed by the specification. */ +/* ZSTD_execSequenceEnd(): + * This version handles cases that are near the end of the output buffer. It requires + * more careful checks to make sure there is no overflow. By separating out these hard + * and unlikely cases, we can speed up the common cases. + * + * NOTE: This function needs to be fast for a single long sequence, but doesn't need + * to be optimized for many small sequences, since those fall into ZSTD_execSequence(). + */ FORCE_NOINLINE -size_t ZSTD_execSequenceLast7(BYTE* op, - BYTE* const oend, seq_t sequence, - const BYTE** litPtr, const BYTE* const litLimit, - const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_execSequenceEnd(BYTE* op, + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) { BYTE* const oLitEnd = op + sequence.litLength; size_t const sequenceLength = sequence.litLength + sequence.matchLength; - BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ const BYTE* const iLitEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; + BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; - /* check */ - RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must fit within dstBuffer"); - RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "try to read beyond literal buffer"); + /* bounds checks : careful of address space overflow in 32-bit mode */ + RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer"); + RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer"); + assert(op < op + sequenceLength); + assert(oLitEnd < op + sequenceLength); /* copy literals */ - while (op < oLitEnd) *op++ = *(*litPtr)++; + ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap); + op = oLitEnd; + *litPtr = iLitEnd; /* copy Match */ - if (sequence.offset > (size_t)(oLitEnd - base)) { + if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { /* offset beyond prefix */ - RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - vBase),corruption_detected); - match = dictEnd - (base-match); + RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, ""); + match = dictEnd - (prefixStart - match); if (match + sequence.matchLength <= dictEnd) { - memmove(oLitEnd, match, sequence.matchLength); + ZSTD_memmove(oLitEnd, match, sequence.matchLength); return sequenceLength; } /* span extDict & currentPrefixSegment */ - { size_t const length1 = dictEnd - match; - memmove(oLitEnd, 
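Illustration: ZSTD_overlapCopy8() above turns a close-range (offset < 8) overlapping copy into an offset >= 8 copy via 4+4 byte moves with table-driven pointer nudges. A runnable trace for offset == 1, the RLE-like worst case:

#include <stdio.h>
#include <string.h>

int main(void)
{
    static const unsigned dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
    static const int      dec64table[] = { 8, 8, 8, 7, 8, 9, 10, 11 }; /* subtracted */
    unsigned char buf[32] = {0};
    const unsigned char* ip = buf + 8;   /* match source */
    unsigned char* op = buf + 9;         /* destination : offset == 1 */
    size_t const offset = 1;

    buf[8] = 'X';
    op[0] = ip[0]; op[1] = ip[1]; op[2] = ip[2]; op[3] = ip[3];
    ip += dec32table[offset];
    memcpy(op + 4, ip, 4);               /* like ZSTD_copy4() */
    ip -= dec64table[offset];
    ip += 8; op += 8;

    /* prints "XXXXXXXXX | op-ip = 8" : the byte was spread, offset is now 8 */
    printf("%.9s | op-ip = %d\n", (const char*)(buf + 8), (int)(op - ip));
    return 0;
}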
match, length1); + { size_t const length1 = (size_t)(dictEnd - match); + ZSTD_memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; - match = base; - } } - while (op < oMatchEnd) *op++ = *match++; + match = prefixStart; + } + } + ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst); return sequenceLength; } +/* ZSTD_execSequenceEndSplitLitBuffer(): + * This version is intended to be used during instances where the litBuffer is still split. It is kept separate to avoid performance impact for the good case. + */ +FORCE_NOINLINE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op, + BYTE* const oend, const BYTE* const oend_w, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) +{ + BYTE* const oLitEnd = op + sequence.litLength; + size_t const sequenceLength = sequence.litLength + sequence.matchLength; + const BYTE* const iLitEnd = *litPtr + sequence.litLength; + const BYTE* match = oLitEnd - sequence.offset; + + + /* bounds checks : careful of address space overflow in 32-bit mode */ + RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer"); + RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer"); + assert(op < op + sequenceLength); + assert(oLitEnd < op + sequenceLength); + + /* copy literals */ + RETURN_ERROR_IF(op > *litPtr && op < *litPtr + sequence.litLength, dstSize_tooSmall, "output should not catch up to and overwrite literal buffer"); + ZSTD_safecopyDstBeforeSrc(op, *litPtr, sequence.litLength); + op = oLitEnd; + *litPtr = iLitEnd; + + /* copy Match */ + if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { + /* offset beyond prefix */ + RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, ""); + match = dictEnd - (prefixStart - match); + if (match + sequence.matchLength <= dictEnd) { + ZSTD_memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + /* span extDict & currentPrefixSegment */ + { size_t const length1 = (size_t)(dictEnd - match); + ZSTD_memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; + } + } + ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst); + return sequenceLength; +} HINT_INLINE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_execSequence(BYTE* op, - BYTE* const oend, seq_t sequence, - const BYTE** litPtr, const BYTE* const litLimit, - const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) { BYTE* const oLitEnd = op + sequence.litLength; size_t const sequenceLength = sequence.litLength + sequence.matchLength; BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ - BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; + BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; /* risk : address space underflow on oend=NULL */ const BYTE* const iLitEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; - /* check */ - RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must start at a minimum distance of 
WILDCOPY_OVERLENGTH from oend"); - RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer"); - if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); - - /* copy Literals */ - ZSTD_copy8(op, *litPtr); - if (sequence.litLength > 8) - ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ + assert(op != NULL /* Precondition */); + assert(oend_w < oend /* No underflow */); + +#if defined(__aarch64__) + /* prefetch sequence starting from match that will be used for copy later */ + PREFETCH_L1(match); +#endif + /* Handle edge cases in a slow path: + * - Read beyond end of literals + * - Match end is within WILDCOPY_OVERLENGTH of oend + * - 32-bit mode and the match length overflows + */ + if (UNLIKELY( + iLitEnd > litLimit || + oMatchEnd > oend_w || + (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH))) + return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); + + /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */ + assert(op <= oLitEnd /* No overflow */); + assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */); + assert(oMatchEnd <= oend /* No underflow */); + assert(iLitEnd <= litLimit /* Literal length is in bounds */); + assert(oLitEnd <= oend_w /* Can wildcopy literals */); + assert(oMatchEnd <= oend_w /* Can wildcopy matches */); + + /* Copy Literals: + * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9. + * We likely don't need the full 32-byte wildcopy. + */ + assert(WILDCOPY_OVERLENGTH >= 16); + ZSTD_copy16(op, (*litPtr)); + if (UNLIKELY(sequence.litLength > 16)) { + ZSTD_wildcopy(op + 16, (*litPtr) + 16, sequence.litLength - 16, ZSTD_no_overlap); + } op = oLitEnd; *litPtr = iLitEnd; /* update for next sequence */ - /* copy Match */ + /* Copy Match */ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { /* offset beyond prefix -> go into extDict */ - RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected); + RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, ""); match = dictEnd + (match - prefixStart); if (match + sequence.matchLength <= dictEnd) { - memmove(oLitEnd, match, sequence.matchLength); + ZSTD_memmove(oLitEnd, match, sequence.matchLength); return sequenceLength; } /* span extDict & currentPrefixSegment */ - { size_t const length1 = dictEnd - match; - memmove(oLitEnd, match, length1); + { size_t const length1 = (size_t)(dictEnd - match); + ZSTD_memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; match = prefixStart; - if (op > oend_w || sequence.matchLength < MINMATCH) { - U32 i; - for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i]; - return sequenceLength; - } - } } - /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */ - - /* match within prefix */ - if (sequence.offset < 8) { - /* close range match, overlap */ - static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ - static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ - int const sub2 = dec64table[sequence.offset]; - op[0] = match[0]; - op[1] = match[1]; - op[2] = match[2]; - op[3] = match[3]; - match += dec32table[sequence.offset]; - ZSTD_copy4(op+4, match); - match -= sub2; - } else { - ZSTD_copy8(op, match); + } + } + /*
Match within prefix of 1 or more bytes */ + assert(op <= oMatchEnd); + assert(oMatchEnd <= oend_w); + assert(match >= prefixStart); + assert(sequence.matchLength >= 1); + + /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy + * without overlap checking. + */ + if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) { + /* We bet on a full wildcopy for matches, since we expect matches to be + * longer than literals (in general). In silesia, ~10% of matches are longer + * than 16 bytes. + */ + ZSTD_wildcopy(op, match, sequence.matchLength, ZSTD_no_overlap); + return sequenceLength; } - op += 8; match += 8; + assert(sequence.offset < WILDCOPY_VECLEN); - if (oMatchEnd > oend-(16-MINMATCH)) { - if (op < oend_w) { - ZSTD_wildcopy(op, match, oend_w - op); - match += oend_w - op; - op = oend_w; - } - while (op < oMatchEnd) *op++ = *match++; - } else { - ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ + /* Copy 8 bytes and spread the offset to be >= 8. */ + ZSTD_overlapCopy8(&op, &match, sequence.offset); + + /* If the match length is > 8 bytes, then continue with the wildcopy. */ + if (sequence.matchLength > 8) { + assert(op < oMatchEnd); + ZSTD_wildcopy(op, match, sequence.matchLength - 8, ZSTD_overlap_src_before_dst); } return sequenceLength; } - HINT_INLINE -size_t ZSTD_execSequenceLong(BYTE* op, - BYTE* const oend, seq_t sequence, - const BYTE** litPtr, const BYTE* const litLimit, - const BYTE* const prefixStart, const BYTE* const dictStart, const BYTE* const dictEnd) +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op, + BYTE* const oend, const BYTE* const oend_w, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) { BYTE* const oLitEnd = op + sequence.litLength; size_t const sequenceLength = sequence.litLength + sequence.matchLength; BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ - BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; const BYTE* const iLitEnd = *litPtr + sequence.litLength; - const BYTE* match = sequence.match; + const BYTE* match = oLitEnd - sequence.offset; - /* check */ - RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend"); - RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer"); - if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd); - - /* copy Literals */ - ZSTD_copy8(op, *litPtr); /* note : op <= oLitEnd <= oend_w == oend - 8 */ - if (sequence.litLength > 8) - ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ + assert(op != NULL /* Precondition */); + assert(oend_w < oend /* No underflow */); + /* Handle edge cases in a slow path: + * - Read beyond end of literals + * - Match end is within WILDCOPY_OVERLENGTH of oend + * - 32-bit mode and the match length overflows + */ + if (UNLIKELY( + iLitEnd > litLimit || + oMatchEnd > oend_w || + (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH))) + return ZSTD_execSequenceEndSplitLitBuffer(op, oend, oend_w, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); + + /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */ + assert(op <= oLitEnd /* No overflow
*/); + assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */); + assert(oMatchEnd <= oend /* No underflow */); + assert(iLitEnd <= litLimit /* Literal length is in bounds */); + assert(oLitEnd <= oend_w /* Can wildcopy literals */); + assert(oMatchEnd <= oend_w /* Can wildcopy matches */); + + /* Copy Literals: + * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9. + * We likely don't need the full 32-byte wildcopy. + */ + assert(WILDCOPY_OVERLENGTH >= 16); + ZSTD_copy16(op, (*litPtr)); + if (UNLIKELY(sequence.litLength > 16)) { + ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap); + } op = oLitEnd; *litPtr = iLitEnd; /* update for next sequence */ - /* copy Match */ + /* Copy Match */ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { - /* offset beyond prefix */ - RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - dictStart), corruption_detected); + /* offset beyond prefix -> go into extDict */ + RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, ""); + match = dictEnd + (match - prefixStart); if (match + sequence.matchLength <= dictEnd) { - memmove(oLitEnd, match, sequence.matchLength); + ZSTD_memmove(oLitEnd, match, sequence.matchLength); return sequenceLength; } /* span extDict & currentPrefixSegment */ - { size_t const length1 = dictEnd - match; - memmove(oLitEnd, match, length1); + { size_t const length1 = (size_t)(dictEnd - match); + ZSTD_memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; match = prefixStart; - if (op > oend_w || sequence.matchLength < MINMATCH) { - U32 i; - for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i]; - return sequenceLength; - } } } - assert(op <= oend_w); - assert(sequence.matchLength >= MINMATCH); - - /* match within prefix */ - if (sequence.offset < 8) { - /* close range match, overlap */ - static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ - static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ - int const sub2 = dec64table[sequence.offset]; - op[0] = match[0]; - op[1] = match[1]; - op[2] = match[2]; - op[3] = match[3]; - match += dec32table[sequence.offset]; - ZSTD_copy4(op+4, match); - match -= sub2; - } else { - ZSTD_copy8(op, match); + /* Match within prefix of 1 or more bytes */ + assert(op <= oMatchEnd); + assert(oMatchEnd <= oend_w); + assert(match >= prefixStart); + assert(sequence.matchLength >= 1); + + /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy + * without overlap checking. + */ + if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) { + /* We bet on a full wildcopy for matches, since we expect matches to be + * longer than literals (in general). In silesia, ~10% of matches are longer + * than 16 bytes. + */ + ZSTD_wildcopy(op, match, sequence.matchLength, ZSTD_no_overlap); + return sequenceLength; } - op += 8; match += 8; + assert(sequence.offset < WILDCOPY_VECLEN); - if (oMatchEnd > oend-(16-MINMATCH)) { - if (op < oend_w) { - ZSTD_wildcopy(op, match, oend_w - op); - match += oend_w - op; - op = oend_w; - } - while (op < oMatchEnd) *op++ = *match++; - } else { - ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ + /* Copy 8 bytes and spread the offset to be >= 8. */ + ZSTD_overlapCopy8(&op, &match, sequence.offset); + + /* If the match length is > 8 bytes, then continue with the wildcopy. 
*/ + if (sequence.matchLength > 8) { + assert(op < oMatchEnd); + ZSTD_wildcopy(op, match, sequence.matchLength-8, ZSTD_overlap_src_before_dst); } return sequenceLength; } + static void ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt) { @@ -790,16 +1208,14 @@ ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqS } FORCE_INLINE_TEMPLATE void -ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD) +ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, U16 nextState, U32 nbBits) { - ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state]; - U32 const nbBits = DInfo.nbBits; size_t const lowBits = BIT_readBits(bitD, nbBits); - DStatePtr->state = DInfo.nextState + lowBits; + DStatePtr->state = nextState + lowBits; } /* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum - * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1) + * offset bits. But we can only read at most STREAM_ACCUMULATOR_MIN_32 * bits before reloading. This value is the maximum number of bytes we read * after reloading when we are decoding long offsets. */ @@ -810,144 +1226,567 @@ ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD) typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e; -#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG +/** + * ZSTD_decodeSequence(): + * @p longOffsets : tells the decoder to reload more bit while decoding large offsets + * only used in 32-bit mode + * @return : Sequence (litL + matchL + offset) + */ FORCE_INLINE_TEMPLATE seq_t -ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets) +ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const int isLastSeq) { seq_t seq; - U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits; - U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits; - U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits; - U32 const totalBits = llBits+mlBits+ofBits; - U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue; - U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue; - U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue; - - /* sequence */ - { size_t offset; - if (!ofBits) - offset = 0; - else { - ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); - ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); - assert(ofBits <= MaxOff); - if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) { - U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed); - offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); - BIT_reloadDStream(&seqState->DStream); - if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); - assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */ - } else { +#if defined(__aarch64__) + size_t prevOffset0 = seqState->prevOffset[0]; + size_t prevOffset1 = seqState->prevOffset[1]; + size_t prevOffset2 = seqState->prevOffset[2]; + /* + * ZSTD_seqSymbol is a 64 bits wide structure. + * It can be loaded in one operation + * and its fields extracted by simply shifting or bit-extracting on aarch64. + * GCC doesn't recognize this and generates more unnecessary ldr/ldrb/ldrh + * operations that cause performance drop. 
This can be avoided by using this + * ZSTD_memcpy hack. + */ +# if defined(__GNUC__) && !defined(__clang__) + ZSTD_seqSymbol llDInfoS, mlDInfoS, ofDInfoS; + ZSTD_seqSymbol* const llDInfo = &llDInfoS; + ZSTD_seqSymbol* const mlDInfo = &mlDInfoS; + ZSTD_seqSymbol* const ofDInfo = &ofDInfoS; + ZSTD_memcpy(llDInfo, seqState->stateLL.table + seqState->stateLL.state, sizeof(ZSTD_seqSymbol)); + ZSTD_memcpy(mlDInfo, seqState->stateML.table + seqState->stateML.state, sizeof(ZSTD_seqSymbol)); + ZSTD_memcpy(ofDInfo, seqState->stateOffb.table + seqState->stateOffb.state, sizeof(ZSTD_seqSymbol)); +# else + const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state; + const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state; + const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state; +# endif + (void)longOffsets; + seq.matchLength = mlDInfo->baseValue; + seq.litLength = llDInfo->baseValue; + { U32 const ofBase = ofDInfo->baseValue; + BYTE const llBits = llDInfo->nbAdditionalBits; + BYTE const mlBits = mlDInfo->nbAdditionalBits; + BYTE const ofBits = ofDInfo->nbAdditionalBits; + BYTE const totalBits = llBits+mlBits+ofBits; + + U16 const llNext = llDInfo->nextState; + U16 const mlNext = mlDInfo->nextState; + U16 const ofNext = ofDInfo->nextState; + U32 const llnbBits = llDInfo->nbBits; + U32 const mlnbBits = mlDInfo->nbBits; + U32 const ofnbBits = ofDInfo->nbBits; + + assert(llBits <= MaxLLBits); + assert(mlBits <= MaxMLBits); + assert(ofBits <= MaxOff); + /* As GCC has better branch and block analyzers, sometimes it is only + * valuable to mark likeliness for Clang. + */ + + /* sequence */ + { size_t offset; + if (ofBits > 1) { + ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); + ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); + ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 > LONG_OFFSETS_MAX_EXTRA_BITS_32); + ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 - LONG_OFFSETS_MAX_EXTRA_BITS_32 >= MaxMLBits); offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ - if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); - } + prevOffset2 = prevOffset1; + prevOffset1 = prevOffset0; + prevOffset0 = offset; + } else { + U32 const ll0 = (llDInfo->baseValue == 0); + if (LIKELY((ofBits == 0))) { + if (ll0) { + offset = prevOffset1; + prevOffset1 = prevOffset0; + prevOffset0 = offset; + } else { + offset = prevOffset0; + } + } else { + offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); + { size_t temp = (offset == 1) ? prevOffset1 + : (offset == 3) ? prevOffset0 - 1 + : (offset >= 2) ? prevOffset2 + : prevOffset0; + /* 0 is not valid: input corrupted => force offset to -1 => + * corruption detected at execSequence. + */ + temp -= !temp; + prevOffset2 = (offset == 1) ? prevOffset2 : prevOffset1; + prevOffset1 = prevOffset0; + prevOffset0 = offset = temp; + } } } + seq.offset = offset; } - if (ofBits <= 1) { - offset += (llBase==0); - if (offset) { - size_t temp = (offset==3) ? 
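Illustration: the prevOffset juggling around here implements the format's repeat-offset rule. Restated as a small function over rep[0..2] (the three most recent offsets), where offsetValue is the 1..3 repeat-offset value from the spec, before the ll0 adjustment; the real code additionally maps a resulting 0 to an invalid value (temp -= !temp) so corrupt input is caught later in execSequence:

#include <stddef.h>

static size_t resolveRepOffset(size_t rep[3], size_t offsetValue, int litLengthIsZero)
{
    size_t const idx = offsetValue - 1 + (size_t)litLengthIsZero;   /* 0..3 */
    size_t offset;
    if (idx == 0) return rep[0];                 /* most recent offset, history unchanged */
    offset = (idx == 3) ? rep[0] - 1 : rep[idx]; /* idx 3 means "rep[0] minus one" */
    if (idx != 1) rep[2] = rep[1];               /* idx 1 leaves rep[2] in place */
    rep[1] = rep[0];
    rep[0] = offset;
    return offset;
}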
seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; - temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ - if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; - seqState->prevOffset[1] = seqState->prevOffset[0]; - seqState->prevOffset[0] = offset = temp; - } else { /* offset == 0 */ - offset = seqState->prevOffset[0]; - } - } else { - seqState->prevOffset[2] = seqState->prevOffset[1]; - seqState->prevOffset[1] = seqState->prevOffset[0]; - seqState->prevOffset[0] = offset; + if (mlBits > 0) + seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/); + + if (UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) + BIT_reloadDStream(&seqState->DStream); + + /* Ensure there are enough bits to read the rest of data in 64-bit mode. */ + ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); + + if (llBits > 0) + seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/); + + DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u", + (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); + + if (!isLastSeq) { + /* Don't update FSE state for last sequence. */ + ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */ + ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */ + ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */ + BIT_reloadDStream(&seqState->DStream); } - seq.offset = offset; } + seqState->prevOffset[0] = prevOffset0; + seqState->prevOffset[1] = prevOffset1; + seqState->prevOffset[2] = prevOffset2; +#else /* !defined(__aarch64__) */ + const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state; + const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state; + const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state; + seq.matchLength = mlDInfo->baseValue; + seq.litLength = llDInfo->baseValue; + { U32 const ofBase = ofDInfo->baseValue; + BYTE const llBits = llDInfo->nbAdditionalBits; + BYTE const mlBits = mlDInfo->nbAdditionalBits; + BYTE const ofBits = ofDInfo->nbAdditionalBits; + BYTE const totalBits = llBits+mlBits+ofBits; + + U16 const llNext = llDInfo->nextState; + U16 const mlNext = mlDInfo->nextState; + U16 const ofNext = ofDInfo->nextState; + U32 const llnbBits = llDInfo->nbBits; + U32 const mlnbBits = mlDInfo->nbBits; + U32 const ofnbBits = ofDInfo->nbBits; + + assert(llBits <= MaxLLBits); + assert(mlBits <= MaxMLBits); + assert(ofBits <= MaxOff); + /* As GCC has better branch and block analyzers, sometimes it is only + * valuable to mark likeliness for Clang. + */ - seq.matchLength = mlBase - + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/) : 0); /* <= 16 bits */ - if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) - BIT_reloadDStream(&seqState->DStream); - if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) - BIT_reloadDStream(&seqState->DStream); - /* Ensure there are enough bits to read the rest of data in 64-bit mode. */ - ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); - - seq.litLength = llBase - + ((llBits>0) ? 
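Illustration: the 32-bit long-offset path in the new code just below splits an up-to-31-bit offset read around a reload, so the bit accumulator never runs dry; LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5 per the static asserts. A sketch with hypothetical readBits/reload callbacks standing in for BIT_readBitsFast()/BIT_reloadDStream():

#include <stddef.h>
#include <stdint.h>

typedef uint32_t (*ReadBitsFn)(void* bitD, unsigned nbBits);
typedef void (*ReloadFn)(void* bitD);

static size_t readLongOffset32(void* bitD, ReadBitsFn readBits, ReloadFn reload,
                               size_t ofBase, unsigned ofBits)
{
    unsigned const extraBits = 5;   /* LONG_OFFSETS_MAX_EXTRA_BITS_32 */
    /* read the high bits, refill the container, then read the low 5 bits */
    size_t offset = ofBase + ((size_t)readBits(bitD, ofBits - extraBits) << extraBits);
    reload(bitD);
    offset += readBits(bitD, extraBits);
    return offset;
}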
BIT_readBitsFast(&seqState->DStream, llBits/*>0*/) : 0); /* <= 16 bits */ - if (MEM_32bits()) - BIT_reloadDStream(&seqState->DStream); - - DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u", - (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); + /* sequence */ + { size_t offset; + if (ofBits > 1) { + ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); + ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); + ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 > LONG_OFFSETS_MAX_EXTRA_BITS_32); + ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 - LONG_OFFSETS_MAX_EXTRA_BITS_32 >= MaxMLBits); + if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) { + /* Always read extra bits, this keeps the logic simple, + * avoids branches, and avoids accidentally reading 0 bits. + */ + U32 const extraBits = LONG_OFFSETS_MAX_EXTRA_BITS_32; + offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); + BIT_reloadDStream(&seqState->DStream); + offset += BIT_readBitsFast(&seqState->DStream, extraBits); + } else { + offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); + } + seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset; + } else { + U32 const ll0 = (llDInfo->baseValue == 0); + if (LIKELY((ofBits == 0))) { + offset = seqState->prevOffset[ll0]; + seqState->prevOffset[1] = seqState->prevOffset[!ll0]; + seqState->prevOffset[0] = offset; + } else { + offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); + { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; + temp -= !temp; /* 0 is not valid: input corrupted => force offset to -1 => corruption detected at execSequence */ + if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset = temp; + } } } + seq.offset = offset; + } - /* ANS state update */ - ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ - ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ - if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ - ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ + if (mlBits > 0) + seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/); + + if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) + BIT_reloadDStream(&seqState->DStream); + if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) + BIT_reloadDStream(&seqState->DStream); + /* Ensure there are enough bits to read the rest of data in 64-bit mode. */ + ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); + + if (llBits > 0) + seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/); + + if (MEM_32bits()) + BIT_reloadDStream(&seqState->DStream); + + DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u", + (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); + + if (!isLastSeq) { + /* Don't update FSE state for last sequence. 
*/ + ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */ + ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ + ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */ + BIT_reloadDStream(&seqState->DStream); + } + } +#endif /* defined(__aarch64__) */ return seq; } +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) +#if DEBUGLEVEL >= 1 +static int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd) +{ + size_t const windowSize = dctx->fParams.windowSize; + /* No dictionary used. */ + if (dctx->dictContentEndForFuzzing == NULL) return 0; + /* Dictionary is our prefix. */ + if (prefixStart == dctx->dictContentBeginForFuzzing) return 1; + /* Dictionary is not our ext-dict. */ + if (dctx->dictEnd != dctx->dictContentEndForFuzzing) return 0; + /* Dictionary is not within our window size. */ + if ((size_t)(oLitEnd - prefixStart) >= windowSize) return 0; + /* Dictionary is active. */ + return 1; +} +#endif + +static void ZSTD_assertValidSequence( + ZSTD_DCtx const* dctx, + BYTE const* op, BYTE const* oend, + seq_t const seq, + BYTE const* prefixStart, BYTE const* virtualStart) +{ +#if DEBUGLEVEL >= 1 + if (dctx->isFrameDecompression) { + size_t const windowSize = dctx->fParams.windowSize; + size_t const sequenceSize = seq.litLength + seq.matchLength; + BYTE const* const oLitEnd = op + seq.litLength; + DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u", + (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); + assert(op <= oend); + assert((size_t)(oend - op) >= sequenceSize); + assert(sequenceSize <= ZSTD_blockSizeMax(dctx)); + if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) { + size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing); + /* Offset must be within the dictionary. */ + assert(seq.offset <= (size_t)(oLitEnd - virtualStart)); + assert(seq.offset <= windowSize + dictSize); + } else { + /* Offset must be within our window. 
*/ + assert(seq.offset <= windowSize); + } + } +#else + (void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart; +#endif +} +#endif + +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG + + FORCE_INLINE_TEMPLATE size_t -ZSTD_decompressSequences_body( ZSTD_DCtx* dctx, +DONT_VECTORIZE +ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset) { - const BYTE* ip = (const BYTE*)seqStart; - const BYTE* const iend = ip + seqSize; - BYTE* const ostart = (BYTE* const)dst; - BYTE* const oend = ostart + maxDstSize; + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(ostart, (ptrdiff_t)maxDstSize); BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; - const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* litBufferEnd = dctx->litBufferEnd; const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart); const BYTE* const vBase = (const BYTE*) (dctx->virtualStart); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); - DEBUGLOG(5, "ZSTD_decompressSequences_body"); + DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer (%i seqs)", nbSeq); - /* Regen sequences */ + /* Literals are split between internal buffer & output buffer */ if (nbSeq) { seqState_t seqState; dctx->fseEntropy = 1; { U32 i; for (i=0; ientropy.rep[i]; } RETURN_ERROR_IF( - ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)), - corruption_detected); + ERR_isError(BIT_initDStream(&seqState.DStream, seqStart, seqSize)), + corruption_detected, ""); ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); + assert(dst != NULL); ZSTD_STATIC_ASSERT( BIT_DStream_unfinished < BIT_DStream_completed && BIT_DStream_endOfBuffer < BIT_DStream_completed && BIT_DStream_completed < BIT_DStream_overflow); - for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) { - nbSeq--; - { seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset); - size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd); + /* decompress without overrunning litPtr begins */ + { seq_t sequence = {0,0,0}; /* some static analyzer believe that @sequence is not initialized (it necessarily is, since for(;;) loop as at least one iteration) */ + /* Align the decompression loop to 32 + 16 bytes. + * + * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression + * speed swings based on the alignment of the decompression loop. This + * performance swing is caused by parts of the decompression loop falling + * out of the DSB. The entire decompression loop should fit in the DSB, + * when it can't we get much worse performance. You can measure if you've + * hit the good case or the bad case with this perf command for some + * compressed file test.zst: + * + * perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \ + * -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst + * + * If you see most cycles served out of the MITE you've hit the bad case. + * If you see most cycles served out of the DSB you've hit the good case. + * If it is pretty even then you may be in an okay case. 
+ * + * This issue has been reproduced on the following CPUs: + * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9 + * Use Instruments->Counters to get DSB/MITE cycles. + * I never got performance swings, but I was able to + * go from the good case of mostly DSB to half of the + * cycles served from MITE. + * - Coffeelake: Intel i9-9900k + * - Coffeelake: Intel i7-9700k + * + * I haven't been able to reproduce the instability or DSB misses on any + * of the following CPUs: + * - Haswell + * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz + * - Skylake + * + * Alignment is done for each of the three major decompression loops: + * - ZSTD_decompressSequences_bodySplitLitBuffer - presplit section of the literal buffer + * - ZSTD_decompressSequences_bodySplitLitBuffer - postsplit section of the literal buffer + * - ZSTD_decompressSequences_body + * Alignment choices are made to minimize large swings on bad cases and influence on performance + * from changes external to this code, rather than to overoptimize on the current commit. + * + * If you are seeing performance instability, this script can help test. + * It tests on 4 commits in zstd where I saw performance change. + * + * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4 + */ +#if defined(__GNUC__) && defined(__x86_64__) + __asm__(".p2align 6"); +# if __GNUC__ >= 7 + /* good for gcc-7, gcc-9, and gcc-11 */ + __asm__("nop"); + __asm__(".p2align 5"); + __asm__("nop"); + __asm__(".p2align 4"); +# if __GNUC__ == 8 || __GNUC__ == 10 + /* good for gcc-8 and gcc-10 */ + __asm__("nop"); + __asm__(".p2align 3"); +# endif +# endif +#endif + + /* Handle the initial state where litBuffer is currently split between dst and litExtraBuffer */ + for ( ; nbSeq; nbSeq--) { + sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1); + if (litPtr + sequence.litLength > dctx->litBufferEnd) break; + { size_t const oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - WILDCOPY_OVERLENGTH, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); +#endif + if (UNLIKELY(ZSTD_isError(oneSeqSize))) + return oneSeqSize; + DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); + op += oneSeqSize; + } } + DEBUGLOG(6, "reached: (litPtr + sequence.litLength > dctx->litBufferEnd)"); + + /* If there are more sequences, they will need to read literals from litExtraBuffer; copy over the remainder from dst and update litPtr and litEnd */ + if (nbSeq > 0) { + const size_t leftoverLit = (size_t)(dctx->litBufferEnd - litPtr); + assert(dctx->litBufferEnd >= litPtr); + DEBUGLOG(6, "There are %i sequences left, and %zu/%zu literals left in buffer", nbSeq, leftoverLit, sequence.litLength); + if (leftoverLit) { + RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); + ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); + sequence.litLength -= leftoverLit; + op += leftoverLit; + } + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; + dctx->litBufferLocation = ZSTD_not_in_dst; + { size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) &&
defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); +#endif + if (UNLIKELY(ZSTD_isError(oneSeqSize))) + return oneSeqSize; + DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); + op += oneSeqSize; + } + nbSeq--; + } + } + + if (nbSeq > 0) { + /* there is remaining lit from extra buffer */ + +#if defined(__GNUC__) && defined(__x86_64__) + __asm__(".p2align 6"); + __asm__("nop"); +# if __GNUC__ != 7 + /* worse for gcc-7 better for gcc-8, gcc-9, and gcc-10 and clang */ + __asm__(".p2align 4"); + __asm__("nop"); + __asm__(".p2align 3"); +# elif __GNUC__ >= 11 + __asm__(".p2align 3"); +# else + __asm__(".p2align 5"); + __asm__("nop"); + __asm__(".p2align 3"); +# endif +#endif + + for ( ; nbSeq ; nbSeq--) { + seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1); + size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); +#endif + if (UNLIKELY(ZSTD_isError(oneSeqSize))) + return oneSeqSize; DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); - if (ZSTD_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; - } } + } + } /* check if reached exact end */ - DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq); - RETURN_ERROR_IF(nbSeq, corruption_detected); - RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected); + DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer: after decode loop, remaining nbSeq : %i", nbSeq); + RETURN_ERROR_IF(nbSeq, corruption_detected, ""); + DEBUGLOG(5, "bitStream : start=%p, ptr=%p, bitsConsumed=%u", seqState.DStream.start, seqState.DStream.ptr, seqState.DStream.bitsConsumed); + RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, ""); /* save reps for next block */ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } } /* last literal segment */ - { size_t const lastLLSize = litEnd - litPtr; - RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall); - memcpy(op, litPtr, lastLLSize); - op += lastLLSize; + if (dctx->litBufferLocation == ZSTD_split) { + /* split hasn't been reached yet, first get dst then copy litExtraBuffer */ + size_t const lastLLSize = (size_t)(litBufferEnd - litPtr); + DEBUGLOG(6, "copy last literals from segment : %u", (U32)lastLLSize); + RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, ""); + if (op != NULL) { + ZSTD_memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; + dctx->litBufferLocation = ZSTD_not_in_dst; } + /* copy last literals from internal buffer */ + { size_t const lastLLSize = (size_t)(litBufferEnd - litPtr); + DEBUGLOG(6, "copy last literals from internal buffer : %u", (U32)lastLLSize); + RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); + if (op != NULL) { + ZSTD_memcpy(op, litPtr, lastLLSize); + op += lastLLSize; + } } - return op-ostart; + DEBUGLOG(6, "decoded block of size %u bytes", (U32)(op - ostart)); + return (size_t)(op - ostart); +} + +FORCE_INLINE_TEMPLATE size_t +DONT_VECTORIZE +ZSTD_decompressSequences_body(ZSTD_DCtx* dctx, + void* dst, 
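Aside: the last-literal handling a few lines up copies with ZSTD_memmove while the literals still live inside dst, but with ZSTD_memcpy once they come from the separate scratch buffer. A hedged sketch of why, with illustrative names that are not from the patch:

```c
#include <string.h>

/* Copy trailing literals to the write pointer op. When the literal buffer
 * was carved out of dst itself (the "split" case), source and destination
 * can overlap, so only memmove is safe; the external scratch buffer never
 * aliases dst, so memcpy suffices there. */
static void copy_last_literals(char* op, const char* litPtr, size_t n, int litInDst)
{
    if (litInDst)
        memmove(op, litPtr, n);   /* regions may overlap inside dst */
    else
        memcpy(op, litPtr, n);    /* disjoint scratch buffer: plain copy */
}
```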
size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset) +{ + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = (dctx->litBufferLocation == ZSTD_not_in_dst) ? + (BYTE*)ZSTD_maybeNullPtrAdd(ostart, (ptrdiff_t)maxDstSize) : + dctx->litBuffer; + BYTE* op = ostart; + const BYTE* litPtr = dctx->litPtr; + const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* const prefixStart = (const BYTE*)(dctx->prefixStart); + const BYTE* const vBase = (const BYTE*)(dctx->virtualStart); + const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd); + DEBUGLOG(5, "ZSTD_decompressSequences_body: nbSeq = %d", nbSeq); + + /* Regen sequences */ + if (nbSeq) { + seqState_t seqState; + dctx->fseEntropy = 1; + { U32 i; for (i = 0; i < ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } + RETURN_ERROR_IF( + ERR_isError(BIT_initDStream(&seqState.DStream, seqStart, seqSize)), + corruption_detected, ""); + ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); + ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); + ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); + assert(dst != NULL); + +#if defined(__GNUC__) && defined(__x86_64__) + __asm__(".p2align 6"); + __asm__("nop"); +# if __GNUC__ >= 7 + __asm__(".p2align 5"); + __asm__("nop"); + __asm__(".p2align 3"); +# else + __asm__(".p2align 4"); + __asm__("nop"); + __asm__(".p2align 3"); +# endif +#endif + + for ( ; nbSeq ; nbSeq--) { + seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1); + size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); +#endif + if (UNLIKELY(ZSTD_isError(oneSeqSize))) + return oneSeqSize; + DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); + op += oneSeqSize; + } + + /* check if reached exact end */ + assert(nbSeq == 0); + RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, ""); + /* save reps for next block */ + { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } + } + + /* last literal segment */ + { size_t const lastLLSize = (size_t)(litEnd - litPtr); + DEBUGLOG(6, "copy last literals : %u", (U32)lastLLSize); + RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); + if (op != NULL) { + ZSTD_memcpy(op, litPtr, lastLLSize); + op += lastLLSize; + } } + + DEBUGLOG(6, "decoded block of size %u bytes", (U32)(op - ostart)); + return (size_t)(op - ostart); } static size_t @@ -958,89 +1797,38 @@ ZSTD_decompressSequences_default(ZSTD_DCtx* dctx, { return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); } -#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ - +static size_t +ZSTD_decompressSequencesSplitLitBuffer_default(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset) +{ + return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); +} +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT -FORCE_INLINE_TEMPLATE seq_t -ZSTD_decodeSequenceLong(seqState_t* seqState, ZSTD_longOffset_e const longOffsets) -{ - seq_t seq; - U32 const llBits = 
seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits; - U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits; - U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits; - U32 const totalBits = llBits+mlBits+ofBits; - U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue; - U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue; - U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue; - - /* sequence */ - { size_t offset; - if (!ofBits) - offset = 0; - else { - ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); - ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); - assert(ofBits <= MaxOff); - if (MEM_32bits() && longOffsets) { - U32 const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN_32-1); - offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); - if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream); - if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); - } else { - offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ - if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); - } - } - if (ofBits <= 1) { - offset += (llBase==0); - if (offset) { - size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; - temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ - if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; - seqState->prevOffset[1] = seqState->prevOffset[0]; - seqState->prevOffset[0] = offset = temp; - } else { - offset = seqState->prevOffset[0]; - } - } else { - seqState->prevOffset[2] = seqState->prevOffset[1]; - seqState->prevOffset[1] = seqState->prevOffset[0]; - seqState->prevOffset[0] = offset; - } - seq.offset = offset; - } +FORCE_INLINE_TEMPLATE - seq.matchLength = mlBase + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */ - if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) - BIT_reloadDStream(&seqState->DStream); - if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) - BIT_reloadDStream(&seqState->DStream); - /* Verify that there is enough bits to read the rest of the data in 64-bit mode. */ - ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); - - seq.litLength = llBase + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */ - if (MEM_32bits()) - BIT_reloadDStream(&seqState->DStream); - - { size_t const pos = seqState->pos + seq.litLength; - const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart; - seq.match = matchBase + pos - seq.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted. - * No consequence though : no memory access will occur, overly large offset will be detected in ZSTD_execSequenceLong() */ - seqState->pos = pos + seq.matchLength; +size_t ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence, + const BYTE* const prefixStart, const BYTE* const dictEnd) +{ + prefetchPos += sequence.litLength; + { const BYTE* const matchBase = (sequence.offset > prefetchPos) ? dictEnd : prefixStart; + /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted. 
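ZSTD_prefetchMatch() defined just below computes a match address purely to warm the cache, and the long-offset decoder pairs it with a small ring of decoded-ahead sequences so each prefetch has several iterations to land before the data is used. A simplified, self-contained sketch of that decode-ahead pattern, with hypothetical names that are not the library's API:

```c
#include <stddef.h>

#define QUEUE_SIZE 8   /* plays the role of STORED_SEQS */

typedef struct { const unsigned char* match; size_t matchLength; } work_t;

/* Prefetch item i while consuming item i - QUEUE_SIZE, whose cache line
 * has had QUEUE_SIZE iterations of latency to arrive. */
size_t consume_with_prefetch(const work_t* items, size_t n)
{
    size_t i, total = 0;
    for (i = 0; i < n; i++) {
#if defined(__GNUC__)
        __builtin_prefetch(items[i].match);        /* start pulling data in early */
#endif
        if (i >= QUEUE_SIZE)
            total += items[i - QUEUE_SIZE].matchLength;  /* stand-in for executing a sequence */
    }
    for (i = (n > QUEUE_SIZE ? n - QUEUE_SIZE : 0); i < n; i++)
        total += items[i].matchLength;             /* drain, like the decoder's "finish queue" loop */
    return total;
}
```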
+ * No consequence though : memory address is only used for prefetching, not for dereferencing */ + const BYTE* const match = (const BYTE*)ZSTD_wrappedPtrSub(ZSTD_wrappedPtrAdd(matchBase, (ptrdiff_t)prefetchPos), (ptrdiff_t)sequence.offset); + PREFETCH_L1(match); PREFETCH_L1(ZSTD_wrappedPtrAdd(match, CACHELINE_SIZE)); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */ } - - /* ANS state update */ - ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ - ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ - if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ - ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ - - return seq; + return prefetchPos + sequence.matchLength; } +/* This decoding function employs prefetching + * to reduce latency impact of cache misses. + * It's generally employed when block contains a significant portion of long-distance matches + * or when coupled with a "cold" dictionary */ FORCE_INLINE_TEMPLATE size_t ZSTD_decompressSequencesLong_body( ZSTD_DCtx* dctx, @@ -1048,63 +1836,129 @@ ZSTD_decompressSequencesLong_body( const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset) { - const BYTE* ip = (const BYTE*)seqStart; - const BYTE* const iend = ip + seqSize; - BYTE* const ostart = (BYTE* const)dst; - BYTE* const oend = ostart + maxDstSize; + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = (dctx->litBufferLocation == ZSTD_in_dst) ? + dctx->litBuffer : + (BYTE*)ZSTD_maybeNullPtrAdd(ostart, (ptrdiff_t)maxDstSize); BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; - const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* litBufferEnd = dctx->litBufferEnd; const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart); const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); /* Regen sequences */ if (nbSeq) { -#define STORED_SEQS 4 +#define STORED_SEQS 8 #define STORED_SEQS_MASK (STORED_SEQS-1) -#define ADVANCED_SEQS 4 +#define ADVANCED_SEQS STORED_SEQS seq_t sequences[STORED_SEQS]; int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS); seqState_t seqState; int seqNb; + size_t prefetchPos = (size_t)(op-prefixStart); /* track position relative to prefixStart */ + dctx->fseEntropy = 1; { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } - seqState.prefixStart = prefixStart; - seqState.pos = (size_t)(op-prefixStart); - seqState.dictEnd = dictEnd; - assert(iend >= ip); + assert(dst != NULL); RETURN_ERROR_IF( - ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)), - corruption_detected); + ERR_isError(BIT_initDStream(&seqState.DStream, seqStart, seqSize)), + corruption_detected, ""); ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); /* prepare in advance */ - for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) { - sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset); - PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); - } - RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected); - - /* decode and decompress */ - for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) { - seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, isLongOffset); - size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd); - if (ZSTD_isError(oneSeqSize)) return oneSeqSize; - PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); - sequences[seqNb & STORED_SEQS_MASK] = sequence; - op += oneSeqSize; - } + for (seqNb=0; seqNb<seqAdvance; seqNb++) { + seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, seqNb == nbSeq-1); + prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); + sequences[seqNb] = sequence; + } + + /* decompress & accumulate */ + for ( ; seqNb < nbSeq; seqNb++) { + seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, seqNb == nbSeq-1); + + if (dctx->litBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd) { + /* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */ + const size_t leftoverLit = (size_t)(dctx->litBufferEnd - litPtr); + assert(dctx->litBufferEnd >= litPtr); + if (leftoverLit) { + RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, 
"remaining lit must fit within dstBuffer"); + ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength -= leftoverLit; + op += leftoverLit; + } + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; + dctx->litBufferLocation = ZSTD_not_in_dst; + { size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart); +#endif + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + + prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); + sequences[seqNb & STORED_SEQS_MASK] = sequence; + op += oneSeqSize; + } } + else + { + /* lit buffer is either wholly contained in first or second split, or not split at all*/ + size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ? + ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength - WILDCOPY_OVERLENGTH, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) : + ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart); +#endif + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + + prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); + sequences[seqNb & STORED_SEQS_MASK] = sequence; + op += oneSeqSize; + } } - RETURN_ERROR_IF(seqNblitBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd) { + const size_t leftoverLit = (size_t)(dctx->litBufferEnd - litPtr); + assert(dctx->litBufferEnd >= litPtr); + if (leftoverLit) { + RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); + ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); + sequence->litLength -= leftoverLit; + op += leftoverLit; + } + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; + dctx->litBufferLocation = ZSTD_not_in_dst; + { size_t const oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart); +#endif + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + op += oneSeqSize; + } + } + else + { + size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ? 
+ ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence->litLength - WILDCOPY_OVERLENGTH, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) : + ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart); +#endif + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + op += oneSeqSize; + } } /* save reps for next block */ @@ -1112,13 +1966,27 @@ ZSTD_decompressSequencesLong_body( } /* last literal segment */ - { size_t const lastLLSize = litEnd - litPtr; - RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall); - memcpy(op, litPtr, lastLLSize); - op += lastLLSize; + if (dctx->litBufferLocation == ZSTD_split) { /* first deplete literal buffer in dst, then copy litExtraBuffer */ + size_t const lastLLSize = (size_t)(litBufferEnd - litPtr); + assert(litBufferEnd >= litPtr); + RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, ""); + if (op != NULL) { + ZSTD_memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; + } + { size_t const lastLLSize = (size_t)(litBufferEnd - litPtr); + assert(litBufferEnd >= litPtr); + RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); + if (op != NULL) { + ZSTD_memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } } - return op-ostart; + return (size_t)(op - ostart); } static size_t @@ -1136,7 +2004,8 @@ ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx, #if DYNAMIC_BMI2 #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG -static TARGET_ATTRIBUTE("bmi2") size_t +static BMI2_TARGET_ATTRIBUTE size_t +DONT_VECTORIZE ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, @@ -1144,10 +2013,19 @@ ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx, { return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); } +static BMI2_TARGET_ATTRIBUTE size_t +DONT_VECTORIZE +ZSTD_decompressSequencesSplitLitBuffer_bmi2(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset) +{ + return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); +} #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT -static TARGET_ATTRIBUTE("bmi2") size_t +static BMI2_TARGET_ATTRIBUTE size_t ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, @@ -1159,12 +2037,6 @@ ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx, #endif /* DYNAMIC_BMI2 */ -typedef size_t (*ZSTD_decompressSequences_t)( - ZSTD_DCtx* dctx, - void* dst, size_t maxDstSize, - const void* seqStart, size_t seqSize, int nbSeq, - const ZSTD_longOffset_e isLongOffset); - #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG static size_t ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, @@ -1173,11 +2045,24 @@ ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, { DEBUGLOG(5, "ZSTD_decompressSequences"); #if DYNAMIC_BMI2 - if (dctx->bmi2) { + if (ZSTD_DCtx_get_bmi2(dctx)) { return ZSTD_decompressSequences_bmi2(dctx, dst, 
maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); } #endif - return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); + return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); +} +static size_t +ZSTD_decompressSequencesSplitLitBuffer(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset) +{ + DEBUGLOG(5, "ZSTD_decompressSequencesSplitLitBuffer"); +#if DYNAMIC_BMI2 + if (ZSTD_DCtx_get_bmi2(dctx)) { + return ZSTD_decompressSequencesSplitLitBuffer_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); + } +#endif + return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ @@ -1196,7 +2081,7 @@ ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx, { DEBUGLOG(5, "ZSTD_decompressSequencesLong"); #if DYNAMIC_BMI2 - if (dctx->bmi2) { + if (ZSTD_DCtx_get_bmi2(dctx)) { return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); } #endif @@ -1205,56 +2090,101 @@ ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx, #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ +/** + * @returns The total size of the history referenceable by zstd, including + * both the prefix and the extDict. At @p op any offset larger than this + * is invalid. + */ +static size_t ZSTD_totalHistorySize(void* curPtr, const void* virtualStart) +{ + return (size_t)((char*)curPtr - (const char*)virtualStart); +} -#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ - !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) -/* ZSTD_getLongOffsetsShare() : +typedef struct { + unsigned longOffsetShare; + unsigned maxNbAdditionalBits; +} ZSTD_OffsetInfo; + +/* ZSTD_getOffsetInfo() : * condition : offTable must be valid * @return : "share" of long offsets (arbitrarily defined as > (1<<23)) - * compared to maximum possible of (1<<OffFSELog) */ -static unsigned -ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable) +static ZSTD_OffsetInfo +ZSTD_getOffsetInfo(const ZSTD_seqSymbol* offTable, int nbSeq) { - const void* ptr = offTable; - U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog; - const ZSTD_seqSymbol* table = offTable + 1; - U32 const max = 1 << tableLog; - U32 u, total = 0; - DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog); - - assert(max <= (1 << OffFSELog)); /* max not too large */ - for (u=0; u<max; u++) { - if (table[u].nbAdditionalBits > 22) total += 1; - } + ZSTD_OffsetInfo info = {0, 0}; + /* If nbSeq == 0, then the offTable is uninitialized, but we have + * no sequences, so both values should be 0. + */ + if (nbSeq != 0) { + const void* ptr = offTable; + U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog; + const ZSTD_seqSymbol* table = offTable + 1; + U32 const max = 1 << tableLog; + U32 u; + DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog); + + assert(max <= (1 << OffFSELog)); /* max not too large */ + for (u=0; u<max; u++) { + info.maxNbAdditionalBits = MAX(info.maxNbAdditionalBits, table[u].nbAdditionalBits); + if (table[u].nbAdditionalBits > 22) info.longOffsetShare += 1; + } - assert(tableLog <= OffFSELog); - total <<= (OffFSELog - tableLog); /* scale to OffFSELog */ + assert(tableLog <= OffFSELog); + info.longOffsetShare <<= (OffFSELog - tableLog); /* scale to OffFSELog */ + } - return total; + return info; } -#endif +/** + * @returns The maximum offset we can decode in one read of our bitstream, without + * reloading more bits in the middle of the offset bits read. Any offsets larger + * than this must use the long offset decoder. + */ +static size_t ZSTD_maxShortOffset(void) +{ + if (MEM_64bits()) { + /* We can decode any offset without reloading bits. + * This might change if the max window size grows. + */ + ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31); + return (size_t)-1; + } else { + /* The maximum offBase is (1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1. + * This offBase would require STREAM_ACCUMULATOR_MIN extra bits. + * Then we have to subtract ZSTD_REP_NUM to get the maximum possible offset. 
+ */ + size_t const maxOffbase = ((size_t)1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1; + size_t const maxOffset = maxOffbase - ZSTD_REP_NUM; + assert(ZSTD_highbit32((U32)maxOffbase) == STREAM_ACCUMULATOR_MIN); + return maxOffset; + } +} size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, - const void* src, size_t srcSize, const int frame) + const void* src, size_t srcSize, const streaming_operation streaming) { /* blockType == blockCompressed */ const BYTE* ip = (const BYTE*)src; - /* isLongOffset must be true if there are long offsets. - * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN. - * We don't expect that to be the case in 64-bit mode. - * In block mode, window size is not known, so we have to be conservative. - * (note: but it could be evaluated from current-lowLimit) - */ - ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN)))); - DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize); - - RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong); + DEBUGLOG(5, "ZSTD_decompressBlock_internal (cSize : %u)", (unsigned)srcSize); + + /* Note : the wording of the specification + * allows compressed block to be sized exactly ZSTD_blockSizeMax(dctx). + * This generally does not happen, as it makes little sense, + * since an uncompressed block would feature same size and have no decompression cost. + * Also, note that decoder from reference libzstd before < v1.5.4 + * would consider this edge case as an error. + * As a consequence, avoid generating compressed blocks of size ZSTD_blockSizeMax(dctx) + * for broader compatibility with the deployed ecosystem of zstd decoders */ + RETURN_ERROR_IF(srcSize > ZSTD_blockSizeMax(dctx), srcSize_wrong, ""); /* Decode literals section */ - { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); - DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize); + { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming); + DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : cSize=%u, nbLiterals=%zu", (U32)litCSize, dctx->litSize); if (ZSTD_isError(litCSize)) return litCSize; ip += litCSize; srcSize -= litCSize; @@ -1262,6 +2192,23 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, /* Build Decoding Tables */ { + /* Compute the maximum block size, which must also work when !frame and fParams are unset. + * Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t. + */ + size_t const blockSizeMax = MIN(dstCapacity, ZSTD_blockSizeMax(dctx)); + size_t const totalHistorySize = ZSTD_totalHistorySize(ZSTD_maybeNullPtrAdd(dst, (ptrdiff_t)blockSizeMax), (BYTE const*)dctx->virtualStart); + /* isLongOffset must be true if there are long offsets. + * Offsets are long if they are larger than ZSTD_maxShortOffset(). + * We don't expect that to be the case in 64-bit mode. + * + * We check here to see if our history is large enough to allow long offsets. + * If it isn't, then we can't possible have (valid) long offsets. If the offset + * is invalid, then it is okay to read it incorrectly. + * + * If isLongOffsets is true, then we will later check our decoding table to see + * if it is even possible to generate long offsets. + */ + ZSTD_longOffset_e isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (totalHistorySize > ZSTD_maxShortOffset())); /* These macros control at build-time which decompressor implementation * we use. 
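Worked numbers for ZSTD_maxShortOffset() on 32-bit builds, assuming the usual constants from the zstd headers (STREAM_ACCUMULATOR_MIN_32 = 25, ZSTD_REP_NUM = 3): maxOffbase = (1 << 26) - 1 = 67108863, so the largest offset decodable in a single bitstream read is 67108863 - 3 = 67108860, and ZSTD_highbit32(67108863) = 25, which is exactly what the assert verifies. In the same spirit, the minShare thresholds applied a little further down (7 on 64-bit hosts, 20 on 32-bit) are fractions of the long-offset share after it is scaled to a denominator of 1 << OffFSELog = 256: 7/256 is about 2.73% and 20/256 is about 7.81%, which is where the percentages in that heuristic comment come from.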
If neither is defined, we do some inspection and dispatch at * runtime. @@ -1269,6 +2216,11 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) int usePrefetchDecoder = dctx->ddictIsCold; +#else + /* Set to 1 to avoid computing offset info if we don't need to. + * Otherwise this value is ignored. + */ + int usePrefetchDecoder = 1; #endif int nbSeq; size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize); @@ -1276,42 +2228,84 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, ip += seqHSize; srcSize -= seqHSize; -#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ - !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) - if ( !usePrefetchDecoder - && (!frame || (dctx->fParams.windowSize > (1<<24))) - && (nbSeq>ADVANCED_SEQS) ) { /* could probably use a larger nbSeq limit */ - U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr); - U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */ - usePrefetchDecoder = (shareLongOffsets >= minShare); + RETURN_ERROR_IF((dst == NULL || dstCapacity == 0) && nbSeq > 0, dstSize_tooSmall, "NULL not handled"); + RETURN_ERROR_IF(MEM_64bits() && sizeof(size_t) == sizeof(void*) && (size_t)(-1) - (size_t)dst < (size_t)(1 << 20), dstSize_tooSmall, + "invalid dst"); + + /* If we could potentially have long offsets, or we might want to use the prefetch decoder, + * compute information about the share of long offsets, and the maximum nbAdditionalBits. + * NOTE: could probably use a larger nbSeq limit + */ + if (isLongOffset || (!usePrefetchDecoder && (totalHistorySize > (1u << 24)) && (nbSeq > 8))) { + ZSTD_OffsetInfo const info = ZSTD_getOffsetInfo(dctx->OFTptr, nbSeq); + if (isLongOffset && info.maxNbAdditionalBits <= STREAM_ACCUMULATOR_MIN) { + /* If isLongOffset, but the maximum number of additional bits that we see in our table is small + * enough, then we know it is impossible to have too long an offset in this block, so we can + * use the regular offset decoder. + */ + isLongOffset = ZSTD_lo_isRegularOffset; + } + if (!usePrefetchDecoder) { + U32 const minShare = MEM_64bits() ? 
7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */ + usePrefetchDecoder = (info.longOffsetShare >= minShare); + } } -#endif dctx->ddictIsCold = 0; #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) - if (usePrefetchDecoder) + if (usePrefetchDecoder) { +#else + (void)usePrefetchDecoder; + { #endif #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); #endif + } #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG /* else */ - return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); + if (dctx->litBufferLocation == ZSTD_split) + return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); + else + return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); #endif } } -size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize) +{ + if (dst != dctx->previousDstEnd && dstSize > 0) { /* not contiguous */ + dctx->dictEnd = dctx->previousDstEnd; + dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart)); + dctx->prefixStart = dst; + dctx->previousDstEnd = dst; + } +} + + +size_t ZSTD_decompressBlock_deprecated(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) { size_t dSize; - ZSTD_checkContinuity(dctx, dst); - dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0); + dctx->isFrameDecompression = 0; + ZSTD_checkContinuity(dctx, dst, dstCapacity); + dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, not_streaming); + FORWARD_IF_ERROR(dSize, ""); dctx->previousDstEnd = (char*)dst + dSize; return dSize; } + + +/* NOTE: Must just wrap ZSTD_decompressBlock_deprecated() */ +size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + return ZSTD_decompressBlock_deprecated(dctx, dst, dstCapacity, src, srcSize); +} diff --git a/lib/decompress/zstd_decompress_block.h b/lib/decompress/zstd_decompress_block.h index 7e929604102..ab152404ba0 100644 --- a/lib/decompress/zstd_decompress_block.h +++ b/lib/decompress/zstd_decompress_block.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -15,9 +15,9 @@ /*-******************************************************* * Dependencies *********************************************************/ -#include <stddef.h> /* size_t */ -#include "zstd.h" /* DCtx, and some public functions */ -#include "zstd_internal.h" /* blockProperties_t, and some public functions */ +#include "../common/zstd_deps.h" /* size_t */ +#include "../zstd.h" /* DCtx, and some public functions */ +#include "../common/zstd_internal.h" /* blockProperties_t, and some public functions */ #include "zstd_decompress_internal.h" /* ZSTD_seqSymbol */ @@ -33,6 +33,12 @@ */ + /* Streaming state is used to inform allocation of the literal buffer */ +typedef enum { + not_streaming = 0, + is_streaming = 1 +} streaming_operation; + /* ZSTD_decompressBlock_internal() : * decompress block, starting at `src`, * into destination buffer `dst`. @@ -41,19 +47,27 @@ */ size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, - const void* src, size_t srcSize, const int frame); + const void* src, size_t srcSize, const streaming_operation streaming); /* ZSTD_buildFSETable() : * generate FSE decoding table for one symbol (ll, ml or off) * this function must be called with valid parameters only * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.) * in which case it cannot fail. + * The workspace must be 4-byte aligned and at least ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes, which is + * defined in zstd_decompress_internal.h. * Internal use only. */ void ZSTD_buildFSETable(ZSTD_seqSymbol* dt, const short* normalizedCounter, unsigned maxSymbolValue, - const U32* baseValue, const U32* nbAdditionalBits, - unsigned tableLog); + const U32* baseValue, const U8* nbAdditionalBits, + unsigned tableLog, void* wksp, size_t wkspSize, + int bmi2); + +/* Internal definition of ZSTD_decompressBlock() to avoid deprecation warnings. */ +size_t ZSTD_decompressBlock_deprecated(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize); #endif /* ZSTD_DEC_BLOCK_H */ diff --git a/lib/decompress/zstd_decompress_internal.h b/lib/decompress/zstd_decompress_internal.h index ccbdfa090fa..e4bffdbc6cf 100644 --- a/lib/decompress/zstd_decompress_internal.h +++ b/lib/decompress/zstd_decompress_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -19,34 +19,34 @@ /*-******************************************************* * Dependencies *********************************************************/ -#include "mem.h" /* BYTE, U16, U32 */ -#include "zstd_internal.h" /* ZSTD_seqSymbol */ +#include "../common/mem.h" /* BYTE, U16, U32 */ +#include "../common/zstd_internal.h" /* constants : MaxLL, MaxML, MaxOff, LLFSELog, etc. 
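Those constants also give scale to the FSE workspace macro defined a few lines below: assuming the usual zstd_internal.h values (MaxSeq = 52, MaxFSELog = 9, sizeof(S16) = 2, sizeof(U64) = 8; worth double-checking against the headers), ZSTD_BUILD_FSE_TABLE_WKSP_SIZE works out to 2*53 + 512 + 8 = 626 bytes, so ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 rounds up to 157 U32 slots reserved inside ZSTD_entropyDTables_t.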
*/ /*-******************************************************* * Constants *********************************************************/ -static const U32 LL_base[MaxLL+1] = { +static UNUSED_ATTR const U32 LL_base[MaxLL+1] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000 }; -static const U32 OF_base[MaxOff+1] = { +static UNUSED_ATTR const U32 OF_base[MaxOff+1] = { 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD }; -static const U32 OF_bits[MaxOff+1] = { +static UNUSED_ATTR const U8 OF_bits[MaxOff+1] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 }; -static const U32 ML_base[MaxML+1] = { +static UNUSED_ATTR const U32 ML_base[MaxML+1] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, @@ -73,12 +73,17 @@ static const U32 ML_base[MaxML+1] = { #define SEQSYMBOL_TABLE_SIZE(log) (1 + (1 << (log))) +#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64)) +#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32)) +#define ZSTD_HUFFDTABLE_CAPACITY_LOG 12 + typedef struct { ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; /* Note : Space reserved for FSE Tables */ ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; /* is also used as temporary workspace while building hufTable during DDict creation */ ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */ - HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */ + HUF_DTable hufTable[HUF_DTABLE_SIZE(ZSTD_HUFFDTABLE_CAPACITY_LOG)]; /* can accommodate HUF_decompress4X */ U32 rep[ZSTD_REP_NUM]; + U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32]; } ZSTD_entropyDTables_t; typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader, @@ -95,6 +100,29 @@ typedef enum { ZSTD_use_once = 1 /* Use the dictionary once and set to ZSTD_dont_use */ } ZSTD_dictUses_e; +/* Hashset for storing references to multiple ZSTD_DDict within ZSTD_DCtx */ +typedef struct { + const ZSTD_DDict** ddictPtrTable; + size_t ddictPtrTableSize; + size_t ddictPtrCount; +} ZSTD_DDictHashSet; + +#ifndef ZSTD_DECODER_INTERNAL_BUFFER +# define ZSTD_DECODER_INTERNAL_BUFFER (1 << 16) +#endif + +#define ZSTD_LBMIN 64 +#define ZSTD_LBMAX (128 << 10) + +/* extra buffer, compensates when dst is not large enough to store litBuffer */ +#define ZSTD_LITBUFFEREXTRASIZE BOUNDED(ZSTD_LBMIN, ZSTD_DECODER_INTERNAL_BUFFER, ZSTD_LBMAX) + +typedef enum { + ZSTD_not_in_dst = 0, /* Stored entirely within litExtraBuffer */ + ZSTD_in_dst = 1, /* Stored entirely within dst (in memory after current output write) */ + ZSTD_split = 2 /* Split between litExtraBuffer and dst */ +} ZSTD_litLocation_e; + struct ZSTD_DCtx_s { const ZSTD_seqSymbol* LLTptr; @@ -108,7 +136,8 @@ struct ZSTD_DCtx_s const void* virtualStart; /* virtual start of previous segment if it was just before current one */ const void* dictEnd; /* end of previous segment */ size_t expected; - ZSTD_frameHeader fParams; + ZSTD_FrameHeader fParams; + U64 processedCSize; U64 decodedSize; blockType_e 
bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */ ZSTD_dStage stage; @@ -117,12 +146,17 @@ struct ZSTD_DCtx_s XXH64_state_t xxhState; size_t headerSize; ZSTD_format_e format; + ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum; /* User specified: if == 1, will ignore checksums in compressed frame. Default == 0 */ + U32 validateChecksum; /* if == 1, will validate checksum. Is == 1 if (fParams.checksumFlag == 1) and (forceIgnoreChecksum == 0). */ const BYTE* litPtr; ZSTD_customMem customMem; size_t litSize; size_t rleSize; size_t staticSize; + int isFrameDecompression; +#if DYNAMIC_BMI2 int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */ +#endif /* dictionary */ ZSTD_DDict* ddictLocal; @@ -130,6 +164,10 @@ struct ZSTD_DCtx_s U32 dictID; int ddictIsCold; /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */ ZSTD_dictUses_e dictUses; + ZSTD_DDictHashSet* ddictSet; /* Hash set for multiple ddicts */ + ZSTD_refMultipleDDicts_e refMultipleDDicts; /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */ + int disableHufAsm; + int maxBlockSizeParam; /* streaming */ ZSTD_dStreamStage streamStage; @@ -142,17 +180,44 @@ struct ZSTD_DCtx_s size_t outStart; size_t outEnd; size_t lhSize; +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) void* legacyContext; U32 previousLegacyVersion; U32 legacyVersion; +#endif U32 hostageByte; int noForwardProgress; + ZSTD_bufferMode_e outBufferMode; + ZSTD_outBuffer expectedOutBuffer; /* workspace */ - BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH]; + BYTE* litBuffer; + const BYTE* litBufferEnd; + ZSTD_litLocation_e litBufferLocation; + BYTE litExtraBuffer[ZSTD_LITBUFFEREXTRASIZE + WILDCOPY_OVERLENGTH]; /* literal buffer can be split between storage within dst and within this scratch buffer */ BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; + + size_t oversizedDuration; + +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + void const* dictContentBeginForFuzzing; + void const* dictContentEndForFuzzing; +#endif + + /* Tracing */ +#if ZSTD_TRACE + ZSTD_TraceCtx traceCtx; +#endif }; /* typedef'd to ZSTD_DCtx within "zstd.h" */ +MEM_STATIC int ZSTD_DCtx_get_bmi2(const struct ZSTD_DCtx_s *dctx) { +#if DYNAMIC_BMI2 + return dctx->bmi2; +#else + (void)dctx; + return 0; +#endif +} /*-******************************************************* * Shared internal functions @@ -160,7 +225,7 @@ struct ZSTD_DCtx_s /*! ZSTD_loadDEntropy() : * dict : must point at beginning of a valid zstd dictionary. - * @return : size of entropy tables read */ + * @return : size of dictionary header (size of magic number + dict ID + entropy tables) */ size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, const void* const dict, size_t const dictSize); @@ -169,7 +234,7 @@ size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, * If yes, do nothing (continue on current segment). * If not, classify previous segment as "external dictionary", and start a new segment. * This function cannot fail. 
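The ZSTD_DCtx_get_bmi2() helper above lets the bmi2 field disappear entirely when DYNAMIC_BMI2 is off. A hedged, self-contained sketch of the resulting dispatch pattern, using stand-in types and functions rather than the library's own:

```c
#include <stddef.h>

typedef struct { int bmi2; } dctx_t;   /* stand-in for ZSTD_DCtx */

static size_t decode_default(dctx_t* d, size_t n) { (void)d; return n; }

#if defined(DYNAMIC_BMI2) && (DYNAMIC_BMI2)
static size_t decode_bmi2(dctx_t* d, size_t n) { (void)d; return n; }
#endif

size_t decode_dispatch(dctx_t* d, size_t n)
{
#if defined(DYNAMIC_BMI2) && (DYNAMIC_BMI2)
    if (d->bmi2)                    /* result of one-time runtime CPU detection */
        return decode_bmi2(d, n);
#endif
    return decode_default(d, n);    /* the only path when BMI2 dispatch is compiled out */
}
```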
*/ -void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst); +void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize); #endif /* ZSTD_DECOMPRESS_INTERNAL_H */ diff --git a/lib/deprecated/zbuff.h b/lib/deprecated/zbuff.h index a93115da4a1..a968245b36a 100644 --- a/lib/deprecated/zbuff.h +++ b/lib/deprecated/zbuff.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -28,7 +28,7 @@ extern "C" { * Dependencies ***************************************/ #include <stddef.h> /* size_t */ -#include "zstd.h" /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */ +#include "../zstd.h" /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */ /* *************************************************************** @@ -36,16 +36,17 @@ extern "C" { *****************************************************************/ /* Deprecation warnings */ /* Should these warnings be a problem, - it is generally possible to disable them, - typically with -Wno-deprecated-declarations for gcc - or _CRT_SECURE_NO_WARNINGS in Visual. - Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS */ + * it is generally possible to disable them, + * typically with -Wno-deprecated-declarations for gcc + * or _CRT_SECURE_NO_WARNINGS in Visual. + * Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS + */ #ifdef ZBUFF_DISABLE_DEPRECATE_WARNINGS # define ZBUFF_DEPRECATED(message) ZSTDLIB_API /* disable deprecation warnings */ #else # if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ # define ZBUFF_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API - # elif (defined(__GNUC__) && (__GNUC__ >= 5)) || defined(__clang__) +# elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__) # define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated(message))) # elif defined(__GNUC__) && (__GNUC__ >= 3) # define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated)) @@ -185,7 +186,7 @@ ZBUFF_DEPRECATED("use ZSTD_DStreamOutSize") size_t ZBUFF_recommendedDOutSize(voi /*--- Dependency ---*/ #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters, ZSTD_customMem */ -#include "zstd.h" +#include "../zstd.h" /*--- Custom memory allocator ---*/ diff --git a/lib/deprecated/zbuff_common.c b/lib/deprecated/zbuff_common.c index 661b9b0e18c..5a2f2db354f 100644 --- a/lib/deprecated/zbuff_common.c +++ b/lib/deprecated/zbuff_common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,7 +11,7 @@ /*-************************************* * Dependencies ***************************************/ -#include "error_private.h" +#include "../common/error_private.h" #include "zbuff.h" /*-**************************************** diff --git a/lib/deprecated/zbuff_compress.c b/lib/deprecated/zbuff_compress.c index f39c60d89f6..1d8682150b2 100644 --- a/lib/deprecated/zbuff_compress.c +++ b/lib/deprecated/zbuff_compress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -15,6 +15,7 @@ ***************************************/ #define ZBUFF_STATIC_LINKING_ONLY #include "zbuff.h" +#include "../common/error_private.h" /*-*********************************************************** @@ -73,13 +74,32 @@ size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc, ZSTD_parameters params, unsigned long long pledgedSrcSize) { if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* preserve "0 == unknown" behavior */ - return ZSTD_initCStream_advanced(zbc, dict, dictSize, params, pledgedSrcSize); + FORWARD_IF_ERROR(ZSTD_CCtx_reset(zbc, ZSTD_reset_session_only), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setPledgedSrcSize(zbc, pledgedSrcSize), ""); + + FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(zbc, ZSTD_c_windowLog, params.cParams.windowLog), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(zbc, ZSTD_c_hashLog, params.cParams.hashLog), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(zbc, ZSTD_c_chainLog, params.cParams.chainLog), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(zbc, ZSTD_c_searchLog, params.cParams.searchLog), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(zbc, ZSTD_c_minMatch, params.cParams.minMatch), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(zbc, ZSTD_c_targetLength, params.cParams.targetLength), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(zbc, ZSTD_c_strategy, params.cParams.strategy), ""); + + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(zbc, ZSTD_c_contentSizeFlag, params.fParams.contentSizeFlag), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(zbc, ZSTD_c_checksumFlag, params.fParams.checksumFlag), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(zbc, ZSTD_c_dictIDFlag, params.fParams.noDictIDFlag), ""); + + FORWARD_IF_ERROR(ZSTD_CCtx_loadDictionary(zbc, dict, dictSize), ""); + return 0; } - size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* zbc, const void* dict, size_t dictSize, int compressionLevel) { - return ZSTD_initCStream_usingDict(zbc, dict, dictSize, compressionLevel); + FORWARD_IF_ERROR(ZSTD_CCtx_reset(zbc, ZSTD_reset_session_only), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(zbc, ZSTD_c_compressionLevel, compressionLevel), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_loadDictionary(zbc, dict, dictSize), ""); + return 0; } size_t ZBUFF_compressInit(ZBUFF_CCtx* zbc, int compressionLevel) diff --git a/lib/deprecated/zbuff_decompress.c b/lib/deprecated/zbuff_decompress.c index 923c22b73c5..12a66af7412 100644 --- a/lib/deprecated/zbuff_decompress.c +++ b/lib/deprecated/zbuff_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -13,6 +13,8 @@ /* ************************************* * Dependencies ***************************************/ +#define ZSTD_DISABLE_DEPRECATE_WARNINGS /* suppress warning on ZSTD_initDStream_usingDict */ +#include "../zstd.h" /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */ #define ZBUFF_STATIC_LINKING_ONLY #include "zbuff.h" diff --git a/lib/dictBuilder/cover.c b/lib/dictBuilder/cover.c index 961e1cb9d23..06d1cb93a5c 100644 --- a/lib/dictBuilder/cover.c +++ b/lib/dictBuilder/cover.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
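The rewritten ZBUFF shims above forward to the ZSTD_CCtx parameter API. A brief usage sketch of that modern idiom, using standard libzstd calls with error handling abbreviated:

```c
#include <zstd.h>

/* Equivalent of the deprecated init-with-dictionary: reset the session,
 * set parameters one by one, then (re)load the dictionary. */
static size_t init_for_streaming(ZSTD_CCtx* cctx,
                                 const void* dict, size_t dictSize, int level)
{
    size_t err = ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
    if (ZSTD_isError(err)) return err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
    if (ZSTD_isError(err)) return err;
    return ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);  /* NULL,0 clears it */
}
```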
* * This source code is licensed under both the BSD-style license (found in the @@ -21,52 +21,102 @@ /*-************************************* * Dependencies ***************************************/ +/* qsort_r is an extension. + * + * Android NDK does not ship qsort_r(). + */ +#if (defined(__linux__) && !defined(__ANDROID__)) || defined(__CYGWIN__) || defined(__MSYS__) +# ifndef _GNU_SOURCE +# define _GNU_SOURCE +# endif +#endif + +#define __STDC_WANT_LIB_EXT1__ 1 /* request C11 Annex K, which includes qsort_s() */ + #include <stdio.h> /* fprintf */ -#include <stdlib.h> /* malloc, free, qsort */ +#include <stdlib.h> /* malloc, free, qsort_r */ + #include <string.h> /* memset */ #include <time.h> /* clock */ -#include "mem.h" /* read */ -#include "pool.h" -#include "threading.h" -#include "cover.h" -#include "zstd_internal.h" /* includes zstd.h */ #ifndef ZDICT_STATIC_LINKING_ONLY -#define ZDICT_STATIC_LINKING_ONLY +# define ZDICT_STATIC_LINKING_ONLY #endif -#include "zdict.h" + +#include "../common/debug.h" /* DEBUG_STATIC_ASSERT */ +#include "../common/mem.h" /* read */ +#include "../common/pool.h" /* POOL_ctx */ +#include "../common/threading.h" /* ZSTD_pthread_mutex_t */ +#include "../common/zstd_internal.h" /* includes zstd.h */ +#include "../common/bits.h" /* ZSTD_highbit32 */ +#include "../zdict.h" +#include "cover.h" /*-************************************* * Constants ***************************************/ +/** +* There are 32bit indexes used to ref samples, so limit samples size to 4GB +* on 64bit builds. +* For 32bit builds we choose 1 GB. +* Most 32bit platforms have 2GB user-mode addressable space and we allocate a large +* contiguous buffer, so 1GB is already a high limit. +*/ #define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB)) -#define DEFAULT_SPLITPOINT 1.0 +#define COVER_DEFAULT_SPLITPOINT 1.0 + +/** + * Select the qsort() variant used by cover + */ +#define ZDICT_QSORT_MIN 0 +#define ZDICT_QSORT_C90 ZDICT_QSORT_MIN +#define ZDICT_QSORT_GNU 1 +#define ZDICT_QSORT_APPLE 2 +#define ZDICT_QSORT_MSVC 3 +#define ZDICT_QSORT_C11 ZDICT_QSORT_MAX +#define ZDICT_QSORT_MAX 4 + +#ifndef ZDICT_QSORT +# if defined(__APPLE__) +# define ZDICT_QSORT ZDICT_QSORT_APPLE /* uses qsort_r() with a different order for parameters */ +# elif (defined(__linux__) && !defined(__ANDROID__)) || defined(__CYGWIN__) || defined(__MSYS__) +# define ZDICT_QSORT ZDICT_QSORT_GNU /* uses qsort_r() */ +# elif defined(_WIN32) && defined(_MSC_VER) +# define ZDICT_QSORT ZDICT_QSORT_MSVC /* uses qsort_s() with a different order for parameters */ +# elif defined(__STDC_LIB_EXT1__) && (__STDC_LIB_EXT1__ > 0) /* C11 Annex K */ +# define ZDICT_QSORT ZDICT_QSORT_C11 /* uses qsort_s() */ +# else +# define ZDICT_QSORT ZDICT_QSORT_C90 /* uses standard qsort() which is not re-entrant (requires global variable) */ +# endif +#endif + /*-************************************* * Console display +* +* Captures the `displayLevel` variable in the local scope. ***************************************/ -static int g_displayLevel = 2; +#undef DISPLAY #define DISPLAY(...) \ { \ fprintf(stderr, __VA_ARGS__); \ fflush(stderr); \ } -#define LOCALDISPLAYLEVEL(displayLevel, l, ...) \ +#undef DISPLAYLEVEL +#define DISPLAYLEVEL(l, ...) \ if (displayLevel >= l) { \ DISPLAY(__VA_ARGS__); \ } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ -#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) -#define LOCALDISPLAYUPDATE(displayLevel, l, ...) \ +#undef DISPLAYUPDATE +#define DISPLAYUPDATE(lastUpdateTime, l, ...) 
\ if (displayLevel >= l) { \ - if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) { \ - g_time = clock(); \ + const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; \ + if ((clock() - lastUpdateTime > refreshRate) || (displayLevel >= 4)) { \ + lastUpdateTime = clock(); \ DISPLAY(__VA_ARGS__); \ } \ } -#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) -static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; -static clock_t g_time = 0; /*-************************************* * Hash table @@ -120,9 +170,9 @@ static int COVER_map_init(COVER_map_t *map, U32 size) { /** * Internal hash function */ -static const U32 prime4bytes = 2654435761U; +static const U32 COVER_prime4bytes = 2654435761U; static U32 COVER_map_hash(COVER_map_t *map, U32 key) { - return (key * prime4bytes) >> (32 - map->sizeLog); + return (key * COVER_prime4bytes) >> (32 - map->sizeLog); } /** @@ -161,7 +211,7 @@ static U32 *COVER_map_at(COVER_map_t *map, U32 key) { */ static void COVER_map_remove(COVER_map_t *map, U32 key) { U32 i = COVER_map_index(map, key); - COVER_map_pair_t *del = &map->data[i]; + COVER_map_pair_t* del = &map->data[i]; U32 shift = 1; if (del->value == MAP_EMPTY_VALUE) { return; @@ -212,10 +262,13 @@ typedef struct { U32 *freqs; U32 *dmerAt; unsigned d; + int displayLevel; } COVER_ctx_t; -/* We need a global context for qsort... */ -static COVER_ctx_t *g_ctx = NULL; +#if ZDICT_QSORT == ZDICT_QSORT_C90 +/* Use global context for non-reentrant sort functions */ +static COVER_ctx_t *g_coverCtx = NULL; +#endif /*-************************************* * Helper functions @@ -258,11 +311,15 @@ static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) { /** * Same as COVER_cmp() except ties are broken by pointer value - * NOTE: g_ctx must be set to call this function. A global is required because - * qsort doesn't take an opaque pointer. */ +#if (ZDICT_QSORT == ZDICT_QSORT_MSVC) || (ZDICT_QSORT == ZDICT_QSORT_APPLE) +static int WIN_CDECL COVER_strict_cmp(void* g_coverCtx, const void* lp, const void* rp) { +#elif (ZDICT_QSORT == ZDICT_QSORT_GNU) || (ZDICT_QSORT == ZDICT_QSORT_C11) +static int COVER_strict_cmp(const void *lp, const void *rp, void *g_coverCtx) { +#else /* C90 fallback.*/ static int COVER_strict_cmp(const void *lp, const void *rp) { - int result = COVER_cmp(g_ctx, lp, rp); +#endif + int result = COVER_cmp((COVER_ctx_t*)g_coverCtx, lp, rp); if (result == 0) { result = lp < rp ? -1 : 1; } @@ -271,21 +328,60 @@ static int COVER_strict_cmp(const void *lp, const void *rp) { /** * Faster version for d <= 8. */ +#if (ZDICT_QSORT == ZDICT_QSORT_MSVC) || (ZDICT_QSORT == ZDICT_QSORT_APPLE) +static int WIN_CDECL COVER_strict_cmp8(void* g_coverCtx, const void* lp, const void* rp) { +#elif (ZDICT_QSORT == ZDICT_QSORT_GNU) || (ZDICT_QSORT == ZDICT_QSORT_C11) +static int COVER_strict_cmp8(const void *lp, const void *rp, void *g_coverCtx) { +#else /* C90 fallback.*/ static int COVER_strict_cmp8(const void *lp, const void *rp) { - int result = COVER_cmp8(g_ctx, lp, rp); +#endif + int result = COVER_cmp8((COVER_ctx_t*)g_coverCtx, lp, rp); if (result == 0) { result = lp < rp ? -1 : 1; } return result; } +/** + * Abstract away divergence of qsort_r() parameters. + * Hopefully when C11 become the norm, we will be able + * to clean it up. 
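On the hash rename earlier in this hunk: COVER_prime4bytes implements multiplicative (Fibonacci-style) hashing. 2654435761 is Knuth's 32-bit multiplier, chosen close to 2^32 divided by the golden ratio, so the high bits of the product are well mixed and the top sizeLog of them serve as the bucket index. A standalone restatement:

```c
#include <stdint.h>

/* Multiplicative hashing: with K near 2^32 / phi, key * K scrambles the
 * high bits well, so keep only the top sizeLog bits as the table index. */
static uint32_t fib_hash(uint32_t key, unsigned sizeLog)
{
    return (uint32_t)((key * 2654435761U) >> (32 - sizeLog));
}
```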
+ */ +static void stableSort(COVER_ctx_t *ctx) +{ + DEBUG_STATIC_ASSERT(ZDICT_QSORT_MIN <= ZDICT_QSORT && ZDICT_QSORT <= ZDICT_QSORT_MAX); +#if (ZDICT_QSORT == ZDICT_QSORT_APPLE) + qsort_r(ctx->suffix, ctx->suffixSize, sizeof(U32), + ctx, + (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp)); +#elif (ZDICT_QSORT == ZDICT_QSORT_GNU) + qsort_r(ctx->suffix, ctx->suffixSize, sizeof(U32), + (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp), + ctx); +#elif (ZDICT_QSORT == ZDICT_QSORT_MSVC) + qsort_s(ctx->suffix, ctx->suffixSize, sizeof(U32), + (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp), + ctx); +#elif (ZDICT_QSORT == ZDICT_QSORT_C11) + qsort_s(ctx->suffix, ctx->suffixSize, sizeof(U32), + (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp), + ctx); +#else /* C90 fallback.*/ + g_coverCtx = ctx; + /* TODO(cavalcanti): implement a reentrant qsort() when _r is not available. */ + qsort(ctx->suffix, ctx->suffixSize, sizeof(U32), + (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp)); +#endif +} + /** * Returns the first pointer in [first, last) whose element does not compare * less than value. If no such element exists it returns last. */ -static const size_t *COVER_lower_bound(const size_t *first, const size_t *last, +static const size_t *COVER_lower_bound(const size_t* first, const size_t* last, size_t value) { - size_t count = last - first; + size_t count = (size_t)(last - first); + assert(last >= first); while (count != 0) { size_t step = count / 2; const size_t *ptr = first; @@ -524,14 +620,15 @@ static void COVER_ctx_destroy(COVER_ctx_t *ctx) { /** * Prepare a context for dictionary building. - * The context is only dependent on the parameter `d` and can used multiple + * The context is only dependent on the parameter `d` and can be used multiple * times. * Returns 0 on success or error code on error. * The context must be destroyed with `COVER_ctx_destroy()`. */ static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, - unsigned d, double splitPoint) { + unsigned d, double splitPoint, int displayLevel) +{ const BYTE *const samples = (const BYTE *)samplesBuffer; const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples); /* Split samples into testing and training sets */ @@ -539,6 +636,7 @@ static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples; const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize; + ctx->displayLevel = displayLevel; /* Checks */ if (totalSamplesSize < MAX(d, sizeof(U64)) || totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) { @@ -600,17 +698,7 @@ static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, for (i = 0; i < ctx->suffixSize; ++i) { ctx->suffix[i] = i; } - /* qsort doesn't take an opaque pointer, so pass as a global. - * On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is. - */ - g_ctx = ctx; -#if defined(__OpenBSD__) - mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32), - (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp)); -#else - qsort(ctx->suffix, ctx->suffixSize, sizeof(U32), - (ctx->d <= 8 ? 
&COVER_strict_cmp8 : &COVER_strict_cmp)); -#endif + stableSort(ctx); } DISPLAYLEVEL(2, "Computing frequencies\n"); /* For each dmer group (group of positions with the same first d bytes): @@ -629,18 +717,18 @@ static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel) { - const double ratio = (double)nbDmers / maxDictSize; + const double ratio = (double)nbDmers / (double)maxDictSize; if (ratio >= 10) { return; } - LOCALDISPLAYLEVEL(displayLevel, 1, - "WARNING: The maximum dictionary size %u is too large " - "compared to the source size %u! " - "size(source)/size(dictionary) = %f, but it should be >= " - "10! This may lead to a subpar dictionary! We recommend " - "training on sources at least 10x, and up to 100x the " - "size of the dictionary!\n", (U32)maxDictSize, - (U32)nbDmers, ratio); + DISPLAYLEVEL(1, + "WARNING: The maximum dictionary size %u is too large " + "compared to the source size %u! " + "size(source)/size(dictionary) = %f, but it should be >= " + "10! This may lead to a subpar dictionary! We recommend " + "training on sources at least 10x, and preferably 100x " + "the size of the dictionary! \n", (U32)maxDictSize, + (U32)nbDmers, ratio); } COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, @@ -675,6 +763,8 @@ static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs, const size_t maxZeroScoreRun = MAX(10, MIN(100, epochs.num >> 3)); size_t zeroScoreRun = 0; size_t epoch; + clock_t lastUpdateTime = 0; + const int displayLevel = ctx->displayLevel; DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", (U32)epochs.num, (U32)epochs.size); /* Loop through the epochs until there are no more segments or the dictionary @@ -708,6 +798,7 @@ static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs, tail -= segmentSize; memcpy(dict + tail, ctx->samples + segment.begin, segmentSize); DISPLAYUPDATE( + lastUpdateTime, 2, "\r%u%% ", (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); } @@ -715,7 +806,7 @@ static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs, return tail; } -ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( +ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_cover( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t parameters) @@ -723,9 +814,8 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( BYTE* const dict = (BYTE*)dictBuffer; COVER_ctx_t ctx; COVER_map_t activeDmers; + const int displayLevel = (int)parameters.zParams.notificationLevel; parameters.splitPoint = 1.0; - /* Initialize global data */ - g_displayLevel = parameters.zParams.notificationLevel; /* Checks */ if (!COVER_checkParameters(parameters, dictBufferCapacity)) { DISPLAYLEVEL(1, "Cover parameters incorrect\n"); @@ -743,12 +833,12 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( /* Initialize context and activeDmers */ { size_t const initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, - parameters.d, parameters.splitPoint); + parameters.d, parameters.splitPoint, displayLevel); if (ZSTD_isError(initVal)) { return initVal; } } - COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, g_displayLevel); + COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel); if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) { DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n"); 
COVER_ctx_destroy(&ctx); @@ -889,9 +979,13 @@ void COVER_best_start(COVER_best_t *best) { * Decrements liveJobs and signals any waiting threads if liveJobs == 0. * If this dictionary is the best so far save it and its parameters. */ -void COVER_best_finish(COVER_best_t *best, size_t compressedSize, - ZDICT_cover_params_t parameters, void *dict, - size_t dictSize) { +void COVER_best_finish(COVER_best_t* best, + ZDICT_cover_params_t parameters, + COVER_dictSelection_t selection) +{ + void* dict = selection.dictContent; + size_t compressedSize = selection.totalCompressedSize; + size_t dictSize = selection.dictSize; if (!best) { return; } @@ -917,10 +1011,12 @@ void COVER_best_finish(COVER_best_t *best, size_t compressedSize, } } /* Save the dictionary, parameters, and size */ - memcpy(best->dict, dict, dictSize); - best->dictSize = dictSize; - best->parameters = parameters; - best->compressedSize = compressedSize; + if (dict) { + memcpy(best->dict, dict, dictSize); + best->dictSize = dictSize; + best->parameters = parameters; + best->compressedSize = compressedSize; + } } if (liveJobs == 0) { ZSTD_pthread_cond_broadcast(&best->cond); @@ -929,6 +1025,114 @@ void COVER_best_finish(COVER_best_t *best, size_t compressedSize, } } +static COVER_dictSelection_t setDictSelection(BYTE* buf, size_t s, size_t csz) +{ + COVER_dictSelection_t ds; + ds.dictContent = buf; + ds.dictSize = s; + ds.totalCompressedSize = csz; + return ds; +} + +COVER_dictSelection_t COVER_dictSelectionError(size_t error) { + return setDictSelection(NULL, 0, error); +} + +unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection) { + return (ZSTD_isError(selection.totalCompressedSize) || !selection.dictContent); +} + +void COVER_dictSelectionFree(COVER_dictSelection_t selection){ + free(selection.dictContent); +} + +COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBufferCapacity, + size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples, + size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize) { + + size_t largestDict = 0; + size_t largestCompressed = 0; + BYTE* customDictContentEnd = customDictContent + dictContentSize; + + BYTE* largestDictbuffer = (BYTE*)malloc(dictBufferCapacity); + BYTE* candidateDictBuffer = (BYTE*)malloc(dictBufferCapacity); + double regressionTolerance = ((double)params.shrinkDictMaxRegression / 100.0) + 1.00; + + if (!largestDictbuffer || !candidateDictBuffer) { + free(largestDictbuffer); + free(candidateDictBuffer); + return COVER_dictSelectionError(dictContentSize); + } + + /* Initial dictionary size and compressed size */ + memcpy(largestDictbuffer, customDictContent, dictContentSize); + dictContentSize = ZDICT_finalizeDictionary( + largestDictbuffer, dictBufferCapacity, customDictContent, dictContentSize, + samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams); + + if (ZDICT_isError(dictContentSize)) { + free(largestDictbuffer); + free(candidateDictBuffer); + return COVER_dictSelectionError(dictContentSize); + } + + totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes, + samplesBuffer, offsets, + nbCheckSamples, nbSamples, + largestDictbuffer, dictContentSize); + + if (ZSTD_isError(totalCompressedSize)) { + free(largestDictbuffer); + free(candidateDictBuffer); + return COVER_dictSelectionError(totalCompressedSize); + } + + if (params.shrinkDict == 0) { + free(candidateDictBuffer); + return setDictSelection(largestDictbuffer, 
dictContentSize, totalCompressedSize); + } + + largestDict = dictContentSize; + largestCompressed = totalCompressedSize; + dictContentSize = ZDICT_DICTSIZE_MIN; + + /* Largest dict is initially at least ZDICT_DICTSIZE_MIN */ + while (dictContentSize < largestDict) { + memcpy(candidateDictBuffer, largestDictbuffer, largestDict); + dictContentSize = ZDICT_finalizeDictionary( + candidateDictBuffer, dictBufferCapacity, customDictContentEnd - dictContentSize, dictContentSize, + samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams); + + if (ZDICT_isError(dictContentSize)) { + free(largestDictbuffer); + free(candidateDictBuffer); + return COVER_dictSelectionError(dictContentSize); + + } + + totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes, + samplesBuffer, offsets, + nbCheckSamples, nbSamples, + candidateDictBuffer, dictContentSize); + + if (ZSTD_isError(totalCompressedSize)) { + free(largestDictbuffer); + free(candidateDictBuffer); + return COVER_dictSelectionError(totalCompressedSize); + } + + if ((double)totalCompressedSize <= (double)largestCompressed * regressionTolerance) { + free(largestDictbuffer); + return setDictSelection( candidateDictBuffer, dictContentSize, totalCompressedSize ); + } + dictContentSize *= 2; + } + dictContentSize = largestDict; + totalCompressedSize = largestCompressed; + free(candidateDictBuffer); + return setDictSelection( largestDictbuffer, dictContentSize, totalCompressedSize ); +} + /** * Parameters for COVER_tryParameters(). */ @@ -944,17 +1148,20 @@ typedef struct COVER_tryParameters_data_s { * This function is thread safe if zstd is compiled with multithreaded support. * It takes its parameters as an *OWNING* opaque pointer to support threading. */ -static void COVER_tryParameters(void *opaque) { +static void COVER_tryParameters(void *opaque) +{ /* Save parameters as local variables */ - COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque; + COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t*)opaque; const COVER_ctx_t *const ctx = data->ctx; const ZDICT_cover_params_t parameters = data->parameters; size_t dictBufferCapacity = data->dictBufferCapacity; size_t totalCompressedSize = ERROR(GENERIC); /* Allocate space for hash table, dict, and freqs */ COVER_map_t activeDmers; - BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity); - U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); + BYTE* const dict = (BYTE*)malloc(dictBufferCapacity); + COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC)); + U32* const freqs = (U32*)malloc(ctx->suffixSize * sizeof(U32)); + const int displayLevel = ctx->displayLevel; if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) { DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n"); goto _cleanup; @@ -969,42 +1176,33 @@ static void COVER_tryParameters(void *opaque) { { const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict, dictBufferCapacity, parameters); - dictBufferCapacity = ZDICT_finalizeDictionary( - dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, - ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, - parameters.zParams); - if (ZDICT_isError(dictBufferCapacity)) { - DISPLAYLEVEL(1, "Failed to finalize dictionary\n"); + selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail, + ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets, + 
totalCompressedSize); + + if (COVER_dictSelectionIsError(selection)) { + DISPLAYLEVEL(1, "Failed to select dictionary\n"); goto _cleanup; } } - /* Check total compressed size */ - totalCompressedSize = COVER_checkTotalCompressedSize(parameters, ctx->samplesSizes, - ctx->samples, ctx->offsets, - ctx->nbTrainSamples, ctx->nbSamples, - dict, dictBufferCapacity); - _cleanup: - COVER_best_finish(data->best, totalCompressedSize, parameters, dict, - dictBufferCapacity); + free(dict); + COVER_best_finish(data->best, parameters, selection); free(data); COVER_map_destroy(&activeDmers); - if (dict) { - free(dict); - } - if (freqs) { - free(freqs); - } + COVER_dictSelectionFree(selection); + free(freqs); } -ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( - void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, - const size_t *samplesSizes, unsigned nbSamples, - ZDICT_cover_params_t *parameters) { +ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_cover( + void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, + const size_t* samplesSizes, unsigned nbSamples, + ZDICT_cover_params_t* parameters) +{ /* constants */ const unsigned nbThreads = parameters->nbThreads; const double splitPoint = - parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint; + parameters->splitPoint <= 0.0 ? COVER_DEFAULT_SPLITPOINT : parameters->splitPoint; const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d; const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d; const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k; @@ -1013,22 +1211,24 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1); const unsigned kIterations = (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize); + const unsigned shrinkDict = 0; /* Local variables */ - const int displayLevel = parameters->zParams.notificationLevel; + int displayLevel = (int)parameters->zParams.notificationLevel; unsigned iteration = 1; unsigned d; unsigned k; COVER_best_t best; POOL_ctx *pool = NULL; int warned = 0; + clock_t lastUpdateTime = 0; /* Checks */ if (splitPoint <= 0 || splitPoint > 1) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n"); + DISPLAYLEVEL(1, "Incorrect parameters\n"); return ERROR(parameter_outOfBound); } if (kMinK < kMaxD || kMaxK < kMinK) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n"); + DISPLAYLEVEL(1, "Incorrect parameters\n"); return ERROR(parameter_outOfBound); } if (nbSamples == 0) { @@ -1048,19 +1248,19 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( } /* Initialization */ COVER_best_init(&best); - /* Turn down global display level to clean up display at level 2 and below */ - g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1; /* Loop through d first because each new value needs a new context */ - LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n", + DISPLAYLEVEL(2, "Trying %u different sets of parameters\n", kIterations); for (d = kMinD; d <= kMaxD; d += 2) { /* Initialize the context for this value of d */ COVER_ctx_t ctx; - LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); + DISPLAYLEVEL(3, "d=%u\n", d); { - const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint); + /* Turn down global display level to clean up display at level 2 and below */ + const int childDisplayLevel = (displayLevel == 0) ? 
0 : displayLevel - 1; + const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, childDisplayLevel); if (ZSTD_isError(initVal)) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); + DISPLAYLEVEL(1, "Failed to initialize context\n"); COVER_best_destroy(&best); POOL_free(pool); return initVal; @@ -1075,9 +1275,9 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( /* Prepare the arguments */ COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc( sizeof(COVER_tryParameters_data_t)); - LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k); + DISPLAYLEVEL(3, "k=%u\n", k); if (!data) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n"); + DISPLAYLEVEL(1, "Failed to allocate parameters\n"); COVER_best_destroy(&best); COVER_ctx_destroy(&ctx); POOL_free(pool); @@ -1091,7 +1291,8 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( data->parameters.d = d; data->parameters.splitPoint = splitPoint; data->parameters.steps = kSteps; - data->parameters.zParams.notificationLevel = g_displayLevel; + data->parameters.shrinkDict = shrinkDict; + data->parameters.zParams.notificationLevel = (unsigned)ctx.displayLevel; /* Check the parameters */ if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) { DISPLAYLEVEL(1, "Cover parameters incorrect\n"); @@ -1106,14 +1307,14 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( COVER_tryParameters(data); } /* Print status */ - LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ", - (unsigned)((iteration * 100) / kIterations)); + DISPLAYUPDATE(lastUpdateTime, 2, "\r%u%% ", + (unsigned)((iteration * 100) / kIterations)); ++iteration; } COVER_best_wait(&best); COVER_ctx_destroy(&ctx); } - LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", ""); + DISPLAYLEVEL(2, "\r%79s\r", ""); /* Fill the output buffer and parameters with output of the best parameters */ { const size_t dictSize = best.dictSize; diff --git a/lib/dictBuilder/cover.h index efb46807c78..a5d7506ef6d 100644 --- a/lib/dictBuilder/cover.h +++ b/lib/dictBuilder/cover.h @@ -1,15 +1,20 @@ -#include <stdio.h> /* fprintf */ -#include <stdlib.h> /* malloc, free, qsort */ -#include <string.h> /* memset */ -#include <time.h> /* clock */ -#include "mem.h" /* read */ -#include "pool.h" -#include "threading.h" -#include "zstd_internal.h" /* includes zstd.h */ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + #ifndef ZDICT_STATIC_LINKING_ONLY -#define ZDICT_STATIC_LINKING_ONLY +# define ZDICT_STATIC_LINKING_ONLY #endif -#include "zdict.h" + +#include "../common/threading.h" /* ZSTD_pthread_mutex_t */ +#include "../common/mem.h" /* U32, BYTE */ +#include "../zdict.h" /** * COVER_best_t is used for two purposes: @@ -46,6 +51,15 @@ typedef struct { U32 size; } COVER_epoch_info_t; +/** + * Struct used for the dictionary selection function. + */ +typedef struct COVER_dictSelection { + BYTE* dictContent; + size_t dictSize; + size_t totalCompressedSize; +} COVER_dictSelection_t; + /** * Computes the number of epochs and the size of each epoch. * We will make sure that each epoch gets at least 10 * k bytes.
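The epoch invariant stated just above (each epoch covers at least 10 * k bytes of dmers) is easy to illustrate with a small standalone sketch. This is not the patch's code: the helper name, the rounding choices, and the assert are assumptions made for illustration only.

```c
#include <assert.h>

typedef struct { unsigned num; unsigned size; } epoch_info_t;  /* mirrors COVER_epoch_info_t */

/* Illustrative sketch: split nbDmers into epochs so that each epoch covers
 * at least 10 * k dmers. The real COVER_computeEpochs() may round
 * differently; only the invariant is taken from the header comment above. */
static epoch_info_t computeEpochs(unsigned maxDictSize, unsigned nbDmers, unsigned k)
{
    const unsigned minEpochSize = 10 * k;
    epoch_info_t e;
    assert(k > 0 && nbDmers > 0);
    e.num = maxDictSize / k;            /* one k-sized segment per epoch */
    if (e.num == 0) e.num = 1;
    e.size = nbDmers / e.num;
    if (e.size < minEpochSize) {        /* enforce the 10 * k lower bound */
        e.size = minEpochSize < nbDmers ? minEpochSize : nbDmers;
        e.num = nbDmers / e.size;       /* >= 1 since e.size <= nbDmers */
    }
    return e;
}
```

COVER_buildDictionary() then selects one segment of size k per epoch, which is why the epoch count is derived from maxDictSize / k.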
@@ -107,6 +121,32 @@ void COVER_best_start(COVER_best_t *best); * Decrements liveJobs and signals any waiting threads if liveJobs == 0. * If this dictionary is the best so far save it and its parameters. */ -void COVER_best_finish(COVER_best_t *best, size_t compressedSize, - ZDICT_cover_params_t parameters, void *dict, - size_t dictSize); +void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters, + COVER_dictSelection_t selection); +/** + * Checks whether the selection returned by COVER_selectDict() is an error. + */ +unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection); + + /** + * Error constructor for COVER_selectDict(): returns a selection whose + * totalCompressedSize field carries a ZSTD error code. + */ +COVER_dictSelection_t COVER_dictSelectionError(size_t error); + +/** + * Always call this after COVER_selectDict() to free the memory of the + * newly created dictionary. + */ +void COVER_dictSelectionFree(COVER_dictSelection_t selection); + +/** + * Finalizes the dictionary and selects one based on whether the shrink-dict + * flag was enabled. If enabled, the selected dictionary is the smallest one + * whose compressed size stays within the specified regression of the largest + * dictionary's compressed size. + */ + COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBufferCapacity, + size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples, + size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize); diff --git a/lib/dictBuilder/divsufsort.c index ead9220442b..e5b117b298e 100644 --- a/lib/dictBuilder/divsufsort.c +++ b/lib/dictBuilder/divsufsort.c @@ -199,8 +199,8 @@ ss_isqrt(int x) { int y, e; if(x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) { return SS_BLOCKSIZE; } - e = (x & 0xffff0000) ? - ((x & 0xff000000) ? + e = ((unsigned)x & 0xffff0000) ? + (((unsigned)x & 0xff000000) ? 24 + lg_table[(x >> 24) & 0xff] : 16 + lg_table[(x >> 16) & 0xff]) : ((x & 0x0000ff00) ? @@ -909,8 +909,8 @@ sssort(const unsigned char *T, const int *PA, static INLINE int tr_ilg(int n) { - return (n & 0xffff0000) ? - ((n & 0xff000000) ? + return ((unsigned)n & 0xffff0000) ? + (((unsigned)n & 0xff000000) ? 24 + lg_table[(n >> 24) & 0xff] : 16 + lg_table[(n >> 16) & 0xff]) : ((n & 0x0000ff00) ? @@ -1576,7 +1576,7 @@ sort_typeBstar(const unsigned char *T, int *SA, /* Construct the inverse suffix array of type B* suffixes using trsort. */ trsort(ISAb, SA, m, 1); - /* Set the sorted order of tyoe B* suffixes. */ + /* Set the sorted order of type B* suffixes.
*/ for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) { for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { } if(0 <= i) { diff --git a/lib/dictBuilder/divsufsort.h index 5440994af15..3ed2b287ab1 100644 --- a/lib/dictBuilder/divsufsort.h +++ b/lib/dictBuilder/divsufsort.h @@ -27,11 +27,6 @@ #ifndef _DIVSUFSORT_H #define _DIVSUFSORT_H 1 -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - - /*- Prototypes -*/ /** @@ -59,9 +54,4 @@ divsufsort(const unsigned char *T, int *SA, int n, int openMP); int divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP); - -#ifdef __cplusplus -} /* extern "C" */ -#endif /* __cplusplus */ - #endif /* _DIVSUFSORT_H */ diff --git a/lib/dictBuilder/fastcover.c index 40131e69363..56a08138520 100644 --- a/lib/dictBuilder/fastcover.c +++ b/lib/dictBuilder/fastcover.c @@ -1,3 +1,13 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + /*-************************************* * Dependencies ***************************************/ @@ -6,75 +16,76 @@ #include <string.h> /* memset */ #include <time.h> /* clock */ -#include "mem.h" /* read */ -#include "pool.h" -#include "threading.h" -#include "cover.h" -#include "zstd_internal.h" /* includes zstd.h */ #ifndef ZDICT_STATIC_LINKING_ONLY -#define ZDICT_STATIC_LINKING_ONLY +# define ZDICT_STATIC_LINKING_ONLY #endif -#include "zdict.h" + +#include "../common/mem.h" /* read */ +#include "../common/pool.h" +#include "../common/threading.h" +#include "../common/zstd_internal.h" /* includes zstd.h */ +#include "../compress/zstd_compress_internal.h" /* ZSTD_hash*() */ +#include "../zdict.h" +#include "cover.h" /*-************************************* * Constants ***************************************/ +/** +* Samples are referenced with 32-bit indexes, so samples size is limited to 4 GB +* on 64-bit builds. +* For 32-bit builds we choose 1 GB. +* Most 32-bit platforms have 2 GB of user-mode addressable space, and we allocate one +* large contiguous buffer, so 1 GB is already a high limit. +*/ #define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB)) #define FASTCOVER_MAX_F 31 #define FASTCOVER_MAX_ACCEL 10 -#define DEFAULT_SPLITPOINT 0.75 +#define FASTCOVER_DEFAULT_SPLITPOINT 0.75 #define DEFAULT_F 20 #define DEFAULT_ACCEL 1 /*-************************************* * Console display +* +* Captures the `displayLevel` variable in the local scope. ***************************************/ -static int g_displayLevel = 2; +#undef DISPLAY #define DISPLAY(...) \ { \ fprintf(stderr, __VA_ARGS__); \ fflush(stderr); \ } -#define LOCALDISPLAYLEVEL(displayLevel, l, ...) \ +#undef DISPLAYLEVEL +#define DISPLAYLEVEL(l, ...) \ if (displayLevel >= l) { \ DISPLAY(__VA_ARGS__); \ } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ -#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) -#define LOCALDISPLAYUPDATE(displayLevel, l, ...) \ +#undef DISPLAYUPDATE +#define DISPLAYUPDATE(lastUpdateTime, l, ...)
\ if (displayLevel >= l) { \ - if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) { \ - g_time = clock(); \ + const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; \ + if ((clock() - lastUpdateTime > refreshRate) || (displayLevel >= 4)) { \ + lastUpdateTime = clock(); \ DISPLAY(__VA_ARGS__); \ } \ } -#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) -static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; -static clock_t g_time = 0; /*-************************************* * Hash Functions ***************************************/ -static const U64 prime6bytes = 227718039650203ULL; -static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } -static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); } - -static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; -static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; } -static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); } - - /** - * Hash the d-byte value pointed to by p and mod 2^f + * Hash the d-byte value pointed to by p and mod 2^f into the frequency vector */ -static size_t FASTCOVER_hashPtrToIndex(const void* p, U32 h, unsigned d) { +static size_t FASTCOVER_hashPtrToIndex(const void* p, U32 f, unsigned d) { if (d == 6) { - return ZSTD_hash6Ptr(p, h) & ((1 << h) - 1); + return ZSTD_hash6Ptr(p, f); } - return ZSTD_hash8Ptr(p, h) & ((1 << h) - 1); + return ZSTD_hash8Ptr(p, f); } @@ -117,6 +128,7 @@ typedef struct { unsigned d; unsigned f; FASTCOVER_accel_t accelParams; + int displayLevel; } FASTCOVER_ctx_t; @@ -285,7 +297,7 @@ FASTCOVER_computeFrequency(U32* freqs, const FASTCOVER_ctx_t* ctx) /** * Prepare a context for dictionary building. - * The context is only dependent on the parameter `d` and can used multiple + * The context is only dependent on the parameter `d` and can be used multiple * times. * Returns 0 on success or error code on error. * The context must be destroyed with `FASTCOVER_ctx_destroy()`. @@ -295,7 +307,8 @@ FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, unsigned d, double splitPoint, unsigned f, - FASTCOVER_accel_t accelParams) + FASTCOVER_accel_t accelParams, + int displayLevel) { const BYTE* const samples = (const BYTE*)samplesBuffer; const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples); @@ -304,6 +317,7 @@ FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx, const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples; const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; const size_t testSamplesSize = splitPoint < 1.0 ? 
COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize; + ctx->displayLevel = displayLevel; /* Checks */ if (totalSamplesSize < MAX(d, sizeof(U64)) || @@ -390,7 +404,9 @@ FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx, const COVER_epoch_info_t epochs = COVER_computeEpochs( (U32)dictBufferCapacity, (U32)ctx->nbDmers, parameters.k, 1); const size_t maxZeroScoreRun = 10; + const int displayLevel = ctx->displayLevel; size_t zeroScoreRun = 0; + clock_t lastUpdateTime = 0; size_t epoch; DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", (U32)epochs.num, (U32)epochs.size); @@ -428,6 +444,7 @@ FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx, tail -= segmentSize; memcpy(dict + tail, ctx->samples + segment.begin, segmentSize); DISPLAYUPDATE( + lastUpdateTime, 2, "\r%u%% ", (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); } @@ -435,7 +452,6 @@ FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx, return tail; } - /** * Parameters for FASTCOVER_tryParameters(). */ @@ -452,19 +468,21 @@ typedef struct FASTCOVER_tryParameters_data_s { * This function is thread safe if zstd is compiled with multithreaded support. * It takes its parameters as an *OWNING* opaque pointer to support threading. */ -static void FASTCOVER_tryParameters(void *opaque) +static void FASTCOVER_tryParameters(void* opaque) { /* Save parameters as local variables */ - FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t *)opaque; + FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t*)opaque; const FASTCOVER_ctx_t *const ctx = data->ctx; const ZDICT_cover_params_t parameters = data->parameters; size_t dictBufferCapacity = data->dictBufferCapacity; size_t totalCompressedSize = ERROR(GENERIC); /* Initialize array to keep track of frequency of dmer within activeSegment */ - U16* segmentFreqs = (U16 *)calloc(((U64)1 << ctx->f), sizeof(U16)); + U16* segmentFreqs = (U16*)calloc(((U64)1 << ctx->f), sizeof(U16)); /* Allocate space for hash table, dict, and freqs */ - BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity); - U32 *freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32)); + BYTE *const dict = (BYTE*)malloc(dictBufferCapacity); + COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC)); + U32* freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32)); + const int displayLevel = ctx->displayLevel; if (!segmentFreqs || !dict || !freqs) { DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n"); goto _cleanup; @@ -473,27 +491,24 @@ static void FASTCOVER_tryParameters(void *opaque) memcpy(freqs, ctx->freqs, ((U64)1 << ctx->f) * sizeof(U32)); /* Build the dictionary */ { const size_t tail = FASTCOVER_buildDictionary(ctx, freqs, dict, dictBufferCapacity, - parameters, segmentFreqs); + parameters, segmentFreqs); + const unsigned nbFinalizeSamples = (unsigned)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100); - dictBufferCapacity = ZDICT_finalizeDictionary( - dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, - ctx->samples, ctx->samplesSizes, nbFinalizeSamples, parameters.zParams); - if (ZDICT_isError(dictBufferCapacity)) { - DISPLAYLEVEL(1, "Failed to finalize dictionary\n"); + selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail, + ctx->samples, ctx->samplesSizes, nbFinalizeSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets, + totalCompressedSize); + + if (COVER_dictSelectionIsError(selection)) { + DISPLAYLEVEL(1, "Failed 
to select dictionary\n"); goto _cleanup; } } - /* Check total compressed size */ - totalCompressedSize = COVER_checkTotalCompressedSize(parameters, ctx->samplesSizes, - ctx->samples, ctx->offsets, - ctx->nbTrainSamples, ctx->nbSamples, - dict, dictBufferCapacity); _cleanup: - COVER_best_finish(data->best, totalCompressedSize, parameters, dict, - dictBufferCapacity); + free(dict); + COVER_best_finish(data->best, parameters, selection); free(data); free(segmentFreqs); - free(dict); + COVER_dictSelectionFree(selection); free(freqs); } @@ -508,6 +523,7 @@ FASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fastCoverParams, coverParams->nbThreads = fastCoverParams.nbThreads; coverParams->splitPoint = fastCoverParams.splitPoint; coverParams->zParams = fastCoverParams.zParams; + coverParams->shrinkDict = fastCoverParams.shrinkDict; } @@ -524,10 +540,11 @@ FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams, fastCoverParams->f = f; fastCoverParams->accel = accel; fastCoverParams->zParams = coverParams.zParams; + fastCoverParams->shrinkDict = coverParams.shrinkDict; } -ZDICTLIB_API size_t +ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, @@ -537,8 +554,7 @@ ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, FASTCOVER_ctx_t ctx; ZDICT_cover_params_t coverParams; FASTCOVER_accel_t accelParams; - /* Initialize global data */ - g_displayLevel = parameters.zParams.notificationLevel; + const int displayLevel = (int)parameters.zParams.notificationLevel; /* Assign splitPoint and f if not provided */ parameters.splitPoint = 1.0; parameters.f = parameters.f == 0 ? DEFAULT_F : parameters.f; @@ -567,13 +583,13 @@ ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, { size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, coverParams.d, parameters.splitPoint, parameters.f, - accelParams); + accelParams, displayLevel); if (ZSTD_isError(initVal)) { DISPLAYLEVEL(1, "Failed to initialize context\n"); return initVal; } } - COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel); + COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel); /* Build the dictionary */ DISPLAYLEVEL(2, "Building dictionary\n"); { @@ -596,7 +612,7 @@ ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, } -ZDICTLIB_API size_t +ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_fastCover( void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, @@ -608,7 +624,7 @@ ZDICT_optimizeTrainFromBuffer_fastCover( /* constants */ const unsigned nbThreads = parameters->nbThreads; const double splitPoint = - parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint; + parameters->splitPoint <= 0.0 ? FASTCOVER_DEFAULT_SPLITPOINT : parameters->splitPoint; const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d; const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d; const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k; @@ -619,33 +635,35 @@ ZDICT_optimizeTrainFromBuffer_fastCover( (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize); const unsigned f = parameters->f == 0 ? DEFAULT_F : parameters->f; const unsigned accel = parameters->accel == 0 ? 
DEFAULT_ACCEL : parameters->accel; + const unsigned shrinkDict = 0; /* Local variables */ - const int displayLevel = parameters->zParams.notificationLevel; + const int displayLevel = (int)parameters->zParams.notificationLevel; unsigned iteration = 1; unsigned d; unsigned k; COVER_best_t best; POOL_ctx *pool = NULL; int warned = 0; + clock_t lastUpdateTime = 0; /* Checks */ if (splitPoint <= 0 || splitPoint > 1) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect splitPoint\n"); + DISPLAYLEVEL(1, "Incorrect splitPoint\n"); return ERROR(parameter_outOfBound); } if (accel == 0 || accel > FASTCOVER_MAX_ACCEL) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect accel\n"); + DISPLAYLEVEL(1, "Incorrect accel\n"); return ERROR(parameter_outOfBound); } if (kMinK < kMaxD || kMaxK < kMinK) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect k\n"); + DISPLAYLEVEL(1, "Incorrect k\n"); return ERROR(parameter_outOfBound); } if (nbSamples == 0) { - LOCALDISPLAYLEVEL(displayLevel, 1, "FASTCOVER must have at least one input file\n"); + DISPLAYLEVEL(1, "FASTCOVER must have at least one input file\n"); return ERROR(srcSize_wrong); } if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { - LOCALDISPLAYLEVEL(displayLevel, 1, "dictBufferCapacity must be at least %u\n", + DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", ZDICT_DICTSIZE_MIN); return ERROR(dstSize_tooSmall); } @@ -660,19 +678,18 @@ ZDICT_optimizeTrainFromBuffer_fastCover( memset(&coverParams, 0 , sizeof(coverParams)); FASTCOVER_convertToCoverParams(*parameters, &coverParams); accelParams = FASTCOVER_defaultAccelParameters[accel]; - /* Turn down global display level to clean up display at level 2 and below */ - g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1; /* Loop through d first because each new value needs a new context */ - LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n", - kIterations); + DISPLAYLEVEL(2, "Trying %u different sets of parameters\n", kIterations); for (d = kMinD; d <= kMaxD; d += 2) { /* Initialize the context for this value of d */ FASTCOVER_ctx_t ctx; - LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); + DISPLAYLEVEL(3, "d=%u\n", d); { - size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams); + /* Turn down global display level to clean up display at level 2 and below */ + const int childDisplayLevel = displayLevel == 0 ? 
0 : displayLevel - 1; + size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams, childDisplayLevel); if (ZSTD_isError(initVal)) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); + DISPLAYLEVEL(1, "Failed to initialize context\n"); COVER_best_destroy(&best); POOL_free(pool); return initVal; @@ -687,9 +704,9 @@ ZDICT_optimizeTrainFromBuffer_fastCover( /* Prepare the arguments */ FASTCOVER_tryParameters_data_t *data = (FASTCOVER_tryParameters_data_t *)malloc( sizeof(FASTCOVER_tryParameters_data_t)); - LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k); + DISPLAYLEVEL(3, "k=%u\n", k); if (!data) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n"); + DISPLAYLEVEL(1, "Failed to allocate parameters\n"); COVER_best_destroy(&best); FASTCOVER_ctx_destroy(&ctx); POOL_free(pool); @@ -703,7 +720,8 @@ ZDICT_optimizeTrainFromBuffer_fastCover( data->parameters.d = d; data->parameters.splitPoint = splitPoint; data->parameters.steps = kSteps; - data->parameters.zParams.notificationLevel = g_displayLevel; + data->parameters.shrinkDict = shrinkDict; + data->parameters.zParams.notificationLevel = (unsigned)ctx.displayLevel; /* Check the parameters */ if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity, data->ctx->f, accel)) { @@ -719,14 +737,15 @@ ZDICT_optimizeTrainFromBuffer_fastCover( FASTCOVER_tryParameters(data); } /* Print status */ - LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ", - (unsigned)((iteration * 100) / kIterations)); + DISPLAYUPDATE(lastUpdateTime, + 2, "\r%u%% ", + (unsigned)((iteration * 100) / kIterations)); ++iteration; } COVER_best_wait(&best); FASTCOVER_ctx_destroy(&ctx); } - LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", ""); + DISPLAYLEVEL(2, "\r%79s\r", ""); /* Fill the output buffer and parameters with output of the best parameters */ { const size_t dictSize = best.dictSize; diff --git a/lib/dictBuilder/zdict.c b/lib/dictBuilder/zdict.c index ee21ee1a9d3..befa616b87f 100644 --- a/lib/dictBuilder/zdict.c +++ b/lib/dictBuilder/zdict.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -23,9 +23,13 @@ /* Unix Large Files support (>4GB) */ #define _FILE_OFFSET_BITS 64 #if (defined(__sun__) && (!defined(__LP64__))) /* Sun Solaris 32-bits requires specific definitions */ +# ifndef _LARGEFILE_SOURCE # define _LARGEFILE_SOURCE +# endif #elif ! 
defined(__LP64__) /* No point defining Large file for 64 bit */ +# ifndef _LARGEFILE64_SOURCE # define _LARGEFILE64_SOURCE +# endif #endif @@ -37,17 +41,19 @@ #include <stdio.h> /* fprintf, fopen, ftello64 */ #include <time.h> /* clock */ -#include "mem.h" /* read */ -#include "fse.h" /* FSE_normalizeCount, FSE_writeNCount */ -#define HUF_STATIC_LINKING_ONLY -#include "huf.h" /* HUF_buildCTable, HUF_writeCTable */ -#include "zstd_internal.h" /* includes zstd.h */ -#include "xxhash.h" /* XXH64 */ -#include "divsufsort.h" #ifndef ZDICT_STATIC_LINKING_ONLY # define ZDICT_STATIC_LINKING_ONLY #endif -#include "zdict.h" + +#include "../common/mem.h" /* read */ +#include "../common/fse.h" /* FSE_normalizeCount, FSE_writeNCount */ +#include "../common/huf.h" /* HUF_buildCTable, HUF_writeCTable */ +#include "../common/zstd_internal.h" /* includes zstd.h */ +#include "../common/xxhash.h" /* XXH64 */ +#include "../compress/zstd_compress_internal.h" /* ZSTD_loadCEntropy() */ +#include "../zdict.h" +#include "divsufsort.h" +#include "../common/bits.h" /* ZSTD_NbCommonBytes */ /*-************************************* @@ -61,15 +67,16 @@ #define NOISELENGTH 32 -static const int g_compressionLevel_default = 3; static const U32 g_selectivity_default = 9; /*-************************************* * Console display ***************************************/ -#define DISPLAY(...) { fprintf(stderr, __VA_ARGS__); fflush( stderr ); } -#define DISPLAYLEVEL(l, ...) if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ +#undef DISPLAY +#define DISPLAY(...) do { fprintf(stderr, __VA_ARGS__); fflush( stderr ); } while (0) +#undef DISPLAYLEVEL +#define DISPLAYLEVEL(l, ...) do { if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); } } while (0) /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ static clock_t ZDICT_clockSpan(clock_t nPrevious) { return clock() - nPrevious; } @@ -99,69 +106,30 @@ unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize) return MEM_readLE32((const char*)dictBuffer + 4); } - -/*-******************************************************** -* Dictionary training functions -**********************************************************/ -static unsigned ZDICT_NbCommonBytes (size_t val) +size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize) { - if (MEM_isLittleEndian()) { - if (MEM_64bits()) { -# if defined(_MSC_VER) && defined(_WIN64) - unsigned long r = 0; - _BitScanForward64( &r, (U64)val ); - return (unsigned)(r>>3); -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (__builtin_ctzll((U64)val) >> 3); -# else - static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; - return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; -# endif - } else { /* 32 bits */ -# if defined(_MSC_VER) - unsigned long r=0; - _BitScanForward( &r, (U32)val ); - return (unsigned)(r>>3); -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (__builtin_ctz((U32)val) >> 3); -# else - static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; - return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; -# endif + size_t headerSize; + if (dictSize <= 8 || MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return ERROR(dictionary_corrupted); + + {
ZSTD_compressedBlockState_t* bs = (ZSTD_compressedBlockState_t*)malloc(sizeof(ZSTD_compressedBlockState_t)); + U32* wksp = (U32*)malloc(HUF_WORKSPACE_SIZE); + if (!bs || !wksp) { + headerSize = ERROR(memory_allocation); + } else { + ZSTD_reset_compressedBlockState(bs); + headerSize = ZSTD_loadCEntropy(bs, wksp, dictBuffer, dictSize); } - } else { /* Big Endian CPU */ - if (MEM_64bits()) { -# if defined(_MSC_VER) && defined(_WIN64) - unsigned long r = 0; - _BitScanReverse64( &r, val ); - return (unsigned)(r>>3); -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (__builtin_clzll(val) >> 3); -# else - unsigned r; - const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ - if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } - if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } - r += (!val); - return r; -# endif - } else { /* 32 bits */ -# if defined(_MSC_VER) - unsigned long r = 0; - _BitScanReverse( &r, (unsigned long)val ); - return (unsigned)(r>>3); -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (__builtin_clz((U32)val) >> 3); -# else - unsigned r; - if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } - r += (!val); - return r; -# endif - } } -} + free(bs); + free(wksp); + } + + return headerSize; +} +/*-******************************************************** +* Dictionary training functions +**********************************************************/ /*! ZDICT_count() : Count the nb of common bytes between 2 pointers. Note : this function presumes end of buffer followed by noisy guard band. @@ -176,7 +144,7 @@ static size_t ZDICT_count(const void* pIn, const void* pMatch) pMatch = (const char*)pMatch+sizeof(size_t); continue; } - pIn = (const char*)pIn+ZDICT_NbCommonBytes(diff); + pIn = (const char*)pIn+ZSTD_NbCommonBytes(diff); return (size_t)((const char*)pIn - pStart); } } @@ -200,7 +168,7 @@ static void ZDICT_initDictItem(dictItem* d) #define MINMATCHLENGTH 7 /* heuristic determined experimentally */ static dictItem ZDICT_analyzePos( BYTE* doneMarks, - const int* suffix, U32 start, + const unsigned* suffix, U32 start, const void* buffer, U32 minRatio, U32 notificationLevel) { U32 lengthList[LLIMIT] = {0}; @@ -208,7 +176,7 @@ static dictItem ZDICT_analyzePos( U32 savings[LLIMIT] = {0}; const BYTE* b = (const BYTE*)buffer; size_t maxLength = LLIMIT; - size_t pos = suffix[start]; + size_t pos = (size_t)suffix[start]; U32 end = start; dictItem solution; @@ -325,8 +293,10 @@ static dictItem ZDICT_analyzePos( for (i=(int)(maxLength-2); i>=0; i--) cumulLength[i] = cumulLength[i+1] + lengthList[i]; - for (i=LLIMIT-1; i>=MINMATCHLENGTH; i--) if (cumulLength[i]>=minRatio) break; - maxLength = i; + { unsigned u; + for (u=LLIMIT-1; u>=MINMATCHLENGTH; u--) if (cumulLength[u]>=minRatio) break; + maxLength = u; + } /* reduce maxLength in case of final into repetitive data */ { U32 l = (U32)maxLength; @@ -338,11 +308,13 @@ static dictItem ZDICT_analyzePos( /* calculate savings */ savings[5] = 0; - for (i=MINMATCHLENGTH; i<=(int)maxLength; i++) - savings[i] = savings[i-1] + (lengthList[i] * (i-3)); + { unsigned u; + for (u=MINMATCHLENGTH; u<=maxLength; u++) + savings[u] = savings[u-1] + (lengthList[u] * (u-3)); + } DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f) \n", - (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / maxLength); + (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / (double)maxLength); 
solution.pos = (U32)pos; solution.length = (U32)maxLength; @@ -352,7 +324,7 @@ static dictItem ZDICT_analyzePos( { U32 id; for (id=start; id1) && (table[u-1].savings < elt.savings)) - table[u] = table[u-1], u--; + table[u] = table[u-1], u--; table[u] = elt; return u; } } @@ -415,11 +387,11 @@ static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const if ((table[u].pos + table[u].length >= elt.pos) && (table[u].pos < elt.pos)) { /* overlap, existing < new */ /* append */ - int const addedLength = (int)eltEnd - (table[u].pos + table[u].length); + int const addedLength = (int)eltEnd - (int)(table[u].pos + table[u].length); /* note: can be negative */ table[u].savings += elt.length / 8; /* rough approx bonus */ if (addedLength > 0) { /* otherwise, elt fully included into existing */ - table[u].length += addedLength; - table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */ + table[u].length += (unsigned)addedLength; + table[u].savings += elt.savings * (unsigned)addedLength / elt.length; /* rough approx */ } /* sort : improve rank */ elt = table[u]; @@ -431,7 +403,7 @@ static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + elt.pos + 1)) { if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) { - size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 ); + size_t const addedLength = MAX( elt.length - table[u].length , 1 ); table[u].pos = elt.pos; table[u].savings += (U32)(elt.savings * addedLength / elt.length); table[u].length = MIN(elt.length, table[u].length + 1); @@ -499,8 +471,8 @@ static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize, const size_t* fileSizes, unsigned nbFiles, unsigned minRatio, U32 notificationLevel) { - int* const suffix0 = (int*)malloc((bufferSize+2)*sizeof(*suffix0)); - int* const suffix = suffix0+1; + unsigned* const suffix0 = (unsigned*)malloc((bufferSize+2)*sizeof(*suffix0)); + unsigned* const suffix = suffix0+1; U32* reverseSuffix = (U32*)malloc((bufferSize)*sizeof(*reverseSuffix)); BYTE* doneMarks = (BYTE*)malloc((bufferSize+16)*sizeof(*doneMarks)); /* +16 for overflow security */ U32* filePos = (U32*)malloc(nbFiles * sizeof(*filePos)); @@ -508,10 +480,17 @@ static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize, clock_t displayClock = 0; clock_t const refreshRate = CLOCKS_PER_SEC * 3 / 10; -# define DISPLAYUPDATE(l, ...) if (notificationLevel>=l) { \ - if (ZDICT_clockSpan(displayClock) > refreshRate) \ - { displayClock = clock(); DISPLAY(__VA_ARGS__); \ - if (notificationLevel>=4) fflush(stderr); } } +# undef DISPLAYUPDATE +# define DISPLAYUPDATE(l, ...) 
\ + do { \ + if (notificationLevel>=l) { \ + if (ZDICT_clockSpan(displayClock) > refreshRate) { \ + displayClock = clock(); \ + DISPLAY(__VA_ARGS__); \ + } \ + if (notificationLevel>=4) fflush(stderr); \ + } \ + } while (0) /* init */ DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */ @@ -528,11 +507,11 @@ static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize, /* sort */ DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (unsigned)(bufferSize>>20)); - { int const divSuftSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0); + { int const divSuftSortResult = divsufsort((const unsigned char*)buffer, (int*)suffix, (int)bufferSize, 0); if (divSuftSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; } } - suffix[bufferSize] = (int)bufferSize; /* leads into noise */ - suffix0[0] = (int)bufferSize; /* leads into noise */ + suffix[bufferSize] = (unsigned)bufferSize; /* leads into noise */ + suffix0[0] = (unsigned)bufferSize; /* leads into noise */ /* build reverse suffix sort */ { size_t pos; for (pos=0; pos < bufferSize; pos++) @@ -554,7 +533,7 @@ static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize, if (solution.length==0) { cursor++; continue; } ZDICT_insertDictItem(dictList, dictListSize, solution, buffer); cursor += solution.length; - DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / bufferSize * 100); + DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / (double)bufferSize * 100.0); } } _cleanup: @@ -571,7 +550,7 @@ static void ZDICT_fillNoise(void* buffer, size_t length) unsigned const prime1 = 2654435761U; unsigned const prime2 = 2246822519U; unsigned acc = prime1; - size_t p=0;; + size_t p=0; for (p=0; p> 21); @@ -588,24 +567,24 @@ typedef struct #define MAXREPOFFSET 1024 -static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params, +static void ZDICT_countEStats(EStats_ress_t esr, const ZSTD_parameters* params, unsigned* countLit, unsigned* offsetcodeCount, unsigned* matchlengthCount, unsigned* litlengthCount, U32* repOffsets, const void* src, size_t srcSize, U32 notificationLevel) { - size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_MAX, 1 << params.cParams.windowLog); + size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_MAX, 1 << params->cParams.windowLog); size_t cSize; if (srcSize > blockSizeMax) srcSize = blockSizeMax; /* protection vs large samples */ - { size_t const errorCode = ZSTD_compressBegin_usingCDict(esr.zc, esr.dict); + { size_t const errorCode = ZSTD_compressBegin_usingCDict_deprecated(esr.zc, esr.dict); if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_compressBegin_usingCDict failed \n"); return; } } - cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize); + cSize = ZSTD_compressBlock_deprecated(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize); if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (unsigned)srcSize); return; } if (cSize) { /* if == 0; block is not compressible */ - const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc); + const SeqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc); /* literals stats */ { const BYTE* bytePtr; @@ -633,9 +612,9 @@ static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params, } if (nbSeq >= 2) { /* rep offsets */ - const seqDef* const seq = seqStorePtr->sequencesStart; - U32 offset1 = seq[0].offset - 3; - U32 offset2 = seq[1].offset - 3; + const SeqDef* const seq = seqStorePtr->sequencesStart; + U32 
offset1 = seq[0].offBase - ZSTD_REP_NUM; + U32 offset2 = seq[1].offBase - ZSTD_REP_NUM; if (offset1 >= MAXREPOFFSET) offset1 = 0; if (offset2 >= MAXREPOFFSET) offset2 = 0; repOffsets[offset1] += 3; @@ -682,7 +661,7 @@ static void ZDICT_flatLit(unsigned* countLit) #define OFFCODE_MAX 30 /* only applicable to first block */ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, - unsigned compressionLevel, + int compressionLevel, const void* srcBuffer, const size_t* fileSizes, unsigned nbFiles, const void* dictBuffer, size_t dictBufferSize, unsigned notificationLevel) @@ -706,6 +685,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles); size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles); BYTE* dstPtr = (BYTE*)dstBuffer; + U32 wksp[HUF_CTABLE_WORKSPACE_SIZE_U32]; /* init */ DEBUGLOG(4, "ZDICT_analyzeEntropy"); @@ -717,7 +697,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, memset(repOffset, 0, sizeof(repOffset)); repOffset[1] = repOffset[4] = repOffset[8] = 1; memset(bestRepOffset, 0, sizeof(bestRepOffset)); - if (compressionLevel==0) compressionLevel = g_compressionLevel_default; + if (compressionLevel==0) compressionLevel = ZSTD_CLEVEL_DEFAULT; params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize); esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_rawContent, params.cParams, ZSTD_defaultCMem); @@ -731,15 +711,22 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, /* collect stats on all samples */ for (u=0; u= 4) { + /* writeStats */ + DISPLAYLEVEL(4, "Offset Code Frequencies : \n"); + for (u=0; u<=offcodeMax; u++) { + DISPLAYLEVEL(4, "%2u :%7u \n", u, offcodeCount[u]); + } } + /* analyze, build stats, starting with literals */ - { size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog); + { size_t maxNbBits = HUF_buildCTable_wksp(hufTable, countLit, 255, huffLog, wksp, sizeof(wksp)); if (HUF_isError(maxNbBits)) { eSize = maxNbBits; DISPLAYLEVEL(1, " HUF_buildCTable error \n"); @@ -748,7 +735,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, if (maxNbBits==8) { /* not compressible : will fail on HUF_writeCTable() */ DISPLAYLEVEL(2, "warning : pathological dataset : literals are not compressible : samples are noisy or too regular \n"); ZDICT_flatLit(countLit); /* replace distribution by a fake "mostly flat but still compressible" distribution, that HUF_writeCTable() can encode */ - maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog); + maxNbBits = HUF_buildCTable_wksp(hufTable, countLit, 255, huffLog, wksp, sizeof(wksp)); assert(maxNbBits==9); } huffLog = (U32)maxNbBits; @@ -762,7 +749,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, /* note : the result of this phase should be used to better appreciate the impact on statistics */ total=0; for (u=0; u<=offcodeMax; u++) total+=offcodeCount[u]; - errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax); + errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax, /* useLowProbCount */ 1); if (FSE_isError(errorCode)) { eSize = errorCode; DISPLAYLEVEL(1, "FSE_normalizeCount error with offcodeCount \n"); @@ -771,7 +758,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, Offlog = (U32)errorCode; total=0; for (u=0; u<=MaxML; u++) total+=matchLengthCount[u]; 
- errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML); + errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML, /* useLowProbCount */ 1); if (FSE_isError(errorCode)) { eSize = errorCode; DISPLAYLEVEL(1, "FSE_normalizeCount error with matchLengthCount \n"); @@ -780,7 +767,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, mlLog = (U32)errorCode; total=0; for (u=0; u<=MaxLL; u++) total+=litLengthCount[u]; - errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL); + errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL, /* useLowProbCount */ 1); if (FSE_isError(errorCode)) { eSize = errorCode; DISPLAYLEVEL(1, "FSE_normalizeCount error with litLengthCount \n"); @@ -789,7 +776,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, llLog = (U32)errorCode; /* write result to buffer */ - { size_t const hhSize = HUF_writeCTable(dstPtr, maxDstSize, hufTable, 255, huffLog); + { size_t const hhSize = HUF_writeCTable_wksp(dstPtr, maxDstSize, hufTable, 255, huffLog, wksp, sizeof(wksp)); if (HUF_isError(hhSize)) { eSize = hhSize; DISPLAYLEVEL(1, "HUF_writeCTable error \n"); @@ -844,7 +831,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, MEM_writeLE32(dstPtr+8, bestRepOffset[2].offset); #else /* at this stage, we don't use the result of "most common first offset", - as the impact of statistics is not properly evaluated */ + * as the impact of statistics is not properly evaluated */ MEM_writeLE32(dstPtr+0, repStartValue[0]); MEM_writeLE32(dstPtr+4, repStartValue[1]); MEM_writeLE32(dstPtr+8, repStartValue[2]); @@ -860,6 +847,17 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, } +/** + * @returns the maximum repcode value + */ +static U32 ZDICT_maxRep(U32 const reps[ZSTD_REP_NUM]) +{ + U32 maxRep = reps[0]; + int r; + for (r = 1; r < ZSTD_REP_NUM; ++r) + maxRep = MAX(maxRep, reps[r]); + return maxRep; +} size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity, const void* customDictContent, size_t dictContentSize, @@ -869,13 +867,15 @@ size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity, size_t hSize; #define HBUFFSIZE 256 /* should prove large enough for all entropy headers */ BYTE header[HBUFFSIZE]; - int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel; + int const compressionLevel = (params.compressionLevel == 0) ? 
ZSTD_CLEVEL_DEFAULT : params.compressionLevel; U32 const notificationLevel = params.notificationLevel; + /* The final dictionary content must be at least as large as the largest repcode */ + size_t const minContentSize = (size_t)ZDICT_maxRep(repStartValue); + size_t paddingSize; /* check conditions */ DEBUGLOG(4, "ZDICT_finalizeDictionary"); if (dictBufferCapacity < dictContentSize) return ERROR(dstSize_tooSmall); - if (dictContentSize < ZDICT_CONTENTSIZE_MIN) return ERROR(srcSize_wrong); if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall); /* dictionary header */ @@ -899,12 +899,43 @@ size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity, hSize += eSize; } - /* copy elements in final buffer ; note : src and dst buffer can overlap */ - if (hSize + dictContentSize > dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize; - { size_t const dictSize = hSize + dictContentSize; - char* dictEnd = (char*)dictBuffer + dictSize; - memmove(dictEnd - dictContentSize, customDictContent, dictContentSize); - memcpy(dictBuffer, header, hSize); + /* Shrink the content size if it doesn't fit in the buffer */ + if (hSize + dictContentSize > dictBufferCapacity) { + dictContentSize = dictBufferCapacity - hSize; + } + + /* Pad the dictionary content with zeros if it is too small */ + if (dictContentSize < minContentSize) { + RETURN_ERROR_IF(hSize + minContentSize > dictBufferCapacity, dstSize_tooSmall, + "dictBufferCapacity too small to fit max repcode"); + paddingSize = minContentSize - dictContentSize; + } else { + paddingSize = 0; + } + + { + size_t const dictSize = hSize + paddingSize + dictContentSize; + + /* The dictionary consists of the header, optional padding, and the content. + * The padding comes before the content because the "best" position in the + * dictionary is the last byte. + */ + BYTE* const outDictHeader = (BYTE*)dictBuffer; + BYTE* const outDictPadding = outDictHeader + hSize; + BYTE* const outDictContent = outDictPadding + paddingSize; + + assert(dictSize <= dictBufferCapacity); + assert(outDictContent + dictContentSize == (BYTE*)dictBuffer + dictSize); + + /* First copy the customDictContent into its final location. + * `customDictContent` and `dictBuffer` may overlap, so we must + * do this before any other writes into the output buffer. + * Then copy the header & padding into the output buffer. + */ + memmove(outDictContent, customDictContent, dictContentSize); + memcpy(outDictHeader, header, hSize); + memset(outDictPadding, 0, paddingSize); + return dictSize; } } @@ -915,7 +946,7 @@ static size_t ZDICT_addEntropyTablesFromBuffer_advanced( const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_params_t params) { - int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel; + int const compressionLevel = (params.compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : params.compressionLevel; U32 const notificationLevel = params.notificationLevel; size_t hSize = 8; @@ -944,16 +975,11 @@ static size_t ZDICT_addEntropyTablesFromBuffer_advanced( return MIN(dictBufferCapacity, hSize+dictContentSize); } -/* Hidden declaration for dbio.c */ -size_t ZDICT_trainFromBuffer_unsafe_legacy( - void* dictBuffer, size_t maxDictSize, - const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, - ZDICT_legacy_params_t params); /*! ZDICT_trainFromBuffer_unsafe_legacy() : -* Warning : `samplesBuffer` must be followed by noisy guard band. 
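The rewritten tail of ZDICT_finalizeDictionary above changes the output layout from header + content to header + zero padding + content, so that the content always covers the largest starting repcode (the padding sits before the content because, per the comment in the hunk, the "best" position in a dictionary is its last byte), and it orders the writes so an overlapping customDictContent is moved into place before the header overwrites it. A compilable toy of the same layout arithmetic; the names are illustrative, and where the real code returns dstSize_tooSmall this simply asserts:

```c
#include <assert.h>
#include <string.h>

/* Toy version of the layout above: | header | zero padding | content |.
 * The content is moved first because it may overlap the destination buffer. */
static size_t layoutDict(unsigned char* dictBuffer, size_t capacity,
                         const unsigned char* header, size_t hSize,
                         const unsigned char* content, size_t contentSize,
                         size_t minContentSize)
{
    size_t const paddingSize = (contentSize < minContentSize) ? minContentSize - contentSize : 0;
    size_t const dictSize = hSize + paddingSize + contentSize;
    unsigned char* const outHeader  = dictBuffer;
    unsigned char* const outPadding = outHeader + hSize;
    unsigned char* const outContent = outPadding + paddingSize;
    assert(dictSize <= capacity);
    memmove(outContent, content, contentSize); /* first: content may overlap dictBuffer */
    memcpy(outHeader, header, hSize);
    memset(outPadding, 0, paddingSize);
    (void)capacity;
    return dictSize;
}

int main(void)
{
    unsigned char buf[64];
    const unsigned char hdr[8] = { 0x37, 0xA4, 0x30, 0xEC };  /* dictionary magic bytes, illustrative header */
    const unsigned char content[2] = { 'a', 'b' };
    /* content (2B) < minContentSize (8B), so 6 bytes of padding: 8 + 6 + 2 = 16 */
    return layoutDict(buf, sizeof(buf), hdr, sizeof(hdr), content, sizeof(content), 8) == 16 ? 0 : 1;
}
```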
+* Warning : `samplesBuffer` must be followed by noisy guard band !!! * @return : size of dictionary, or an error code which can be tested with ZDICT_isError() */ -size_t ZDICT_trainFromBuffer_unsafe_legacy( +static size_t ZDICT_trainFromBuffer_unsafe_legacy( void* dictBuffer, size_t maxDictSize, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t params) @@ -1090,8 +1116,8 @@ size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity, memset(¶ms, 0, sizeof(params)); params.d = 8; params.steps = 4; - /* Default to level 6 since no compression level information is available */ - params.zParams.compressionLevel = 3; + /* Use default level since no compression level information is available */ + params.zParams.compressionLevel = ZSTD_CLEVEL_DEFAULT; #if defined(DEBUGLEVEL) && (DEBUGLEVEL>=1) params.zParams.notificationLevel = DEBUGLEVEL; #endif diff --git a/lib/dll/example/Makefile b/lib/dll/example/Makefile index 45d0db3cd54..86cf6906e5c 100644 --- a/lib/dll/example/Makefile +++ b/lib/dll/example/Makefile @@ -1,10 +1,11 @@ # ################################################################ -# Copyright (c) 2016-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the # LICENSE file in the root directory of this source tree) and the GPLv2 (found # in the COPYING file in the root directory of this source tree). +# You may select, at your option, one of the above-listed licenses. # ################################################################ VOID := /dev/null diff --git a/lib/dll/example/README.md b/lib/dll/example/README.md index e231f59c55f..46aec798005 100644 --- a/lib/dll/example/README.md +++ b/lib/dll/example/README.md @@ -1,23 +1,21 @@ -ZSTD Windows binary package -==================================== +# ZSTD Windows binary package -#### The package contents +## The package contents -- `zstd.exe` : Command Line Utility, supporting gzip-like arguments -- `dll\libzstd.dll` : The ZSTD dynamic library (DLL) -- `dll\libzstd.lib` : The import library of the ZSTD dynamic library (DLL) for Visual C++ -- `example\` : The example of usage of the ZSTD library -- `include\` : Header files required by the ZSTD library +- `zstd.exe` : Command Line Utility, supporting gzip-like arguments +- `dll\libzstd.dll` : The ZSTD dynamic library (DLL) +- `dll\libzstd.lib` : The import library of the ZSTD dynamic library (DLL) for Visual C++ +- `example\` : The example of usage of the ZSTD library +- `include\` : Header files required by the ZSTD library - `static\libzstd_static.lib` : The static ZSTD library (LIB) - -#### Usage of Command Line Interface +## Usage of Command Line Interface Command Line Interface (CLI) supports gzip-like arguments. By default CLI takes an input file and compresses it to an output file: -``` + Usage: zstd [arg] [input] [output] -``` + The full list of commands for CLI can be obtained with `-h` or `-H`. The ratio can be improved with commands from `-3` to `-16` but higher levels also have slower compression. CLI includes in-memory compression benchmark module with compression @@ -25,36 +23,32 @@ levels starting from `-b` and ending with `-e` with iteration time of `-i` secon CLI supports aggregation of parameters i.e. `-b1`, `-e18`, and `-i1` can be joined into `-b1e18i1`. 
- -#### The example of usage of static and dynamic ZSTD libraries with gcc/MinGW +## The example of usage of static and dynamic ZSTD libraries with gcc/MinGW Use `cd example` and `make` to build `fullbench-dll` and `fullbench-lib`. `fullbench-dll` uses a dynamic ZSTD library from the `dll` directory. `fullbench-lib` uses a static ZSTD library from the `lib` directory. - -#### Using ZSTD DLL with gcc/MinGW +## Using ZSTD DLL with gcc/MinGW The header files from `include\` and the dynamic library `dll\libzstd.dll` are required to compile a project using gcc/MinGW. The dynamic library has to be added to linking options. It means that if a project that uses ZSTD consists of a single `test-dll.c` file it should be linked with `dll\libzstd.dll`. For example: -``` + gcc $(CFLAGS) -Iinclude\ test-dll.c -o test-dll dll\libzstd.dll -``` -The compiled executable will require ZSTD DLL which is available at `dll\libzstd.dll`. +The compiled executable will require ZSTD DLL which is available at `dll\libzstd.dll`. -#### The example of usage of static and dynamic ZSTD libraries with Visual C++ +## The example of usage of static and dynamic ZSTD libraries with Visual C++ Open `example\fullbench-dll.sln` to compile `fullbench-dll` that uses a dynamic ZSTD library from the `dll` directory. The solution works with Visual C++ 2010 or newer. When one will open the solution with Visual C++ newer than 2010 -then the solution will upgraded to the current version. - +then the solution will be upgraded to the current version. -#### Using ZSTD DLL with Visual C++ +## Using ZSTD DLL with Visual C++ The header files from `include\` and the import library `dll\libzstd.lib` are required to compile a project using Visual C++. diff --git a/lib/dll/example/build_package.bat b/lib/dll/example/build_package.bat index 8baabc7b2c6..bd3103de5bd 100644 --- a/lib/dll/example/build_package.bat +++ b/lib/dll/example/build_package.bat @@ -1,20 +1,55 @@ -@ECHO OFF -MKDIR bin\dll bin\static bin\example bin\include -COPY tests\fullbench.c bin\example\ -COPY programs\datagen.c bin\example\ -COPY programs\datagen.h bin\example\ -COPY programs\util.h bin\example\ -COPY programs\platform.h bin\example\ -COPY lib\common\mem.h bin\example\ -COPY lib\common\zstd_internal.h bin\example\ -COPY lib\common\error_private.h bin\example\ -COPY lib\common\xxhash.h bin\example\ -COPY lib\libzstd.a bin\static\libzstd_static.lib -COPY lib\dll\libzstd.* bin\dll\ -COPY lib\dll\example\Makefile bin\example\ -COPY lib\dll\example\fullbench-dll.* bin\example\ -COPY lib\dll\example\README.md bin\ -COPY lib\zstd.h bin\include\ -COPY lib\common\zstd_errors.h bin\include\ -COPY lib\dictBuilder\zdict.h bin\include\ -COPY programs\zstd.exe bin\zstd.exe +@echo off +setlocal + +rem Detect build type based on available files +set BUILD_TYPE=make +if exist "build\cmake\build\lib\Release\zstd_static.lib" set BUILD_TYPE=cmake + +echo Detected build type: %BUILD_TYPE% + +rem Create required directories +mkdir bin\dll bin\static bin\example bin\include + +rem Copy common files using a subroutine. Exits immediately on failure. 
+call :copyFile "tests\fullbench.c" "bin\example\" +call :copyFile "programs\datagen.c" "bin\example\" +call :copyFile "programs\datagen.h" "bin\example\" +call :copyFile "programs\util.h" "bin\example\" +call :copyFile "programs\platform.h" "bin\example\" +call :copyFile "lib\common\mem.h" "bin\example\" +call :copyFile "lib\common\zstd_internal.h" "bin\example\" +call :copyFile "lib\common\error_private.h" "bin\example\" +call :copyFile "lib\common\xxhash.h" "bin\example\" +call :copyFile "lib\dll\example\Makefile" "bin\example\" +call :copyFile "lib\dll\example\fullbench-dll.*" "bin\example\" +call :copyFile "lib\zstd.h" "bin\include\" +call :copyFile "lib\zstd_errors.h" "bin\include\" +call :copyFile "lib\zdict.h" "bin\include\" + +rem Copy build-specific files +if "%BUILD_TYPE%"=="cmake" ( + echo Copying CMake build artifacts... + call :copyFile "build\cmake\build\lib\Release\zstd_static.lib" "bin\static\libzstd_static.lib" + call :copyFile "build\cmake\build\lib\Release\zstd.dll" "bin\dll\libzstd.dll" + call :copyFile "build\cmake\build\lib\Release\zstd.lib" "bin\dll\zstd.lib" + call :copyFile "build\cmake\build\programs\Release\zstd.exe" "bin\zstd.exe" + call :copyFile "lib\dll\example\README.md" "bin\README.md" +) else ( + echo Copying Make build artifacts... + call :copyFile "lib\libzstd.a" "bin\static\libzstd_static.lib" + call :copyFile "lib\dll\libzstd.*" "bin\dll\" + call :copyFile "programs\zstd.exe" "bin\zstd.exe" + call :copyFile "lib\dll\example\README.md" "bin\" +) + +echo Build package created successfully for %BUILD_TYPE% build! +endlocal +exit /b 0 + +:copyFile +copy "%~1" "%~2" +if errorlevel 1 ( + echo Failed to copy "%~1" + exit 1 +) +exit /b \ No newline at end of file diff --git a/lib/install_oses.mk b/lib/install_oses.mk new file mode 100644 index 00000000000..c2cdd1635e5 --- /dev/null +++ b/lib/install_oses.mk @@ -0,0 +1,17 @@ +# ################################################################ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). +# You may select, at your option, one of the above-listed licenses. +# ################################################################ + +# This included Makefile provides the following variables : +# UNAME, INSTALL_OS_LIST + +UNAME := $(shell sh -c 'MSYSTEM="MSYS" uname') + +# List of OSes for which target install is supported +INSTALL_OS_LIST ?= Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku AIX MSYS_NT% CYGWIN_NT% diff --git a/lib/legacy/zstd_legacy.h b/lib/legacy/zstd_legacy.h index 0dbd3c7a40f..7a8a04e593c 100644 --- a/lib/legacy/zstd_legacy.h +++ b/lib/legacy/zstd_legacy.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -18,9 +18,9 @@ extern "C" { /* ************************************* * Includes ***************************************/ -#include "mem.h" /* MEM_STATIC */ -#include "error_private.h" /* ERROR */ -#include "zstd_internal.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTD_frameSizeInfo */ +#include "../common/mem.h" /* MEM_STATIC */ +#include "../common/error_private.h" /* ERROR */ +#include "../common/zstd_internal.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTD_frameSizeInfo */ #if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0) # undef ZSTD_LEGACY_SUPPORT @@ -124,6 +124,20 @@ MEM_STATIC size_t ZSTD_decompressLegacy( const void* dict,size_t dictSize) { U32 const version = ZSTD_isLegacy(src, compressedSize); + char x; + /* Avoid passing NULL to legacy decoding. */ + if (dst == NULL) { + assert(dstCapacity == 0); + dst = &x; + } + if (src == NULL) { + assert(compressedSize == 0); + src = &x; + } + if (dict == NULL) { + assert(dictSize == 0); + dict = &x; + } (void)dst; (void)dstCapacity; (void)dict; (void)dictSize; /* unused when ZSTD_LEGACY_SUPPORT >= 8 */ switch(version) { @@ -242,6 +256,13 @@ MEM_STATIC ZSTD_frameSizeInfo ZSTD_findFrameSizeInfoLegacy(const void *src, size frameSizeInfo.compressedSize = ERROR(srcSize_wrong); frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR; } + /* In all cases, decompressedBound == nbBlocks * ZSTD_BLOCKSIZE_MAX. + * So we can compute nbBlocks without having to change every function. + */ + if (frameSizeInfo.decompressedBound != ZSTD_CONTENTSIZE_ERROR) { + assert((frameSizeInfo.decompressedBound & (ZSTD_BLOCKSIZE_MAX - 1)) == 0); + frameSizeInfo.nbBlocks = (size_t)(frameSizeInfo.decompressedBound / ZSTD_BLOCKSIZE_MAX); + } return frameSizeInfo; } @@ -280,6 +301,12 @@ MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version) MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion, const void* dict, size_t dictSize) { + char x; + /* Avoid passing NULL to legacy decoding. */ + if (dict == NULL) { + assert(dictSize == 0); + dict = &x; + } DEBUGLOG(5, "ZSTD_initLegacyStream for v0.%u", newVersion); if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion); switch(newVersion) @@ -339,6 +366,16 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { + static char x; + /* Avoid passing NULL to legacy decoding. */ + if (output->dst == NULL) { + assert(output->size == 0); + output->dst = &x; + } + if (input->src == NULL) { + assert(input->size == 0); + input->src = &x; + } DEBUGLOG(5, "ZSTD_decompressLegacyStream for v0.%u", version); switch(version) { diff --git a/lib/legacy/zstd_v01.c b/lib/legacy/zstd_v01.c index ae8cba2a3fa..d3322f93e11 100644 --- a/lib/legacy/zstd_v01.c +++ b/lib/legacy/zstd_v01.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -14,7 +14,8 @@ ******************************************/ #include /* size_t, ptrdiff_t */ #include "zstd_v01.h" -#include "error_private.h" +#include "../common/compiler.h" +#include "../common/error_private.h" /****************************************** @@ -190,28 +191,6 @@ typedef signed long long S64; /**************************************************************** * Memory I/O *****************************************************************/ -/* FSE_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets generating assembly depending on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef FSE_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) -# define FSE_FORCE_MEMORY_ACCESS 2 -# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ - (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) -# define FSE_FORCE_MEMORY_ACCESS 1 -# endif -#endif - static unsigned FSE_32bits(void) { @@ -224,24 +203,6 @@ static unsigned FSE_isLittleEndian(void) return one.c[0]; } -#if defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==2) - -static U16 FSE_read16(const void* memPtr) { return *(const U16*) memPtr; } -static U32 FSE_read32(const void* memPtr) { return *(const U32*) memPtr; } -static U64 FSE_read64(const void* memPtr) { return *(const U64*) memPtr; } - -#elif defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; - -static U16 FSE_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -static U32 FSE_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -static U64 FSE_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -#else - static U16 FSE_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -257,8 +218,6 @@ static U64 FSE_read64(const void* memPtr) U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } -#endif // FSE_FORCE_MEMORY_ACCESS - static U16 FSE_readLE16(const void* memPtr) { if (FSE_isLittleEndian()) @@ -343,10 +302,9 @@ FORCE_INLINE unsigned FSE_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ unsigned long 
r; - _BitScanReverse ( &r, val ); - return (unsigned) r; + return _BitScanReverse(&r, val) ? (unsigned)r : 0; # elif defined(__GNUC__) && (GCC_VERSION >= 304) /* GCC Intrinsic */ - return 31 - __builtin_clz (val); + return __builtin_clz (val) ^ 31; # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; @@ -472,7 +430,7 @@ static unsigned FSE_isError(size_t code) { return (code > (size_t)(-FSE_ERROR_ma static short FSE_abs(short a) { - return a<0? -a : a; + return a<0? (short)-a : a; } @@ -1078,7 +1036,7 @@ static size_t HUF_decompress_usingDTable( /* -3% slower when non static */ BYTE* const ostart = (BYTE*) dst; BYTE* op = ostart; BYTE* const omax = op + maxDstSize; - BYTE* const olimit = omax-15; + BYTE* const olimit = maxDstSize < 15 ? op : omax-15; const void* ptr = DTable; const HUF_DElt* const dt = (const HUF_DElt*)(ptr)+1; @@ -1092,7 +1050,7 @@ static size_t HUF_decompress_usingDTable( /* -3% slower when non static */ const size_t length1 = FSE_readLE16(jumpTable); const size_t length2 = FSE_readLE16(jumpTable+1); const size_t length3 = FSE_readLE16(jumpTable+2); - const size_t length4 = cSrcSize - 6 - length1 - length2 - length3; // check coherency !! + const size_t length4 = cSrcSize - 6 - length1 - length2 - length3; /* check coherency !! */ const char* const start1 = (const char*)(cSrc) + 6; const char* const start2 = start1 + length1; const char* const start3 = start2 + length2; @@ -1150,11 +1108,11 @@ static size_t HUF_decompress_usingDTable( /* -3% slower when non static */ /* tail */ { - // bitTail = bitD1; // *much* slower : -20% !??! + /* bitTail = bitD1; */ /* *much* slower : -20% !??! */ FSE_DStream_t bitTail; bitTail.ptr = bitD1.ptr; bitTail.bitsConsumed = bitD1.bitsConsumed; - bitTail.bitContainer = bitD1.bitContainer; // required in case of FSE_DStream_endOfBuffer + bitTail.bitContainer = bitD1.bitContainer; /* required in case of FSE_DStream_endOfBuffer */ bitTail.start = start1; for ( ; (FSE_reloadDStream(&bitTail) < FSE_DStream_completed) && (op= 199901L /* C99 */ -# include +# if defined(_AIX) +# include +# else +# include /* intptr_t */ +# endif typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; @@ -1421,7 +1383,7 @@ typedef struct { BYTE* matchLength; BYTE* dumpsStart; BYTE* dumps; -} seqStore_t; +} SeqStore_t; typedef struct ZSTD_Cctx_s @@ -1429,7 +1391,7 @@ typedef struct ZSTD_Cctx_s const BYTE* base; U32 current; U32 nextUpdate; - seqStore_t seqStore; + SeqStore_t seqStore; #ifdef __AVX2__ __m256i hashTable[HASH_TABLESIZE>>3]; #else @@ -1483,7 +1445,9 @@ static size_t ZSTDv01_getcBlockSize(const void* src, size_t srcSize, blockProper static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall); - memcpy(dst, src, srcSize); + if (srcSize > 0) { + memcpy(dst, src, srcSize); + } return srcSize; } @@ -1502,7 +1466,7 @@ static size_t ZSTD_decompressLiterals(void* ctx, if (srcSize <= 3) return ERROR(corruption_detected); litSize = ip[1] + (ip[0]<<8); - litSize += ((ip[-3] >> 3) & 7) << 16; // mmmmh.... + litSize += ((ip[-3] >> 3) & 7) << 16; /* mmmmh.... 
*/ op = oend - litSize; (void)ctx; @@ -1541,7 +1505,9 @@ static size_t ZSTDv01_decodeLiteralsBlock(void* ctx, size_t rleSize = litbp.origSize; if (rleSize>maxDstSize) return ERROR(dstSize_tooSmall); if (!srcSize) return ERROR(srcSize_wrong); - memset(oend - rleSize, *ip, rleSize); + if (rleSize > 0) { + memset(oend - rleSize, *ip, rleSize); + } *litStart = oend - rleSize; *litSize = rleSize; ip++; @@ -1755,20 +1721,26 @@ static size_t ZSTD_execSequence(BYTE* op, static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */ const BYTE* const ostart = op; + BYTE* const oLitEnd = op + sequence.litLength; const size_t litLength = sequence.litLength; BYTE* const endMatch = op + litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */ const BYTE* const litEnd = *litPtr + litLength; - /* check */ + /* checks */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (sequence.offset > (U32)(oLitEnd - base)) return ERROR(corruption_detected); + if (endMatch > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (litEnd > litLimit) return ERROR(corruption_detected); - if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall); /* overwrite literal segment */ + if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ + if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall); /* overwrite literal segment */ /* copy Literals */ - if (((size_t)(*litPtr - op) < 8) || ((size_t)(oend-litEnd) < 8) || (op+litLength > oend-8)) - memmove(op, *litPtr, litLength); /* overwrite risk */ - else - ZSTD_wildcopy(op, *litPtr, litLength); + ZSTD_memmove(op, *litPtr, sequence.litLength); /* note : v0.1 seems to allow scenarios where output or input are close to end of buffer */ + op += litLength; *litPtr = litEnd; /* update for next sequence */ @@ -1851,11 +1823,12 @@ static size_t ZSTD_decompressSequences( BYTE* const ostart = (BYTE* const)dst; BYTE* op = ostart; BYTE* const oend = ostart + maxDstSize; - size_t errorCode, dumpsLength; + size_t errorCode = 0; + size_t dumpsLength = 0; const BYTE* litPtr = litStart; const BYTE* const litEnd = litStart + litSize; - int nbSeq; - const BYTE* dumps; + int nbSeq = 0; + const BYTE* dumps = NULL; U32* DTableLL = dctx->LLTable; U32* DTableML = dctx->MLTable; U32* DTableOffb = dctx->OffTable; @@ -1901,8 +1874,10 @@ static size_t ZSTD_decompressSequences( { size_t lastLLSize = litEnd - litPtr; if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall); - if (op != litPtr) memmove(op, litPtr, lastLLSize); - op += lastLLSize; + if (lastLLSize > 0) { + if (op != litPtr) memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } } } @@ -1941,7 +1916,7 @@ size_t ZSTDv01_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const voi size_t remainingSize = srcSize; U32 magicNumber; size_t errorCode=0; - blockProperties_t blockProperties; + blockProperties_t blockProperties = { 0 }; /* Frame Header */ if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); @@ -2145,6 +2120,7 @@ size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSi } ctx->phase = 1; 
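The reworked checks in the legacy ZSTD_execSequence above follow one discipline: validate the lengths as size_t against the space actually remaining before deriving any end pointers, so a corrupt input cannot push pointer arithmetic past the buffer, or wrap it in a 32-bit address space. In the real decoder both lengths are bounded by the block size, so the size_t sum itself cannot overflow. A self-contained sketch of that ordering, with illustrative names:

```c
#include <stddef.h>

typedef struct { size_t litLength; size_t matchLength; } seq_t;

/* Sketch of the hardened ordering above: length sums are checked first,
 * so forming op + litLength afterwards is known to stay inside [op, oend]. */
static int sequenceFits(seq_t seq,
                        const unsigned char* op, const unsigned char* oend,
                        const unsigned char* litPtr, const unsigned char* litLimit)
{
    size_t const seqLength = seq.litLength + seq.matchLength;
    if (seqLength > (size_t)(oend - op)) return 0;              /* dstSize_tooSmall */
    if (seq.litLength > (size_t)(litLimit - litPtr)) return 0;  /* corruption_detected */
    return 1;  /* only now are oLitEnd / oMatchEnd safe to compute */
}

int main(void)
{
    unsigned char out[16]; unsigned char lits[4];
    seq_t ok = { 2, 3 }, bad = { 2, 100 };
    return (sequenceFits(ok, out, out + 16, lits, lits + 4)
            && !sequenceFits(bad, out, out + 16, lits, lits + 4)) ? 0 : 1;
}
```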
ctx->expected = ZSTD_blockHeaderSize; + if (ZSTDv01_isError(rSize)) return rSize; ctx->previousDstEnd = (void*)( ((char*)dst) + rSize); return rSize; } diff --git a/lib/legacy/zstd_v01.h b/lib/legacy/zstd_v01.h index 245f9dd3146..6ac876954d1 100644 --- a/lib/legacy/zstd_v01.h +++ b/lib/legacy/zstd_v01.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v02.c b/lib/legacy/zstd_v02.c index 793df6024bb..d1e00385a23 100644 --- a/lib/legacy/zstd_v02.c +++ b/lib/legacy/zstd_v02.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,7 +11,8 @@ #include /* size_t, ptrdiff_t */ #include "zstd_v02.h" -#include "error_private.h" +#include "../common/compiler.h" +#include "../common/error_private.h" /****************************************** @@ -28,7 +29,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -71,25 +72,15 @@ extern "C" { #include /* memcpy */ -/****************************************** -* Compiler-specific -******************************************/ -#if defined(__GNUC__) -# define MEM_STATIC static __attribute__((unused)) -#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define MEM_STATIC static inline -#elif defined(_MSC_VER) -# define MEM_STATIC static __inline -#else -# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ -#endif - - /**************************************************************** * Basic Types *****************************************************************/ #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# include +# if defined(_AIX) +# include +# else +# include /* intptr_t */ +# endif typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; @@ -111,27 +102,6 @@ extern "C" { /**************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets generating assembly depending on alignment. 
- * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) -# define MEM_FORCE_MEMORY_ACCESS 2 -# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ - (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } @@ -142,33 +112,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard on structure alignment. -Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -189,9 +132,6 @@ MEM_STATIC void MEM_write16(void* memPtr, U16 value) memcpy(memPtr, &value, sizeof(value)); } -#endif // MEM_FORCE_MEMORY_ACCESS - - MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) @@ -268,7 +208,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) header file (to include) Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -349,11 +289,10 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); MEM_STATIC unsigned BIT_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - _BitScanReverse ( &r, val ); - return (unsigned) r; + unsigned long r; + return _BitScanReverse(&r, val) ? 
(unsigned)r : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ - return 31 - __builtin_clz (val); + return __builtin_clz (val) ^ 31; # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; @@ -433,7 +372,7 @@ MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits) } /*! BIT_lookBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -453,7 +392,7 @@ MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) } /*!BIT_readBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBitsFast(bitD, nbBits); @@ -510,7 +449,7 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) Error codes and messages Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -609,7 +548,7 @@ typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be mor header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -753,7 +692,7 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -822,7 +761,7 @@ static size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, si Header File Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -882,7 +821,7 @@ typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */ Header File for static linking only Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -923,7 +862,7 @@ extern "C" { * Streaming functions ***************************************/ -typedef struct ZSTD_DCtx_s ZSTD_DCtx; +typedef struct ZSTDv02_Dctx_s ZSTD_DCtx; /* Use above functions alternatively. 
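The typedef change just above is part of a pattern applied to every legacy unit: each version's decompression context gets its own struct tag (ZSTDv02_Dctx_s here, ZSTDv03_Dctx_s further down) while the file-local ZSTD_DCtx alias is kept. Presumably this is so the legacy decoders, which all define contexts with different layouts, can coexist when the sources are combined into a single translation unit (amalgamated single-file builds) and stay distinguishable in debug info. A tiny illustration of why the distinct tags matter; the layouts below are invented:

```c
#include <stdio.h>

/* Two legacy "versions" with different context layouts. With a shared tag
 * (struct ZSTD_DCtx_s in both), merging the sources into one translation
 * unit would be a struct redefinition error; distinct tags make both
 * definitions legal side by side. */
struct ZSTDv02_Dctx_s { unsigned LLTable[64];  unsigned phase; };
struct ZSTDv03_Dctx_s { unsigned LLTable[128]; unsigned phase; };

int main(void)
{
    printf("v02 ctx = %zu bytes, v03 ctx = %zu bytes\n",
           sizeof(struct ZSTDv02_Dctx_s), sizeof(struct ZSTDv03_Dctx_s));
    return 0;
}
```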
@@ -946,7 +885,7 @@ typedef struct ZSTD_DCtx_s ZSTD_DCtx; FSE : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1450,7 +1389,7 @@ static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, siz Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2609,7 +2548,7 @@ static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_ zstd - standard compression library Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2783,7 +2722,7 @@ typedef struct { BYTE* matchLength; BYTE* dumpsStart; BYTE* dumps; -} seqStore_t; +} SeqStore_t; /* ************************************* @@ -2798,7 +2737,7 @@ static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); } /* ************************************************************* * Decompression section ***************************************************************/ -struct ZSTD_DCtx_s +struct ZSTDv02_Dctx_s { U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)]; U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)]; @@ -2836,7 +2775,9 @@ static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockPropertie static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall); - memcpy(dst, src, srcSize); + if (srcSize > 0) { + memcpy(dst, src, srcSize); + } return srcSize; } @@ -2889,6 +2830,7 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx, const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */ if (litSize > srcSize-11) /* risk of reading too far with wildcopy */ { + if (litSize > BLOCKSIZE) return ERROR(corruption_detected); if (litSize > srcSize-3) return ERROR(corruption_detected); memcpy(dctx->litBuffer, istart, litSize); dctx->litPtr = dctx->litBuffer; @@ -3111,12 +3053,19 @@ static size_t ZSTD_execSequence(BYTE* op, const BYTE* const litEnd = *litPtr + sequence.litLength; /* checks */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use the pointer check */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (sequence.offset > (U32)(oLitEnd - base)) return ERROR(corruption_detected); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ 
if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTD_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ @@ -3228,8 +3177,10 @@ static size_t ZSTD_decompressSequences( size_t lastLLSize = litEnd - litPtr; if (litPtr > litEnd) return ERROR(corruption_detected); if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall); - if (op != litPtr) memmove(op, litPtr, lastLLSize); - op += lastLLSize; + if (lastLLSize > 0) { + if (op != litPtr) memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } } } @@ -3467,6 +3418,7 @@ static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSi } ctx->phase = 1; ctx->expected = ZSTD_blockHeaderSize; + if (ZSTD_isError(rSize)) return rSize; ctx->previousDstEnd = (void*)( ((char*)dst) + rSize); return rSize; } diff --git a/lib/legacy/zstd_v02.h b/lib/legacy/zstd_v02.h index 9d7d8d9b5bc..dab0260ee9e 100644 --- a/lib/legacy/zstd_v02.h +++ b/lib/legacy/zstd_v02.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v03.c b/lib/legacy/zstd_v03.c index 7a0e7c9b69f..50fa87cf362 100644 --- a/lib/legacy/zstd_v03.c +++ b/lib/legacy/zstd_v03.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,7 +11,8 @@ #include /* size_t, ptrdiff_t */ #include "zstd_v03.h" -#include "error_private.h" +#include "../common/compiler.h" +#include "../common/error_private.h" /****************************************** @@ -29,7 +30,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet. 
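A small pattern repeats through the v01/v02 hunks above: memcpy, memmove, and memset calls are wrapped in `if (size > 0)` guards (ZSTD_copyUncompressedBlock, the RLE memset, the final lastLLSize flush). Passing a null pointer to these functions is undefined behavior even when the size is zero, and UBSan reports it; the guard makes the zero-length case an explicit no-op. A minimal demonstration:

```c
#include <string.h>

/* Guarded copy in the style of the hunks above: when n == 0 the pointers
 * may legitimately be NULL, so the libc call is skipped entirely. */
static void copyChecked(void* dst, const void* src, size_t n)
{
    if (n > 0) {
        memcpy(dst, src, n);
    }
}

int main(void)
{
    char buf[4];
    copyChecked(buf, "abc", 4);   /* ordinary copy, including the '\0' */
    copyChecked(NULL, NULL, 0);   /* well-defined no-op under the guard */
    return buf[0] == 'a' ? 0 : 1;
}
```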
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -72,25 +73,15 @@ extern "C" { #include /* memcpy */ -/****************************************** -* Compiler-specific -******************************************/ -#if defined(__GNUC__) -# define MEM_STATIC static __attribute__((unused)) -#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define MEM_STATIC static inline -#elif defined(_MSC_VER) -# define MEM_STATIC static __inline -#else -# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ -#endif - - /**************************************************************** * Basic Types *****************************************************************/ #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# include +# if defined(_AIX) +# include +# else +# include /* intptr_t */ +# endif typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; @@ -112,27 +103,6 @@ extern "C" { /**************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets generating assembly depending on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) -# define MEM_FORCE_MEMORY_ACCESS 2 -# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ - (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } @@ -143,33 +113,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard on structure alignment. 
-Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -190,10 +133,6 @@ MEM_STATIC void MEM_write16(void* memPtr, U16 value) memcpy(memPtr, &value, sizeof(value)); } - -#endif // MEM_FORCE_MEMORY_ACCESS - - MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) @@ -270,7 +209,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) header file (to include) Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -352,11 +291,10 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); MEM_STATIC unsigned BIT_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - _BitScanReverse ( &r, val ); - return (unsigned) r; + unsigned long r; + return _BitScanReverse(&r, val) ? (unsigned)r : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ - return 31 - __builtin_clz (val); + return __builtin_clz (val) ^ 31; # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; @@ -435,7 +373,7 @@ MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits) } /*! 
BIT_lookBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -455,7 +393,7 @@ MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) } /*!BIT_readBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBitsFast(bitD, nbBits); @@ -512,7 +450,7 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) Error codes and messages Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -611,7 +549,7 @@ typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be mor header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -755,7 +693,7 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -823,7 +761,7 @@ static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, si Header File Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -883,7 +821,7 @@ typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */ Header File for static linking only Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -924,7 +862,7 @@ extern "C" { * Streaming functions ***************************************/ -typedef struct ZSTD_DCtx_s ZSTD_DCtx; +typedef struct ZSTDv03_Dctx_s ZSTD_DCtx; /* Use above functions alternatively. @@ -947,7 +885,7 @@ typedef struct ZSTD_DCtx_s ZSTD_DCtx; FSE : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. 
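The recurring BIT_highbit32 rewrite in these files is value-preserving: for nonzero val, __builtin_clz(val) lies in [0, 31], and for any x in that range 31 - x == 31 ^ x, so `__builtin_clz(val) ^ 31` returns the same highest-set-bit index while often encoding to one fewer instruction. The MSVC branch now also returns 0 instead of reading an uninitialized r when val == 0. A quick check of the identity:

```c
#include <assert.h>

int main(void)
{
    unsigned x;
    for (x = 0; x <= 31; x++)
        assert((31 - x) == (31 ^ x));   /* subtraction and xor agree on [0,31] */
#if defined(__GNUC__)
    /* e.g. val = 1<<16 : clz = 15, and 15 ^ 31 = 16, the index of the highest set bit */
    assert((__builtin_clz(0x00010000u) ^ 31) == 16);
#endif
    return 0;
}
```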
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1173,7 +1111,7 @@ static unsigned FSE_isError(size_t code) { return ERR_isError(code); } ****************************************************************/ static short FSE_abs(short a) { - return a<0 ? -a : a; + return a<0 ? (short)-a : a; } static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, @@ -1451,7 +1389,7 @@ static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, siz Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2248,7 +2186,7 @@ static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_ zstd - standard compression library Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2424,7 +2362,7 @@ typedef struct { BYTE* matchLength; BYTE* dumpsStart; BYTE* dumps; -} seqStore_t; +} SeqStore_t; /* ************************************* @@ -2439,7 +2377,7 @@ static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); } /* ************************************************************* * Decompression section ***************************************************************/ -struct ZSTD_DCtx_s +struct ZSTDv03_Dctx_s { U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)]; U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)]; @@ -2477,7 +2415,9 @@ static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockPropertie static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall); - memcpy(dst, src, srcSize); + if (srcSize > 0) { + memcpy(dst, src, srcSize); + } return srcSize; } @@ -2530,6 +2470,7 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx, const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */ if (litSize > srcSize-11) /* risk of reading too far with wildcopy */ { + if (litSize > BLOCKSIZE) return ERROR(corruption_detected); if (litSize > srcSize-3) return ERROR(corruption_detected); memcpy(dctx->litBuffer, istart, litSize); dctx->litPtr = dctx->litBuffer; @@ -2752,18 +2693,24 @@ static size_t ZSTD_execSequence(BYTE* op, const BYTE* const litEnd = *litPtr + sequence.litLength; /* checks */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (oLitEnd > 
oend_8) return ERROR(dstSize_tooSmall); + if (sequence.offset > (U32)(oLitEnd - base)) return ERROR(corruption_detected); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTD_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ /* copy Match */ - { - const BYTE* match = op - sequence.offset; + { const BYTE* match = op - sequence.offset; /* check */ if (sequence.offset > (size_t)op) return ERROR(corruption_detected); /* address space overflow test (this test seems kept by clang optimizer) */ @@ -2869,8 +2816,10 @@ static size_t ZSTD_decompressSequences( size_t lastLLSize = litEnd - litPtr; if (litPtr > litEnd) return ERROR(corruption_detected); if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall); - if (op != litPtr) memmove(op, litPtr, lastLLSize); - op += lastLLSize; + if (lastLLSize > 0) { + if (op != litPtr) memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } } } @@ -3109,6 +3058,7 @@ static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSi } ctx->phase = 1; ctx->expected = ZSTD_blockHeaderSize; + if (ZSTD_isError(rSize)) return rSize; ctx->previousDstEnd = (void*)( ((char*)dst) + rSize); return rSize; } diff --git a/lib/legacy/zstd_v03.h b/lib/legacy/zstd_v03.h index efd8c2b9241..9bf3cce6473 100644 --- a/lib/legacy/zstd_v03.h +++ b/lib/legacy/zstd_v03.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v04.c b/lib/legacy/zstd_v04.c index 645a6e313c0..31c2052583d 100644 --- a/lib/legacy/zstd_v04.c +++ b/lib/legacy/zstd_v04.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -16,7 +16,8 @@ #include <string.h> /* memcpy */ #include "zstd_v04.h" -#include "error_private.h" +#include "../common/compiler.h" +#include "../common/error_private.h" /* ****************************************************************** @@ -37,22 +38,17 @@ extern "C" { # include <stdlib.h> /* _byteswap_ulong */ # include <intrin.h> /* _byteswap_* */ #endif -#if defined(__GNUC__) -# define MEM_STATIC static __attribute__((unused)) -#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define MEM_STATIC static inline -#elif defined(_MSC_VER) -# define MEM_STATIC static __inline -#else -# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ -#endif /**************************************************************** * Basic Types *****************************************************************/ #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# include <stdint.h> +# if defined(_AIX) +# include <inttypes.h> +# else +# include <stdint.h> /* intptr_t */ +# endif typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; @@ -74,7 +70,7 @@ extern "C" { /*-************************************* * Debug ***************************************/ -#include "debug.h" +#include "../common/debug.h" #ifndef assert # define assert(condition) ((void)0) #endif @@ -83,27 +79,6 @@ extern "C" { /**************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets generating assembly depending on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
- * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) -# define MEM_FORCE_MEMORY_ACCESS 2 -# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ - (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } @@ -114,33 +89,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard on structure alignment. -Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -161,9 +109,6 @@ MEM_STATIC void MEM_write16(void* memPtr, U16 value) memcpy(memPtr, &value, sizeof(value)); } -#endif // MEM_FORCE_MEMORY_ACCESS - - MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) @@ -541,7 +486,7 @@ If there is an error, the function will return an error code, which can be teste header file (to include) Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -623,11 +568,10 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); MEM_STATIC unsigned BIT_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - _BitScanReverse ( &r, val ); - return (unsigned) r; + unsigned long r; + return _BitScanReverse(&r, val) ? 
(unsigned)r : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ - return 31 - __builtin_clz (val); + return __builtin_clz (val) ^ 31; # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; @@ -700,7 +644,7 @@ MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits) } /*! BIT_lookBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -720,7 +664,7 @@ MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) } /*!BIT_readBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBitsFast(bitD, nbBits); @@ -781,7 +725,7 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -930,7 +874,7 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) FSE : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1150,7 +1094,7 @@ static unsigned FSE_isError(size_t code) { return ERR_isError(code); } ****************************************************************/ static short FSE_abs(short a) { - return a<0 ? -a : a; + return a<0 ? (short)-a : a; } static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, @@ -1436,7 +1380,7 @@ static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, siz header file Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1514,7 +1458,7 @@ static unsigned HUF_isError(size_t code); /* tells if a return value i header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1601,7 +1545,7 @@ static size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. 
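
Two details of the BIT_highbit32() rewrite above deserve a note. __builtin_clz(0) is undefined, which is why the MSVC path now checks the _BitScanReverse() result instead of reading an uninitialized index. And since __builtin_clz() of a nonzero 32-bit value always lies in [0,31], `31 - clz` and `clz ^ 31` are the same number (subtracting from 0b11111 is a bitwise complement within 5 bits), but the XOR form tends to fold into a single bit-scan instruction. A standalone self-check of the equivalence (not part of the diff; GCC/Clang builtins assumed):

    #include <assert.h>
    #include <stdint.h>

    static unsigned highbit_sub(uint32_t val) { return 31 - (unsigned)__builtin_clz(val); }
    static unsigned highbit_xor(uint32_t val) { return (unsigned)__builtin_clz(val) ^ 31; }

    int main(void)
    {
        uint32_t v;
        for (v = 1; v != 0; v <<= 1)                  /* every power of two */
            assert(highbit_sub(v) == highbit_xor(v));
        assert(highbit_xor(1) == 0);
        assert(highbit_xor(0x80000000u) == 31);
        return 0;
    }
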
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2401,7 +2345,7 @@ static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_ zstd - decompression module for v0.4 legacy format Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2603,7 +2547,9 @@ static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockPropertie static size_t ZSTD_copyRawBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall); - memcpy(dst, src, srcSize); + if (srcSize > 0) { + memcpy(dst, src, srcSize); + } return srcSize; } @@ -2655,6 +2601,7 @@ static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */ if (litSize > srcSize-11) /* risk of reading too far with wildcopy */ { + if (litSize > BLOCKSIZE) return ERROR(corruption_detected); if (litSize > srcSize-3) return ERROR(corruption_detected); memcpy(dctx->litBuffer, istart, litSize); dctx->litPtr = dctx->litBuffer; @@ -2873,13 +2820,19 @@ static size_t ZSTD_execSequence(BYTE* op, const BYTE* const litEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; - /* check */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + /* checks */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there is no overflow in literal or match lengths, can use pointer checks */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */ + if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTD_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ @@ -3007,8 +2960,10 @@ static size_t ZSTD_decompressSequences( size_t lastLLSize = litEnd - litPtr; if (litPtr > litEnd) return ERROR(corruption_detected); if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall); - if (op != litPtr) memcpy(op, litPtr, lastLLSize); - op += lastLLSize; + if (lastLLSize > 0) { + if (op != litPtr) memcpy(op, litPtr, lastLLSize); + op += lastLLSize; + } } } @@ -3034,9 +2989,12 @@ static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, { /* blockType == blockCompressed */ const BYTE* ip = (const BYTE*)src; + size_t litCSize; + + if (srcSize > BLOCKSIZE) return ERROR(corruption_detected); /* Decode literals sub-block */ 
- size_t litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); + litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); if (ZSTD_isError(litCSize)) return litCSize; ip += litCSize; srcSize -= litCSize; @@ -3252,6 +3210,7 @@ static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSi } ctx->stage = ZSTDds_decodeBlockHeader; ctx->expected = ZSTD_blockHeaderSize; + if (ZSTD_isError(rSize)) return rSize; ctx->previousDstEnd = (char*)dst + rSize; return rSize; } @@ -3275,7 +3234,7 @@ static void ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* dict, s Buffered version of Zstd compression library Copyright (C) 2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -3403,7 +3362,9 @@ static size_t ZBUFF_decompressWithDictionary(ZBUFF_DCtx* zbc, const void* src, s static size_t ZBUFF_limitCopy(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { size_t length = MIN(maxDstSize, srcSize); - memcpy(dst, src, length); + if (length > 0) { + memcpy(dst, src, length); + } return length; } @@ -3577,8 +3538,8 @@ static size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDs unsigned ZBUFFv04_isError(size_t errorCode) { return ERR_isError(errorCode); } const char* ZBUFFv04_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); } -size_t ZBUFFv04_recommendedDInSize() { return BLOCKSIZE + 3; } -size_t ZBUFFv04_recommendedDOutSize() { return BLOCKSIZE; } +size_t ZBUFFv04_recommendedDInSize(void) { return BLOCKSIZE + 3; } +size_t ZBUFFv04_recommendedDOutSize(void) { return BLOCKSIZE; } diff --git a/lib/legacy/zstd_v04.h b/lib/legacy/zstd_v04.h index bb5f3b7d0b8..640240d624d 100644 --- a/lib/legacy/zstd_v04.h +++ b/lib/legacy/zstd_v04.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v05.c b/lib/legacy/zstd_v05.c index a7ea6066fb0..e1efca597ce 100644 --- a/lib/legacy/zstd_v05.c +++ b/lib/legacy/zstd_v05.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,7 +11,7 @@ /*- Dependencies -*/ #include "zstd_v05.h" -#include "error_private.h" +#include "../common/error_private.h" /* ****************************************************************** @@ -19,7 +19,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet. 
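
The `if (srcSize > 0)` / `if (length > 0)` guards that keep appearing around memcpy() and memmove() in these hunks are not stylistic: the C standard (C99 7.21.1p2) requires pointer arguments to be valid even when the copy length is zero, so memcpy(dst, NULL, 0) is undefined behavior, and flushing an empty frame can legitimately reach these calls with NULL and 0. A guarded copy in the same spirit (hypothetical helper name):

    #include <string.h>

    static size_t limitCopy_sketch(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize)
    {
        size_t const length = (dstCapacity < srcSize) ? dstCapacity : srcSize;
        if (length > 0)                 /* skip the call entirely: NULL src/dst with length 0 would be UB */
            memcpy(dst, src, length);
        return length;
    }
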
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -80,7 +80,11 @@ extern "C" { * Basic Types *****************************************************************/ #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# include <stdint.h> +# if defined(_AIX) +# include <inttypes.h> +# else +# include <stdint.h> /* intptr_t */ +# endif typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; @@ -102,27 +106,6 @@ extern "C" { /*-************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets depending on alignment. - * In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) -# define MEM_FORCE_MEMORY_ACCESS 2 -# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ - (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } @@ -133,37 +116,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard, by lying on structure alignment. 
-Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } -MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } -MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } -MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; } -MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign*)memPtr)->u64 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -194,9 +146,6 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) memcpy(memPtr, &value, sizeof(value)); } -#endif /* MEM_FORCE_MEMORY_ACCESS */ - - MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) @@ -218,11 +167,6 @@ MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) } } -MEM_STATIC U32 MEM_readLE24(const void* memPtr) -{ - return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16); -} - MEM_STATIC U32 MEM_readLE32(const void* memPtr) { if (MEM_isLittleEndian()) @@ -266,7 +210,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) Header File for static linking only Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -290,7 +234,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net + - zstd homepage : https://facebook.github.io/zstd */ #ifndef ZSTD_STATIC_H #define ZSTD_STATIC_H @@ -402,7 +346,7 @@ size_t ZSTDv05_decompressBlock(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity Header File for include Copyright (C) 2014-2016, Yann Collet. 
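
With the MEM_FORCE_MEMORY_ACCESS variants deleted above, every legacy decoder is left with the single portable idiom for unaligned reads: round-tripping through memcpy(), which stays within the C object model (no alignment assumption, no strict-aliasing violation) and which current GCC/Clang/MSVC compile down to one plain load on targets that tolerate unaligned access. The surviving shape, reduced to one function (sketch, not the literal zstd source):

    #include <string.h>

    typedef unsigned short U16;

    static U16 MEM_read16_sketch(const void* memPtr)
    {
        U16 val;
        memcpy(&val, memPtr, sizeof(val));   /* compilers fold this to a single 16-bit load where legal */
        return val;
    }
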
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -489,7 +433,7 @@ static const size_t ZSTDv05_frameHeaderSize_min = 5; #define FSEv05_ENCODING_DYNAMIC 3 -#define HufLog 12 +#define ZSTD_HUFFDTABLE_CAPACITY_LOG 12 #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ #define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ @@ -547,7 +491,7 @@ typedef struct { U32 litLengthSum; U32 litSum; U32 offCodeSum; -} seqStore_t; +} SeqStore_t; @@ -557,7 +501,7 @@ typedef struct { header file Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -676,7 +620,7 @@ size_t FSEv05_decompress_usingDTable(void* dst, size_t dstCapacity, const void* header file (to include) Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -757,11 +701,10 @@ MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits); MEM_STATIC unsigned BITv05_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - _BitScanReverse ( &r, val ); - return (unsigned) r; + unsigned long r; + return _BitScanReverse(&r, val) ? (unsigned)r : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ - return 31 - __builtin_clz (val); + return __builtin_clz (val) ^ 31; # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; @@ -831,7 +774,7 @@ MEM_STATIC size_t BITv05_lookBits(BITv05_DStream_t* bitD, U32 nbBits) } /*! 
BITv05_lookBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv05_lookBitsFast(BITv05_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -851,7 +794,7 @@ MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, unsigned nbBits) } /*!BITv05_readBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits) { size_t value = BITv05_lookBitsFast(bitD, nbBits); @@ -906,7 +849,7 @@ MEM_STATIC unsigned BITv05_endOfDStream(const BITv05_DStream_t* DStream) header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1056,7 +999,7 @@ MEM_STATIC unsigned FSEv05_endOfDState(const FSEv05_DState_t* DStatePtr) FSEv05 : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1281,7 +1224,7 @@ const char* FSEv05_getErrorName(size_t code) { return ERR_getErrorName(code); } /*-************************************************************** * FSEv05 NCount encoding-decoding ****************************************************************/ -static short FSEv05_abs(short a) { return a<0 ? -a : a; } +static short FSEv05_abs(short a) { return a<0 ? (short)-a : a; } size_t FSEv05_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, @@ -1542,7 +1485,7 @@ size_t FSEv05_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t header file Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1615,7 +1558,7 @@ const char* HUFv05_getErrorName(size_t code); /* provides error code string (u header file, for static linking only Copyright (C) 2013-2016, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1707,7 +1650,7 @@ size_t HUFv05_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. 
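
The "only works if nbBits >= 1" restriction repeated in the comment fixes above has a concrete cause: the fast paths compute their right-shift as ((bitMask+1) - nbBits) & bitMask, and for nbBits == 0 that wraps to a shift of 0, returning the entire bit container instead of an empty value. A standalone illustration (assumes the 64-bit container used on 64-bit builds; hypothetical helper name):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static size_t lookBitsFast_sketch(uint64_t bitContainer, unsigned bitsConsumed, unsigned nbBits)
    {
        unsigned const bitMask = sizeof(bitContainer)*8 - 1;   /* 63 */
        return (size_t)((bitContainer << (bitsConsumed & bitMask))
                        >> (((bitMask+1) - nbBits) & bitMask));
    }

    int main(void)
    {
        assert(lookBitsFast_sketch(~(uint64_t)0, 0, 1) == 1);   /* fine for nbBits >= 1 */
        assert(lookBitsFast_sketch(~(uint64_t)0, 0, 0) != 0);   /* nbBits == 0 wraps: yields all 64 bits, not 0 */
        return 0;
    }
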
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1809,7 +1752,7 @@ static size_t HUFv05_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, if (!srcSize) return ERROR(srcSize_wrong); iSize = ip[0]; - //memset(huffWeight, 0, hwSize); /* is not necessary, even though some analyzer complain ... */ + /* memset(huffWeight, 0, hwSize); */ /* is not necessary, even though some analyzer complain ... */ if (iSize >= 128) { /* special header */ if (iSize >= (242)) { /* RLE */ @@ -1884,7 +1827,7 @@ size_t HUFv05_readDTableX2 (U16* DTable, const void* src, size_t srcSize) HUFv05_DEltX2* const dt = (HUFv05_DEltX2*)dtPtr; HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */ - //memset(huffWeight, 0, sizeof(huffWeight)); /* is not necessary, even though some analyzer complain ... */ + /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */ iSize = HUFv05_readStats(huffWeight, HUFv05_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); if (HUFv05_isError(iSize)) return iSize; @@ -2215,7 +2158,7 @@ size_t HUFv05_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize) HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX4) == sizeof(unsigned)); /* if compilation fails here, assertion is false */ if (memLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge); - //memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzer complain ... */ + /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */ iSize = HUFv05_readStats(weightList, HUFv05_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); if (HUFv05_isError(iSize)) return iSize; @@ -2544,15 +2487,15 @@ size_t HUFv05_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cS return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); - //return HUFv05_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */ - //return HUFv05_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */ - //return HUFv05_decompress4X6(dst, dstSize, cSrc, cSrcSize); /* multi-streams quad-symbols decoding */ + /* return HUFv05_decompress4X2(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams single-symbol decoding */ + /* return HUFv05_decompress4X4(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams double-symbols decoding */ + /* return HUFv05_decompress4X6(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams quad-symbols decoding */ } /* zstd - standard compression library Copyright (C) 2014-2016, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2650,7 +2593,7 @@ struct ZSTDv05_DCtx_s FSEv05_DTable LLTable[FSEv05_DTABLE_SIZE_U32(LLFSEv05Log)]; FSEv05_DTable OffTable[FSEv05_DTABLE_SIZE_U32(OffFSEv05Log)]; FSEv05_DTable MLTable[FSEv05_DTABLE_SIZE_U32(MLFSEv05Log)]; - unsigned hufTableX4[HUFv05_DTABLE_SIZE(HufLog)]; + unsigned hufTableX4[HUFv05_DTABLE_SIZE(ZSTD_HUFFDTABLE_CAPACITY_LOG)]; const void* previousDstEnd; const void* base; const void* vBase; @@ -2678,7 +2621,7 @@ size_t ZSTDv05_decompressBegin(ZSTDv05_DCtx* dctx) dctx->base = NULL; dctx->vBase = NULL; dctx->dictEnd = NULL; - dctx->hufTableX4[0] = HufLog; + dctx->hufTableX4[0] = ZSTD_HUFFDTABLE_CAPACITY_LOG; dctx->flagStaticTables = 0; return 0; } @@ -2834,7 +2777,7 @@ static size_t ZSTDv05_decodeFrameHeader_Part2(ZSTDv05_DCtx* zc, const void* src, static size_t ZSTDv05_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) { - const BYTE* const in = (const BYTE* const)src; + const BYTE* const in = (const BYTE*)src; BYTE headerFlags; U32 cSize; @@ -3003,7 +2946,7 @@ static size_t ZSTDv05_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t FSEv05_DTable* DTableLL, FSEv05_DTable* DTableML, FSEv05_DTable* DTableOffb, const void* src, size_t srcSize, U32 flagStaticTable) { - const BYTE* const istart = (const BYTE* const)src; + const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* const iend = istart + srcSize; U32 LLtype, Offtype, MLtype; @@ -3158,10 +3101,14 @@ static void ZSTDv05_decodeSequence(seq_t* seq, seqState_t* seqState) if (litLength == MaxLL) { const U32 add = *dumps++; if (add < 255) litLength += add; - else if (dumps + 3 <= de) { - litLength = MEM_readLE24(dumps); - if (litLength&1) litLength>>=1, dumps += 3; - else litLength = (U16)(litLength)>>1, dumps += 2; + else if (dumps + 2 <= de) { + litLength = MEM_readLE16(dumps); + dumps += 2; + if ((litLength & 1) && dumps < de) { + litLength += *dumps << 16; + dumps += 1; + } + litLength>>=1; } if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */ } @@ -3191,10 +3138,14 @@ static void ZSTDv05_decodeSequence(seq_t* seq, seqState_t* seqState) if (matchLength == MaxML) { const U32 add = *dumps++; if (add < 255) matchLength += add; - else if (dumps + 3 <= de) { - matchLength = MEM_readLE24(dumps); - if (matchLength&1) matchLength>>=1, dumps += 3; - else matchLength = (U16)(matchLength)>>1, dumps += 2; + else if (dumps + 2 <= de) { + matchLength = MEM_readLE16(dumps); + dumps += 2; + if ((matchLength & 1) && dumps < de) { + matchLength += *dumps << 16; + dumps += 1; + } + matchLength >>= 1; } if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */ } @@ -3231,13 +3182,19 @@ static size_t ZSTDv05_execSequence(BYTE* op, const BYTE* const litEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; - /* check */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + /* checks */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there is no overflow in literal or match lengths, can use pointer checks */ + if (oLitEnd > oend_8) 
return ERROR(dstSize_tooSmall); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */ + if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTDv05_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTDv05_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ @@ -3303,7 +3260,7 @@ static size_t ZSTDv05_decompressSequences( { const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; - BYTE* const ostart = (BYTE* const)dst; + BYTE* const ostart = (BYTE*)dst; BYTE* op = ostart; BYTE* const oend = ostart + maxDstSize; size_t errorCode, dumpsLength=0; @@ -3359,8 +3316,10 @@ static size_t ZSTDv05_decompressSequences( size_t lastLLSize = litEnd - litPtr; if (litPtr > litEnd) return ERROR(corruption_detected); /* too many literals already used */ if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall); - memcpy(op, litPtr, lastLLSize); - op += lastLLSize; + if (lastLLSize > 0) { + memcpy(op, litPtr, lastLLSize); + op += lastLLSize; + } } return op-ostart; @@ -3414,7 +3373,7 @@ static size_t ZSTDv05_decompress_continueDCtx(ZSTDv05_DCtx* dctx, { const BYTE* ip = (const BYTE*)src; const BYTE* iend = ip + srcSize; - BYTE* const ostart = (BYTE* const)dst; + BYTE* const ostart = (BYTE*)dst; BYTE* op = ostart; BYTE* const oend = ostart + maxDstSize; size_t remainingSize = srcSize; @@ -3641,6 +3600,7 @@ size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSi } dctx->stage = ZSTDv05ds_decodeBlockHeader; dctx->expected = ZSTDv05_blockHeaderSize; + if (ZSTDv05_isError(rSize)) return rSize; dctx->previousDstEnd = (char*)dst + rSize; return rSize; } @@ -3741,7 +3701,7 @@ size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, s Buffered version of Zstd compression library Copyright (C) 2015-2016, Yann Collet. 
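
The MEM_readLE16()-based rewrite of the length escapes in ZSTDv05_decodeSequence() above decodes the same format as the old MEM_readLE24() path, but never touches bytes that are not actually in the dumps buffer: after the 255 marker, a length is stored as a little-endian value equal to (length << 1) | flag, where flag = 1 means a third byte carries bits 16..23. A standalone model of the new read sequence (hypothetical helper, simplified bounds handling):

    #include <assert.h>

    static unsigned readLongLength_sketch(const unsigned char** dumpsPtr, const unsigned char* de)
    {
        const unsigned char* dumps = *dumpsPtr;
        unsigned length = 0;
        if (dumps + 2 <= de) {
            length = (unsigned)dumps[0] | ((unsigned)dumps[1] << 8);   /* MEM_readLE16 */
            dumps += 2;
            if ((length & 1) && dumps < de) {    /* flag bit set: one extension byte follows */
                length += (unsigned)*dumps << 16;
                dumps += 1;
            }
            length >>= 1;                        /* drop the flag bit */
        }
        *dumpsPtr = dumps;
        return length;
    }

    int main(void)
    {
        const unsigned char twoByte[2]   = { 0x08, 0x00 };         /* (4 << 1) | 0 */
        const unsigned char threeByte[3] = { 0x01, 0x00, 0x02 };   /* (0x10000 << 1) | 1, little-endian */
        const unsigned char* p = twoByte;
        assert(readLongLength_sketch(&p, twoByte + 2) == 4);
        p = threeByte;
        assert(readLongLength_sketch(&p, threeByte + 3) == 0x10000);
        return 0;
    }
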
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -3788,7 +3748,9 @@ static size_t ZBUFFv05_blockHeaderSize = 3; static size_t ZBUFFv05_limitCopy(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { size_t length = MIN(maxDstSize, srcSize); - memcpy(dst, src, length); + if (length > 0) { + memcpy(dst, src, length); + } return length; } @@ -3925,7 +3887,7 @@ size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDst *maxDstSizePtr = 0; return headerSize - zbc->hPos; } - // zbc->stage = ZBUFFv05ds_decodeHeader; break; /* useless : stage follows */ + /* zbc->stage = ZBUFFv05ds_decodeHeader; break; */ /* useless : stage follows */ } /* fall-through */ case ZBUFFv05ds_decodeHeader: @@ -3998,7 +3960,7 @@ size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDst if (!decodedSize) { zbc->stage = ZBUFFv05ds_read; break; } /* this was just a header */ zbc->outEnd = zbc->outStart + decodedSize; zbc->stage = ZBUFFv05ds_flush; - // break; /* ZBUFFv05ds_flush follows */ + /* break; */ /* ZBUFFv05ds_flush follows */ } } /* fall-through */ diff --git a/lib/legacy/zstd_v05.h b/lib/legacy/zstd_v05.h index 4a979854b36..2dcffc92367 100644 --- a/lib/legacy/zstd_v05.h +++ b/lib/legacy/zstd_v05.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -19,7 +19,7 @@ extern "C" { * Dependencies ***************************************/ #include <stddef.h> /* size_t */ -#include "mem.h" /* U64, U32 */ +#include "../common/mem.h" /* U64, U32 */ /* ************************************* diff --git a/lib/legacy/zstd_v06.c b/lib/legacy/zstd_v06.c index f907a3a7122..fb8c14df914 100644 --- a/lib/legacy/zstd_v06.c +++ b/lib/legacy/zstd_v06.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -14,7 +14,8 @@ #include <stddef.h> /* size_t, ptrdiff_t */ #include <string.h> /* memcpy */ #include <stdlib.h> /* malloc, free, qsort */ -#include "error_private.h" +#include "../common/compiler.h" +#include "../common/error_private.h" @@ -23,7 +24,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet.
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -67,22 +68,17 @@ extern "C" { # include <stdlib.h> /* _byteswap_ulong */ # include <intrin.h> /* _byteswap_* */ #endif -#if defined(__GNUC__) -# define MEM_STATIC static __attribute__((unused)) -#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define MEM_STATIC static inline -#elif defined(_MSC_VER) -# define MEM_STATIC static __inline -#else -# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ -#endif /*-************************************************************** * Basic Types *****************************************************************/ #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include <stdint.h> +# if defined(_AIX) +# include <inttypes.h> +# else +# include <stdint.h> /* intptr_t */ +# endif typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; @@ -104,27 +100,6 @@ extern "C" { /*-************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets depending on alignment. - * In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) -# define MEM_FORCE_MEMORY_ACCESS 2 -# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ - (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } @@ -135,33 +110,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard, by lying on structure alignment. 
-Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -182,9 +130,6 @@ MEM_STATIC void MEM_write16(void* memPtr, U16 value) memcpy(memPtr, &value, sizeof(value)); } - -#endif /* MEM_FORCE_MEMORY_ACCESS */ - MEM_STATIC U32 MEM_swap32(U32 in) { #if defined(_MSC_VER) /* Visual Studio */ @@ -280,7 +225,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) Header File for static linking only Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -304,7 +249,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net + - zstd homepage : https://facebook.github.io/zstd */ #ifndef ZSTDv06_STATIC_H #define ZSTDv06_STATIC_H @@ -411,7 +356,7 @@ ZSTDLIBv06_API size_t ZSTDv06_decompressBlock(ZSTDv06_DCtx* dctx, void* dst, siz Header File for include Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -478,7 +423,7 @@ typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t; #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ #define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ -#define HufLog 12 +#define ZSTD_HUFFDTABLE_CAPACITY_LOG 12 #define IS_HUF 0 #define IS_PCH 1 @@ -607,9 +552,9 @@ typedef struct { U32 cachedLitLength; const BYTE* cachedLiterals; ZSTDv06_stats_t stats; -} seqStore_t; +} SeqStore_t; -void ZSTDv06_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq); +void ZSTDv06_seqToCodes(const SeqStore_t* seqStorePtr, size_t const nbSeq); #endif /* ZSTDv06_CCOMMON_H_MODULE */ @@ -618,7 +563,7 @@ void ZSTDv06_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq); Public Prototypes declaration Copyright (C) 2013-2016, Yann Collet. 
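
The HufLog -> ZSTD_HUFFDTABLE_CAPACITY_LOG rename in these headers is purely mechanical, but the constant itself is load-bearing: it fixes the capacity of the decoder-side Huffman table, and cell 0 of that table records the maximum table log the remaining cells were sized for. Assuming the DTable-size macro these files use (one header cell plus 2^log entries; restated here with hypothetical names):

    /* hypothetical restatement of the sizing macro used by the v05/v06 headers */
    #define HUF_DTABLE_SIZE_SKETCH(maxTableLog)  (1 + (1 << (maxTableLog)))

    static unsigned hufTableX4_sketch[HUF_DTABLE_SIZE_SKETCH(12)];  /* 4097 cells when the capacity log is 12 */

    static void initTable_sketch(void)
    {
        hufTableX4_sketch[0] = 12;   /* as in dctx->hufTableX4[0] = ZSTD_HUFFDTABLE_CAPACITY_LOG; */
    }
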
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -766,7 +711,7 @@ If there is an error, the function will return an error code, which can be teste header file (to include) Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -856,11 +801,10 @@ MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, unsigned nbBits); MEM_STATIC unsigned BITv06_highbit32 ( U32 val) { # if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - _BitScanReverse ( &r, val ); - return (unsigned) r; + unsigned long r; + return _BitScanReverse(&r, val) ? (unsigned)r : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ - return 31 - __builtin_clz (val); + return __builtin_clz (val) ^ 31; # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; @@ -928,7 +872,7 @@ MEM_STATIC size_t BITv06_initDStream(BITv06_DStream_t* bitD, const void* srcBuff } /*! BITv06_lookBitsFast() : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv06_lookBitsFast(const BITv06_DStream_t* bitD, U32 nbBits) { U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -948,7 +892,7 @@ MEM_STATIC size_t BITv06_readBits(BITv06_DStream_t* bitD, U32 nbBits) } /*! BITv06_readBitsFast() : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, U32 nbBits) { size_t const value = BITv06_lookBitsFast(bitD, nbBits); @@ -1002,7 +946,7 @@ MEM_STATIC unsigned BITv06_endOfDStream(const BITv06_DStream_t* DStream) header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1210,7 +1154,7 @@ MEM_STATIC BYTE FSEv06_decodeSymbolFast(FSEv06_DState_t* DStatePtr, BITv06_DStre Common functions of New Generation Entropy library Copyright (C) 2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1258,7 +1202,7 @@ static unsigned HUFv06_isError(size_t code) { return ERR_isError(code); } /*-************************************************************** * FSE NCount encoding-decoding ****************************************************************/ -static short FSEv06_abs(short a) { return a<0 ? -a : a; } +static short FSEv06_abs(short a) { return a<0 ? 
(short)-a : a; } size_t FSEv06_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, const void* headerBuffer, size_t hbSize) @@ -1355,7 +1299,7 @@ size_t FSEv06_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned FSE : Finite State Entropy decoder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1679,7 +1623,7 @@ size_t FSEv06_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t header file Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1749,7 +1693,7 @@ size_t HUFv06_compressBound(size_t size); /**< maximum compressed size */ header file, for static linking only Copyright (C) 2013-2016, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1862,7 +1806,7 @@ MEM_STATIC size_t HUFv06_readStats(BYTE* huffWeight, size_t hwSize, U32* rankSta if (!srcSize) return ERROR(srcSize_wrong); iSize = ip[0]; - //memset(huffWeight, 0, hwSize); /* is not necessary, even though some analyzer complain ... */ + /* memset(huffWeight, 0, hwSize); */ /* is not necessary, even though some analyzer complain ... */ if (iSize >= 128) { /* special header */ if (iSize >= (242)) { /* RLE */ @@ -1931,7 +1875,7 @@ MEM_STATIC size_t HUFv06_readStats(BYTE* huffWeight, size_t hwSize, U32* rankSta Huffman decoder, part of New Generation Entropy library Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2014,7 +1958,7 @@ size_t HUFv06_readDTableX2 (U16* DTable, const void* src, size_t srcSize) HUFv06_DEltX2* const dt = (HUFv06_DEltX2*)dtPtr; HUFv06_STATIC_ASSERT(sizeof(HUFv06_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */ - //memset(huffWeight, 0, sizeof(huffWeight)); /* is not necessary, even though some analyzer complain ... */ + /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */ iSize = HUFv06_readStats(huffWeight, HUFv06_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); if (HUFv06_isError(iSize)) return iSize; @@ -2340,7 +2284,7 @@ size_t HUFv06_readDTableX4 (U32* DTable, const void* src, size_t srcSize) HUFv06_STATIC_ASSERT(sizeof(HUFv06_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */ if (memLog > HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge); - //memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzer complain ... 
*/ + /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */ iSize = HUFv06_readStats(weightList, HUFv06_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); if (HUFv06_isError(iSize)) return iSize; @@ -2664,19 +2608,19 @@ size_t HUFv06_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cS { U32 algoNb = 0; if (Dtime[1] < Dtime[0]) algoNb = 1; - // if (Dtime[2] < Dtime[algoNb]) algoNb = 2; /* current speed of HUFv06_decompress4X6 is not good */ + /* if (Dtime[2] < Dtime[algoNb]) algoNb = 2; */ /* current speed of HUFv06_decompress4X6 is not good */ return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); } - //return HUFv06_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */ - //return HUFv06_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */ - //return HUFv06_decompress4X6(dst, dstSize, cSrc, cSrcSize); /* multi-streams quad-symbols decoding */ + /* return HUFv06_decompress4X2(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams single-symbol decoding */ + /* return HUFv06_decompress4X4(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams double-symbols decoding */ + /* return HUFv06_decompress4X6(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams quad-symbols decoding */ } /* Common functions of Zstd compression library Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2700,7 +2644,7 @@ size_t HUFv06_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cS OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -2730,7 +2674,7 @@ const char* ZBUFFv06_getErrorName(size_t errorCode) { return ERR_getErrorName(er zstd - standard compression library Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2754,7 +2698,7 @@ const char* ZBUFFv06_getErrorName(size_t errorCode) { return ERR_getErrorName(er OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
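
A word on the `(BYTE* const)dst` -> `(BYTE*)dst` cleanups in the surrounding hunks: a cast produces an rvalue, so a top-level qualifier in the cast's type has no effect, and some compilers warn about the ignored qualifier. The const still belongs on the declared variable, which is what the code keeps (minimal sketch, hypothetical function name):

    typedef unsigned char BYTE;

    static BYTE* frameStart_sketch(void* dst)
    {
        BYTE* const ostart = (BYTE*)dst;   /* const on the object, not inside the cast */
        return ostart;
    }
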
You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -2806,7 +2750,7 @@ struct ZSTDv06_DCtx_s FSEv06_DTable LLTable[FSEv06_DTABLE_SIZE_U32(LLFSELog)]; FSEv06_DTable OffTable[FSEv06_DTABLE_SIZE_U32(OffFSELog)]; FSEv06_DTable MLTable[FSEv06_DTABLE_SIZE_U32(MLFSELog)]; - unsigned hufTableX4[HUFv06_DTABLE_SIZE(HufLog)]; + unsigned hufTableX4[HUFv06_DTABLE_SIZE(ZSTD_HUFFDTABLE_CAPACITY_LOG)]; const void* previousDstEnd; const void* base; const void* vBase; @@ -2834,7 +2778,7 @@ size_t ZSTDv06_decompressBegin(ZSTDv06_DCtx* dctx) dctx->base = NULL; dctx->vBase = NULL; dctx->dictEnd = NULL; - dctx->hufTableX4[0] = HufLog; + dctx->hufTableX4[0] = ZSTD_HUFFDTABLE_CAPACITY_LOG; dctx->flagRepeatTable = 0; return 0; } @@ -3025,7 +2969,7 @@ typedef struct * Provides the size of compressed block from block header `src` */ static size_t ZSTDv06_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) { - const BYTE* const in = (const BYTE* const)src; + const BYTE* const in = (const BYTE*)src; U32 cSize; if (srcSize < ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong); @@ -3219,7 +3163,7 @@ static size_t ZSTDv06_decodeSeqHeaders(int* nbSeqPtr, FSEv06_DTable* DTableLL, FSEv06_DTable* DTableML, FSEv06_DTable* DTableOffb, U32 flagRepeatTable, const void* src, size_t srcSize) { - const BYTE* const istart = (const BYTE* const)src; + const BYTE* const istart = (const BYTE*)src; const BYTE* const iend = istart + srcSize; const BYTE* ip = istart; @@ -3370,13 +3314,19 @@ static size_t ZSTDv06_execSequence(BYTE* op, const BYTE* const iLitEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; - /* check */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + /* checks */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there is no overflow in literal or match lengths, can use pointer checks */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + if (iLitEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTDv06_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTDv06_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = iLitEnd; /* update for next sequence */ @@ -3441,7 +3391,7 @@ static size_t ZSTDv06_decompressSequences( { const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; - BYTE* const ostart = (BYTE* const)dst; + BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + maxDstSize; BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; @@ -3501,8 +3451,10 @@ static size_t ZSTDv06_decompressSequences( { size_t const lastLLSize = litEnd - litPtr; if (litPtr > litEnd) return ERROR(corruption_detected); /* too many literals already used */ if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall); - memcpy(op, litPtr, lastLLSize); - op += 
lastLLSize; + if (lastLLSize > 0) { + memcpy(op, litPtr, lastLLSize); + op += lastLLSize; + } } return op-ostart; @@ -3555,7 +3507,7 @@ static size_t ZSTDv06_decompressFrame(ZSTDv06_DCtx* dctx, { const BYTE* ip = (const BYTE*)src; const BYTE* const iend = ip + srcSize; - BYTE* const ostart = (BYTE* const)dst; + BYTE* const ostart = (BYTE*)dst; BYTE* op = ostart; BYTE* const oend = ostart + dstCapacity; size_t remainingSize = srcSize; @@ -3785,6 +3737,7 @@ size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapac } dctx->stage = ZSTDds_decodeBlockHeader; dctx->expected = ZSTDv06_blockHeaderSize; + if (ZSTDv06_isError(rSize)) return rSize; dctx->previousDstEnd = (char*)dst + rSize; return rSize; } @@ -3887,7 +3840,7 @@ size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, s Buffered version of Zstd compression library Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -3911,7 +3864,7 @@ size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, s OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -3966,6 +3919,10 @@ ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void) if (zbd==NULL) return NULL; memset(zbd, 0, sizeof(*zbd)); zbd->zd = ZSTDv06_createDCtx(); + if (zbd->zd==NULL) { + ZBUFFv06_freeDCtx(zbd); /* avoid leaking the context */ + return NULL; + } zbd->stage = ZBUFFds_init; return zbd; } @@ -4000,7 +3957,9 @@ size_t ZBUFFv06_decompressInit(ZBUFFv06_DCtx* zbd) MEM_STATIC size_t ZBUFFv06_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t length = MIN(dstCapacity, srcSize); - memcpy(dst, src, length); + if (length > 0) { + memcpy(dst, src, length); + } return length; } @@ -4031,7 +3990,8 @@ size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd, size_t const toLoad = hSize - zbd->lhSize; /* if hSize!=0, hSize > zbd->lhSize */ if (ZSTDv06_isError(hSize)) return hSize; if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */ - memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); + if (ip != NULL) + memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); zbd->lhSize += iend-ip; *dstCapacityPtr = 0; return (hSize - zbd->lhSize) + ZSTDv06_blockHeaderSize; /* remaining header bytes + next block header */ @@ -4109,7 +4069,7 @@ size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd, if (!decodedSize) { zbd->stage = ZBUFFds_read; break; } /* this was just a header */ zbd->outEnd = zbd->outStart + decodedSize; zbd->stage = ZBUFFds_flush; - // break; /* ZBUFFds_flush follows */ + /* break; */ /* ZBUFFds_flush follows */ } } /* fall-through */ diff --git a/lib/legacy/zstd_v06.h b/lib/legacy/zstd_v06.h index 07818571dca..633891010d7 100644 --- a/lib/legacy/zstd_v06.h +++ b/lib/legacy/zstd_v06.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v07.c b/lib/legacy/zstd_v07.c index a83ddc9a68b..cdc56288cb2 100644 --- a/lib/legacy/zstd_v07.c +++ b/lib/legacy/zstd_v07.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -17,14 +17,15 @@ #ifndef XXH_STATIC_LINKING_ONLY # define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ #endif -#include "xxhash.h" /* XXH64_* */ +#include "../common/xxhash.h" /* XXH64_* */ #include "zstd_v07.h" #define FSEv07_STATIC_LINKING_ONLY /* FSEv07_MIN_TABLELOG */ #define HUFv07_STATIC_LINKING_ONLY /* HUFv07_TABLELOG_ABSOLUTEMAX */ #define ZSTDv07_STATIC_LINKING_ONLY -#include "error_private.h" +#include "../common/compiler.h" +#include "../common/error_private.h" #ifdef ZSTDv07_STATIC_LINKING_ONLY @@ -184,7 +185,7 @@ ZSTDLIBv07_API size_t ZSTDv07_insertBlock(ZSTDv07_DCtx* dctx, const void* blockS low-level memory access routines Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -227,22 +228,17 @@ extern "C" { # include <stdlib.h> /* _byteswap_ulong */ # include <intrin.h> /* _byteswap_* */ #endif -#if defined(__GNUC__) -# define MEM_STATIC static __attribute__((unused)) -#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define MEM_STATIC static inline -#elif defined(_MSC_VER) -# define MEM_STATIC static __inline -#else -# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ -#endif /*-************************************************************** * Basic Types *****************************************************************/ #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include <stdint.h> +# if defined(_AIX) +# include <inttypes.h> +# else +# include <stdint.h> /* intptr_t */ +# endif typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; @@ -264,27 +260,6 @@ extern "C" { /*-************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets depending on alignment. - * In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
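The rationale comment being deleted here carries on just below, together with the MEM_FORCE_MEMORY_ACCESS fast paths it documents: the legacy mem routines now always use method 0. That is less of a loss than the old comment suggests, since modern compilers typically lower a fixed-size memcpy to a single unaligned load where the target allows it. The surviving pattern, in isolation:

    #include <string.h>

    typedef unsigned int U32;

    /* Method 0: copy through memcpy. No alignment assumption and no
     * strict-aliasing violation; usually compiles to one load. */
    static U32 MEM_read32_sketch(const void* memPtr)
    {
        U32 val;
        memcpy(&val, memPtr, sizeof(val));
        return val;
    }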
- * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) -# define MEM_FORCE_MEMORY_ACCESS 2 -# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ - (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } @@ -295,33 +270,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard, by lying on structure alignment. -Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -342,8 +290,6 @@ MEM_STATIC void MEM_write16(void* memPtr, U16 value) memcpy(memPtr, &value, sizeof(value)); } -#endif /* MEM_FORCE_MEMORY_ACCESS */ - MEM_STATIC U32 MEM_swap32(U32 in) { #if defined(_MSC_VER) /* Visual Studio */ @@ -438,7 +384,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) header file (to include) Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -526,11 +472,10 @@ MEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, unsigned nbBits); MEM_STATIC unsigned BITv07_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - _BitScanReverse ( &r, val ); - return (unsigned) r; + unsigned long r; + return _BitScanReverse(&r, val) ? 
(unsigned)r : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ - return 31 - __builtin_clz (val); + return __builtin_clz (val) ^ 31; # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; @@ -596,7 +541,7 @@ MEM_STATIC size_t BITv07_initDStream(BITv07_DStream_t* bitD, const void* srcBuff } /*! BITv07_lookBitsFast() : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv07_lookBitsFast(const BITv07_DStream_t* bitD, U32 nbBits) { U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -616,7 +561,7 @@ MEM_STATIC size_t BITv07_readBits(BITv07_DStream_t* bitD, U32 nbBits) } /*! BITv07_readBitsFast() : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, U32 nbBits) { size_t const value = BITv07_lookBitsFast(bitD, nbBits); @@ -670,7 +615,7 @@ MEM_STATIC unsigned BITv07_endOfDStream(const BITv07_DStream_t* DStream) Public Prototypes declaration Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -978,7 +923,7 @@ MEM_STATIC BYTE FSEv07_decodeSymbolFast(FSEv07_DState_t* DStatePtr, BITv07_DStre header file Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1151,7 +1096,7 @@ size_t HUFv07_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void Common functions of New Generation Entropy library Copyright (C) 2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1314,7 +1259,7 @@ size_t HUFv07_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, if (!srcSize) return ERROR(srcSize_wrong); iSize = ip[0]; - //memset(huffWeight, 0, hwSize); /* is not necessary, even though some analyzer complain ... */ + /* memset(huffWeight, 0, hwSize); */ /* is not necessary, even though some analyzer complain ... */ if (iSize >= 128) { /* special header */ if (iSize >= (242)) { /* RLE */ @@ -1375,7 +1320,7 @@ size_t HUFv07_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, FSE : Finite State Entropy decoder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1699,7 +1644,7 @@ size_t FSEv07_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t Huffman decoder, part of New Generation Entropy library Copyright (C) 2013-2016, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1784,7 +1729,7 @@ size_t HUFv07_readDTableX2 (HUFv07_DTable* DTable, const void* src, size_t srcSi HUFv07_DEltX2* const dt = (HUFv07_DEltX2*)dtPtr; HUFv07_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUFv07_DTable)); - //memset(huffWeight, 0, sizeof(huffWeight)); /* is not necessary, even though some analyzer complain ... */ + /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */ iSize = HUFv07_readStats(huffWeight, HUFv07_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); if (HUFv07_isError(iSize)) return iSize; @@ -2148,7 +2093,7 @@ size_t HUFv07_readDTableX4 (HUFv07_DTable* DTable, const void* src, size_t srcSi HUFv07_STATIC_ASSERT(sizeof(HUFv07_DEltX4) == sizeof(HUFv07_DTable)); /* if compilation fails here, assertion is false */ if (maxTableLog > HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(tableLog_tooLarge); - //memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzer complain ... */ + /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */ iSize = HUFv07_readStats(weightList, HUFv07_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); if (HUFv07_isError(iSize)) return iSize; @@ -2530,8 +2475,8 @@ size_t HUFv07_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cS return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); } - //return HUFv07_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */ - //return HUFv07_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */ + /* return HUFv07_decompress4X2(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams single-symbol decoding */ + /* return HUFv07_decompress4X4(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams double-symbols decoding */ } size_t HUFv07_decompress4X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) @@ -2577,7 +2522,7 @@ size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, Common functions of Zstd compression library Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2601,7 +2546,7 @@ size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -2647,7 +2592,7 @@ static void ZSTDv07_defaultFreeFunction(void* opaque, void* address) Header File for include Copyright (C) 2014-2016, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2717,7 +2662,7 @@ typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t; #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ #define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ -#define HufLog 12 +#define ZSTD_HUFFDTABLE_CAPACITY_LOG 12 typedef enum { lbt_huffman, lbt_repeat, lbt_raw, lbt_rle } litBlockType_t; #define LONGNBSEQ 0x7F00 @@ -2842,9 +2787,9 @@ typedef struct { U32 cachedLitLength; const BYTE* cachedLiterals; ZSTDv07_stats_t stats; -} seqStore_t; +} SeqStore_t; -void ZSTDv07_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq); +void ZSTDv07_seqToCodes(const SeqStore_t* seqStorePtr, size_t const nbSeq); /* custom memory allocation functions */ static const ZSTDv07_customMem defaultCustomMem = { ZSTDv07_defaultAllocFunction, ZSTDv07_defaultFreeFunction, NULL }; @@ -2854,7 +2799,7 @@ static const ZSTDv07_customMem defaultCustomMem = { ZSTDv07_defaultAllocFunction zstd - standard compression library Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2878,7 +2823,7 @@ static const ZSTDv07_customMem defaultCustomMem = { ZSTDv07_defaultAllocFunction OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
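Two related details land in the hunks that follow: HufLog is renamed to ZSTD_HUFFDTABLE_CAPACITY_LOG, and the DCtx initializer keeps its odd-looking multiply by 0x1000001. The multiply is byte packing: for any x below 256, x * 0x1000001 equals (x << 24) | x, which appears to plant the table-log value in both the low and high bytes of the 32-bit table description stored in hufTable[0]. A quick check of the arithmetic (the DTableDesc interpretation is an assumption; the identity is not):

    #include <assert.h>

    int main(void)
    {
        unsigned const x = 12;                  /* ZSTD_HUFFDTABLE_CAPACITY_LOG */
        unsigned const packed = x * 0x1000001;  /* == 0x0C00000C */
        assert(packed == ((x << 24) | x));      /* holds for any x < 256 */
        return 0;
    }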
You can contact the author at : - - zstd homepage : http://www.zstd.net + - zstd homepage : https://facebook.github.io/zstd */ /* *************************************************************** @@ -2931,7 +2876,7 @@ struct ZSTDv07_DCtx_s FSEv07_DTable LLTable[FSEv07_DTABLE_SIZE_U32(LLFSELog)]; FSEv07_DTable OffTable[FSEv07_DTABLE_SIZE_U32(OffFSELog)]; FSEv07_DTable MLTable[FSEv07_DTABLE_SIZE_U32(MLFSELog)]; - HUFv07_DTable hufTable[HUFv07_DTABLE_SIZE(HufLog)]; /* can accommodate HUFv07_decompress4X */ + HUFv07_DTable hufTable[HUFv07_DTABLE_SIZE(ZSTD_HUFFDTABLE_CAPACITY_LOG)]; /* can accommodate HUFv07_decompress4X */ const void* previousDstEnd; const void* base; const void* vBase; @@ -2967,7 +2912,7 @@ size_t ZSTDv07_decompressBegin(ZSTDv07_DCtx* dctx) dctx->base = NULL; dctx->vBase = NULL; dctx->dictEnd = NULL; - dctx->hufTable[0] = (HUFv07_DTable)((HufLog)*0x1000001); + dctx->hufTable[0] = (HUFv07_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001); dctx->litEntropy = dctx->fseEntropy = 0; dctx->dictID = 0; { int i; for (i=0; i<ZSTDv07_REP_NUM; i++) dctx->rep[i] = repStartValue[i]; } @@ -3254,7 +3199,7 @@ typedef struct * Provides the size of compressed block from block header `src` */ static size_t ZSTDv07_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) { - const BYTE* const in = (const BYTE* const)src; + const BYTE* const in = (const BYTE*)src; U32 cSize; if (srcSize < ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong); @@ -3272,7 +3217,9 @@ static size_t ZSTDv07_getcBlockSize(const void* src, size_t srcSize, blockProper static size_t ZSTDv07_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall); - memcpy(dst, src, srcSize); + if (srcSize > 0) { + memcpy(dst, src, srcSize); + } return srcSize; } @@ -3447,7 +3394,7 @@ static size_t ZSTDv07_decodeSeqHeaders(int* nbSeqPtr, FSEv07_DTable* DTableLL, FSEv07_DTable* DTableML, FSEv07_DTable* DTableOffb, U32 flagRepeatTable, const void* src, size_t srcSize) { - const BYTE* const istart = (const BYTE* const)src; + const BYTE* const istart = (const BYTE*)src; const BYTE* const iend = istart + srcSize; const BYTE* ip = istart; @@ -3597,11 +3544,14 @@ size_t ZSTDv07_execSequence(BYTE* op, const BYTE* match = oLitEnd - sequence.offset; /* check */ - if ((oLitEnd>oend_w) | (oMatchEnd>oend)) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ - if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + assert(oend >= op); + if (sequence.litLength + WILDCOPY_OVERLENGTH > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequenceLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + assert(litLimit >= *litPtr); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); /* copy Literals */ - ZSTDv07_wildcopy(op, *litPtr, sequence.litLength); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ + ZSTDv07_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = iLitEnd; /* update for next sequence */ @@ -3615,7 +3565,7 @@ size_t ZSTDv07_execSequence(BYTE* op, return sequenceLength; } /* span extDict & currentPrefixSegment */ - { size_t const length1 = dictEnd - match; + { size_t const length1 = (size_t)(dictEnd - match); memmove(oLitEnd, match, length1); op = oLitEnd + length1;
sequence.matchLength -= length1; @@ -3666,7 +3616,7 @@ static size_t ZSTDv07_decompressSequences( { const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; - BYTE* const ostart = (BYTE* const)dst; + BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + maxDstSize; BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; @@ -3712,10 +3662,12 @@ static size_t ZSTDv07_decompressSequences( /* last literal segment */ { size_t const lastLLSize = litEnd - litPtr; - //if (litPtr > litEnd) return ERROR(corruption_detected); /* too many literals already used */ + /* if (litPtr > litEnd) return ERROR(corruption_detected); */ /* too many literals already used */ if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall); - memcpy(op, litPtr, lastLLSize); - op += lastLLSize; + if (lastLLSize > 0) { + memcpy(op, litPtr, lastLLSize); + op += lastLLSize; + } } return op-ostart; @@ -3776,7 +3728,9 @@ ZSTDLIBv07_API size_t ZSTDv07_insertBlock(ZSTDv07_DCtx* dctx, const void* blockS static size_t ZSTDv07_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length) { if (length > dstCapacity) return ERROR(dstSize_tooSmall); - memset(dst, byte, length); + if (length > 0) { + memset(dst, byte, length); + } return length; } @@ -3789,7 +3743,7 @@ static size_t ZSTDv07_decompressFrame(ZSTDv07_DCtx* dctx, { const BYTE* ip = (const BYTE*)src; const BYTE* const iend = ip + srcSize; - BYTE* const ostart = (BYTE* const)dst; + BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart; size_t remainingSize = srcSize; @@ -4053,8 +4007,8 @@ size_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapac } dctx->stage = ZSTDds_decodeBlockHeader; dctx->expected = ZSTDv07_blockHeaderSize; - dctx->previousDstEnd = (char*)dst + rSize; if (ZSTDv07_isError(rSize)) return rSize; + dctx->previousDstEnd = (char*)dst + rSize; if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize); return rSize; } @@ -4247,7 +4201,7 @@ ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx, Buffered version of Zstd compression library Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -4271,7 +4225,7 @@ ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx, OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
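A pattern recurring in the hunks on both sides of this license text: every memcpy and memset now sits behind a length check. The C standard makes memcpy undefined for NULL pointers even with a zero length, and the streaming entry points can legitimately reach these copies with an empty (possibly NULL) buffer, so the guards close a real, if theoretical, UB hole. The shape of the fix, extracted:

    #include <string.h>

    /* Sketch of the guarded limitCopy helpers: skip the call entirely
     * when there is nothing to copy, since src/dst may be NULL then. */
    static size_t limitCopy_sketch(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize)
    {
        size_t const length = (dstCapacity < srcSize) ? dstCapacity : srcSize;
        if (length > 0) {
            memcpy(dst, src, length);
        }
        return length;
    }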
You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -4378,7 +4332,9 @@ size_t ZBUFFv07_decompressInit(ZBUFFv07_DCtx* zbd) MEM_STATIC size_t ZBUFFv07_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t const length = MIN(dstCapacity, srcSize); - memcpy(dst, src, length); + if (length > 0) { + memcpy(dst, src, length); + } return length; } @@ -4409,7 +4365,8 @@ size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd, if (hSize != 0) { size_t const toLoad = hSize - zbd->lhSize; /* if hSize!=0, hSize > zbd->lhSize */ if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */ - memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); + if (ip != NULL) + memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); zbd->lhSize += iend-ip; *dstCapacityPtr = 0; return (hSize - zbd->lhSize) + ZSTDv07_blockHeaderSize; /* remaining header bytes + next block header */ diff --git a/lib/legacy/zstd_v07.h b/lib/legacy/zstd_v07.h index a566c1d102a..1ff39041f88 100644 --- a/lib/legacy/zstd_v07.h +++ b/lib/legacy/zstd_v07.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/libzstd.mk b/lib/libzstd.mk new file mode 100644 index 00000000000..d1744973e2c --- /dev/null +++ b/lib/libzstd.mk @@ -0,0 +1,238 @@ +# ################################################################ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). +# You may select, at your option, one of the above-listed licenses. +# ################################################################ + +# This included Makefile provides the following variables : +# LIB_SRCDIR, LIB_BINDIR + +# Ensure the file is not included twice +# Note : must be included after setting the default target +ifndef LIBZSTD_MK_INCLUDED +LIBZSTD_MK_INCLUDED := 1 + +################################################################## +# Input Variables +################################################################## + +# By default, library's directory is same as this included makefile +LIB_SRCDIR ?= $(dir $(realpath $(lastword $(MAKEFILE_LIST)))) +LIB_BINDIR ?= $(LIB_SRCDIR) + +# ZSTD_LIB_MINIFY is a helper variable that +# configures a bunch of other variables to space-optimized defaults. 
+ZSTD_LIB_MINIFY ?= 0 + +# Legacy support +ifneq ($(ZSTD_LIB_MINIFY), 0) + ZSTD_LEGACY_SUPPORT ?= 0 +else + ZSTD_LEGACY_SUPPORT ?= 5 +endif +ZSTD_LEGACY_MULTITHREADED_API ?= 0 + +# Build size optimizations +ifneq ($(ZSTD_LIB_MINIFY), 0) + HUF_FORCE_DECOMPRESS_X1 ?= 1 + HUF_FORCE_DECOMPRESS_X2 ?= 0 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT ?= 1 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG ?= 0 + ZSTD_NO_INLINE ?= 1 + ZSTD_STRIP_ERROR_STRINGS ?= 1 +else + HUF_FORCE_DECOMPRESS_X1 ?= 0 + HUF_FORCE_DECOMPRESS_X2 ?= 0 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT ?= 0 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG ?= 0 + ZSTD_NO_INLINE ?= 0 + ZSTD_STRIP_ERROR_STRINGS ?= 0 +endif + +# Assembly support +ZSTD_NO_ASM ?= 0 + +ZSTD_LIB_EXCLUDE_COMPRESSORS_DFAST_AND_UP ?= 0 +ZSTD_LIB_EXCLUDE_COMPRESSORS_GREEDY_AND_UP ?= 0 + +################################################################## +# libzstd helpers +################################################################## + +VOID ?= /dev/null + +# Make 4.3 doesn't support '\#' anymore (https://lwn.net/Articles/810071/) +NUM_SYMBOL := \# + +# define silent mode as default (verbose mode with V=1 or VERBOSE=1) +# Note : must be defined _after_ the default target +$(V)$(VERBOSE).SILENT: + +# When cross-compiling from linux to windows, +# one might need to specify TARGET_SYSTEM as "Windows." +# Building from Fedora fails without it. +# (but Ubuntu and Debian don't need to set anything) +TARGET_SYSTEM ?= $(OS) + +# Version numbers +LIBVER_SRC := $(LIB_SRCDIR)/zstd.h +LIBVER_MAJOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MAJOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` +LIBVER_MINOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MINOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` +LIBVER_PATCH_SCRIPT:=`sed -n '/define ZSTD_VERSION_RELEASE/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` +LIBVER_SCRIPT:= $(LIBVER_MAJOR_SCRIPT).$(LIBVER_MINOR_SCRIPT).$(LIBVER_PATCH_SCRIPT) +LIBVER_MAJOR := $(shell echo $(LIBVER_MAJOR_SCRIPT)) +LIBVER_MINOR := $(shell echo $(LIBVER_MINOR_SCRIPT)) +LIBVER_PATCH := $(shell echo $(LIBVER_PATCH_SCRIPT)) +LIBVER := $(shell echo $(LIBVER_SCRIPT)) +CCVER := $(shell $(CC) --version) +ZSTD_VERSION?= $(LIBVER) + +ifneq ($(ZSTD_LIB_MINIFY), 0) + HAVE_CC_OZ ?= $(shell echo "" | $(CC) -Oz -x c -c - -o /dev/null 2> /dev/null && echo 1 || echo 0) +ifneq ($(HAVE_CC_OZ), 0) + # Some compilers (clang) support an even more space-optimized setting. 
+ CFLAGS += -Oz +else + CFLAGS += -Os +endif + CFLAGS += -fno-stack-protector -fomit-frame-pointer -fno-ident \ + -DDYNAMIC_BMI2=0 -DNDEBUG +else + CFLAGS ?= -O3 +endif + +DEBUGLEVEL ?= 0 +CPPFLAGS += -DXXH_NAMESPACE=ZSTD_ -DDEBUGLEVEL=$(DEBUGLEVEL) +ifeq ($(TARGET_SYSTEM),Windows_NT) # MinGW assumed + CPPFLAGS += -D__USE_MINGW_ANSI_STDIO # compatibility with %zu formatting +endif +DEBUGFLAGS= -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ + -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \ + -Wstrict-prototypes -Wundef -Wpointer-arith \ + -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ + -Wredundant-decls -Wmissing-prototypes -Wc++-compat +CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS) +ASFLAGS += $(DEBUGFLAGS) $(MOREFLAGS) $(CFLAGS) +LDFLAGS += $(MOREFLAGS) +FLAGS = $(CPPFLAGS) $(CFLAGS) $(ASFLAGS) $(LDFLAGS) + +ifndef ALREADY_APPENDED_NOEXECSTACK +export ALREADY_APPENDED_NOEXECSTACK := 1 +ifeq ($(shell echo "int main(int argc, char* argv[]) { (void)argc; (void)argv; return 0; }" | $(CC) $(FLAGS) -z noexecstack -x c -Werror - -o $(VOID) 2>$(VOID) && echo 1 || echo 0),1) +LDFLAGS += -z noexecstack +endif +ifeq ($(shell echo | $(CC) $(FLAGS) -Wa,--noexecstack -x assembler -Werror -c - -o $(VOID) 2>$(VOID) && echo 1 || echo 0),1) +CFLAGS += -Wa,--noexecstack +# CFLAGS are also added to ASFLAGS +else ifeq ($(shell echo | $(CC) $(FLAGS) -Qunused-arguments -Wa,--noexecstack -x assembler -Werror -c - -o $(VOID) 2>$(VOID) && echo 1 || echo 0),1) +# See e.g.: https://github.com/android/ndk/issues/171 +CFLAGS += -Qunused-arguments -Wa,--noexecstack +# CFLAGS are also added to ASFLAGS +endif +endif + +ifeq ($(shell echo "int main(int argc, char* argv[]) { (void)argc; (void)argv; return 0; }" | $(CC) $(FLAGS) -z cet-report=error -x c -Werror - -o $(VOID) 2>$(VOID) && echo 1 || echo 0),1) +LDFLAGS += -z cet-report=error +endif + +HAVE_COLORNEVER = $(shell echo a | grep --color=never a > /dev/null 2> /dev/null && echo 1 || echo 0) +GREP_OPTIONS ?= +ifeq ($(HAVE_COLORNEVER), 1) + GREP_OPTIONS += --color=never +endif +GREP = grep $(GREP_OPTIONS) + +ZSTD_COMMON_FILES := $(sort $(wildcard $(LIB_SRCDIR)/common/*.c)) +ZSTD_COMPRESS_FILES := $(sort $(wildcard $(LIB_SRCDIR)/compress/*.c)) +ZSTD_DECOMPRESS_FILES := $(sort $(wildcard $(LIB_SRCDIR)/decompress/*.c)) +ZSTD_DICTBUILDER_FILES := $(sort $(wildcard $(LIB_SRCDIR)/dictBuilder/*.c)) +ZSTD_DEPRECATED_FILES := $(sort $(wildcard $(LIB_SRCDIR)/deprecated/*.c)) +ZSTD_LEGACY_FILES := + +ZSTD_DECOMPRESS_AMD64_ASM_FILES := $(sort $(wildcard $(LIB_SRCDIR)/decompress/*_amd64.S)) + +ifneq ($(ZSTD_NO_ASM), 0) + CPPFLAGS += -DZSTD_DISABLE_ASM +else + # Unconditionally add the ASM files they are disabled by + # macros in the .S file. 
+ ZSTD_DECOMPRESS_FILES += $(ZSTD_DECOMPRESS_AMD64_ASM_FILES) +endif + +ifneq ($(HUF_FORCE_DECOMPRESS_X1), 0) + CFLAGS += -DHUF_FORCE_DECOMPRESS_X1 +endif + +ifneq ($(HUF_FORCE_DECOMPRESS_X2), 0) + CFLAGS += -DHUF_FORCE_DECOMPRESS_X2 +endif + +ifneq ($(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT), 0) + CFLAGS += -DZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT +endif + +ifneq ($(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG), 0) + CFLAGS += -DZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG +endif + +ifneq ($(ZSTD_NO_INLINE), 0) + CFLAGS += -DZSTD_NO_INLINE +endif + +ifneq ($(ZSTD_STRIP_ERROR_STRINGS), 0) + CFLAGS += -DZSTD_STRIP_ERROR_STRINGS +endif + +ifneq ($(ZSTD_LEGACY_MULTITHREADED_API), 0) + CFLAGS += -DZSTD_LEGACY_MULTITHREADED_API +endif + +ifneq ($(ZSTD_LIB_EXCLUDE_COMPRESSORS_DFAST_AND_UP), 0) + CFLAGS += -DZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR +else +ifneq ($(ZSTD_LIB_EXCLUDE_COMPRESSORS_GREEDY_AND_UP), 0) + CFLAGS += -DZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR +endif +endif + +ifneq ($(ZSTD_LEGACY_SUPPORT), 0) +ifeq ($(shell test $(ZSTD_LEGACY_SUPPORT) -lt 8; echo $$?), 0) + ZSTD_LEGACY_FILES += $(shell ls $(LIB_SRCDIR)/legacy/*.c | $(GREP) 'v0[$(ZSTD_LEGACY_SUPPORT)-7]') +endif +endif +CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT) + +# Include install_oses.mk from the same directory +include $(dir $(lastword $(MAKEFILE_LIST)))/install_oses.mk +LN ?= ln +CP ?= cp -f + +ifndef BUILD_DIR +ifeq ($(UNAME), Darwin) + ifeq ($(shell md5 < /dev/null > /dev/null; echo $$?), 0) + HASH ?= md5 + endif +else ifeq ($(UNAME), NetBSD) + HASH ?= md5 -n +else ifeq ($(UNAME), OpenBSD) + HASH ?= md5 +endif +HASH ?= md5sum + +HASH_DIR = conf_$(shell echo $(CC) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) $(ZSTD_FILES) | $(HASH) | cut -f 1 -d " " ) +HAVE_HASH :=$(shell echo 1 | $(HASH) > /dev/null && echo 1 || echo 0) +ifeq ($(HAVE_HASH),0) + $(info warning : could not find HASH ($(HASH)), needed to differentiate builds using different flags) + BUILD_DIR := obj/generic_noconf +endif +endif # BUILD_DIR + +ZSTD_SUBDIR := $(LIB_SRCDIR)/common $(LIB_SRCDIR)/compress $(LIB_SRCDIR)/decompress $(LIB_SRCDIR)/dictBuilder $(LIB_SRCDIR)/legacy $(LIB_SRCDIR)/deprecated +vpath %.c $(ZSTD_SUBDIR) +vpath %.S $(ZSTD_SUBDIR) + +endif # LIBZSTD_MK_INCLUDED diff --git a/lib/libzstd.pc.in b/lib/libzstd.pc.in index 1d07b91f25a..2220a579733 100644 --- a/lib/libzstd.pc.in +++ b/lib/libzstd.pc.in @@ -1,14 +1,17 @@ # ZSTD - standard compression algorithm -# Copyright (C) 2014-2016, Yann Collet, Facebook -# BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) prefix=@PREFIX@ -libdir=@LIBDIR@ +exec_prefix=@EXEC_PREFIX@ includedir=@INCLUDEDIR@ +libdir=@LIBDIR@ Name: zstd Description: fast lossless compression algorithm library -URL: http://www.zstd.net/ +URL: https://facebook.github.io/zstd/ Version: @VERSION@ -Libs: -L${libdir} -lzstd -Cflags: -I${includedir} +Libs: -L${libdir} -lzstd @LIBS_MT@ +Libs.private: @LIBS_PRIVATE@ +Cflags: -I${includedir} @LIBS_MT@ +License: BSD-3-Clause OR GPL-2.0-only diff --git a/lib/module.modulemap b/lib/module.modulemap new file mode 100644 index 00000000000..eff98dfacea --- /dev/null +++ b/lib/module.modulemap @@ -0,0 +1,35 @@ +module libzstd [extern_c] { + header "zstd.h" + export * + config_macros [exhaustive] \ + /* zstd.h */ \ + ZSTD_STATIC_LINKING_ONLY, \ + ZSTDLIB_VISIBILITY, \ + ZSTDLIB_VISIBLE, \ + ZSTDLIB_HIDDEN, \ + ZSTD_DLL_EXPORT, \ + ZSTDLIB_STATIC_API, \ + ZSTD_DISABLE_DEPRECATE_WARNINGS, \ + ZSTD_CLEVEL_DEFAULT, \ + /* zdict.h */ \ + ZDICT_STATIC_LINKING_ONLY, \ + ZDICTLIB_VISIBLE, \ + ZDICTLIB_HIDDEN, \ + ZDICTLIB_VISIBILITY, \ + ZDICTLIB_STATIC_API, \ + ZDICT_DISABLE_DEPRECATE_WARNINGS, \ + /* zstd_errors.h */ \ + ZSTDERRORLIB_VISIBLE, \ + ZSTDERRORLIB_HIDDEN, \ + ZSTDERRORLIB_VISIBILITY + + module dictbuilder [extern_c] { + header "zdict.h" + export * + } + + module errors [extern_c] { + header "zstd_errors.h" + export * + } +} diff --git a/lib/dictBuilder/zdict.h b/lib/zdict.h similarity index 52% rename from lib/dictBuilder/zdict.h rename to lib/zdict.h index e22973173cb..599b793013b 100644 --- a/lib/dictBuilder/zdict.h +++ b/lib/zdict.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -8,34 +8,184 @@ * You may select, at your option, one of the above-listed licenses. 
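The zdict.h rewrite that follows renames ZDICTLIB_VISIBILITY to ZDICTLIB_VISIBLE (keeping the old name as an alias), adds a hidden-visibility counterpart, and continues to funnel every declaration through a single ZDICTLIB_API macro. Stripped of the compatibility shims, the export pattern reduces to roughly this sketch (not the header verbatim):

    #include <stddef.h>

    /* One macro, three build modes: exporting a DLL, importing one,
     * or ordinary static/ELF linking with default visibility. */
    #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
    #  define ZDICTLIB_API __declspec(dllexport)
    #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
    #  define ZDICTLIB_API __declspec(dllimport)
    #elif defined(__GNUC__) && (__GNUC__ >= 4)
    #  define ZDICTLIB_API __attribute__ ((visibility ("default")))
    #else
    #  define ZDICTLIB_API
    #endif

    ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode);  /* example use */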
*/ -#ifndef DICTBUILDER_H_001 -#define DICTBUILDER_H_001 - -#if defined (__cplusplus) -extern "C" { -#endif +#ifndef ZSTD_ZDICT_H +#define ZSTD_ZDICT_H /*====== Dependencies ======*/ #include <stddef.h> /* size_t */ +#if defined (__cplusplus) +extern "C" { +#endif /* ===== ZDICTLIB_API : control library symbols visibility ===== */ -#ifndef ZDICTLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default"))) +#ifndef ZDICTLIB_VISIBLE + /* Backwards compatibility with old macro name */ +# ifdef ZDICTLIB_VISIBILITY +# define ZDICTLIB_VISIBLE ZDICTLIB_VISIBILITY +# elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZDICTLIB_VISIBLE __attribute__ ((visibility ("default"))) +# else +# define ZDICTLIB_VISIBLE +# endif +#endif + +#ifndef ZDICTLIB_HIDDEN +# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZDICTLIB_HIDDEN __attribute__ ((visibility ("hidden"))) # else -# define ZDICTLIB_VISIBILITY +# define ZDICTLIB_HIDDEN # endif #endif + #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) -# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY +# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBLE #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) -# define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +# define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBLE /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else -# define ZDICTLIB_API ZDICTLIB_VISIBILITY +# define ZDICTLIB_API ZDICTLIB_VISIBLE #endif +/******************************************************************************* + * Zstd dictionary builder + * + * FAQ + * === + * Why should I use a dictionary? + * ------------------------------ + * + * Zstd can use dictionaries to improve compression ratio of small data. + * Traditionally small files don't compress well because there is very little + * repetition in a single sample, since it is small. But, if you are compressing + * many similar files, like a bunch of JSON records that share the same + * structure, you can train a dictionary ahead of time on some samples of + * these files. Then, zstd can use the dictionary to find repetitions that are + * present across samples. This can vastly improve compression ratio. + * + * When is a dictionary useful? + * ---------------------------- + * + * Dictionaries are useful when compressing many small files that are similar. + * The larger a file is, the less benefit a dictionary will have. Generally, + * we don't expect dictionary compression to be effective past 100KB. And the + * smaller a file is, the more we would expect the dictionary to help. + * + * How do I use a dictionary? + * -------------------------- + * + * Simply pass the dictionary to the zstd compressor with + * `ZSTD_CCtx_loadDictionary()`. The same dictionary must then be passed to + * the decompressor, using `ZSTD_DCtx_loadDictionary()`. There are other + * more advanced functions that allow selecting some options, see zstd.h for + * complete documentation. + * + * What is a zstd dictionary? + * -------------------------- + * + * A zstd dictionary has two pieces: Its header, and its content. The header + * contains a magic number, the dictionary ID, and entropy tables.
These + * entropy tables allow zstd to save on header costs in the compressed file, + * which really matters for small data. The content is just bytes, which are + * repeated content that is common across many samples. + * + * What is a raw content dictionary? + * --------------------------------- + * + * A raw content dictionary is just bytes. It doesn't have a zstd dictionary + * header, a dictionary ID, or entropy tables. Any buffer is a valid raw + * content dictionary. + * + * How do I train a dictionary? + * ---------------------------- + * + * Gather samples from your use case. These samples should be similar to each + * other. If you have several use cases, you could try to train one dictionary + * per use case. + * + * Pass those samples to `ZDICT_trainFromBuffer()` and that will train your + * dictionary. There are a few advanced versions of this function, but this + * is a great starting point. If you want to further tune your dictionary + * you could try `ZDICT_optimizeTrainFromBuffer_cover()`. If that is too slow + * you can try `ZDICT_optimizeTrainFromBuffer_fastCover()`. + * + * If the dictionary training function fails, that is likely because you + * either passed too few samples, or a dictionary would not be effective + * for your data. Look at the messages that the dictionary trainer printed; + * if it doesn't say too few samples, then a dictionary would not be effective. + * + * How large should my dictionary be? + * ---------------------------------- + * + * A reasonable dictionary size, the `dictBufferCapacity`, is about 100KB. + * The zstd CLI defaults to a 110KB dictionary. You likely don't need a + * dictionary larger than that. But, most use cases can get away with a + * smaller dictionary. The advanced dictionary builders can automatically + * shrink the dictionary for you, and select the smallest size that doesn't + * hurt compression ratio too much. See the `shrinkDict` parameter. + * A smaller dictionary can save memory, and potentially speed up + * compression. + * + * How many samples should I provide to the dictionary builder? + * ------------------------------------------------------------ + * + * We generally recommend passing ~100x the size of the dictionary + * in samples. A few thousand should suffice. Having too few samples + * can hurt the dictionary's effectiveness. Having more samples will + * only improve the dictionary's effectiveness. But having too many + * samples can slow down the dictionary builder. + * + * How do I determine if a dictionary will be effective? + * ----------------------------------------------------- + * + * Simply train a dictionary and try it out. You can use zstd's built-in + * benchmarking tool to test the dictionary's effectiveness. + * + * # Benchmark levels 1-3 without a dictionary + * zstd -b1e3 -r /path/to/my/files + * # Benchmark levels 1-3 with a dictionary + * zstd -b1e3 -r /path/to/my/files -D /path/to/my/dictionary + * + * When should I retrain a dictionary? + * ----------------------------------- + * + * You should retrain a dictionary when its effectiveness drops. Dictionary + * effectiveness drops as the data you are compressing changes. Generally, we do + * expect dictionaries to "decay" over time, as your data changes, but the rate + * at which they decay depends on your use case. Internally, we regularly + * retrain dictionaries, and if the new dictionary performs significantly + * better than the old dictionary, we will ship the new dictionary.
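The FAQ entries above cover training and loading in prose (the remaining entries continue below). As a concrete companion, here is a minimal sketch that trains a dictionary from in-memory samples and compresses with it, using only the public APIs the FAQ names; error handling is reduced to the essentials and the wrapper name is invented here:

    #include <stdlib.h>
    #include <zdict.h>
    #include <zstd.h>

    /* samples: all training inputs concatenated back to back;
     * sampleSizes[i]: length of the i-th sample within that buffer. */
    size_t compressWithTrainedDict(void* dst, size_t dstCap,
                                   const void* src, size_t srcSize,
                                   const void* samples,
                                   const size_t* sampleSizes, unsigned nbSamples)
    {
        size_t const dictCap = 110 * 1024;  /* ~110KB, the CLI default size */
        void* const dict = malloc(dictCap);
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t cSize = 0;
        if (dict != NULL && cctx != NULL) {
            size_t const dictSize = ZDICT_trainFromBuffer(dict, dictCap,
                                            samples, sampleSizes, nbSamples);
            /* the same dict must go to ZSTD_DCtx_loadDictionary() to decode */
            if (!ZDICT_isError(dictSize)
                && !ZSTD_isError(ZSTD_CCtx_loadDictionary(cctx, dict, dictSize)))
                cSize = ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
        }
        ZSTD_freeCCtx(cctx);  /* NULL-safe */
        free(dict);
        return cSize;  /* 0 on setup failure, else ZSTD_compress2's result */
    }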
+ * + * I have a raw content dictionary, how do I turn it into a zstd dictionary? + * ------------------------------------------------------------------------- + * + * If you have a raw content dictionary, e.g. by manually constructing it, or + * using a third-party dictionary builder, you can turn it into a zstd + * dictionary by using `ZDICT_finalizeDictionary()`. You'll also have to + * provide some samples of the data. It will add the zstd header to the + * raw content, which contains a dictionary ID and entropy tables, which + * will improve compression ratio, and allow zstd to write the dictionary ID + * into the frame, if you so choose. + * + * Do I have to use zstd's dictionary builder? + * ------------------------------------------- + * + * No! You can construct dictionary content however you please, it is just + * bytes. It will always be valid as a raw content dictionary. If you want + * a zstd dictionary, which can improve compression ratio, use + * `ZDICT_finalizeDictionary()`. + * + * What is the attack surface of a zstd dictionary? + * ------------------------------------------------ + * + * Zstd is heavily fuzz tested, including loading fuzzed dictionaries, so + * zstd should never crash, or access out-of-bounds memory no matter what + * the dictionary is. However, if an attacker can control the dictionary + * during decompression, they can cause zstd to generate arbitrary bytes, + * just like if they controlled the compressed data. + * + ******************************************************************************/ + /*! ZDICT_trainFromBuffer(): * Train a dictionary from an array of samples. @@ -61,15 +211,89 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCap const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); +typedef struct { + int compressionLevel; /**< optimize for a specific zstd compression level; 0 means default */ + unsigned notificationLevel; /**< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ + unsigned dictID; /**< force dictID value; 0 means auto mode (32-bits random value) + * NOTE: The zstd format reserves some dictionary IDs for future use. + * You may use them in private settings, but be warned that they + * may be used by zstd in a public dictionary registry in the future. + * These dictionary IDs are: + * - low range : <= 32767 + * - high range : >= (2^31) + */ +} ZDICT_params_t; + +/*! ZDICT_finalizeDictionary(): + * Given a custom content as a basis for dictionary, and a set of samples, + * finalize dictionary by adding headers and statistics according to the zstd + * dictionary format. + * + * Samples must be stored concatenated in a flat buffer `samplesBuffer`, + * supplied with an array of sizes `samplesSizes`, providing the size of each + * sample in order. The samples are used to construct the statistics, so they + * should be representative of what you will compress with this dictionary. + * + * The compression level can be set in `parameters`. You should pass the + * compression level you expect to use in production. The statistics for each + * compression level differ, so tuning the dictionary for the compression level + * can help quite a bit. + * + * You can set an explicit dictionary ID in `parameters`, or allow us to pick + * a random dictionary ID for you, but we can't guarantee no collisions. + * + * The dstDictBuffer and the dictContent may overlap, and the content will be + * appended to the end of the header. 
If the header + the content doesn't fit in + * maxDictSize the beginning of the content is truncated to make room, since it + * is presumed that the most profitable content is at the end of the dictionary, + * since that is the cheapest to reference. + * + * `maxDictSize` must be >= max(dictContentSize, ZDICT_DICTSIZE_MIN). + * + * @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`), + * or an error code, which can be tested by ZDICT_isError(). + * Note: ZDICT_finalizeDictionary() will push notifications into stderr if + * instructed to, using notificationLevel>0. + * NOTE: This function currently may fail in several edge cases including: + * * Not enough samples + * * Samples are uncompressible + * * Samples are all exactly the same + */ +ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dstDictBuffer, size_t maxDictSize, + const void* dictContent, size_t dictContentSize, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, + ZDICT_params_t parameters); + /*====== Helper functions ======*/ ZDICTLIB_API unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize); /**< extracts dictID; @return zero if error (not a valid dictionary) */ +ZDICTLIB_API size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize); /* returns dict header size; returns a ZSTD error code on failure */ ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode); ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode); +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_ZDICT_H */ + +#if defined(ZDICT_STATIC_LINKING_ONLY) && !defined(ZSTD_ZDICT_H_STATIC) +#define ZSTD_ZDICT_H_STATIC +#if defined (__cplusplus) +extern "C" { +#endif -#ifdef ZDICT_STATIC_LINKING_ONLY +/* This can be overridden externally to hide static symbols. */ +#ifndef ZDICTLIB_STATIC_API +# if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) +# define ZDICTLIB_STATIC_API __declspec(dllexport) ZDICTLIB_VISIBLE +# elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) +# define ZDICTLIB_STATIC_API __declspec(dllimport) ZDICTLIB_VISIBLE +# else +# define ZDICTLIB_STATIC_API ZDICTLIB_VISIBLE +# endif +#endif /* ==================================================================================== * The definitions in this section are considered experimental. @@ -78,11 +302,9 @@ ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode); * Use them only in association with static linking. * ==================================================================================== */ -typedef struct { - int compressionLevel; /* optimize for a specific zstd compression level; 0 means default */ - unsigned notificationLevel; /* Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ - unsigned dictID; /* force dictID value; 0 means auto mode (32-bits random value) */ -} ZDICT_params_t; +#define ZDICT_DICTSIZE_MIN 256 +/* Deprecated: Remove in v1.6.0 */ +#define ZDICT_CONTENTSIZE_MIN 128 /*! ZDICT_cover_params_t: * k and d are the only required parameters. 
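Since ZDICT_finalizeDictionary() is the one function the FAQ points raw-content users at, a short usage sketch may help. The sample arrays are caller-supplied and the wrapper name is invented here; only the documented API above is assumed:

    #include <string.h>
    #include <zdict.h>

    /* Wrap caller-provided raw content into a real zstd dictionary,
     * following the contract documented above (the content is placed
     * after the generated header, truncated from the front if needed). */
    size_t finalizeRawDict(void* dstDict, size_t dstCap,
                           const void* rawContent, size_t rawSize,
                           const void* samples, const size_t* sampleSizes,
                           unsigned nbSamples, int level)
    {
        ZDICT_params_t params;
        memset(&params, 0, sizeof(params));
        params.compressionLevel = level;  /* match your production level */
        params.dictID = 0;                /* 0 = let zstd pick a random ID */
        return ZDICT_finalizeDictionary(dstDict, dstCap, rawContent, rawSize,
                                        samples, sampleSizes, nbSamples, params);
    }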
@@ -94,6 +316,8 @@ typedef struct { unsigned steps; /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */ + unsigned shrinkDict; /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ + unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */ ZDICT_params_t zParams; } ZDICT_cover_params_t; @@ -105,6 +329,9 @@ typedef struct { unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */ unsigned accel; /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default(1) */ + unsigned shrinkDict; /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ + unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */ + ZDICT_params_t zParams; } ZDICT_fastCover_params_t; @@ -122,7 +349,7 @@ typedef struct { * In general, it's recommended to provide a few thousands samples, though this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( +ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_cover( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t parameters); @@ -144,7 +371,7 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( * See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread. */ -ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( +ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_cover( void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_cover_params_t* parameters); @@ -165,7 +392,7 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( * In general, it's recommended to provide a few thousands samples, though this can vary a lot. 
* It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer, +ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t parameters); @@ -188,33 +415,11 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer, * See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread. */ -ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, +ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t* parameters); -/*! ZDICT_finalizeDictionary(): - * Given a custom content as a basis for dictionary, and a set of samples, - * finalize dictionary by adding headers and statistics. - * - * Samples must be stored concatenated in a flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each sample in order. - * - * dictContentSize must be >= ZDICT_CONTENTSIZE_MIN bytes. - * maxDictSize must be >= dictContentSize, and must be >= ZDICT_DICTSIZE_MIN bytes. - * - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`), - * or an error code, which can be tested by ZDICT_isError(). - * Note: ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0. - * Note 2: dictBuffer and dictContent can overlap - */ -#define ZDICT_CONTENTSIZE_MIN 128 -#define ZDICT_DICTSIZE_MIN 256 -ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity, - const void* dictContent, size_t dictContentSize, - const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, - ZDICT_params_t parameters); - typedef struct { unsigned selectivityLevel; /* 0 means default; larger => select more => larger dictionary */ ZDICT_params_t zParams; @@ -235,43 +440,42 @@ typedef struct { * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. * Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0. */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy( - void *dictBuffer, size_t dictBufferCapacity, - const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, +ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_legacy( + void* dictBuffer, size_t dictBufferCapacity, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t parameters); + /* Deprecation warnings */ /* It is generally possible to disable deprecation warnings from compiler, for example with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual. 
Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */ #ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS -# define ZDICT_DEPRECATED(message) ZDICTLIB_API /* disable deprecation warnings */ +# define ZDICT_DEPRECATED(message) /* disable deprecation warnings */ #else # define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) # if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ -# define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API -# elif (ZDICT_GCC_VERSION >= 405) || defined(__clang__) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message))) +# define ZDICT_DEPRECATED(message) [[deprecated(message)]] +# elif defined(__clang__) || (ZDICT_GCC_VERSION >= 405) +# define ZDICT_DEPRECATED(message) __attribute__((deprecated(message))) # elif (ZDICT_GCC_VERSION >= 301) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated)) +# define ZDICT_DEPRECATED(message) __attribute__((deprecated)) # elif defined(_MSC_VER) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message)) +# define ZDICT_DEPRECATED(message) __declspec(deprecated(message)) # else # pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler") -# define ZDICT_DEPRECATED(message) ZDICTLIB_API +# define ZDICT_DEPRECATED(message) # endif #endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */ ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead") +ZDICTLIB_STATIC_API size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); - -#endif /* ZDICT_STATIC_LINKING_ONLY */ - #if defined (__cplusplus) } #endif -#endif /* DICTBUILDER_H_001 */ +#endif /* ZSTD_ZDICT_H_STATIC */ diff --git a/lib/zstd.h b/lib/zstd.h index 0cc72b4e4c1..415474d015e 100644 --- a/lib/zstd.h +++ b/lib/zstd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -7,33 +7,73 @@ * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ -#if defined (__cplusplus) -extern "C" { -#endif #ifndef ZSTD_H_235446 #define ZSTD_H_235446 -/* ====== Dependency ======*/ + +/* ====== Dependencies ======*/ #include <stddef.h> /* size_t */ +#include "zstd_errors.h" /* list of errors */ +#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY) +#include <limits.h> /* INT_MAX */ +#endif /* ZSTD_STATIC_LINKING_ONLY */ + +#if defined (__cplusplus) +extern "C" { +#endif /* ===== ZSTDLIB_API : control library symbols visibility ===== */ -#ifndef ZSTDLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default"))) +#ifndef ZSTDLIB_VISIBLE + /* Backwards compatibility with old macro name */ +# ifdef ZSTDLIB_VISIBILITY +# define ZSTDLIB_VISIBLE ZSTDLIB_VISIBILITY +# elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZSTDLIB_VISIBLE __attribute__ ((visibility ("default"))) # else -# define ZSTDLIB_VISIBILITY +# define ZSTDLIB_VISIBLE # endif #endif + +#ifndef ZSTDLIB_HIDDEN +# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden"))) +# else +# define ZSTDLIB_HIDDEN +# endif +#endif + #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) -# define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY +# define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBLE #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) -# define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +# define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBLE /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else -# define ZSTDLIB_API ZSTDLIB_VISIBILITY +# define ZSTDLIB_API ZSTDLIB_VISIBLE #endif +/* Deprecation warnings : + * Should these warnings be a problem, it is generally possible to disable them, + * typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual. + * Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS. + */ +#ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS +# define ZSTD_DEPRECATED(message) /* disable deprecation warnings */ +#else +# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ +# define ZSTD_DEPRECATED(message) [[deprecated(message)]] +# elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__) || defined(__IAR_SYSTEMS_ICC__) +# define ZSTD_DEPRECATED(message) __attribute__((deprecated(message))) +# elif defined(__GNUC__) && (__GNUC__ >= 3) +# define ZSTD_DEPRECATED(message) __attribute__((deprecated)) +# elif defined(_MSC_VER) +# define ZSTD_DEPRECATED(message) __declspec(deprecated(message)) +# else +# pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler") +# define ZSTD_DEPRECATED(message) +# endif +#endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */ + /******************************************************************************* Introduction @@ -70,17 +110,22 @@ extern "C" { /*------ Version ------*/ #define ZSTD_VERSION_MAJOR 1 -#define ZSTD_VERSION_MINOR 4 -#define ZSTD_VERSION_RELEASE 0 - +#define ZSTD_VERSION_MINOR 5 +#define ZSTD_VERSION_RELEASE 8 #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) -ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< to check runtime library version */ + +/*!
ZSTD_versionNumber() : + * Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE). */ +ZSTDLIB_API unsigned ZSTD_versionNumber(void); #define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE #define ZSTD_QUOTE(str) #str #define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str) #define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION) -ZSTDLIB_API const char* ZSTD_versionString(void); /* requires v1.3.0+ */ + +/*! ZSTD_versionString() : + * Return runtime library version, like "1.4.5". Requires v1.3.0+. */ +ZSTDLIB_API const char* ZSTD_versionString(void); /* ************************************* * Default constant @@ -103,13 +148,13 @@ ZSTDLIB_API const char* ZSTD_versionString(void); /* requires v1.3.0+ */ #define ZSTD_BLOCKSIZE_MAX (1<<ZSTD_BLOCKSIZELOG_MAX) /*! ZSTD_compress() : * Compresses `src` content as a single zstd compressed frame into already allocated `dst`. - * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`. + * NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have + * enough space to successfully compress the data. * @return : compressed size written into `dst` (<= `dstCapacity), * or an error code if it fails (which can be tested using ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity, @@ -117,65 +162,106 @@ ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity, int compressionLevel); /*! ZSTD_decompress() : - * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. - * `dstCapacity` is an upper bound of originalSize to regenerate. - * If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data. - * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), - * or an errorCode if it fails (which can be tested using ZSTD_isError()). */ + * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. + * Multiple compressed frames can be decompressed at once with this method. + * The result will be the concatenation of all decompressed frames, back to back. + * `dstCapacity` is an upper bound of originalSize to regenerate. + * First frame's decompressed size can be extracted using ZSTD_getFrameContentSize(). + * If maximum upper bound isn't known, prefer using streaming mode to decompress data. + * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), + * or an errorCode if it fails (which can be tested using ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity, const void* src, size_t compressedSize); -/*! ZSTD_getFrameContentSize() : requires v1.3.0+ - * `src` should point to the start of a ZSTD encoded frame. - * `srcSize` must be at least as large as the frame header. - * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough. - * @return : - decompressed size of `src` frame content, if known - * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined - * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) - * note 1 : a 0 return value means the frame is valid but "empty". - * note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode. - * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size. - * In which case, it's necessary to use streaming mode to decompress data. - * Optionally, application can rely on some implicit limit, - * as ZSTD_decompress() only needs an upper bound of decompressed size. - * (For example, data could be necessarily cut into blocks <= 16 KB).
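To make the one-shot contract concrete, here is a hedged sketch of a complete round trip; roundtrip() is a hypothetical helper that uses ZSTD_getFrameContentSize() (whose contract is detailed just below), and every failure path simply returns NULL:

    #include <stdlib.h>
    #include <zstd.h>

    /* Sketch: compress then decompress one buffer with the simple API.
     * On success, *outSize receives the regenerated size. */
    void* roundtrip(const void* src, size_t srcSize, size_t* outSize)
    {
        size_t const cBound = ZSTD_compressBound(srcSize);
        void* rBuf = NULL;
        void* cBuf;
        if (ZSTD_isError(cBound)) return NULL;   /* srcSize too large */
        cBuf = malloc(cBound);
        if (cBuf == NULL) return NULL;
        {   size_t const cSize = ZSTD_compress(cBuf, cBound, src, srcSize, ZSTD_CLEVEL_DEFAULT);
            if (!ZSTD_isError(cSize)) {
                /* single-pass compression always writes the content size into the header */
                unsigned long long const rSize = ZSTD_getFrameContentSize(cBuf, cSize);
                if (rSize != ZSTD_CONTENTSIZE_UNKNOWN && rSize != ZSTD_CONTENTSIZE_ERROR) {
                    rBuf = malloc((size_t)rSize + 1);   /* +1 keeps an empty frame allocatable */
                    if (rBuf != NULL) {
                        size_t const dSize = ZSTD_decompress(rBuf, (size_t)rSize, cBuf, cSize);
                        if (ZSTD_isError(dSize)) { free(rBuf); rBuf = NULL; }
                        else *outSize = dSize;
                    }
                }
            }
        }
        free(cBuf);
        return rBuf;
    }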
- * note 3 : decompressed size is always present when compression is completed using single-pass functions, - * such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict(). - * note 4 : decompressed size can be very large (64-bits value), - * potentially larger than what local system can handle as a single memory segment. - * In which case, it's necessary to use streaming mode to decompress data. - * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified. - * Always ensure return value fits within application's authorized limits. - * Each application can set its own limits. - * note 6 : This function replaces ZSTD_getDecompressedSize() */ + +/*====== Decompression helper functions ======*/ + +/*! @brief Returns the decompressed content size stored in a ZSTD frame header. + * + * @since v1.3.0 + * + * @param src Pointer to the beginning of a ZSTD encoded frame. + * @param srcSize Size of the buffer pointed to by @p src. It must be at least as large as the frame header. + * Any value greater than or equal to `ZSTD_frameHeaderSize_max` is sufficient. + * @return The decompressed size in bytes when the value is available in the frame header. + * @retval ZSTD_CONTENTSIZE_UNKNOWN The frame does not encode a decompressed size (typical for streaming). + * @retval ZSTD_CONTENTSIZE_ERROR An error occurred (e.g. invalid magic number, @p srcSize too small). + * + * @note The return value is not compatible with `ZSTD_isError()`. + * @note A return value of 0 denotes a valid but empty frame. Skippable frames also report 0. + * @note The decompressed size field is optional. When it is absent (the function returns @c ZSTD_CONTENTSIZE_UNKNOWN), + * the caller must rely on streaming decompression or an application-specific upper bound. `ZSTD_decompress()` + * only requires an upper bound, so applications may enforce their own block limits (for example 16 KB). + * @note The decompressed size is guaranteed to be present when compression was performed with single-pass APIs such as + * `ZSTD_compress()`, `ZSTD_compressCCtx()`, `ZSTD_compress_usingDict()`, or `ZSTD_compress_usingCDict()`. + * @note The decompressed size is a 64-bit value and may exceed the addressable space of the system. Use streaming + * decompression when the value is too large to materialize in contiguous memory. + * @warning When processing untrusted input, validate the returned size against the application's limits; attackers may + * forge an arbitrarily large value. + * @note This function replaces `ZSTD_getDecompressedSize()`. + */ #define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1) #define ZSTD_CONTENTSIZE_ERROR (0ULL - 2) ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize); -/*! ZSTD_getDecompressedSize() : - * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize(). +/*! ZSTD_getDecompressedSize() (obsolete): + * This function is now obsolete, in favor of ZSTD_getFrameContentSize(). * Both functions work the same way, but ZSTD_getDecompressedSize() blends * "empty", "unknown" and "error" results to the same return value (0), * while ZSTD_getFrameContentSize() gives them separate return values. * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */ +ZSTD_DEPRECATED("Replaced by ZSTD_getFrameContentSize") ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize); -/*! ZSTD_findFrameCompressedSize() : +/*! 
ZSTD_findFrameCompressedSize() : Requires v1.4.0+ * `src` should point to the start of a ZSTD frame or skippable frame. * `srcSize` must be >= first frame size * @return : the compressed size of the first frame starting at `src`, * suitable to pass as `srcSize` to `ZSTD_decompress` or similar, - * or an error code if input is invalid */ + * or an error code if input is invalid + * Note 1: this method is called _find*() because it's not enough to read the header, + * it may have to scan through the frame's content, to reach its end. + * Note 2: this method also works with Skippable Frames. In which case, + * it returns the size of the complete skippable frame, + * which is always equal to its content size + 8 bytes for headers. */ ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize); -/*====== Helper functions ======*/ -#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */ -ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */ -ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */ -ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */ -ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed */ -ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */ +/*====== Compression helper functions ======*/ + +/*! ZSTD_compressBound() : + * maximum compressed size in worst case single-pass scenario. + * When invoking `ZSTD_compress()`, or any other one-pass compression function, + * it's recommended to provide @dstCapacity >= ZSTD_compressBound(srcSize) + * as it eliminates one potential failure scenario, + * aka not enough room in dst buffer to write the compressed frame. + * Note : ZSTD_compressBound() itself can fail, if @srcSize >= ZSTD_MAX_INPUT_SIZE . + * In which case, ZSTD_compressBound() will return an error code + * which can be tested using ZSTD_isError(). + * + * ZSTD_COMPRESSBOUND() : + * same as ZSTD_compressBound(), but as a macro. + * It can be used to produce constants, which can be useful for static allocation, + * for example to size a static array on stack. + * Will produce constant value 0 if srcSize is too large. + */ +#define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00ULL : 0xFF00FF00U) +#define ZSTD_COMPRESSBOUND(srcSize) (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */ +ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */ + + +/*====== Error helper functions ======*/ +/* ZSTD_isError() : + * Most ZSTD_* functions returning a size_t value can be tested for error, + * using ZSTD_isError(). 
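A small sketch combining both helper families just described: the macro form sizes a static buffer at compile time, and the error helpers classify a failing call (MSG_MAX and compress_msg are hypothetical):

    #include <stdio.h>
    #include <zstd.h>

    #define MSG_MAX 512
    static char compressed[ZSTD_COMPRESSBOUND(MSG_MAX)];   /* compile-time constant bound */

    size_t compress_msg(const void* msg, size_t msgSize)   /* requires msgSize <= MSG_MAX */
    {
        size_t const r = ZSTD_compress(compressed, sizeof(compressed), msg, msgSize, 3);
        if (ZSTD_isError(r))
            fprintf(stderr, "zstd error: %s (code %d)\n",
                    ZSTD_getErrorName(r), (int)ZSTD_getErrorCode(r));
        return r;
    }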
+ * @return 1 if error, 0 otherwise + */ +ZSTDLIB_API unsigned ZSTD_isError(size_t result); /*!< tells if a `size_t` function result is an error code */ +ZSTDLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); /* convert a result into an error code, which can be compared to error enum list */ +ZSTDLIB_API const char* ZSTD_getErrorName(size_t result); /*!< provides readable string from a function result */ +ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */ +ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */ +ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */ /*************************************** @@ -183,17 +269,26 @@ ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compres ***************************************/ /*= Compression context * When compressing many times, - * it is recommended to allocate a context just once, and re-use it for each successive compression operation. - * This will make workload friendlier for system's memory. - * Use one context per thread for parallel execution in multi-threaded environments. */ + * it is recommended to allocate a compression context just once, + * and reuse it for each successive compression operation. + * This will make the workload friendlier to the system's memory. + * Note : reusing a context is just a speed / resource optimization. + * It doesn't change the compression ratio, which remains identical. + * Note 2: For parallel execution in multi-threaded environments, + * use one separate context per thread. + */ typedef struct ZSTD_CCtx_s ZSTD_CCtx; ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void); -ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); +ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* compatible with NULL pointer */ /*! ZSTD_compressCCtx() : - * Same as ZSTD_compress(), using an explicit ZSTD_CCtx - * The function will compress at requested compression level, - * ignoring any other parameter */ + * Same as ZSTD_compress(), using an explicit ZSTD_CCtx. + * Important : in order to mirror `ZSTD_compress()` behavior, + * this function compresses at the requested compression level, + * __ignoring any other advanced parameter__. + * If any advanced parameters were set using the advanced API, + * they will all be reset. Only @compressionLevel remains. + */ ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, @@ -202,38 +297,38 @@ ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, /*= Decompression context * When decompressing many times, * it is recommended to allocate a context only once, - * and re-use it for each successive compression operation. + * and reuse it for each successive decompression operation. * This will make workload friendlier for system's memory. * Use one context per thread for parallel execution. */ typedef struct ZSTD_DCtx_s ZSTD_DCtx; ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void); -ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); +ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); /* accept NULL pointer */ /*! ZSTD_decompressDCtx() : * Same as ZSTD_decompress(), * requires an allocated ZSTD_DCtx. - * Compatible with sticky parameters. + * Compatible with sticky parameters (see below).
*/ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); -/*************************************** -* Advanced compression API -***************************************/ +/********************************************* +* Advanced compression API (Requires v1.4.0+) +**********************************************/ /* API design : * Parameters are pushed one by one into an existing context, * using ZSTD_CCtx_set*() functions. * Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame. * "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` ! - * They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx() + * __They do not apply to one-shot variants such as ZSTD_compressCCtx()__ . * * It's possible to reset all parameters to "default" using ZSTD_CCtx_reset(). * - * This API supercedes all other "advanced" API entry points in the experimental section. - * In the future, we expect to remove from experimental API entry points which are redundant with this API. + * This API supersedes all other "advanced" API entry points in the experimental section. + * In the future, we expect to remove API entry points from experimental which are redundant with this API. */ @@ -251,23 +346,33 @@ typedef enum { ZSTD_fast=1, Only the order (from fast to strong) is guaranteed */ } ZSTD_strategy; - typedef enum { /* compression parameters * Note: When compressing with a ZSTD_CDict these parameters are superseded - * by the parameters used to construct the ZSTD_CDict. See ZSTD_CCtx_refCDict() - * for more info (superseded-by-cdict). */ - ZSTD_c_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table + * by the parameters used to construct the ZSTD_CDict. + * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */ + ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table. + * Note that exact compression parameters are dynamically determined, + * depending on both compression level and srcSize (when known). * Default level is ZSTD_CLEVEL_DEFAULT==3. * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT. * Note 1 : it's possible to pass a negative compression level. - * Note 2 : setting a level sets all default values of other compression parameters */ + * Note 2 : setting a level does not automatically set all other compression parameters + * to default. Setting this will however eventually dynamically impact the compression + * parameters which have not been manually set. The manually set + * ones will 'stick'. */ + /* Advanced compression parameters : + * It's possible to pin down compression parameters to some specific values. + * In which case, these values are no longer dynamically selected by the compressor */ ZSTD_c_windowLog=101, /* Maximum allowed back-reference distance, expressed as power of 2. + * This will set a memory budget for streaming decompression, + * with larger values requiring more memory + * and typically compressing more. * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX. * Special: value 0 means "use default windowLog". * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT - * requires explicitly allowing such window size at decompression stage if using streaming. */ + * requires explicitly allowing such size at streaming decompression stage. 
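A sketch of the push-parameters-then-compress flow just described (parameter values are arbitrary, and the setters' return codes are elided for brevity):

    #include <zstd.h>

    /* Sketch: pushed parameters stick to the cctx and apply to this and
     * every subsequent frame, until ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters). */
    size_t compress_advanced(ZSTD_CCtx* cctx, void* dst, size_t dstCap,
                             const void* src, size_t srcSize)
    {
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 25);   /* pinned: no longer chosen dynamically */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
        return ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
    }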
*/ ZSTD_c_hashLog=102, /* Size of the initial probe table, as a power of 2. * Resulting memory usage is (1 << (hashLog+2)). * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX. @@ -278,13 +383,13 @@ typedef enum { * Resulting memory usage is (1 << (chainLog+2)). * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX. * Larger tables result in better and slower compression. - * This parameter is useless when using "fast" strategy. + * This parameter is useless for "fast" strategy. * It's still useful when using "dfast" strategy, * in which case it defines a secondary probe table. * Special: value 0 means "use default chainLog". */ ZSTD_c_searchLog=104, /* Number of search attempts, as a power of 2. * More attempts result in better and slower compression. - * This parameter is useless when using "fast" and "dFast" strategies. + * This parameter is useless for "fast" and "dFast" strategies. * Special: value 0 means "use default searchLog". */ ZSTD_c_minMatch=105, /* Minimum size of searched matches. * Note that Zstandard can still find matches of smaller size, @@ -307,13 +412,27 @@ typedef enum { * resulting in stronger and slower compression. * Special: value 0 means "use default strategy". */ + ZSTD_c_targetCBlockSize=130, /* v1.5.6+ + * Attempts to fit compressed block size into approximately targetCBlockSize. + * Bound by ZSTD_TARGETCBLOCKSIZE_MIN and ZSTD_TARGETCBLOCKSIZE_MAX. + * Note that it's not a guarantee, just a convergence target (default:0). + * No target when targetCBlockSize == 0. + * This is helpful in low bandwidth streaming environments to improve end-to-end latency, + * when a client can make use of partial documents (a prominent example being Chrome). + * Note: this parameter is stable since v1.5.6. + * It was present as an experimental parameter in earlier versions, + * but it's not recommended to use it with earlier library versions + * due to massive performance regressions. + */ /* LDM mode parameters */ ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching. * This parameter is designed to improve compression ratio * for large inputs, by finding large matches at long distance. * It increases memory usage and window size. * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB - * except when expressly set to a different value. */ + * except when expressly set to a different value. + * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and + * compression strategy >= ZSTD_btopt (== compression level 16+) */ ZSTD_c_ldmHashLog=161, /* Size of the table for long distance matching, as a power of 2. * Larger values increase memory usage and compression ratio, * but decrease compression speed. @@ -339,26 +458,30 @@ typedef enum { ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1) * Content size must be known at the beginning of compression. * This is automatically the case when using ZSTD_compress2(), - * For streaming variants, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */ + * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */ ZSTD_c_checksumFlag=201, /* A 32-bit checksum of content is written at end of frame (default:0) */ ZSTD_c_dictIDFlag=202, /* When applicable, dictionary's ID is written into frame header (default:1) */ /* multi-threading parameters */ - /* These parameters are only useful if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
- * They return an error otherwise. */ + /* These parameters are only active if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD). + * Otherwise, trying to set them to any value other than the default (0) will have no effect and return an error. + * In a situation where it's unknown if the linked library supports multi-threading or not, + * setting ZSTD_c_nbWorkers to any value >= 1 and consulting the return value provides a quick way to check this property. + */ ZSTD_c_nbWorkers=400, /* Select how many threads will be spawned to compress in parallel. - * When nbWorkers >= 1, triggers asynchronous mode when used with ZSTD_compressStream*() : + * When nbWorkers >= 1, triggers asynchronous mode when invoking ZSTD_compressStream*() : * ZSTD_compressStream*() consumes input and flushes output if possible, but immediately gives back control to caller, - * while compression work is performed in parallel, within worker threads. + * while compression is performed in parallel, within worker thread(s). * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end : * in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call). * More workers improve speed, but also increase memory usage. - * Default value is `0`, aka "single-threaded mode" : no worker is spawned, compression is performed inside Caller's thread, all invocations are blocking */ + * Default value is `0`, aka "single-threaded mode" : no worker is spawned, + * compression is performed inside Caller's thread, and all invocations are blocking */ ZSTD_c_jobSize=401, /* Size of a compression job. This value is enforced only when nbWorkers >= 1. * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads. * 0 means default, which is dynamically determined based on compression parameters. - * Job size must be a minimum of overlap size, or 1 MB, whichever is largest. - * The minimum size is automatically and transparently enforced */ + * Job size must be a minimum of overlap size, or ZSTDMT_JOBSIZE_MIN (= 512 KB), whichever is larger. + * The minimum size is automatically and transparently enforced. */ ZSTD_c_overlapLog=402, /* Control the overlap size, as a fraction of window size. * The overlap size is an amount of data reloaded from previous job at the beginning of a new job. * It helps preserve compression ratio, while each job is compressed in parallel. @@ -380,6 +503,18 @@ typedef enum { * ZSTD_c_forceMaxWindow * ZSTD_c_forceAttachDict * ZSTD_c_literalCompressionMode + * ZSTD_c_srcSizeHint + * ZSTD_c_enableDedicatedDictSearch + * ZSTD_c_stableInBuffer + * ZSTD_c_stableOutBuffer + * ZSTD_c_blockDelimiters + * ZSTD_c_validateSequences + * ZSTD_c_blockSplitterLevel + * ZSTD_c_splitAfterSequences + * ZSTD_c_useRowMatchFinder + * ZSTD_c_prefetchCDictTables + * ZSTD_c_enableSeqProducerFallback + * ZSTD_c_maxBlockSize * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. * note : never ever use experimentalParam? names directly; * also, the enum values themselves are unstable and can still change.
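The multi-threading probe suggested above fits in a few lines; this is a sketch, not an official helper:

    #include <zstd.h>

    /* Sketch: ZSTD_c_nbWorkers only accepts nonzero values when the
     * linked library was built with ZSTD_MULTITHREAD. */
    int zstd_supports_multithreading(void)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t r;
        if (cctx == NULL) return 0;
        r = ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 2);
        ZSTD_freeCCtx(cctx);
        return !ZSTD_isError(r);
    }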
@@ -389,6 +524,21 @@ typedef enum { ZSTD_c_experimentalParam3=1000, ZSTD_c_experimentalParam4=1001, ZSTD_c_experimentalParam5=1002, + /* was ZSTD_c_experimentalParam6=1003; is now ZSTD_c_targetCBlockSize */ + ZSTD_c_experimentalParam7=1004, + ZSTD_c_experimentalParam8=1005, + ZSTD_c_experimentalParam9=1006, + ZSTD_c_experimentalParam10=1007, + ZSTD_c_experimentalParam11=1008, + ZSTD_c_experimentalParam12=1009, + ZSTD_c_experimentalParam13=1010, + ZSTD_c_experimentalParam14=1011, + ZSTD_c_experimentalParam15=1012, + ZSTD_c_experimentalParam16=1013, + ZSTD_c_experimentalParam17=1014, + ZSTD_c_experimentalParam18=1015, + ZSTD_c_experimentalParam19=1016, + ZSTD_c_experimentalParam20=1017 } ZSTD_cParameter; typedef struct { @@ -451,7 +601,7 @@ typedef enum { * They will be used to compress next frame. * Resetting session never fails. * - The parameters : changes all parameters back to "default". - * This removes any reference to any dictionary too. + * This also removes any reference to any dictionary or external sequence producer. * Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing) * otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError()) * - Both : similar to resetting the session, followed by resetting parameters. @@ -460,11 +610,13 @@ ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset); /*! ZSTD_compress2() : * Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API. + * (note that this entry point doesn't even expose a compression level parameter). * ZSTD_compress2() always starts a new frame. * Should cctx hold data from a previously unfinished frame, everything about it is forgotten. * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*() * - The function is always blocking, returns when compression is completed. - * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`. + * NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have + * enough space to successfully compress the data, though it is possible it fails for other reasons. * @return : compressed size written into `dst` (<= `dstCapacity), * or an error code if it fails (which can be tested using ZSTD_isError()). */ @@ -473,9 +625,9 @@ ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx, const void* src, size_t srcSize); -/*************************************** -* Advanced decompression API -***************************************/ +/*********************************************** +* Advanced decompression API (Requires v1.4.0+) +************************************************/ /* The advanced API pushes parameters one by one into an existing DCtx context. * Parameters are sticky, and remain valid for all following frames @@ -497,11 +649,21 @@ typedef enum { /* note : additional experimental parameters are also available * within the experimental section of the API. * At the time of this writing, they include : - * ZSTD_c_format + * ZSTD_d_format + * ZSTD_d_stableOutBuffer + * ZSTD_d_forceIgnoreChecksum + * ZSTD_d_refMultipleDDicts + * ZSTD_d_disableHuffmanAssembly + * ZSTD_d_maxBlockSize * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. * note : never ever use experimentalParam? 
names directly */ - ZSTD_d_experimentalParam1=1000 + ZSTD_d_experimentalParam1=1000, + ZSTD_d_experimentalParam2=1001, + ZSTD_d_experimentalParam3=1002, + ZSTD_d_experimentalParam4=1003, + ZSTD_d_experimentalParam5=1004, + ZSTD_d_experimentalParam6=1005 } ZSTD_dParameter; @@ -556,14 +718,14 @@ typedef struct ZSTD_outBuffer_s { * A ZSTD_CStream object is required to track streaming operation. * Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources. * ZSTD_CStream objects can be reused multiple times on consecutive compression operations. -* It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory. +* It is recommended to reuse ZSTD_CStream since it will play nicer with system's memory, by reusing already allocated memory. * * For parallel execution, use one separate ZSTD_CStream per thread. * * note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing. * * Parameters are sticky : when starting a new compression on the same context, -* it will re-use the same sticky parameters as previous compression session. +* it will reuse the same sticky parameters as previous compression session. * When in doubt, it's recommended to fully initialize the context before usage. * Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(), * ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to @@ -615,7 +777,7 @@ typedef ZSTD_CCtx ZSTD_CStream; /**< CCtx and CStream are now effectively same /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */ /*===== ZSTD_CStream management functions =====*/ ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void); -ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs); +ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs); /* accept NULL pointer */ /*===== Streaming compression functions =====*/ typedef enum { @@ -631,14 +793,15 @@ typedef enum { : note : multithreaded compression will block to flush as much output as possible. */ } ZSTD_EndDirective; -/*! ZSTD_compressStream2() : +/*! ZSTD_compressStream2() : Requires v1.4.0+ * Behaves about the same as ZSTD_compressStream, with additional control on end directive. * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*() * - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode) * - output->pos must be <= dstCapacity, input->pos must be <= srcSize * - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit. + * - endOp must be a valid directive * - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller. - * - When nbWorkers>=1, function is non-blocking : it just acquires a copy of input, and distributes jobs to internal worker threads, flush whatever is available, + * - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flushes to output whatever is available, * and then immediately returns, just indicating that there is some data remaining to be flushed. * The function nonetheless guarantees forward progress : it will return only after it reads or writes at least 1 byte. * - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always a blocking call.
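A hedged sketch of the driving loop: issue ZSTD_e_end and call again until the function returns 0, meaning the frame is complete and fully flushed (fout and the fixed-size buffer are illustrative; ZSTD_CStreamOutSize() is the recommended buffer size):

    #include <stdio.h>
    #include <zstd.h>

    static size_t stream_out(ZSTD_CCtx* cctx, FILE* fout, const void* src, size_t srcSize)
    {
        ZSTD_inBuffer in = { src, srcSize, 0 };
        size_t remaining;
        do {
            char obuf[1024];
            ZSTD_outBuffer out = { obuf, sizeof(obuf), 0 };
            remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
            if (ZSTD_isError(remaining)) return remaining;   /* cctx may need a reset after this */
            fwrite(obuf, 1, out.pos, fout);
        } while (remaining != 0);   /* > 0 : data still buffered internally */
        return 0;
    }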
@@ -651,6 +814,11 @@ typedef enum { * only ZSTD_e_end or ZSTD_e_flush operations are allowed. * Before starting a new compression job, or changing compression parameters, * it is required to fully flush internal buffers. + * - note: if an operation ends with an error, it may leave @cctx in an undefined state. + * Therefore, it's UB to invoke ZSTD_compressStream2() or ZSTD_compressStream() on such a state. + * In order to be re-employed after an error, a state must be reset, + * which can be done explicitly (ZSTD_CCtx_reset()), + * or is sometimes implied by methods starting a new compression job (ZSTD_initCStream(), ZSTD_compressCCtx()) */ ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, ZSTD_outBuffer* output, @@ -676,11 +844,9 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output /* ***************************************************************************** - * This following is a legacy streaming API. + * The following is a legacy streaming API, available since v1.0+. * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2(). * It is redundant, but remains fully supported. - * Advanced parameters and dictionary compression can only be used through the - * new API. ******************************************************************************/ /*! @@ -689,6 +855,9 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); + * + * Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API + * to compress with a dictionary. */ ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel); /*! @@ -709,7 +878,7 @@ ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output); * * A ZSTD_DStream object is required to track streaming operations. * Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources. -* ZSTD_DStream objects can be re-used multiple times. +* ZSTD_DStream objects can be re-employed multiple times. * * Use ZSTD_initDStream() to start a new decompression operation. * @return : recommended first input size @@ -719,33 +888,63 @@ ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output); * The function will update both `pos` fields. * If `input.pos < input.size`, some input has not been consumed. * It's up to the caller to present again remaining data. +* * The function tries to flush all data decoded immediately, respecting output buffer size. * If `output.pos < output.size`, decoder has flushed everything it could. -* But if `output.pos == output.size`, there might be some data left within internal buffers., +* +* However, when `output.pos == output.size`, it's more difficult to know. +* If @return > 0, the frame is not complete, meaning +* either there is still some data left to flush within internal buffers, +* or there is more input to read to complete the frame (or both). * In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer. * Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
* @return : 0 when a frame is completely decoded and fully flushed, * or an error code, which can be tested using ZSTD_isError(), * or any other value > 0, which means there is still some decoding or flushing to do to complete current frame : * the return value is a suggested next input size (just a hint for better latency) -* that will never request more than the remaining frame size. +* that will never request more than the remaining content of the compressed frame. * *******************************************************************************/ typedef ZSTD_DCtx ZSTD_DStream; /**< DCtx and DStream are now effectively same object (>= v1.3.0) */ /* For compatibility with versions <= v1.2.0, prefer differentiating them. */ /*===== ZSTD_DStream management functions =====*/ ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void); -ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); +ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); /* accept NULL pointer */ /*===== Streaming decompression functions =====*/ -/* This function is redundant with the advanced API and equivalent to: +/*! ZSTD_initDStream() : + * Initialize/reset DStream state for new decompression operation. + * Call before new decompression operation using same DStream. * - * ZSTD_DCtx_reset(zds); + * Note : This function is redundant with the advanced API and equivalent to: + * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); * ZSTD_DCtx_refDDict(zds, NULL); */ ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds); +/*! ZSTD_decompressStream() : + * Streaming decompression function. + * Call repetitively to consume full input updating it as necessary. + * Function will update both input and output `pos` fields exposing current state via these fields: + * - `input.pos < input.size`, some input remaining and caller should provide remaining input + * on the next call. + * - `output.pos < output.size`, decoder flushed internal output buffer. + * - `output.pos == output.size`, unflushed data potentially present in the internal buffers, + * check ZSTD_decompressStream() @return value, + * if > 0, invoke it again to flush remaining data to output. + * Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX. + * + * @return : 0 when a frame is completely decoded and fully flushed, + * or an error code, which can be tested using ZSTD_isError(), + * or any other value > 0, which means there is some decoding or flushing to do to complete current frame. + * + * Note: when an operation returns with an error code, the @zds state may be left in undefined state. + * It's UB to invoke `ZSTD_decompressStream()` on such a state. + * In order to re-use such a state, it must be first reset, + * which can be done explicitly (`ZSTD_DCtx_reset()`), + * or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`) + */ ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input); ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */ @@ -758,7 +957,7 @@ ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output /*! ZSTD_compress_usingDict() : * Compression at an explicit compression level using a Dictionary. * A dictionary can be any arbitrary data segment (also called a prefix), - * or a buffer with specified information (see dictBuilder/zdict.h). + * or a buffer with specified information (see zdict.h). 
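Before the dictionary material below, here is a sketch of the streaming decompression loop following the pos/size protocol above (file I/O is illustrative, and a production loop would report truncation more precisely):

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    static int stream_decompress(ZSTD_DCtx* dctx, FILE* fin, FILE* fout)
    {
        size_t const inCap  = ZSTD_DStreamInSize();    /* recommended sizes: */
        size_t const outCap = ZSTD_DStreamOutSize();   /* one full block always fits */
        void* const ibuf = malloc(inCap);
        void* const obuf = malloc(outCap);
        size_t lastRet = 0, readSz;
        if (ibuf == NULL || obuf == NULL) { free(ibuf); free(obuf); return -1; }
        while ((readSz = fread(ibuf, 1, inCap, fin)) != 0) {
            ZSTD_inBuffer in = { ibuf, readSz, 0 };
            while (in.pos < in.size) {             /* consume the whole chunk */
                ZSTD_outBuffer out = { obuf, outCap, 0 };
                lastRet = ZSTD_decompressStream(dctx, &out, &in);
                if (ZSTD_isError(lastRet)) break;
                fwrite(obuf, 1, out.pos, fout);
            }
            if (ZSTD_isError(lastRet)) break;
        }
        free(ibuf); free(obuf);
        /* 0 == last frame fully decoded and flushed; anything else is an error or truncated input */
        return (lastRet == 0) ? 0 : -1;
    }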
* Note : This function loads the dictionary, resulting in significant startup delay. * It's intended for a dictionary used only once. * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */ @@ -786,17 +985,23 @@ ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, typedef struct ZSTD_CDict_s ZSTD_CDict; /*! ZSTD_createCDict() : - * When compressing multiple messages / blocks using the same dictionary, it's recommended to load it only once. - * ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup cost. + * When compressing multiple messages or blocks using the same dictionary, + * it's recommended to digest the dictionary only once, since it's a costly operation. + * ZSTD_createCDict() will create a state from digesting a dictionary. + * The resulting state can be used for future compression operations with very limited startup cost. * ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. - * `dictBuffer` can be released after ZSTD_CDict creation, because its content is copied within CDict. - * Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate `dictBuffer` content. - * Note : A ZSTD_CDict can be created from an empty dictBuffer, but it is inefficient when used to compress small data. */ + * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict. + * Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content. + * Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer, + * in which case the only thing that it transports is the @compressionLevel. + * This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively, + * expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */ ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize, int compressionLevel); /*! ZSTD_freeCDict() : - * Function frees memory allocated by ZSTD_createCDict(). */ + * Function frees memory allocated by ZSTD_createCDict(). + * If a NULL pointer is passed, no operation is performed. */ ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict); /*! ZSTD_compress_usingCDict() : @@ -818,7 +1023,8 @@ typedef struct ZSTD_DDict_s ZSTD_DDict; ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize); /*! ZSTD_freeDDict() : - * Function frees memory allocated with ZSTD_createDDict() */ + * Function frees memory allocated with ZSTD_createDDict() + * If a NULL pointer is passed, no operation is performed. */ ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict); /*! ZSTD_decompress_usingDDict() : @@ -834,24 +1040,30 @@ ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, * Dictionary helper functions *******************************/ -/*! ZSTD_getDictID_fromDict() : +/*! ZSTD_getDictID_fromDict() : Requires v1.4.0+ * Provides the dictID stored within dictionary. * if @return == 0, the dictionary is not conformant with Zstandard specification. * It can still be loaded, but as a content-only dictionary. */ ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize); -/*! ZSTD_getDictID_fromDDict() : +/*! ZSTD_getDictID_fromCDict() : Requires v1.5.0+ + * Provides the dictID of the dictionary loaded into `cdict`. + * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. 
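A sketch of the digest-once / compress-many pattern recommended here (compress_many and its calling convention are hypothetical; a real caller would consume dst between iterations):

    #include <zstd.h>

    size_t compress_many(const void* dict, size_t dictSize,
                         const void* const* msgs, const size_t* msgSizes, size_t n,
                         void* dst, size_t dstCap)
    {
        ZSTD_CDict* const cdict = ZSTD_createCDict(dict, dictSize, 3);  /* costly: do it once */
        ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
        size_t i, r = (size_t)-1;   /* generic error sentinel */
        if (cdict != NULL && cctx != NULL) {
            for (i = 0; i < n; i++) {
                r = ZSTD_compress_usingCDict(cctx, dst, dstCap, msgs[i], msgSizes[i], cdict);
                if (ZSTD_isError(r)) break;
            }
        }
        ZSTD_freeCCtx(cctx);     /* both free functions accept NULL */
        ZSTD_freeCDict(cdict);
        return r;
    }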
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ +ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict); + +/*! ZSTD_getDictID_fromDDict() : Requires v1.4.0+ * Provides the dictID of the dictionary loaded into `ddict`. * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict); -/*! ZSTD_getDictID_fromFrame() : +/*! ZSTD_getDictID_fromFrame() : Requires v1.4.0+ * Provides the dictID required to decompress the frame stored within `src`. * If @return == 0, the dictID could not be decoded. * This could be for one of the following reasons : * - The frame does not require a dictionary to be decoded (most common case). - * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information. + * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden piece of information. * Note : this use case also happens when using a non-conformant dictionary. * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`). * - This is not a Zstandard frame. @@ -860,23 +1072,26 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); /******************************************************************************* - * Advanced dictionary and prefix API + * Advanced dictionary and prefix API (Requires v1.4.0+) * * This API allows dictionaries to be used with ZSTD_compress2(), - * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and - * only reset with the context is reset with ZSTD_reset_parameters or - * ZSTD_reset_session_and_parameters. Prefixes are single-use. + * ZSTD_compressStream2(), and ZSTD_decompressDCtx(). + * Dictionaries are sticky: they remain valid when the same context is reused, + * and only reset when the context is reset + * with ZSTD_reset_parameters or ZSTD_reset_session_and_parameters. + * In contrast, prefixes are single-use. ******************************************************************************/ -/*! ZSTD_CCtx_loadDictionary() : +/*! ZSTD_CCtx_loadDictionary() : Requires v1.4.0+ * Create an internal CDict from `dict` buffer. * Decompression will have to use same dictionary. * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary, * meaning "return to no-dictionary mode". - * Note 1 : Dictionary is sticky, it will be used for all future compressed frames. - * To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters). + * Note 1 : Dictionary is sticky, it will be used for all future compressed frames, + * until parameters are reset, a new dictionary is loaded, or the dictionary + * is explicitly invalidated by loading a NULL dictionary. * Note 2 : Loading a dictionary involves building tables. * It's also a CPU consuming operation, with non-negligible impact on latency. * Tables are dependent on compression parameters, and for this reason, @@ -885,14 +1100,18 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead. * In such a case, dictionary buffer must outlive its users.
* Note 4 : Use ZSTD_CCtx_loadDictionary_advanced() - * to precisely select how dictionary content must be interpreted. */ + * to precisely select how dictionary content must be interpreted. + * Note 5 : This method does not benefit from LDM (long distance mode). + * If you want to employ LDM on some large dictionary content, + * prefer employing ZSTD_CCtx_refPrefix() described below. + */ ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); -/*! ZSTD_CCtx_refCDict() : - * Reference a prepared dictionary, to be used for all next compressed frames. +/*! ZSTD_CCtx_refCDict() : Requires v1.4.0+ + * Reference a prepared dictionary, to be used for all future compressed frames. * Note that compression parameters are enforced from within CDict, * and supersede any compression parameter previously set within CCtx. - * The parameters ignored are labled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. + * The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. * The ignored parameters will be used again if the CCtx is returned to no-dictionary mode. * The dictionary will remain valid for future compressed frames using same CCtx. * @result : 0, or an error code (which can be tested with ZSTD_isError()). @@ -902,12 +1121,13 @@ ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, s * Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); -/*! ZSTD_CCtx_refPrefix() : +/*! ZSTD_CCtx_refPrefix() : Requires v1.4.0+ * Reference a prefix (single-usage dictionary) for next compressed frame. * A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end). * Decompression will need same prefix to properly regenerate data. * Compressing with a prefix is similar in outcome to performing a diff and compressing it, * but performs much faster, especially during decompression (compression speed is tunable with compression level). + * This method is compatible with LDM (long distance mode). * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary * Note 1 : Prefix buffer is referenced. It **must** outlive compression. @@ -918,15 +1138,15 @@ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); * Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters. * It's a CPU consuming operation, with non-negligible impact on latency. * If there is a need to use the same prefix multiple times, consider loadDictionary instead. - * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dm_rawContent). + * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent). * Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize); -/*! ZSTD_DCtx_loadDictionary() : - * Create an internal DDict from dict buffer, - * to be used to decompress next frames. - * The dictionary remains valid for all future frames, until explicitly invalidated. +/*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+ + * Create an internal DDict from dict buffer, to be used to decompress all future frames.
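A sketch of the prefix ("diff") workflow: the same prefix must be referenced again before each frame, on both sides, and the buffer must outlive each call (helper names are hypothetical):

    #include <zstd.h>

    size_t diff_compress(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize,
                         void* dst, size_t dstCap, const void* src, size_t srcSize)
    {
        size_t const r = ZSTD_CCtx_refPrefix(cctx, prefix, prefixSize);  /* single-use */
        if (ZSTD_isError(r)) return r;
        return ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
    }

    size_t diff_decompress(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize,
                           void* dst, size_t dstCap, const void* src, size_t srcSize)
    {
        size_t const r = ZSTD_DCtx_refPrefix(dctx, prefix, prefixSize);  /* must match compression side */
        if (ZSTD_isError(r)) return r;
        return ZSTD_decompressDCtx(dctx, dst, dstCap, src, srcSize);
    }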
+ * The dictionary remains valid for all future frames, until explicitly invalidated, or + * a new dictionary is loaded. * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, * meaning "return to no-dictionary mode". @@ -940,18 +1160,26 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, */ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); -/*! ZSTD_DCtx_refDDict() : +/*! ZSTD_DCtx_refDDict() : Requires v1.4.0+ * Reference a prepared dictionary, to be used to decompress next frames. * The dictionary remains active for decompression of future frames using same DCtx. + * + * If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function + * will store the DDict references in a table, and the DDict used for decompression + * will be determined at decompression time, as per the dict ID in the frame. + * The memory for the table is allocated on the first call to refDDict, and can be + * freed with ZSTD_freeDCtx(). + * + * If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary + * will be managed, and referencing a dictionary effectively "discards" any previous one. + * * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Note 1 : Currently, only one dictionary can be managed. - * Referencing a new dictionary effectively "discards" any previous one. * Special: referencing a NULL DDict means "return to no-dictionary mode". * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. */ ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); -/*! ZSTD_DCtx_refPrefix() : +/*! ZSTD_DCtx_refPrefix() : Requires v1.4.0+ * Reference a prefix (single-usage dictionary) to decompress next frame. * This is the reverse operation of ZSTD_CCtx_refPrefix(), * and must use the same prefix as the one used during compression. @@ -962,7 +1190,7 @@ ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); * Note 2 : Prefix buffer is referenced. It **must** outlive decompression. * Prefix buffer must remain unmodified up to the end of frame, * reached when ZSTD_decompressStream() returns 0. - * Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent). + * Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent). * Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section) * Note 4 : Referencing a raw content prefix has almost no cpu or memory cost. * A full dictionary is more costly, as it requires building tables. @@ -972,7 +1200,7 @@ ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, /* === Memory management === */ -/*! ZSTD_sizeof_*() : +/*! ZSTD_sizeof_*() : Requires v1.4.0+ * These functions give the _current_ memory usage of selected object. * Note that object memory usage can evolve (increase or decrease) over time.
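A trivial sketch of the introspection these functions enable (report_memory is hypothetical; the reported values are snapshots and may change as the objects evolve):

    #include <stdio.h>
    #include <zstd.h>

    void report_memory(const ZSTD_CCtx* cctx, const ZSTD_DCtx* dctx, const ZSTD_CDict* cdict)
    {
        printf("cctx : %zu bytes\n", ZSTD_sizeof_CCtx(cctx));
        printf("dctx : %zu bytes\n", ZSTD_sizeof_DCtx(dctx));
        printf("cdict: %zu bytes\n", ZSTD_sizeof_CDict(cdict));
    }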
*/ ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx); @@ -982,6 +1210,10 @@ ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds); ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict); ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); +#if defined (__cplusplus) +} +#endif + #endif /* ZSTD_H_235446 */ @@ -997,6 +1229,21 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); #if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY) #define ZSTD_H_ZSTD_STATIC_LINKING_ONLY +#if defined (__cplusplus) +extern "C" { +#endif + +/* This can be overridden externally to hide static symbols. */ +#ifndef ZSTDLIB_STATIC_API +# if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) +# define ZSTDLIB_STATIC_API __declspec(dllexport) ZSTDLIB_VISIBLE +# elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) +# define ZSTDLIB_STATIC_API __declspec(dllimport) ZSTDLIB_VISIBLE +# else +# define ZSTDLIB_STATIC_API ZSTDLIB_VISIBLE +# endif +#endif + /**************************************************************************************** * experimental API (static linking only) **************************************************************************************** @@ -1007,8 +1254,8 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); * Some of them might be removed in the future (especially when redundant with existing stable functions) * ***************************************************************************************/ -#define ZSTD_FRAMEHEADERSIZE_PREFIX 5 /* minimum input size required to query frame header size */ -#define ZSTD_FRAMEHEADERSIZE_MIN 6 +#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1) /* minimum input size required to query frame header size */ +#define ZSTD_FRAMEHEADERSIZE_MIN(format) ((format) == ZSTD_f_zstd1 ? 6 : 2) #define ZSTD_FRAMEHEADERSIZE_MAX 18 /* can be useful for static allocation */ #define ZSTD_SKIPPABLEHEADERSIZE 8 @@ -1031,6 +1278,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); #define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */ #define ZSTD_STRATEGY_MIN ZSTD_fast #define ZSTD_STRATEGY_MAX ZSTD_btultra2 +#define ZSTD_BLOCKSIZE_MAX_MIN (1 << 10) /* The minimum valid max blocksize. Maximum blocksizes smaller than this make compressBound() inaccurate. */ #define ZSTD_OVERLAPLOG_MIN 0 @@ -1053,14 +1301,54 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); #define ZSTD_LDM_HASHRATELOG_MIN 0 #define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN) -/* internal */ -#define ZSTD_HASHLOG3_MAX 17 +/* Advanced parameter bounds */ +#define ZSTD_TARGETCBLOCKSIZE_MIN 1340 /* suitable to fit into an ethernet / wifi / 4G transport frame */ +#define ZSTD_TARGETCBLOCKSIZE_MAX ZSTD_BLOCKSIZE_MAX +#define ZSTD_SRCSIZEHINT_MIN 0 +#define ZSTD_SRCSIZEHINT_MAX INT_MAX /* --- Advanced types --- */ typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params; +typedef struct { + unsigned int offset; /* The offset of the match. (NOT the same as the offset code) + * If offset == 0 and matchLength == 0, this sequence represents the last + * literals in the block of litLength size. + */ + + unsigned int litLength; /* Literal length of the sequence. */ + unsigned int matchLength; /* Match length of the sequence. */ + + /* Note: Users of this API may provide a sequence with matchLength == litLength == offset == 0. 
+ * In this case, we will treat the sequence as a marker for a block boundary. + */ + + unsigned int rep; /* Represents which repeat offset is represented by the field 'offset'. + * Ranges from [0, 3]. + * + * Repeat offsets are essentially previous offsets from previous sequences sorted in + * recency order. For more detail, see doc/zstd_compression_format.md + * + * If rep == 0, then 'offset' does not contain a repeat offset. + * If rep > 0: + * If litLength != 0: + * rep == 1 --> offset == repeat_offset_1 + * rep == 2 --> offset == repeat_offset_2 + * rep == 3 --> offset == repeat_offset_3 + * If litLength == 0: + * rep == 1 --> offset == repeat_offset_2 + * rep == 2 --> offset == repeat_offset_3 + * rep == 3 --> offset == repeat_offset_1 - 1 + * + * Note: This field is optional. ZSTD_generateSequences() will calculate the value of + * 'rep', but repeat offsets do not necessarily need to be calculated from an external + * sequence provider perspective. For example, ZSTD_compressSequences() does not + * use this 'rep' field at all (as of now). + */ +} ZSTD_Sequence; + typedef struct { unsigned windowLog; /**< largest match distance : larger == more compression, more memory needed during decompression */ unsigned chainLog; /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */ @@ -1090,32 +1378,35 @@ typedef enum { typedef enum { ZSTD_dlm_byCopy = 0, /**< Copy dictionary content internally */ - ZSTD_dlm_byRef = 1, /**< Reference dictionary content -- the dictionary buffer must outlive its users. */ + ZSTD_dlm_byRef = 1 /**< Reference dictionary content -- the dictionary buffer must outlive its users. */ } ZSTD_dictLoadMethod_e; typedef enum { - /* Opened question : should we have a format ZSTD_f_auto ? - * Today, it would mean exactly the same as ZSTD_f_zstd1. - * But, in the future, should several formats become supported, - * on the compression side, it would mean "default format". - * On the decompression side, it would mean "automatic format detection", - * so that ZSTD_f_zstd1 would mean "accept *only* zstd frames". - * Since meaning is a little different, another option could be to define different enums for compression and decompression. - * This question could be kept for later, when there are actually multiple formats to support, - * but there is also the question of pinning enum values, and pinning value `0` is especially important */ ZSTD_f_zstd1 = 0, /* zstd frame format, specified in zstd_compression_format.md (default) */ - ZSTD_f_zstd1_magicless = 1, /* Variant of zstd frame format, without initial 4-bytes magic number. + ZSTD_f_zstd1_magicless = 1 /* Variant of zstd frame format, without initial 4-bytes magic number. * Useful to save 4 bytes per generated frame. * Decoder cannot recognise automatically this format, requiring this instruction. */ } ZSTD_format_e; +typedef enum { + /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */ + ZSTD_d_validateChecksum = 0, + ZSTD_d_ignoreChecksum = 1 +} ZSTD_forceIgnoreChecksum_e; + +typedef enum { + /* Note: this enum controls ZSTD_d_refMultipleDDicts */ + ZSTD_rmd_refSingleDDict = 0, + ZSTD_rmd_refMultipleDDicts = 1 +} ZSTD_refMultipleDDicts_e; + typedef enum { /* Note: this enum and the behavior it controls are effectively internal * implementation details of the compressor. They are expected to continue * to evolve and should be considered only in the context of extremely * advanced performance tuning. 
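As a sketch of the magicless variant defined above, assuming static linking and the companion ZSTD_c_format / ZSTD_d_format parameters from the experimental section (the helper name is illustrative; error handling is elided):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Emit a frame without the 4-byte magic number, then decode it.
     * The decoder cannot auto-detect this format, so it must be told explicitly. */
    size_t magicless_roundtrip(void* dst, size_t dstCapacity,
                               void* rt, size_t rtCapacity,
                               const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_format, ZSTD_f_zstd1_magicless);
        size_t const csize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
        ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless);
        size_t rsize = csize;
        if (!ZSTD_isError(csize))
            rsize = ZSTD_decompressDCtx(dctx, rt, rtCapacity, dst, csize);
        ZSTD_freeCCtx(cctx);
        ZSTD_freeDCtx(dctx);
        return rsize;
    }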
* - * Zstd currently supports the use of a CDict in two ways: + * Zstd currently supports the use of a CDict in three ways: * * - The contents of the CDict can be copied into the working context. This * means that the compression can search both the dictionary and input @@ -1131,6 +1422,12 @@ typedef enum { * working context's tables can be reused). For small inputs, this can be * faster than copying the CDict's tables. * + * - The CDict's tables are not used at all, and instead we use the working + * context alone to reload the dictionary and use params based on the source + * size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict(). + * This method is effective when the dictionary sizes are very small relative + * to the input size, and the input size is fairly large to begin with. + * * Zstd has a simple internal heuristic that selects which strategy to use * at the beginning of a compression. However, if experimentation shows that * Zstd is making poor choices, it is possible to override that choice with @@ -1139,6 +1436,7 @@ typedef enum { ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */ ZSTD_dictForceAttach = 1, /* Never copy the dictionary. */ ZSTD_dictForceCopy = 2, /* Always copy the dictionary. */ + ZSTD_dictForceLoad = 3 /* Always reload the dictionary */ } ZSTD_dictAttachPref_e; typedef enum { @@ -1147,12 +1445,22 @@ typedef enum { * levels will be compressed. */ ZSTD_lcm_huffman = 1, /**< Always attempt Huffman compression. Uncompressed literals will still be * emitted if Huffman compression is not profitable. */ - ZSTD_lcm_uncompressed = 2, /**< Always emit uncompressed literals. */ + ZSTD_lcm_uncompressed = 2 /**< Always emit uncompressed literals. */ } ZSTD_literalCompressionMode_e; +typedef enum { + /* Note: This enum controls features which are conditionally beneficial. + * Zstd can take a decision on whether or not to enable the feature (ZSTD_ps_auto), + * but setting the switch to ZSTD_ps_enable or ZSTD_ps_disable force enable/disable the feature. + */ + ZSTD_ps_auto = 0, /* Let the library automatically determine whether the feature shall be enabled */ + ZSTD_ps_enable = 1, /* Force-enable the feature */ + ZSTD_ps_disable = 2 /* Do not use the feature */ +} ZSTD_ParamSwitch_e; +#define ZSTD_paramSwitch_e ZSTD_ParamSwitch_e /* old name */ /*************************************** -* Frame size functions +* Frame header and size functions ***************************************/ /*! ZSTD_findDecompressedSize() : @@ -1176,14 +1484,14 @@ typedef enum { * note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to * read each contained frame header. This is fast as most of the data is skipped, * however it does mean that all frame data must be present and valid. */ -ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize); +ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize); /*! ZSTD_decompressBound() : * `src` should point to the start of a series of ZSTD encoded and/or skippable frames * `srcSize` must be the _exact_ size of this series * (i.e. there should be a frame boundary at `src + srcSize`) * @return : - upper-bound for the decompressed size of all data in all successive frames - * - if an error occured: ZSTD_CONTENTSIZE_ERROR + * - if an error occurred: ZSTD_CONTENTSIZE_ERROR * * note 1 : an error can occur if `src` contains an invalid or incorrectly formatted frame. 
* note 2 : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`. @@ -1191,13 +1499,253 @@ ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t * note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by: * upper-bound = # blocks * min(128 KB, Window_Size) */ -ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize); +ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize); /*! ZSTD_frameHeaderSize() : - * srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX. + * srcSize must be large enough, aka >= ZSTD_FRAMEHEADERSIZE_PREFIX. * @return : size of the Frame Header, * or an error code (if srcSize is too small) */ -ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize); + +typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_FrameType_e; +#define ZSTD_frameType_e ZSTD_FrameType_e /* old name */ +typedef struct { + unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */ + unsigned long long windowSize; /* can be very large, up to <= frameContentSize */ + unsigned blockSizeMax; + ZSTD_FrameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ + unsigned headerSize; + unsigned dictID; /* for ZSTD_skippableFrame, contains the skippable magic variant [0-15] */ + unsigned checksumFlag; + unsigned _reserved1; + unsigned _reserved2; +} ZSTD_FrameHeader; +#define ZSTD_frameHeader ZSTD_FrameHeader /* old name */ + +/*! ZSTD_getFrameHeader() : + * decode Frame Header into `zfhPtr`, or requires larger `srcSize`. + * @return : 0 => header is complete, `zfhPtr` is correctly filled, + * >0 => `srcSize` is too small, @return value is the wanted `srcSize` amount, `zfhPtr` is not filled, + * or an error code, which can be tested using ZSTD_isError() */ +ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize); +/*! ZSTD_getFrameHeader_advanced() : + * same as ZSTD_getFrameHeader(), + * with added capability to select a format (like ZSTD_f_zstd1_magicless) */ +ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format); + +/*! ZSTD_decompressionMargin() : + * Zstd supports in-place decompression, where the input and output buffers overlap. + * In this case, the output buffer must be at least (Margin + Output_Size) bytes large, + * and the input buffer must be at the end of the output buffer. + * + * _______________________ Output Buffer ________________________ + * | | + * | ____ Input Buffer ____| + * | | | + * v v v + * |---------------------------------------|-----------|----------| + * ^ ^ ^ + * |___________________ Output_Size ___________________|_ Margin _| + * + * NOTE: See also ZSTD_DECOMPRESSION_MARGIN(). + * NOTE: This applies only to single-pass decompression through ZSTD_decompress() or + * ZSTD_decompressDCtx(). + * NOTE: This function supports multi-frame input. + * + * @param src The compressed frame(s) + * @param srcSize The size of the compressed frame(s) + * @returns The decompression margin or an error that can be checked with ZSTD_isError(). + */ +ZSTDLIB_STATIC_API size_t ZSTD_decompressionMargin(const void* src, size_t srcSize); + +/*! 
ZSTD_DECOMPRESS_MARGIN() : + * Similar to ZSTD_decompressionMargin(), but instead of computing the margin from + * the compressed frame, compute it from the original size and the blockSizeLog. + * See ZSTD_decompressionMargin() for details. + * + * WARNING: This macro does not support multi-frame input, the input must be a single + * zstd frame. If you need that support use the function, or implement it yourself. + * + * @param originalSize The original uncompressed size of the data. + * @param blockSize The block size == MIN(windowSize, ZSTD_BLOCKSIZE_MAX). + * Unless you explicitly set the windowLog smaller than + * ZSTD_BLOCKSIZELOG_MAX you can just use ZSTD_BLOCKSIZE_MAX. + */ +#define ZSTD_DECOMPRESSION_MARGIN(originalSize, blockSize) ((size_t)( \ + ZSTD_FRAMEHEADERSIZE_MAX /* Frame header */ + \ + 4 /* checksum */ + \ + ((originalSize) == 0 ? 0 : 3 * (((originalSize) + (blockSize) - 1) / blockSize)) /* 3 bytes per block */ + \ + (blockSize) /* One block of margin */ \ + )) + +typedef enum { + ZSTD_sf_noBlockDelimiters = 0, /* ZSTD_Sequence[] has no block delimiters, just sequences */ + ZSTD_sf_explicitBlockDelimiters = 1 /* ZSTD_Sequence[] contains explicit block delimiters */ +} ZSTD_SequenceFormat_e; +#define ZSTD_sequenceFormat_e ZSTD_SequenceFormat_e /* old name */ + +/*! ZSTD_sequenceBound() : + * `srcSize` : size of the input buffer + * @return : upper-bound for the number of sequences that can be generated + * from a buffer of srcSize bytes + * + * note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence). + */ +ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound(size_t srcSize); + +/*! ZSTD_generateSequences() : + * WARNING: This function is meant for debugging and informational purposes ONLY! + * Its implementation is flawed, and it will be deleted in a future version. + * It is not guaranteed to succeed, as there are several cases where it will give + * up and fail. You should NOT use this function in production code. + * + * This function is deprecated, and will be removed in a future version. + * + * Generate sequences using ZSTD_compress2(), given a source buffer. + * + * @param zc The compression context to be used for ZSTD_compress2(). Set any + * compression parameters you need on this context. + * @param outSeqs The output sequences buffer of size @p outSeqsSize + * @param outSeqsCapacity The size of the output sequences buffer. + * ZSTD_sequenceBound(srcSize) is an upper bound on the number + * of sequences that can be generated. + * @param src The source buffer to generate sequences from of size @p srcSize. + * @param srcSize The size of the source buffer. + * + * Each block will end with a dummy sequence + * with offset == 0, matchLength == 0, and litLength == length of last literals. + * litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0) + * simply acts as a block delimiter. + * + * @returns The number of sequences generated, necessarily less than + * ZSTD_sequenceBound(srcSize), or an error code that can be checked + * with ZSTD_isError(). + */ +ZSTD_DEPRECATED("For debugging only, will be replaced by ZSTD_extractSequences()") +ZSTDLIB_STATIC_API size_t +ZSTD_generateSequences(ZSTD_CCtx* zc, + ZSTD_Sequence* outSeqs, size_t outSeqsCapacity, + const void* src, size_t srcSize); + +/*! ZSTD_mergeBlockDelimiters() : + * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals + * by merging them into the literals of the next sequence. 
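A minimal sketch of the in-place decompression layout described above, using ZSTD_DECOMPRESSION_MARGIN(). It assumes the original size is known, the default block size applies, and the compressed input fits within the enlarged buffer; the helper name is illustrative:

    #define ZSTD_STATIC_LINKING_ONLY  /* for ZSTD_DECOMPRESSION_MARGIN() */
    #include <zstd.h>
    #include <stdlib.h>
    #include <string.h>

    /* Decompress a single frame in place: the compressed bytes are placed at the
     * end of the same buffer that receives the decompressed output. */
    size_t decompress_in_place(const void* comp, size_t compSize, size_t originalSize)
    {
        size_t const margin  = ZSTD_DECOMPRESSION_MARGIN(originalSize, ZSTD_BLOCKSIZE_MAX);
        size_t const bufSize = originalSize + margin;
        char* const buf = (char*)malloc(bufSize);
        size_t dSize;
        if (buf == NULL || compSize > bufSize) { free(buf); return (size_t)-1; }
        memcpy(buf + bufSize - compSize, comp, compSize);   /* input sits at the buffer's end */
        dSize = ZSTD_decompress(buf, bufSize,               /* output overlaps the input */
                                buf + bufSize - compSize, compSize);
        /* on success, buf[0..dSize) now holds the regenerated data */
        free(buf);
        return dSize;
    }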
+ * + * As such, the final generated result has no explicit representation of block boundaries, + * and the final last literals segment is not represented in the sequences. + * + * The output of this function can be fed into ZSTD_compressSequences() with CCtx + * setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters. + * @return : number of sequences left after merging + */ +ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize); + +/*! ZSTD_compressSequences() : + * Compress an array of ZSTD_Sequence, associated with @src buffer, into dst. + * @src contains the entire input (not just the literals). + * If @srcSize > sum(sequence.length), the remaining bytes are considered all literals + * If a dictionary is included, then the cctx should reference the dict (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.). + * The entire source is compressed into a single frame. + * + * The compression behavior changes based on cctx params. In particular: + * If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain + * no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on + * the block size derived from the cctx, and sequences may be split. This is the default setting. + * + * If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain + * valid block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided. + * + * When ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, it's possible to decide whether to generate repcodes, + * using the advanced parameter ZSTD_c_repcodeResolution. Repcodes will improve compression ratio, though the benefit + * can vary greatly depending on Sequences. On the other hand, repcode resolution is an expensive operation. + * By default, it's disabled at low (<10) compression levels, and enabled above the threshold (>=10). + * ZSTD_c_repcodeResolution makes it possible to directly manage this processing in either direction. + * + * If ZSTD_c_validateSequences == 0, this function blindly accepts the Sequences provided. Invalid Sequences cause undefined + * behavior. If ZSTD_c_validateSequences == 1, then the function will detect invalid Sequences (see doc/zstd_compression_format.md for + * specifics regarding offset/matchlength requirements) and then bail out and return an error. + * + * In addition to the two adjustable experimental params, there are other important cctx params. + * - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN. + * - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression. + * - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset + * is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md + * + * Note: Repcodes are, as of now, always re-calculated within this function; ZSTD_Sequence.rep is effectively unused. + * Dev Note: Once the ability to ingest repcodes becomes available, the explicit block delims mode must respect those repcodes exactly, + * and cannot emit an RLE block that disagrees with the repcode history. + * @return : final compressed size, or a ZSTD error code.
+ */ +ZSTDLIB_STATIC_API size_t +ZSTD_compressSequences(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const ZSTD_Sequence* inSeqs, size_t inSeqsSize, + const void* src, size_t srcSize); + + +/*! ZSTD_compressSequencesAndLiterals() : + * This is a variant of ZSTD_compressSequences() which, + * instead of receiving (src,srcSize) as input parameter, receives (literals,litSize), + * aka all the literals, already extracted and laid out into a single continuous buffer. + * This can be useful if the process generating the sequences also happens to generate the buffer of literals, + * thus skipping an extraction + caching stage. + * It's a speed optimization, useful when the right conditions are met, + * but it also features the following limitations: + * - Only supports explicit delimiter mode + * - Currently does not support Sequences validation (so input Sequences are trusted) + * - Not compatible with frame checksum, which must be disabled + * - If any block is incompressible, will fail and return an error + * - @litSize must be == sum of all @.litLength fields in @inSeqs. Any discrepancy will generate an error. + * - @litBufCapacity is the size of the underlying buffer into which literals are written, starting at address @literals. + * @litBufCapacity must be at least 8 bytes larger than @litSize. + * - @decompressedSize must be correct, and correspond to the sum of all Sequences. Any discrepancy will generate an error. + * @return : final compressed size, or a ZSTD error code. + */ +ZSTDLIB_STATIC_API size_t +ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const ZSTD_Sequence* inSeqs, size_t nbSequences, + const void* literals, size_t litSize, size_t litBufCapacity, + size_t decompressedSize); + + +/*! ZSTD_writeSkippableFrame() : + * Generates a zstd skippable frame containing data given by src, and writes it to dst buffer. + * + * Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number, + * ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15. + * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, + * so the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant. + * + * Returns an error if destination buffer is not large enough, if the source size is not representable + * with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid). + * + * @return : number of bytes written or a ZSTD error. + */ +ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + unsigned magicVariant); + +/*! ZSTD_readSkippableFrame() : + * Retrieves the content of a zstd skippable frame starting at @src, and writes it to @dst buffer. + * + * The parameter @magicVariant will receive the magicVariant that was supplied when the frame was written, + * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. + * This can be NULL if the caller is not interested in the magicVariant. + * + * Returns an error if destination buffer is not large enough, or if the frame is not skippable. + * + * @return : number of bytes written or a ZSTD error. + */ +ZSTDLIB_STATIC_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, + unsigned* magicVariant, + const void* src, size_t srcSize); + +/*! ZSTD_isSkippableFrame() : + * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame. 
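A short sketch of the skippable-frame helpers declared above (the magic variant 0 is chosen arbitrarily, the helper name is illustrative, and buffer sizing is the caller's responsibility):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Wrap user metadata in a skippable frame (decoders simply skip it),
     * then read it back and recover the magic variant. */
    size_t metadata_roundtrip(void* frame, size_t frameCapacity,
                              void* meta, size_t metaCapacity,
                              const void* payload, size_t payloadSize)
    {
        unsigned variant;
        size_t const fSize = ZSTD_writeSkippableFrame(frame, frameCapacity,
                                                      payload, payloadSize, 0 /* magic variant */);
        if (ZSTD_isError(fSize)) return fSize;
        /* ZSTD_isSkippableFrame(frame, fSize) would now report 1 */
        return ZSTD_readSkippableFrame(meta, metaCapacity, &variant, frame, fSize);
    }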
+ */ +ZSTDLIB_STATIC_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size); + /*************************************** @@ -1207,44 +1755,69 @@ ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize); /*! ZSTD_estimate*() : * These functions make it possible to estimate memory usage * of a future {D,C}Ctx, before its creation. - * ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one. - * It will also consider src size to be arbitrarily "large", which is worst case. - * If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation. - * ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel. - * ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1. - * Note : CCtx size estimation is only correct for single-threaded compression. */ -ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel); -ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams); -ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params); -ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void); + * This is useful in combination with ZSTD_initStatic(), + * which makes it possible to employ a static buffer for ZSTD_CCtx* state. + * + * ZSTD_estimateCCtxSize() will provide a memory budget large enough + * to compress data of any size using one-shot compression ZSTD_compressCCtx() or ZSTD_compress2() + * associated with any compression level up to max specified one. + * The estimate will assume the input may be arbitrarily large, + * which is the worst case. + * + * Note that the size estimation is specific for one-shot compression, + * it is not valid for streaming (see ZSTD_estimateCStreamSize*()) + * nor other potential ways of using a ZSTD_CCtx* state. + * + * When srcSize can be bound by a known and rather "small" value, + * this knowledge can be used to provide a tighter budget estimation + * because the ZSTD_CCtx* state will need less memory for small inputs. + * This tighter estimation can be provided by employing more advanced functions + * ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(), + * and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter(). + * Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits. + * + * Note : only single-threaded compression is supported. + * ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. + */ +ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int maxCompressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params); +ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void); /*! ZSTD_estimateCStreamSize() : - * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one. - * It will also consider src size to be arbitrarily "large", which is worst case. + * ZSTD_estimateCStreamSize() will provide a memory budget large enough for streaming compression + * using any compression level up to the max specified one. 
+ * It will also consider src size to be arbitrarily "large", which is a worst case scenario. * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation. * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel. * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1. * Note : CStream size estimation is only correct for single-threaded compression. - * ZSTD_DStream memory budget depends on window Size. + * ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. + * Note 2 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time. + * Size estimates assume that no external sequence producer is registered. + * + * ZSTD_DStream memory budget depends on frame's window Size. * This information can be passed manually, using ZSTD_estimateDStreamSize, * or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame(); + * Any frame requesting a window size larger than max specified one will be rejected. * Note : if streaming is init with function ZSTD_init?Stream_usingDict(), * an internal ?Dict will be created, which additional size is not estimated here. - * In this case, get total size by adding ZSTD_estimate?DictSize */ -ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel); -ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams); -ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params); -ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize); -ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize); + * In this case, get total size by adding ZSTD_estimate?DictSize + */ +ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int maxCompressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params); +ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t maxWindowSize); +ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize); /*! ZSTD_estimate?DictSize() : * ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict(). * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced(). * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller. */ -ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel); -ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod); -ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod); +ZSTDLIB_STATIC_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod); /*! 
ZSTD_initStatic*() : * Initialize an object using a pre-allocated fixed-size buffer. @@ -1267,20 +1840,20 @@ ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e * Limitation 2 : static cctx currently not compatible with multi-threading. * Limitation 3 : static dctx is incompatible with legacy support. */ -ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize); -ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticCCtx() */ +ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize); +ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticCCtx() */ -ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize); -ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticDCtx() */ +ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize); +ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticDCtx() */ -ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict( +ZSTDLIB_STATIC_API const ZSTD_CDict* ZSTD_initStaticCDict( void* workspace, size_t workspaceSize, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams); -ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict( +ZSTDLIB_STATIC_API const ZSTD_DDict* ZSTD_initStaticDDict( void* workspace, size_t workspaceSize, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, @@ -1295,24 +1868,61 @@ ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict( typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size); typedef void (*ZSTD_freeFunction) (void* opaque, void* address); typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem; -static ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */ +#if defined(__clang__) && __clang_major__ >= 5 +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" +#endif +static +#ifdef __GNUC__ +__attribute__((__unused__)) +#endif +ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */ +#if defined(__clang__) && __clang_major__ >= 5 +#pragma clang diagnostic pop +#endif -ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem); +ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem); +ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem); +ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem); +ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, +ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams, 
ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_customMem customMem); +/*! Thread pool : + * These prototypes make it possible to share a thread pool among multiple compression contexts. + * This can limit resources for applications with multiple threads where each one uses + * a threaded compression mode (via ZSTD_c_nbWorkers parameter). + * ZSTD_createThreadPool creates a new thread pool with a given number of threads. + * Note that such a pool must remain valid while it is being used. + * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value + * to use an internal thread pool). + * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer. + */ +typedef struct POOL_ctx_s ZSTD_threadPool; +ZSTDLIB_STATIC_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads); +ZSTDLIB_STATIC_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool); /* accept NULL pointer */ +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool); + + +/* + * This API is temporary and is expected to change or disappear in the future! + */ +ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_advanced2( + const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + const ZSTD_CCtx_params* cctxParams, + ZSTD_customMem customMem); +ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_advanced( + const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_customMem customMem); /*************************************** @@ -1323,23 +1933,24 @@ ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictS * Create a digested dictionary for compression * Dictionary content is just referenced, not duplicated. * As a consequence, `dictBuffer` **must** outlive CDict, - * and its content must remain unmodified throughout the lifetime of CDict. */ -ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel); + * and its content must remain unmodified throughout the lifetime of CDict. + * note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */ +ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel); /*! ZSTD_getCParams() : * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize. * `estimatedSrcSize` value is optional, select 0 if not known */ -ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); +ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); /*! ZSTD_getParams() : * same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`. * All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */ -ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); +ZSTDLIB_STATIC_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); /*! ZSTD_checkCParams() : * Ensure param values remain within authorized range.
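A minimal sketch combining the estimation and static-initialization entry points from this section (single-threaded one-shot compression only, per the notes above; the malloc'd workspace merely stands in for any sufficiently aligned static buffer, and the helper name is illustrative):

    #define ZSTD_STATIC_LINKING_ONLY  /* for ZSTD_estimateCCtxSize() / ZSTD_initStaticCCtx() */
    #include <zstd.h>
    #include <stdlib.h>

    /* Run a one-shot compression inside a caller-provided workspace:
     * once the workspace is carved out, zstd performs no further allocation. */
    size_t compress_with_static_cctx(void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize, int level)
    {
        size_t const wkspSize = ZSTD_estimateCCtxSize(level);  /* worst-case budget for this level */
        void* const wksp = malloc(wkspSize);                   /* any 8-byte-aligned buffer works */
        ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
        size_t const cSize = (cctx != NULL)
            ? ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, level)
            : (size_t)-1;  /* workspace too small or misaligned */
        free(wksp);        /* static contexts are not freed through zstd */
        return cSize;
    }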
* @return 0 on success, or an error code (can be checked with ZSTD_isError()) */ -ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params); +ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params); /*! ZSTD_adjustCParams() : * optimize params for a given `srcSize` and `dictSize`. @@ -1347,19 +1958,48 @@ ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params); * `dictSize` must be `0` when there is no dictionary. * cPar can be invalid : all parameters will be clamped within valid range in the @return struct. * This function never fails (wide contract) */ -ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize); +ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize); + +/*! ZSTD_CCtx_setCParams() : + * Set all parameters provided within @p cparams into the working @p cctx. + * Note : if modifying parameters during compression (MT mode only), + * note that changes to the .windowLog parameter will be ignored. + * @return 0 on success, or an error code (can be checked with ZSTD_isError()). + * On failure, no parameters are updated. + */ +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams); + +/*! ZSTD_CCtx_setFParams() : + * Set all parameters provided within @p fparams into the working @p cctx. + * @return 0 on success, or an error code (can be checked with ZSTD_isError()). + */ +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams); + +/*! ZSTD_CCtx_setParams() : + * Set all parameters provided within @p params into the working @p cctx. + * @return 0 on success, or an error code (can be checked with ZSTD_isError()). + */ +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params); /*! ZSTD_compress_advanced() : - * Same as ZSTD_compress_usingDict(), with fine-tune control over compression parameters (by structure) */ -ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict,size_t dictSize, - ZSTD_parameters params); + * Note : this function is now DEPRECATED. + * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters. + * This prototype will generate compilation warnings. */ +ZSTD_DEPRECATED("use ZSTD_compress2") +ZSTDLIB_STATIC_API +size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + ZSTD_parameters params); /*! ZSTD_compress_usingCDict_advanced() : - * Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters */ -ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, + * Note : this function is now DEPRECATED. + * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters. + * This prototype will generate compilation warnings. */ +ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary") +ZSTDLIB_STATIC_API +size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_CDict* cdict, @@ -1369,18 +2009,18 @@ ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, /*! 
ZSTD_CCtx_loadDictionary_byReference() : * Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx. * It saves some memory, but also requires that `dict` outlives its usage within `cctx` */ -ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); /*! ZSTD_CCtx_loadDictionary_advanced() : * Same as ZSTD_CCtx_loadDictionary(), but gives finer control over * how to load the dictionary (by copy ? by reference ?) * and how to interpret it (automatic ? force raw mode ? full mode only ?) */ -ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); /*! ZSTD_CCtx_refPrefix_advanced() : * Same as ZSTD_CCtx_refPrefix(), but gives finer control over * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */ -ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); /* === experimental parameters === */ /* these parameters can be used with ZSTD_setParameter() @@ -1419,18 +2059,308 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre * See the comments on that enum for an explanation of the feature. */ #define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4 -/* Controls how the literals are compressed (default is auto). - * The value must be of type ZSTD_literalCompressionMode_e. - * See ZSTD_literalCompressionMode_t enum definition for details. +/* Controlled with ZSTD_ParamSwitch_e enum. + * Default is ZSTD_ps_auto. + * Set to ZSTD_ps_disable to never compress literals. + * Set to ZSTD_ps_enable to always compress literals. (Note: uncompressed literals + * may still be emitted if huffman is not beneficial to use.) + * + * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use + * literals compression based on the compression parameters - specifically, + * negative compression levels do not use literal compression. */ #define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5 +/* User's best guess of source size. + * Hint is not valid when srcSizeHint == 0. + * There is no guarantee that hint is close to actual source size, + * but compression ratio may regress significantly if the guess considerably underestimates it */ +#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7 + +/* Controls whether the new and experimental "dedicated dictionary search + * structure" can be used. This feature is still rough around the edges; be + * prepared for surprising behavior! + * + * How to use it: + * + * When using a CDict, whether to use this feature or not is controlled at + * CDict creation, and it must be set in a CCtxParams set passed into that + * construction (via ZSTD_createCDict_advanced2()). A compression will then + * use the feature or not based on how the CDict was constructed; the value of + * this param, set in the CCtx, will have no effect.
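Following the usage notes above, a hedged sketch of opting a CDict into the dedicated dictionary search structure via ZSTD_createCDict_advanced2(); per the recommendation, only a compression level is set in the CCtxParams (the helper name is illustrative):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Build a CDict whose search tables use the read-optimized dedicated structure. */
    ZSTD_CDict* make_dds_cdict(const void* dict, size_t dictSize, int level)
    {
        ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
        ZSTD_CDict* cdict = NULL;
        if (params != NULL) {
            ZSTD_CCtxParams_init(params, level);  /* set a level; leave cParams untouched */
            ZSTD_CCtxParams_setParameter(params, ZSTD_c_enableDedicatedDictSearch, 1);
            cdict = ZSTD_createCDict_advanced2(dict, dictSize,
                                               ZSTD_dlm_byCopy, ZSTD_dct_auto,
                                               params, ZSTD_defaultCMem);
            ZSTD_freeCCtxParams(params);
        }
        return cdict;  /* reference with ZSTD_CCtx_refCDict(); release with ZSTD_freeCDict() */
    }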
+ * + * However, when a dictionary buffer is passed into a CCtx, such as via + * ZSTD_CCtx_loadDictionary(), this param can be set on the CCtx to control + * whether the CDict that is created internally can use the feature or not. + * + * What it does: + * + * Normally, the internal data structures of the CDict are analogous to what + * would be stored in a CCtx after compressing the contents of a dictionary. + * To an approximation, a compression using a dictionary can then use those + * data structures to simply continue what is effectively a streaming + * compression where the simulated compression of the dictionary left off. + * Which is to say, the search structures in the CDict are normally the same + * format as in the CCtx. + * + * It is possible to do better, since the CDict is not like a CCtx: the search + * structures are written once during CDict creation, and then are only read + * after that, while the search structures in the CCtx are both read and + * written as the compression goes along. This means we can choose a search + * structure for the dictionary that is read-optimized. + * + * This feature enables the use of that different structure. + * + * Note that some of the members of the ZSTD_compressionParameters struct have + * different semantics and constraints in the dedicated search structure. It is + * highly recommended that you simply set a compression level in the CCtxParams + * you pass into the CDict creation call, and avoid messing with the cParams + * directly. + * + * Effects: + * + * This will only have any effect when the selected ZSTD_strategy + * implementation supports this feature. Currently, that's limited to + * ZSTD_greedy, ZSTD_lazy, and ZSTD_lazy2. + * + * Note that this means that the CDict tables can no longer be copied into the + * CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be + * usable. The dictionary can only be attached or reloaded. + * + * In general, you should expect compression to be faster--sometimes very much + * so--and CDict creation to be slightly slower. Eventually, we will probably + * make this mode the default. + */ +#define ZSTD_c_enableDedicatedDictSearch ZSTD_c_experimentalParam8 + +/* ZSTD_c_stableInBuffer + * Experimental parameter. + * Default is 0 == disabled. Set to 1 to enable. + * + * Tells the compressor that input data presented with ZSTD_inBuffer + * will ALWAYS be the same between calls. + * Technically, the @src pointer must never be changed, + * and the @pos field can only be updated by zstd. + * However, it's possible to increase the @size field, + * allowing scenarios where more data can be appended after compression starts. + * These conditions are checked by the compressor, + * and compression will fail if they are not respected. + * Also, data in the ZSTD_inBuffer within the range [src, src + pos) + * MUST not be modified during compression or it will result in data corruption. + * + * When this flag is enabled, zstd won't allocate an input window buffer, + * because the user guarantees it can reference the ZSTD_inBuffer until + * the frame is complete. But, it will still allocate an output buffer + * large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also + * avoid the memcpy() from the input buffer to the input window buffer. + * + * NOTE: So long as the ZSTD_inBuffer always points to valid memory, using + * this flag is ALWAYS memory safe, and will never access out-of-bounds + * memory. However, compression WILL fail if conditions are not respected.
+ * + * WARNING: The data in the ZSTD_inBuffer in the range [src, src + pos) MUST + * not be modified during compression or it will result in data corruption. + * This is because zstd needs to reference data in the ZSTD_inBuffer to find + * matches. Normally zstd maintains its own window buffer for this purpose, + * but passing this flag tells zstd to rely on the user-provided buffer instead. + */ +#define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9 + +/* ZSTD_c_stableOutBuffer + * Experimental parameter. + * Default is 0 == disabled. Set to 1 to enable. + * + * Tells the compressor that the ZSTD_outBuffer will not be resized between + * calls. Specifically: (out.size - out.pos) will never grow. This gives the + * compressor the freedom to say: If the compressed data doesn't fit in the + * output buffer then return ZSTD_error_dstSizeTooSmall. This allows us to + * always compress directly into the output buffer, instead of compressing + * into an internal buffer and copying to the output buffer. + * + * When this flag is enabled, zstd won't allocate an output buffer, because + * it can write directly to the ZSTD_outBuffer. It will still allocate the + * input window buffer (see ZSTD_c_stableInBuffer). + * + * Zstd will check that (out.size - out.pos) never grows and return an error + * if it does. While not strictly necessary, this should prevent surprises. + */ +#define ZSTD_c_stableOutBuffer ZSTD_c_experimentalParam10 + +/* ZSTD_c_blockDelimiters + * Default is 0 == ZSTD_sf_noBlockDelimiters. + * + * For use with sequence compression API: ZSTD_compressSequences(). + * + * Designates whether or not the given array of ZSTD_Sequence contains block delimiters + * and last literals, which are defined as sequences with offset == 0 and matchLength == 0. + * See the definition of ZSTD_Sequence for more specifics. + */ +#define ZSTD_c_blockDelimiters ZSTD_c_experimentalParam11 + +/* ZSTD_c_validateSequences + * Default is 0 == disabled. Set to 1 to enable sequence validation. + * + * For use with sequence compression API: ZSTD_compressSequences*(). + * Designates whether or not provided sequences are validated within ZSTD_compressSequences*() + * during function execution. + * + * When Sequence validation is disabled (default), Sequences are compressed as-is, + * so they must be correct, otherwise it would result in a corruption error. + * + * Sequence validation adds some protection, by ensuring that all values respect boundary conditions. + * If a Sequence is detected invalid (see doc/zstd_compression_format.md for + * specifics regarding offset/matchlength requirements) then the function will bail out and + * return an error. + */ +#define ZSTD_c_validateSequences ZSTD_c_experimentalParam12 + +/* ZSTD_c_blockSplitterLevel + * note: this parameter only influences the first splitter stage, + * which is active before producing the sequences. + * ZSTD_c_splitAfterSequences controls the next splitter stage, + * which is active after sequence production. + * Note that both can be combined. + * Allowed values are between 0 and ZSTD_BLOCKSPLITTER_LEVEL_MAX included. + * 0 means "auto", which will select a value depending on current ZSTD_c_strategy. + * 1 means no splitting. + * Then, values from 2 to 6 are sorted in increasing cpu load order. + * + * Note that currently the first block is never split, + * to ensure expansion guarantees in presence of incompressible data.
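A short sketch tying the two sequence-related parameters above to ZSTD_compressSequences(); the sequences are assumed to come from ZSTD_generateSequences() or an external producer, and the helper name is illustrative:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Compress pre-computed sequences, asking the library to validate them
     * instead of trusting the producer blindly. */
    size_t compress_checked_sequences(ZSTD_CCtx* cctx,
                                      void* dst, size_t dstCapacity,
                                      const ZSTD_Sequence* seqs, size_t nbSeqs,
                                      const void* src, size_t srcSize)
    {
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1);  /* invalid input -> error, not UB */
        return ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize);
    }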
+ */ +#define ZSTD_BLOCKSPLITTER_LEVEL_MAX 6 +#define ZSTD_c_blockSplitterLevel ZSTD_c_experimentalParam20 + +/* ZSTD_c_splitAfterSequences + * This is a stronger splitter algorithm, + * based on actual sequences previously produced by the selected parser. + * It's also slower, and as a consequence, mostly used for high compression levels. + * While the post-splitter does overlap with the pre-splitter, + * both can nonetheless be combined, + * notably with ZSTD_c_blockSplitterLevel at ZSTD_BLOCKSPLITTER_LEVEL_MAX, + * resulting in higher compression ratio than just one of them. + * + * Default is ZSTD_ps_auto. + * Set to ZSTD_ps_disable to never use block splitter. + * Set to ZSTD_ps_enable to always use block splitter. + * + * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use + * block splitting based on the compression parameters. + */ +#define ZSTD_c_splitAfterSequences ZSTD_c_experimentalParam13 + +/* ZSTD_c_useRowMatchFinder + * Controlled with ZSTD_ParamSwitch_e enum. + * Default is ZSTD_ps_auto. + * Set to ZSTD_ps_disable to never use row-based matchfinder. + * Set to ZSTD_ps_enable to force usage of row-based matchfinder. + * + * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use + * the row-based matchfinder based on support for SIMD instructions and the window log. + * Note that this only pertains to compression strategies: greedy, lazy, and lazy2 + */ +#define ZSTD_c_useRowMatchFinder ZSTD_c_experimentalParam14 + +/* ZSTD_c_deterministicRefPrefix + * Default is 0 == disabled. Set to 1 to enable. + * + * Zstd produces different results for prefix compression when the prefix is + * directly adjacent to the data about to be compressed vs. when it isn't. + * This is because zstd detects that the two buffers are contiguous and it can + * use a more efficient match finding algorithm. However, this produces different + * results than when the two buffers are non-contiguous. This flag forces zstd + * to always load the prefix in non-contiguous mode, even if it happens to be + * adjacent to the data, to guarantee determinism. + * + * If you really care about determinism when using a dictionary or prefix, + * like when doing delta compression, you should select this option. It comes + * at a speed penalty of about ~2.5% if the dictionary and data happened to be + * contiguous, and is free if they weren't contiguous. We don't expect that + * intentionally making the dictionary and data contiguous will be worth the + * cost to memcpy() the data. + */ +#define ZSTD_c_deterministicRefPrefix ZSTD_c_experimentalParam15 + +/* ZSTD_c_prefetchCDictTables + * Controlled with ZSTD_ParamSwitch_e enum. Default is ZSTD_ps_auto. + * + * In some situations, zstd uses CDict tables in-place rather than copying them + * into the working context. (See docs on ZSTD_dictAttachPref_e above for details). + * In such situations, compression speed is seriously impacted when CDict tables are + * "cold" (outside CPU cache). This parameter instructs zstd to prefetch CDict tables + * when they are used in-place. + * + * For sufficiently small inputs, the cost of the prefetch will outweigh the benefit. + * For sufficiently large inputs, zstd will by default memcpy() CDict tables + * into the working context, so there is no need to prefetch. This parameter is + * targeted at a middle range of input sizes, where a prefetch is cheap enough to be + * useful but memcpy() is too expensive. 
The exact range of input sizes where this + * makes sense is best determined by careful experimentation. + * + * Note: for this parameter, ZSTD_ps_auto is currently equivalent to ZSTD_ps_disable, + * but in the future zstd may conditionally enable this feature via an auto-detection + * heuristic for cold CDicts. + * Use ZSTD_ps_disable to opt out of prefetching under any circumstances. + */ +#define ZSTD_c_prefetchCDictTables ZSTD_c_experimentalParam16 + +/* ZSTD_c_enableSeqProducerFallback + * Allowed values are 0 (disable) and 1 (enable). The default setting is 0. + * + * Controls whether zstd will fall back to an internal sequence producer if an + * external sequence producer is registered and returns an error code. This fallback + * is block-by-block: the internal sequence producer will only be called for blocks + * where the external sequence producer returns an error code. Fallback parsing will + * follow any other cParam settings, such as compression level, the same as in a + * normal (fully-internal) compression operation. + * + * The user is strongly encouraged to read the full Block-Level Sequence Producer API + * documentation (below) before setting this parameter. */ +#define ZSTD_c_enableSeqProducerFallback ZSTD_c_experimentalParam17 + +/* ZSTD_c_maxBlockSize + * Allowed values are between 1KB and ZSTD_BLOCKSIZE_MAX (128KB). + * The default is ZSTD_BLOCKSIZE_MAX, and setting to 0 will set to the default. + * + * This parameter can be used to set an upper bound on the blocksize + * that overrides the default ZSTD_BLOCKSIZE_MAX. It cannot be used to set upper + * bounds greater than ZSTD_BLOCKSIZE_MAX or bounds lower than 1KB (will make + * compressBound() inaccurate). Only currently meant to be used for testing. + */ +#define ZSTD_c_maxBlockSize ZSTD_c_experimentalParam18 + +/* ZSTD_c_repcodeResolution + * This parameter only has an effect if ZSTD_c_blockDelimiters is + * set to ZSTD_sf_explicitBlockDelimiters (may change in the future). + * + * This parameter affects how zstd parses external sequences, + * provided via the ZSTD_compressSequences*() API + * or from an external block-level sequence producer. + * + * If set to ZSTD_ps_enable, the library will check for repeated offsets within + * external sequences, even if those repcodes are not explicitly indicated in + * the "rep" field. Note that this is the only way to exploit repcode matches + * while using compressSequences*() or an external sequence producer, since zstd + * currently ignores the "rep" field of external sequences. + * + * If set to ZSTD_ps_disable, the library will not exploit repeated offsets in + * external sequences, regardless of whether the "rep" field has been set. This + * reduces sequence compression overhead by about 25% while sacrificing some + * compression ratio. + * + * The default value is ZSTD_ps_auto, for which the library will enable/disable + * based on compression level (currently: level<10 disables, level>=10 enables). + */ +#define ZSTD_c_repcodeResolution ZSTD_c_experimentalParam19 +#define ZSTD_c_searchForExternalRepcodes ZSTD_c_experimentalParam19 /* older name */ + + /*! ZSTD_CCtx_getParameter() : * Get the requested compression parameter value, selected by enum ZSTD_cParameter, * and store it into int* value. * @return : 0, or an error code (which can be tested with ZSTD_isError()). 
*/ -ZSTDLIB_API size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value); +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value); /*! ZSTD_CCtx_params : @@ -1445,45 +2375,47 @@ ZSTDLIB_API size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param * These parameters will be applied to * all subsequent frames. * - ZSTD_compressStream2() : Do compression using the CCtx. - * - ZSTD_freeCCtxParams() : Free the memory. + * - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer. * * This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams() * for static allocation of CCtx for single-threaded compression. */ -ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void); -ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); +ZSTDLIB_STATIC_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void); +ZSTDLIB_STATIC_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); /* accept NULL pointer */ /*! ZSTD_CCtxParams_reset() : * Reset params to default values. */ -ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params); +ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params); /*! ZSTD_CCtxParams_init() : * Initializes the compression parameters of cctxParams according to * compression level. All other parameters are reset to their default values. */ -ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel); /*! ZSTD_CCtxParams_init_advanced() : * Initializes the compression and frame parameters of cctxParams according to * params. All other parameters are reset to their default values. */ -ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params); +ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params); -/*! ZSTD_CCtxParams_setParameter() : +/*! ZSTD_CCtxParams_setParameter() : Requires v1.4.0+ * Similar to ZSTD_CCtx_setParameter. * Set one compression parameter, selected by enum ZSTD_cParameter. - * Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams(). - * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Parameters must be applied to a ZSTD_CCtx using + * ZSTD_CCtx_setParametersUsingCCtxParams(). + * @result : a code representing success or failure (which can be tested with + * ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value); +ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value); /*! ZSTD_CCtxParams_getParameter() : * Similar to ZSTD_CCtx_getParameter. * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter. * @result : 0, or an error code (which can be tested with ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value); +ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value); /*! ZSTD_CCtx_setParametersUsingCCtxParams() : * Apply a set of ZSTD_CCtx_params to the compression context. 
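To make the ZSTD_CCtx_params lifecycle above concrete, a short sketch assembled from the documented calls (illustrative only; error checks omitted):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t apply_params_example(ZSTD_CCtx* cctx)
{
    ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
    size_t err;
    ZSTD_CCtxParams_init(params, 3);                            /* start from level-3 defaults */
    ZSTD_CCtxParams_setParameter(params, ZSTD_c_checksumFlag, 1);
    err = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params); /* apply the whole set */
    ZSTD_freeCCtxParams(params);                                /* accepts NULL as well */
    return err;
}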
@@ -1492,7 +2424,7 @@ ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(ZSTD_CCtx_params* params, ZSTD_c * if nbWorkers>=1, new parameters will be picked up at next job, * with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated). */ -ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams( +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParametersUsingCCtxParams( ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params); /*! ZSTD_compressStream2_simpleArgs() : @@ -1501,7 +2433,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams( * This variant might be helpful for binders from dynamic languages * which have trouble handling structures containing memory pointers. */ -ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs ( +ZSTDLIB_STATIC_API size_t ZSTD_compressStream2_simpleArgs ( ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos, @@ -1517,33 +2449,33 @@ ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs ( * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. * Note 3 : Skippable Frame Identifiers are considered valid. */ -ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size); +ZSTDLIB_STATIC_API unsigned ZSTD_isFrame(const void* buffer, size_t size); /*! ZSTD_createDDict_byReference() : * Create a digested dictionary, ready to start decompression operation without startup delay. * Dictionary content is referenced, and therefore stays in dictBuffer. * It is important that dictBuffer outlives DDict; * it must remain read-accessible throughout the lifetime of DDict */ -ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize); +ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize); /*! ZSTD_DCtx_loadDictionary_byReference() : * Same as ZSTD_DCtx_loadDictionary(), * but references `dict` content instead of copying it into `dctx`. * This saves memory if `dict` remains around. * However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */ -ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); /*! ZSTD_DCtx_loadDictionary_advanced() : * Same as ZSTD_DCtx_loadDictionary(), * but gives direct control over * how to load the dictionary (by copy ? by reference ?) * and how to interpret it (automatic ? force raw mode ? full mode only ?). */ -ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); /*! ZSTD_DCtx_refPrefix_advanced() : * Same as ZSTD_DCtx_refPrefix(), but gives finer control over * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?)
*/ -ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); /*! ZSTD_DCtx_setMaxWindowSize() : * Refuses to allocate internal buffers for frames requiring a window size larger than the provided limit. @@ -1552,20 +2484,123 @@ ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* pre * By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize); +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize); + +/*! ZSTD_DCtx_getParameter() : + * Get the requested decompression parameter value, selected by enum ZSTD_dParameter, + * and store it into int* value. + * @return : 0, or an error code (which can be tested with ZSTD_isError()). + */ +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value); /* ZSTD_d_format * experimental parameter, * allowing selection between ZSTD_format_e input compression formats */ #define ZSTD_d_format ZSTD_d_experimentalParam1 +/* ZSTD_d_stableOutBuffer + * Experimental parameter. + * Default is 0 == disabled. Set to 1 to enable. + * + * Tells the decompressor that the ZSTD_outBuffer will ALWAYS be the same + * between calls, except for the modifications that zstd makes to pos (the + * caller must not modify pos). This is checked by the decompressor, and + * decompression will fail if it ever changes. Therefore the ZSTD_outBuffer + * MUST be large enough to fit the entire decompressed frame. This will be + * checked when the frame content size is known. The data in the ZSTD_outBuffer + * in the range [dst, dst + pos) MUST not be modified during decompression + * or you will get data corruption. + * + * When this flag is enabled, zstd won't allocate an output buffer, because + * it can write directly to the ZSTD_outBuffer, but it will still allocate + * an input buffer large enough to fit any compressed block. This will also + * avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer. + * If you need to avoid the input buffer allocation, use the buffer-less + * streaming API. + * + * NOTE: So long as the ZSTD_outBuffer always points to valid memory, using + * this flag is ALWAYS memory safe, and will never access out-of-bounds + * memory. However, decompression WILL fail if you violate the preconditions. + * + * WARNING: The data in the ZSTD_outBuffer in the range [dst, dst + pos) MUST + * not be modified during decompression or you will get data corruption. This + * is because zstd needs to reference data in the ZSTD_outBuffer to regenerate + * matches. Normally zstd maintains its own buffer for this purpose, but passing + * this flag tells zstd to use the user-provided buffer. + */ +#define ZSTD_d_stableOutBuffer ZSTD_d_experimentalParam2 + +/* ZSTD_d_forceIgnoreChecksum + * Experimental parameter. + * Default is 0 == disabled. Set to 1 to enable. + * + * Tells the decompressor to skip checksum validation during decompression, regardless + * of whether checksumming was specified during compression. This offers some + * slight performance benefits, and may be useful for debugging.
+ * Param has values of type ZSTD_forceIgnoreChecksum_e + */ +#define ZSTD_d_forceIgnoreChecksum ZSTD_d_experimentalParam3 + +/* ZSTD_d_refMultipleDDicts + * Experimental parameter. + * Default is 0 == disabled. Set to 1 to enable. + * + * If enabled and dctx is allocated on the heap, then additional memory will be allocated + * to store references to multiple ZSTD_DDict. That is, multiple calls of ZSTD_DCtx_refDDict() + * using a given ZSTD_DCtx, rather than overwriting the previous DDict reference, will instead + * store all references. At decompression time, the appropriate dictID is selected + * from the set of DDicts based on the dictID in the frame. + * + * Usage is simply calling ZSTD_DCtx_refDDict() on multiple dict buffers. + * + * Param has values of type ZSTD_refMultipleDDicts_e + * + * WARNING: Enabling this parameter and calling ZSTD_DCtx_refDDict() will trigger memory + * allocation for the hash table. ZSTD_freeDCtx() also frees this memory. + * Memory is allocated as per ZSTD_DCtx::customMem. + * + * Although enabling this parameter allocates memory for the table, the user is still responsible for + * memory management of the underlying ZSTD_DDict* themselves. + */ +#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4 + +/* ZSTD_d_disableHuffmanAssembly + * Set to 1 to disable the Huffman assembly implementation. + * The default value is 0, which allows zstd to use the Huffman assembly + * implementation if available. + * + * This parameter can be used to disable Huffman assembly at runtime. + * If you want to disable it at compile time you can define the macro + * ZSTD_DISABLE_ASM. + */ +#define ZSTD_d_disableHuffmanAssembly ZSTD_d_experimentalParam5 + +/* ZSTD_d_maxBlockSize + * Allowed values are between 1KB and ZSTD_BLOCKSIZE_MAX (128KB). + * The default is ZSTD_BLOCKSIZE_MAX, and setting to 0 will set to the default. + * + * Forces the decompressor to reject blocks whose content size is + * larger than the configured maxBlockSize. When maxBlockSize is + * larger than the windowSize, the windowSize is used instead. + * This saves memory on the decoder when you know all blocks are small. + * + * This option is typically used in conjunction with ZSTD_c_maxBlockSize. + * + * WARNING: This causes the decoder to reject otherwise valid frames + * that have block sizes larger than the configured maxBlockSize. + */ +#define ZSTD_d_maxBlockSize ZSTD_d_experimentalParam6 + /*! ZSTD_DCtx_setFormat() : + * This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter(). * Instruct the decoder context about what kind of data to decode next. * This instruction is mandatory to decode data without a fully-formed header, * such as ZSTD_f_zstd1_magicless for example. * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format); +ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead") +ZSTDLIB_STATIC_API +size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format); /*! ZSTD_decompressStream_simpleArgs() : * Same as ZSTD_decompressStream(), @@ -1573,7 +2608,7 @@ ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format); * This can be helpful for binders from dynamic languages * which have trouble handling structures containing memory pointers.
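Tying the decompression parameters above to the streaming entry point they affect, here is a hedged sketch of decompressing one whole frame into a single caller-owned buffer with ZSTD_d_stableOutBuffer (names and error handling are illustrative; dstCapacity is assumed large enough for the entire decompressed frame):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t decompress_stableOut(ZSTD_DCtx* dctx,
                                   void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize)
{
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };  /* must stay identical across calls */
    ZSTD_inBuffer in = { src, srcSize, 0 };
    ZSTD_DCtx_setParameter(dctx, ZSTD_d_stableOutBuffer, 1);
    while (in.pos < in.size) {
        size_t const ret = ZSTD_decompressStream(dctx, &out, &in);
        if (ZSTD_isError(ret)) return ret;  /* fails if out is too small or was moved */
        if (ret == 0) break;                /* frame fully decoded */
    }
    return out.pos;  /* number of decompressed bytes */
}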
*/ -ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs ( +ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs ( ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos); @@ -1587,8 +2622,9 @@ ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs ( ********************************************************************/ /*===== Advanced Streaming compression functions =====*/ -/**! ZSTD_initCStream_srcSize() : - * This function is deprecated, and equivalent to: + +/*! ZSTD_initCStream_srcSize() : + * This function is DEPRECATED, and equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); @@ -1597,68 +2633,103 @@ ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs ( * pledgedSrcSize must be correct. If it is not known at init time, use * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, * "0" also disables frame content size field. It may be enabled in the future. + * This prototype will generate compilation warnings. */ -ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize); -/**! ZSTD_initCStream_usingDict() : - * This function is deprecated, and is equivalent to: +ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API +size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, + int compressionLevel, + unsigned long long pledgedSrcSize); + +/*! ZSTD_initCStream_usingDict() : + * This function is DEPRECATED, and is equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); * * Creates an internal CDict (incompatible with static CCtx), except if * dict == NULL or dictSize < 8, in which case no dict is used. - * Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if + * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy. + * This prototype will generate compilation warnings. */ -ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); -/**! ZSTD_initCStream_advanced() : - * This function is deprecated, and is approximately equivalent to: +ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API +size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, + const void* dict, size_t dictSize, + int compressionLevel); + +/*! ZSTD_initCStream_advanced() : + * This function is DEPRECATED, and is equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * ZSTD_CCtx_setZstdParams(zcs, params); // Set the zstd params and leave the rest as-is + * ZSTD_CCtx_setParams(zcs, params); * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); * - * pledgedSrcSize must be correct. If srcSize is not known at init time, use - * value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy. + * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy. + * pledgedSrcSize must be correct. + * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. + * This prototype will generate compilation warnings.
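For migration purposes, the modern equivalent of ZSTD_initCStream_usingDict(), exactly as spelled out in the deprecation note above, looks like this (a sketch; error checks elided):

#include <zstd.h>

static void init_stream_with_dict(ZSTD_CCtx* zcs,
                                  const void* dict, size_t dictSize,
                                  int compressionLevel)
{
    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
    ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
    ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);  /* dict==NULL means no dictionary */
}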
*/ -ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize, - ZSTD_parameters params, unsigned long long pledgedSrcSize); -/**! ZSTD_initCStream_usingCDict() : - * This function is deprecated, and equivalent to: +ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API +size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, + const void* dict, size_t dictSize, + ZSTD_parameters params, + unsigned long long pledgedSrcSize); + +/*! ZSTD_initCStream_usingCDict() : + * This function is DEPRECATED, and equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_refCDict(zcs, cdict); * * note : cdict will just be referenced, and must outlive the compression session + * This prototype will generate compilation warnings. */ -ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); -/**! ZSTD_initCStream_usingCDict_advanced() : - * This function is deprecated, and is approximately equivalent to: +ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API +size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); + +/*! ZSTD_initCStream_usingCDict_advanced() : + * This function is DEPRECATED, and is equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * ZSTD_CCtx_setZstdFrameParams(zcs, fParams); // Set the zstd frame params and leave the rest as-is + * ZSTD_CCtx_setFParams(zcs, fParams); * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); * ZSTD_CCtx_refCDict(zcs, cdict); * * same as ZSTD_initCStream_usingCDict(), with control over frame parameters. * pledgedSrcSize must be correct. If srcSize is not known at init time, use * value ZSTD_CONTENTSIZE_UNKNOWN. + * This prototype will generate compilation warnings. */ -ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize); +ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API +size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, + const ZSTD_CDict* cdict, + ZSTD_frameParameters fParams, + unsigned long long pledgedSrcSize); /*! ZSTD_resetCStream() : - * This function is deprecated, and is equivalent to: + * This function is DEPRECATED, and is equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); + * Note: ZSTD_resetCStream() interprets pledgedSrcSize == 0 as ZSTD_CONTENTSIZE_UNKNOWN, but + * ZSTD_CCtx_setPledgedSrcSize() does not do the same, so ZSTD_CONTENTSIZE_UNKNOWN must be + * explicitly specified. * * start a new frame, using the same parameters as the previous frame. - * This is typically useful to skip the dictionary loading stage, since it will re-use it in-place. + * This is typically useful to skip the dictionary loading stage, since it will reuse it in-place. * Note that zcs must be initialized at least once before using ZSTD_resetCStream(). * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN. * If pledgedSrcSize > 0, its value must be correct, as it will be written in the frame header, and verified at the end. * For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs, * but it will change to mean "empty" in a future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
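The pledgedSrcSize note above is a common migration pitfall, so here is the non-deprecated replacement for ZSTD_resetCStream(zcs, 0) as a small sketch:

#include <zstd.h>

static void reset_stream_keep_params(ZSTD_CCtx* zcs)
{
    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
    /* ZSTD_CCtx_setPledgedSrcSize() does NOT treat 0 as "unknown":
     * ZSTD_CONTENTSIZE_UNKNOWN must be passed explicitly. */
    ZSTD_CCtx_setPledgedSrcSize(zcs, ZSTD_CONTENTSIZE_UNKNOWN);
}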
* @return : 0, or an error code (which can be tested using ZSTD_isError()) + * This prototype will generate compilation warnings. */ -ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize); +ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API +size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize); typedef struct { @@ -1676,7 +2747,7 @@ typedef struct { * Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed. * Aggregates progression inside active worker threads. */ -ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx); +ZSTDLIB_STATIC_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx); /*! ZSTD_toFlushNow() : * Tell how many bytes are ready to be flushed immediately. @@ -1691,11 +2762,12 @@ ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx * therefore flush speed is limited by production speed of oldest job * irrespective of the speed of concurrent (and newer) jobs. */ -ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx); +ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx); /*===== Advanced Streaming decompression functions =====*/ -/** + +/*! * This function is deprecated, and is equivalent to: * * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); @@ -1703,8 +2775,10 @@ ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx); * * note: no dictionary will be used if dict == NULL or dictSize < 8 */ -ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); -/** +ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_loadDictionary, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); + +/*! * This function is deprecated, and is equivalent to: * * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); @@ -1712,23 +2786,210 @@ ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dic * * note : ddict is referenced, it must outlive decompression session */ -ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); -/** +ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_refDDict, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); + +/*! * This function is deprecated, and is equivalent to: * * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); * - * re-use decompression parameters from previous init; saves dictionary loading + * reuse decompression parameters from previous init; saves dictionary loading + */ +ZSTD_DEPRECATED("use ZSTD_DCtx_reset, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); + + +/* ********************* BLOCK-LEVEL SEQUENCE PRODUCER API ********************* + * + * *** OVERVIEW *** + * The Block-Level Sequence Producer API allows users to provide their own custom + * sequence producer which libzstd invokes to process each block. The produced list + * of sequences (literals and matches) is then post-processed by libzstd to produce + * valid compressed blocks. + * + * This block-level offload API is a more granular complement of the existing + * frame-level offload API compressSequences() (introduced in v1.5.1). 
It offers + * an easier migration story for applications already integrated with libzstd: the + * user application continues to invoke the same compression functions + * ZSTD_compress2() or ZSTD_compressStream2() as usual, and transparently benefits + * from the specific advantages of the external sequence producer. For example, + * the sequence producer could be tuned to take advantage of known characteristics + * of the input, to offer better speed / ratio, or could leverage hardware + * acceleration not available within libzstd itself. + * + * See contrib/externalSequenceProducer for an example program employing the + * Block-Level Sequence Producer API. + * + * *** USAGE *** + * The user is responsible for implementing a function of type + * ZSTD_sequenceProducer_F. For each block, zstd will pass the following + * arguments to the user-provided function: + * + * - sequenceProducerState: a pointer to a user-managed state for the sequence + * producer. + * + * - outSeqs, outSeqsCapacity: an output buffer for the sequence producer. + * outSeqsCapacity is guaranteed >= ZSTD_sequenceBound(srcSize). The memory + * backing outSeqs is managed by the CCtx. + * + * - src, srcSize: an input buffer for the sequence producer to parse. + * srcSize is guaranteed to be <= ZSTD_BLOCKSIZE_MAX. + * + * - dict, dictSize: a history buffer, which may be empty, which the sequence + * producer may reference as it parses the src buffer. Currently, zstd will + * always pass dictSize == 0 into external sequence producers, but this will + * change in the future. + * + * - compressionLevel: a signed integer representing the zstd compression level + * set by the user for the current operation. The sequence producer may choose + * to use this information to change its compression strategy and speed/ratio + * tradeoff. Note: the compression level does not reflect zstd parameters set + * through the advanced API. + * + * - windowSize: a size_t representing the maximum allowed offset for external + * sequences. Note that sequence offsets are sometimes allowed to exceed the + * windowSize if a dictionary is present, see doc/zstd_compression_format.md + * for details. + * + * The user-provided function shall return a size_t representing the number of + * sequences written to outSeqs. This return value will be treated as an error + * code if it is greater than outSeqsCapacity. The return value must be non-zero + * if srcSize is non-zero. The ZSTD_SEQUENCE_PRODUCER_ERROR macro is provided + * for convenience, but any value greater than outSeqsCapacity will be treated as + * an error code. + * + * If the user-provided function does not return an error code, the sequences + * written to outSeqs must be a valid parse of the src buffer. Data corruption may + * occur if the parse is not valid. A parse is defined to be valid if the + * following conditions hold: + * - The sum of matchLengths and literalLengths must equal srcSize. + * - All sequences in the parse, except for the final sequence, must have + * matchLength >= ZSTD_MINMATCH_MIN. The final sequence must have + * matchLength >= ZSTD_MINMATCH_MIN or matchLength == 0. + * - All offsets must respect the windowSize parameter as specified in + * doc/zstd_compression_format.md. + * - If the final sequence has matchLength == 0, it must also have offset == 0. + * + * zstd will only validate these conditions (and fail compression if they do not + * hold) if the ZSTD_c_validateSequences cParam is enabled. Note that sequence + * validation has a performance cost. 
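To ground the contract above, consider the simplest producer that satisfies the validity rules: a single all-literals sequence whose lengths sum to srcSize, using the permitted final matchLength == 0 / offset == 0 pair. This is only an illustrative sketch (the function name is invented here, and a real producer would emit the matches its own search found); registration via ZSTD_registerSequenceProducer() is covered just below:

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t allLiterals_producer(
    void* sequenceProducerState,
    ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
    const void* src, size_t srcSize,
    const void* dict, size_t dictSize,
    int compressionLevel, size_t windowSize)
{
    (void)sequenceProducerState; (void)src; (void)dict; (void)dictSize;
    (void)compressionLevel; (void)windowSize;
    if (outSeqsCapacity < 1) return ZSTD_SEQUENCE_PRODUCER_ERROR;
    outSeqs[0].offset = 0;                     /* final sequence may have offset == 0 */
    outSeqs[0].litLength = (unsigned)srcSize;  /* every input byte emitted as a literal */
    outSeqs[0].matchLength = 0;                /* allowed only on the final sequence */
    outSeqs[0].rep = 0;                        /* currently ignored by zstd */
    return 1;                                  /* non-zero, and <= outSeqsCapacity */
}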
+ * + * If the user-provided function returns an error, zstd will either fall back + * to an internal sequence producer or fail the compression operation. The user can + * choose between the two behaviors by setting the ZSTD_c_enableSeqProducerFallback + * cParam. Fallback compression will follow any other cParam settings, such as + * compression level, the same as in a normal compression operation. + * + * The user shall instruct zstd to use a particular ZSTD_sequenceProducer_F + * function by calling + * ZSTD_registerSequenceProducer(cctx, + * sequenceProducerState, + * sequenceProducer) + * This setting will persist until the next parameter reset of the CCtx. + * + * The sequenceProducerState must be initialized by the user before calling + * ZSTD_registerSequenceProducer(). The user is responsible for destroying the + * sequenceProducerState. + * + * *** LIMITATIONS *** + * This API is compatible with all zstd compression APIs which respect advanced parameters. + * However, there are three limitations: + * + * First, the ZSTD_c_enableLongDistanceMatching cParam is not currently supported. + * COMPRESSION WILL FAIL if it is enabled and the user tries to compress with a block-level + * external sequence producer. + * - Note that ZSTD_c_enableLongDistanceMatching is auto-enabled by default in some + * cases (see its documentation for details). Users must explicitly set + * ZSTD_c_enableLongDistanceMatching to ZSTD_ps_disable in such cases if an external + * sequence producer is registered. + * - As of this writing, ZSTD_c_enableLongDistanceMatching is disabled by default + * whenever the window size (controlled by ZSTD_c_windowLog) is smaller than 128MB, but that's subject to change. Users should + * check the docs on ZSTD_c_enableLongDistanceMatching whenever the Block-Level Sequence + * Producer API is used in conjunction with advanced settings (like ZSTD_c_windowLog). + * + * Second, history buffers are not currently supported. Concretely, zstd will always pass + * dictSize == 0 to the external sequence producer (for now). This has two implications: + * - Dictionaries are not currently supported. Compression will *not* fail if the user + * references a dictionary, but the dictionary won't have any effect. + * - Stream history is not currently supported. All advanced compression APIs, including + * streaming APIs, work with external sequence producers, but each block is treated as + * an independent chunk without history from previous blocks. + * + * Third, multi-threading within a single compression is not currently supported. In other words, + * COMPRESSION WILL FAIL if ZSTD_c_nbWorkers > 0 and an external sequence producer is registered. + * Multi-threading across compressions is fine: simply create one CCtx per thread. + * + * Long-term, we plan to overcome all three limitations. There is no technical blocker to + * overcoming them. It is purely a question of engineering effort. */ -ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); + +#define ZSTD_SEQUENCE_PRODUCER_ERROR ((size_t)(-1)) + +typedef size_t (*ZSTD_sequenceProducer_F) ( + void* sequenceProducerState, + ZSTD_Sequence* outSeqs, size_t outSeqsCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + int compressionLevel, + size_t windowSize +); + +/*! ZSTD_registerSequenceProducer() : + * Instruct zstd to use a block-level external sequence producer function. + * + * The sequenceProducerState must be initialized by the caller, and the caller is + * responsible for managing its lifetime. This parameter is sticky across + * compressions.
It will remain set until the user explicitly resets compression + * parameters. + * + * Sequence producer registration is considered to be an "advanced parameter", + * part of the "advanced API". This means it will only have an effect on compression + * APIs which respect advanced parameters, such as compress2() and compressStream2(). + * Older compression APIs such as compressCCtx(), which predate the introduction of + * "advanced parameters", will ignore any external sequence producer setting. + * + * The sequence producer can be "cleared" by registering a NULL function pointer. This + * removes all limitations described above in the "LIMITATIONS" section of the API docs. + * + * The user is strongly encouraged to read the full API documentation (above) before + * calling this function. */ +ZSTDLIB_STATIC_API void +ZSTD_registerSequenceProducer( + ZSTD_CCtx* cctx, + void* sequenceProducerState, + ZSTD_sequenceProducer_F sequenceProducer +); + +/*! ZSTD_CCtxParams_registerSequenceProducer() : + * Same as ZSTD_registerSequenceProducer(), but operates on ZSTD_CCtx_params. + * This is used for accurate size estimation with ZSTD_estimateCCtxSize_usingCCtxParams(), + * which is needed when creating a ZSTD_CCtx with ZSTD_initStaticCCtx(). + * + * If you are using the external sequence producer API in a scenario where ZSTD_initStaticCCtx() + * is required, then this function is for you. Otherwise, you probably don't need it. + * + * See tests/zstreamtest.c for example usage. */ +ZSTDLIB_STATIC_API void +ZSTD_CCtxParams_registerSequenceProducer( + ZSTD_CCtx_params* params, + void* sequenceProducerState, + ZSTD_sequenceProducer_F sequenceProducer +); /********************************************************************* -* Buffer-less and synchronous inner streaming functions +* Buffer-less and synchronous inner streaming functions (DEPRECATED) * -* This is an advanced API, giving full control over buffer management, for users which need direct control over memory. -* But it's also a complex one, with several restrictions, documented below. -* Prefer normal streaming API for an easier experience. +* This API is deprecated, and will be removed in a future version. +* It allows streaming (de)compression with user-allocated buffers. +* However, it is hard to use, and not as well tested as the rest of +* our API. +* +* Please use the normal streaming API instead: ZSTD_compressStream2, +* and ZSTD_decompressStream. +* If you need functionality that it doesn't provide, +* please open an issue on our GitHub. ********************************************************************* */ /** @@ -1736,12 +2997,10 @@ ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); A ZSTD_CCtx object is required to track streaming operations. Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource. - ZSTD_CCtx object can be re-used multiple times within successive compression operations. + ZSTD_CCtx object can be reused multiple times within successive compression operations. Start by initializing a context. - Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression, - or ZSTD_compressBegin_advanced(), for finer parameter control. - It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx() + Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression. Then, consume your input using ZSTD_compressContinue().
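As a hedged sketch of this flow (using ZSTD_compressBegin(), ZSTD_compressContinue(), and ZSTD_compressEnd(), all declared below; the chunking granularity and error handling are illustrative, and dst is assumed sized per ZSTD_compressBound()):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t bufferless_compress(ZSTD_CCtx* cctx,
                                  void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize,
                                  int compressionLevel)
{
    const char* ip = (const char*)src;
    char* op = (char*)dst;
    size_t remaining = srcSize;
    size_t r = ZSTD_compressBegin(cctx, compressionLevel);
    if (ZSTD_isError(r)) return r;
    while (remaining > ZSTD_BLOCKSIZE_MAX) {   /* feed input chunk by chunk */
        r = ZSTD_compressContinue(cctx, op, dstCapacity - (size_t)(op - (char*)dst),
                                  ip, ZSTD_BLOCKSIZE_MAX);
        if (ZSTD_isError(r)) return r;
        op += r; ip += ZSTD_BLOCKSIZE_MAX; remaining -= ZSTD_BLOCKSIZE_MAX;
    }
    r = ZSTD_compressEnd(cctx, op, dstCapacity - (size_t)(op - (char*)dst),
                         ip, remaining);       /* writes the last block and the frame epilogue */
    if (ZSTD_isError(r)) return r;
    return (size_t)(op - (char*)dst) + r;      /* total compressed size */
}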
There are some important considerations to keep in mind when using this advanced function : @@ -1759,37 +3018,49 @@ ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame. Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders. - `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again. + `ZSTD_CCtx` object can be reused (ZSTD_compressBegin()) to compress again. */ /*===== Buffer-less streaming compression functions =====*/ -ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel); -ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); -ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */ -ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */ -ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */ -ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */ - -ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); -ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); - - -/*- +ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.") +ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel); +ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.") +ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); +ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.") +ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */ + +ZSTD_DEPRECATED("This function will likely be removed in a future release. It is misleading and has very limited utility.") +ZSTDLIB_STATIC_API +size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */ + +ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.") +ZSTDLIB_STATIC_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. 
See docs.") +ZSTDLIB_STATIC_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); + +/* ZSTD_compressBegin_advanced() and ZSTD_compressBegin_usingCDict_advanced() are now DEPRECATED and will generate a compiler warning */ +ZSTD_DEPRECATED("use advanced API to access custom parameters") +ZSTDLIB_STATIC_API +size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */ +ZSTD_DEPRECATED("use advanced API to access custom parameters") +ZSTDLIB_STATIC_API +size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */ +/** Buffer-less streaming decompression (synchronous mode) A ZSTD_DCtx object is required to track streaming operations. Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it. - A ZSTD_DCtx object can be re-used multiple times. + A ZSTD_DCtx object can be reused multiple times. First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader(). Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough. Data fragment must be large enough to ensure successful decoding. `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough. - @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled. - >0 : `srcSize` is too small, please provide at least @result bytes on next attempt. + result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled. + >0 : `srcSize` is too small, please provide at least result bytes on next attempt. or an error code, which can be tested using ZSTD_isError(). - It fills a ZSTD_frameHeader structure with important information to correctly decode the frame, + It fills a ZSTD_FrameHeader structure with important information to correctly decode the frame, such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`). Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information. As a consequence, check that values remain within valid application range. @@ -1805,7 +3076,7 @@ ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapaci The most memory-efficient way is to use a round buffer of sufficient size. Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(), - which can @return an error code if required value is too large for current system (in 32-bits mode). + which can return an error code if the required value is too large for the current system (in 32-bit mode). In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to the previous one, up to the moment there is not enough room left in the buffer to guarantee decoding another full block, whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`. @@ -1825,7 +3096,7 @@ ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapaci ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
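Putting that loop into code, a hedged sketch for the simple case where the whole frame is already in memory and dst is large enough per the sizing guidance above (the deprecated calls are shown only to illustrate the documented flow; error handling is abbreviated):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t bufferless_decompress(ZSTD_DCtx* dctx,
                                    void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
{
    const char* ip = (const char*)src;
    char* op = (char*)dst;
    size_t srcLeft = srcSize;
    size_t toRead;
    size_t r = ZSTD_decompressBegin(dctx);
    if (ZSTD_isError(r)) return r;
    while ((toRead = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
        size_t decoded;
        if (toRead > srcLeft) return (size_t)-1;  /* truncated input; tests true with ZSTD_isError() */
        decoded = ZSTD_decompressContinue(dctx, op,
                                          dstCapacity - (size_t)(op - (char*)dst),
                                          ip, toRead);  /* exactly toRead bytes, no more, no less */
        if (ZSTD_isError(decoded)) return decoded;
        ip += toRead; srcLeft -= toRead;
        op += decoded;  /* decoded may be 0 for metadata items (headers, checksum) */
    }
    return (size_t)(op - (char*)dst);  /* total regenerated size */
}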
ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail. - @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity). + result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity). It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item. It can also be an error code, which can be tested with ZSTD_isError(). @@ -1848,52 +3119,57 @@ ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapaci */ /*===== Buffer-less streaming decompression functions =====*/ -typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e; -typedef struct { - unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */ - unsigned long long windowSize; /* can be very large, up to <= frameContentSize */ - unsigned blockSizeMax; - ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ - unsigned headerSize; - unsigned dictID; - unsigned checksumFlag; -} ZSTD_frameHeader; -/*! ZSTD_getFrameHeader() : - * decode Frame Header, or requires larger `srcSize`. - * @return : 0, `zfhPtr` is correctly filled, - * >0, `srcSize` is too small, value is wanted `srcSize` amount, - * or an error code, which can be tested using ZSTD_isError() */ -ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */ -/*! ZSTD_getFrameHeader_advanced() : - * same as ZSTD_getFrameHeader(), - * with added capability to select a format (like ZSTD_f_zstd1_magicless) */ -ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format); -ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */ +ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */ -ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx); -ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); -ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); +ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx); +ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); +ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); -ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx); -ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx); +ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /* misc */ -ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx); +ZSTD_DEPRECATED("This function will likely be removed in the next minor release. 
It is misleading and has very limited utility.") +ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx); typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e; -ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); +ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); +/*! ZSTD_isDeterministicBuild() : + * Returns 1 if the library is built using standard compilation flags, + * and participates in determinism guarantees with other builds of the + * same version. + * If this function returns 0, it means the library was compiled with + * non-standard compilation flags that change the output of the + * compressor. + * This is mainly used for Zstd's determinism test suite, which is only + * run when this function returns 1. + */ +ZSTDLIB_API int ZSTD_isDeterministicBuild(void); + -/* ============================ */ -/** Block level API */ -/* ============================ */ +/* ========================================= */ +/** Block level API (DEPRECATED) */ +/* ========================================= */ /*! + + This API is deprecated in favor of the regular compression API. + You can get the frame header down to 2 bytes by setting: + - ZSTD_c_format = ZSTD_f_zstd1_magicless + - ZSTD_c_contentSizeFlag = 0 + - ZSTD_c_checksumFlag = 0 + - ZSTD_c_dictIDFlag = 0 + + This API is not as well tested as our normal API, so we recommend not using it. + We will be removing it in a future version. If the normal API doesn't provide + the functionality you need, please open a GitHub issue. + Block functions produce and decode raw zstd blocks, without frame metadata. - Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes). - User will have to take in charge required information to regenerate data, such as compressed and content sizes. + Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes). + But users will have to keep track of the metadata needed to regenerate the data, such as compressed and content sizes. A few rules to respect : - Compressing and decompressing require a context structure @@ -1901,29 +3177,33 @@ ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); - It is necessary to init context before starting + compression : any ZSTD_compressBegin*() variant, including with dictionary + decompression : any ZSTD_decompressBegin*() variant, including with dictionary - + copyCCtx() and copyDCtx() can be used too - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB + If input is larger than a block size, it's necessary to split input data into multiple blocks + + For inputs larger than a single block, consider using regular ZSTD_compress() instead. + Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
+ - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) ! + ===> In which case, nothing is produced into `dst` ! + + User __must__ test for such outcome and deal directly with uncompressed data + + A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0. + Doing so would mess up the statistics history, leading to potential data corruption. + + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !! + In case of multiple successive blocks, should some of them be uncompressed, the decoder must be informed of their existence in order to follow proper history. + Use ZSTD_insertBlock() for such a case. */ /*===== Raw zstd block functions =====*/ -ZSTDLIB_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx); -ZSTDLIB_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); -ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); -ZSTDLIB_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */ - - -#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */ +ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.") +ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx); +ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.") +ZSTDLIB_STATIC_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.") +ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.") +ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */ #if defined (__cplusplus) } #endif + +#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */ diff --git a/lib/common/zstd_errors.h b/lib/zstd_errors.h similarity index 70% rename from lib/common/zstd_errors.h rename to lib/zstd_errors.h index 92a3433896c..8ebc95cbb2a 100644 --- a/lib/common/zstd_errors.h +++ b/lib/zstd_errors.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved.
* * This source code is licensed under both the BSD-style license (found in the @@ -15,24 +15,32 @@ extern "C" { #endif -/*===== dependency =====*/ -#include <stddef.h> /* size_t */ - - /* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */ -#ifndef ZSTDERRORLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default"))) +#ifndef ZSTDERRORLIB_VISIBLE + /* Backwards compatibility with old macro name */ +# ifdef ZSTDERRORLIB_VISIBILITY +# define ZSTDERRORLIB_VISIBLE ZSTDERRORLIB_VISIBILITY +# elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZSTDERRORLIB_VISIBLE __attribute__ ((visibility ("default"))) # else -# define ZSTDERRORLIB_VISIBILITY +# define ZSTDERRORLIB_VISIBLE # endif #endif + +#ifndef ZSTDERRORLIB_HIDDEN +# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZSTDERRORLIB_HIDDEN __attribute__ ((visibility ("hidden"))) +# else +# define ZSTDERRORLIB_HIDDEN +# endif +#endif + #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) -# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY +# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBLE #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) -# define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +# define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBLE /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else -# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY +# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBLE #endif /*-********************************************* @@ -58,14 +66,18 @@ typedef enum { ZSTD_error_frameParameter_windowTooLarge = 16, ZSTD_error_corruption_detected = 20, ZSTD_error_checksum_wrong = 22, + ZSTD_error_literals_headerWrong = 24, ZSTD_error_dictionary_corrupted = 30, ZSTD_error_dictionary_wrong = 32, ZSTD_error_dictionaryCreation_failed = 34, ZSTD_error_parameter_unsupported = 40, + ZSTD_error_parameter_combination_unsupported = 41, ZSTD_error_parameter_outOfBound = 42, ZSTD_error_tableLog_tooLarge = 44, ZSTD_error_maxSymbolValue_tooLarge = 46, ZSTD_error_maxSymbolValue_tooSmall = 48, + ZSTD_error_cannotProduce_uncompressedBlock = 49, + ZSTD_error_stabilityCondition_notRespected = 50, ZSTD_error_stage_wrong = 60, ZSTD_error_init_missing = 62, ZSTD_error_memory_allocation = 64, @@ -73,16 +85,18 @@ typedef enum { ZSTD_error_dstSize_tooSmall = 70, ZSTD_error_srcSize_wrong = 72, ZSTD_error_dstBuffer_null = 74, + ZSTD_error_noForwardProgress_destFull = 80, + ZSTD_error_noForwardProgress_inputEmpty = 82, /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */ ZSTD_error_frameIndex_tooLarge = 100, ZSTD_error_seekableIO = 102, + ZSTD_error_dstBuffer_wrong = 104, + ZSTD_error_srcBuffer_wrong = 105, + ZSTD_error_sequenceProducer_failed = 106, + ZSTD_error_externalSequences_invalid = 107, ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */ } ZSTD_ErrorCode; -/*!
ZSTD_getErrorCode() : - convert a `size_t` function result into a `ZSTD_ErrorCode` enum type, - which can be used to compare with enum list published above */ -ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */ diff --git a/programs/.gitignore b/programs/.gitignore index 701830c777c..42a7e30dc68 100644 --- a/programs/.gitignore +++ b/programs/.gitignore @@ -7,6 +7,10 @@ zstd-decompress zstd-frugal zstd-small zstd-nolegacy +zstd-dictBuilder +zstd-dll +zstd_arm64 +zstd_x64 # Object files *.o @@ -33,4 +37,5 @@ afl # Misc files *.bat +!windres/generate_res.bat dirTest* diff --git a/programs/Makefile b/programs/Makefile index 64dcae0028e..94f2179d028 100644 --- a/programs/Makefile +++ b/programs/Makefile @@ -1,251 +1,295 @@ # ################################################################ -# Copyright (c) 2015-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the # LICENSE file in the root directory of this source tree) and the GPLv2 (found # in the COPYING file in the root directory of this source tree). +# You may select, at your option, one of the above-listed licenses. # ########################################################################## # zstd : Command Line Utility, supporting gzip-like arguments # zstd32 : Same as zstd, but forced to compile in 32-bits mode -# zstd_nolegacy : zstd without support of decompression of legacy versions +# zstd-nolegacy : zstd without support of decompression of legacy versions # zstd-small : minimal zstd without dictionary builder and benchmark # zstd-compress : compressor-only version of zstd # zstd-decompress : decompressor-only version of zstd # ########################################################################## -ZSTDDIR = ../lib - -# Version numbers -LIBVER_SRC := $(ZSTDDIR)/zstd.h -LIBVER_MAJOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MAJOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` -LIBVER_MINOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MINOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` -LIBVER_PATCH_SCRIPT:=`sed -n '/define ZSTD_VERSION_RELEASE/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` -LIBVER_SCRIPT:= $(LIBVER_MAJOR_SCRIPT).$(LIBVER_MINOR_SCRIPT).$(LIBVER_PATCH_SCRIPT) -LIBVER_MAJOR := $(shell echo $(LIBVER_MAJOR_SCRIPT)) -LIBVER_MINOR := $(shell echo $(LIBVER_MINOR_SCRIPT)) -LIBVER_PATCH := $(shell echo $(LIBVER_PATCH_SCRIPT)) -LIBVER := $(shell echo $(LIBVER_SCRIPT)) - -ZSTD_VERSION = $(LIBVER) - -HAVE_COLORNEVER = $(shell echo a | grep --color=never a > /dev/null 2> /dev/null && echo 1 || echo 0) -GREP_OPTIONS ?= -ifeq ($HAVE_COLORNEVER, 1) -GREP_OPTIONS += --color=never -endif -GREP = grep $(GREP_OPTIONS) +# default target (when running `make` with no argument) +zstd-release: + +LIBZSTD_MK_DIR = ../lib +include $(LIBZSTD_MK_DIR)/libzstd.mk ifeq ($(shell $(CC) -v 2>&1 | $(GREP) -c "gcc version "), 1) -ALIGN_LOOP = -falign-loops=32 + ALIGN_LOOP = -falign-loops=32 else -ALIGN_LOOP = + ALIGN_LOOP = endif -CPPFLAGS+= -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \ - -I$(ZSTDDIR)/dictBuilder \ - -DXXH_NAMESPACE=ZSTD_ -ifeq ($(OS),Windows_NT) # MinGW assumed -CPPFLAGS += -D__USE_MINGW_ANSI_STDIO # compatibility with %zu formatting -endif -CFLAGS ?= -O3 -DEBUGFLAGS+=-Wall -Wextra -Wcast-qual 
-Wcast-align -Wshadow \ - -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \ - -Wstrict-prototypes -Wundef -Wpointer-arith \ - -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ - -Wredundant-decls -Wmissing-prototypes -Wc++-compat -CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS) -FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) - - -ZSTDCOMMON_FILES := $(ZSTDDIR)/common/*.c -ZSTDCOMP_FILES := $(ZSTDDIR)/compress/*.c -ZSTDDECOMP_FILES := $(ZSTDDIR)/decompress/*.c -ZSTD_FILES := $(ZSTDDECOMP_FILES) $(ZSTDCOMMON_FILES) $(ZSTDCOMP_FILES) -ZDICT_FILES := $(ZSTDDIR)/dictBuilder/*.c -ZSTDDECOMP_O = $(ZSTDDIR)/decompress/zstd_decompress.o - -ZSTD_LEGACY_SUPPORT ?= 5 -ZSTDLEGACY_FILES := -ifneq ($(ZSTD_LEGACY_SUPPORT), 0) -ifeq ($(shell test $(ZSTD_LEGACY_SUPPORT) -lt 8; echo $$?), 0) - ZSTDLEGACY_FILES += $(shell ls $(ZSTDDIR)/legacy/*.c | $(GREP) 'v0[$(ZSTD_LEGACY_SUPPORT)-7]') -endif - CPPFLAGS += -I$(ZSTDDIR)/legacy -else -endif +ZSTDLIB_COMMON_SRC := $(sort $(ZSTD_COMMON_FILES)) +ZSTDLIB_COMPRESS_SRC := $(sort $(ZSTD_COMPRESS_FILES)) +ZSTDLIB_DECOMPRESS_SRC := $(sort $(ZSTD_DECOMPRESS_FILES)) +ZSTDLIB_CORE_SRC := $(sort $(ZSTD_DECOMPRESS_FILES) $(ZSTD_COMMON_FILES) $(ZSTD_COMPRESS_FILES)) +ZDICT_SRC := $(sort $(ZSTD_DICTBUILDER_FILES)) +ZSTDLEGACY_SRC := $(sort $(ZSTD_LEGACY_FILES)) # Sort files in alphabetical order for reproducible builds -ZSTDLIB_FILES := $(sort $(wildcard $(ZSTD_FILES)) $(wildcard $(ZSTDLEGACY_FILES)) $(wildcard $(ZDICT_FILES))) +ZSTDLIB_FULL_SRC = $(sort $(ZSTDLIB_CORE_SRC) $(ZSTDLEGACY_SRC) $(ZDICT_SRC)) +ZSTDLIB_LOCAL_SRC = $(notdir $(ZSTDLIB_FULL_SRC)) +ZSTDLIB_LOCAL_OBJ0 := $(ZSTDLIB_LOCAL_SRC:.c=.o) +ZSTDLIB_LOCAL_OBJ := $(ZSTDLIB_LOCAL_OBJ0:.S=.o) + +ZSTD_CLI_SRC := $(sort $(wildcard *.c)) +ZSTD_CLI_OBJ := $(ZSTD_CLI_SRC:.c=.o) + +ZSTD_ALL_SRC = $(ZSTDLIB_LOCAL_SRC) $(ZSTD_CLI_SRC) +ZSTD_ALL_OBJ0 := $(ZSTD_ALL_SRC:.c=.o) +ZSTD_ALL_OBJ := $(ZSTD_ALL_OBJ0:.S=.o) # Define *.exe as extension for Windows systems ifneq (,$(filter Windows%,$(OS))) -EXT =.exe -RES64_FILE = windres/zstd64.res -RES32_FILE = windres/zstd32.res + EXT =.exe + RES64_FILE = windres/zstd64.res + RES32_FILE = windres/zstd32.res ifneq (,$(filter x86_64%,$(shell $(CC) -dumpmachine))) RES_FILE = $(RES64_FILE) else RES_FILE = $(RES32_FILE) endif else -EXT = + EXT = endif -VOID = /dev/null - # thread detection NO_THREAD_MSG := ==> no threads, building without multithreading support -HAVE_PTHREAD := $(shell printf '\#include <pthread.h>\nint main(void) { return 0; }' > have_pthread.c && $(CC) $(FLAGS) -o have_pthread$(EXT) have_pthread.c -pthread 2> $(VOID) && rm have_pthread$(EXT) && echo 1 || echo 0; rm have_pthread.c) +HAVE_PTHREAD := $(shell printf '$(NUM_SYMBOL)include <pthread.h>\nint main(void) { return 0; }' > have_pthread.c && $(CC) $(FLAGS) -o have_pthread$(EXT) have_pthread.c -pthread 2> $(VOID) && rm have_pthread$(EXT) && echo 1 || echo 0; rm have_pthread.c) HAVE_THREAD := $(shell [ "$(HAVE_PTHREAD)" -eq "1" -o -n "$(filter Windows%,$(OS))" ] && echo 1 || echo 0) ifeq ($(HAVE_THREAD), 1) -THREAD_MSG := ==> building with threading support -THREAD_CPP := -DZSTD_MULTITHREAD -THREAD_LD := -pthread + THREAD_MSG := ==> building with threading support + THREAD_CPP := -DZSTD_MULTITHREAD + THREAD_LD := -pthread else -THREAD_MSG := $(NO_THREAD_MSG) + THREAD_MSG := $(NO_THREAD_MSG) endif # zlib detection NO_ZLIB_MSG := ==> no zlib, building zstd without .gz support -HAVE_ZLIB := $(shell printf '\#include <zlib.h>\nint main(void) { return 0; }' > have_zlib.c && $(CC) $(FLAGS) -o have_zlib$(EXT) have_zlib.c -lz 2> $(VOID) && rm
have_zlib$(EXT) && echo 1 || echo 0; rm have_zlib.c) +HAVE_ZLIB ?= $(shell printf '$(NUM_SYMBOL)include <zlib.h>\nint main(void) { return 0; }' > have_zlib.c && $(CC) $(FLAGS) -o have_zlib$(EXT) have_zlib.c -lz 2> $(VOID) && rm have_zlib$(EXT) && echo 1 || echo 0; rm have_zlib.c) ifeq ($(HAVE_ZLIB), 1) -ZLIB_MSG := ==> building zstd with .gz compression support -ZLIBCPP = -DZSTD_GZCOMPRESS -DZSTD_GZDECOMPRESS -ZLIBLD = -lz + ZLIB_MSG := ==> building zstd with .gz compression support + ZLIBCPP = -DZSTD_GZCOMPRESS -DZSTD_GZDECOMPRESS + ZLIBLD = -lz else -ZLIB_MSG := $(NO_ZLIB_MSG) + ZLIB_MSG := $(NO_ZLIB_MSG) endif # lzma detection NO_LZMA_MSG := ==> no liblzma, building zstd without .xz/.lzma support -HAVE_LZMA := $(shell printf '\#include <lzma.h>\nint main(void) { return 0; }' > have_lzma.c && $(CC) $(FLAGS) -o have_lzma$(EXT) have_lzma.c -llzma 2> $(VOID) && rm have_lzma$(EXT) && echo 1 || echo 0; rm have_lzma.c) +HAVE_LZMA ?= $(shell printf '$(NUM_SYMBOL)include <lzma.h>\nint main(void) { return 0; }' > have_lzma.c && $(CC) $(FLAGS) -o have_lzma$(EXT) have_lzma.c -llzma 2> $(VOID) && rm have_lzma$(EXT) && echo 1 || echo 0; rm have_lzma.c) ifeq ($(HAVE_LZMA), 1) -LZMA_MSG := ==> building zstd with .xz/.lzma compression support -LZMACPP = -DZSTD_LZMACOMPRESS -DZSTD_LZMADECOMPRESS -LZMALD = -llzma + LZMA_MSG := ==> building zstd with .xz/.lzma compression support + LZMACPP = -DZSTD_LZMACOMPRESS -DZSTD_LZMADECOMPRESS + LZMALD = -llzma else -LZMA_MSG := $(NO_LZMA_MSG) + LZMA_MSG := $(NO_LZMA_MSG) endif # lz4 detection NO_LZ4_MSG := ==> no liblz4, building zstd without .lz4 support -HAVE_LZ4 := $(shell printf '\#include <lz4frame.h>\n\#include <lz4.h>\nint main(void) { return 0; }' > have_lz4.c && $(CC) $(FLAGS) -o have_lz4$(EXT) have_lz4.c -llz4 2> $(VOID) && rm have_lz4$(EXT) && echo 1 || echo 0; rm have_lz4.c) +HAVE_LZ4 ?= $(shell printf '$(NUM_SYMBOL)include <lz4frame.h>\n$(NUM_SYMBOL)include <lz4.h>\nint main(void) { return 0; }' > have_lz4.c && $(CC) $(FLAGS) -o have_lz4$(EXT) have_lz4.c -llz4 2> $(VOID) && rm have_lz4$(EXT) && echo 1 || echo 0; rm have_lz4.c) ifeq ($(HAVE_LZ4), 1) -LZ4_MSG := ==> building zstd with .lz4 compression support -LZ4CPP = -DZSTD_LZ4COMPRESS -DZSTD_LZ4DECOMPRESS -LZ4LD = -llz4 + LZ4_MSG := ==> building zstd with .lz4 compression support + LZ4CPP = -DZSTD_LZ4COMPRESS -DZSTD_LZ4DECOMPRESS + LZ4LD = -llz4 else -LZ4_MSG := $(NO_LZ4_MSG) + LZ4_MSG := $(NO_LZ4_MSG) endif # explicit backtrace enable/disable for Linux & Darwin ifeq ($(BACKTRACE), 0) -DEBUGFLAGS += -DBACKTRACE_ENABLE=0 + DEBUGFLAGS += -DBACKTRACE_ENABLE=0 endif ifeq (,$(filter Windows%, $(OS))) ifeq ($(BACKTRACE), 1) -DEBUGFLAGS += -DBACKTRACE_ENABLE=1 -DEBUGFLAGS_LD += -rdynamic + DEBUGFLAGS += -DBACKTRACE_ENABLE=1 + DEBUGFLAGS_LD += -rdynamic endif endif +SET_CACHE_DIRECTORY = \ + +$(MAKE) --no-print-directory $@ \ + BUILD_DIR=obj/$(HASH_DIR) \ + CPPFLAGS="$(CPPFLAGS)" \ + CFLAGS="$(CFLAGS)" \ + LDFLAGS="$(LDFLAGS)" \ + LDLIBS="$(LDLIBS)" \ + ZSTD_ALL_SRC="$(ZSTD_ALL_SRC)" -.PHONY: default -default: zstd-release .PHONY: all -all: zstd +all: zstd zstd-compress zstd-decompress zstd-small .PHONY: allVariants -allVariants: zstd zstd-compress zstd-decompress zstd-small zstd-nolegacy - -$(ZSTDDECOMP_O): CFLAGS += $(ALIGN_LOOP) +allVariants: all zstd-frugal zstd-nolegacy zstd-dictBuilder +.PHONY: zstd # must always be run zstd : CPPFLAGS += $(THREAD_CPP) $(ZLIBCPP) $(LZMACPP) $(LZ4CPP) -zstd : LDFLAGS += $(THREAD_LD) $(ZLIBLD) $(LZMALD) $(LZ4LD) $(DEBUGFLAGS_LD) +zstd : LDFLAGS += $(THREAD_LD) $(DEBUGFLAGS_LD) +zstd : LDLIBS += $(ZLIBLD) $(LZMALD) $(LZ4LD) zstd :
CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT) -zstd : $(ZSTDLIB_FILES) zstdcli.o util.o timefn.o fileio.o benchfn.o benchzstd.o datagen.o dibio.o +ifneq (,$(filter Windows%,$(OS))) +zstd : $(RES_FILE) +endif + +ifndef BUILD_DIR +# generate BUILD_DIR from flags + +zstd: + $(SET_CACHE_DIRECTORY) + +else +# BUILD_DIR is defined + +ZSTD_OBJ := $(addprefix $(BUILD_DIR)/, $(ZSTD_ALL_OBJ)) +$(BUILD_DIR)/zstd : $(ZSTD_OBJ) @echo "$(THREAD_MSG)" @echo "$(ZLIB_MSG)" @echo "$(LZMA_MSG)" @echo "$(LZ4_MSG)" -ifneq (,$(filter Windows%,$(OS))) - windres/generate_res.bat + @echo LINK $@ + $(CC) $(FLAGS) $^ $(LDLIBS) -o $@$(EXT) + +ifeq ($(HAVE_HASH),1) +SRCBIN_HASH = $(shell cat $(BUILD_DIR)/zstd$(EXT) 2> $(VOID) | $(HASH) | cut -f 1 -d " ") +DSTBIN_HASH = $(shell cat zstd$(EXT) 2> $(VOID) | $(HASH) | cut -f 1 -d " ") +BIN_ISDIFFERENT = $(if $(filter $(SRCBIN_HASH),$(DSTBIN_HASH)),0,1) +else +BIN_ISDIFFERENT = 1 endif - $(CC) $(FLAGS) $^ $(RES_FILE) -o $@$(EXT) $(LDFLAGS) +zstd : $(BUILD_DIR)/zstd + if [ $(BIN_ISDIFFERENT) -eq 1 ]; then \ + $(CP) $<$(EXT) $@$(EXT); \ + echo zstd build completed; \ + else \ + echo zstd already built; \ + fi + +endif # BUILD_DIR + + +CLEAN += zstd .PHONY: zstd-release zstd-release: DEBUGFLAGS := -DBACKTRACE_ENABLE=0 zstd-release: DEBUGFLAGS_LD := zstd-release: zstd +CLEAN += zstd32 zstd32 : CPPFLAGS += $(THREAD_CPP) zstd32 : LDFLAGS += $(THREAD_LD) zstd32 : CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT) -zstd32 : $(ZSTDLIB_FILES) zstdcli.c util.c timefn.c fileio.c benchfn.c benchzstd.c datagen.c dibio.c ifneq (,$(filter Windows%,$(OS))) - windres/generate_res.bat +zstd32 : $(RES32_FILE) endif - $(CC) -m32 $(FLAGS) $^ $(RES32_FILE) -o $@$(EXT) - -zstd-nolegacy : $(ZSTD_FILES) $(ZDICT_FILES) zstdcli.o util.o fileio.c benchfn.o benchzstd.o timefn.o datagen.o dibio.o +zstd32 : $(ZSTDLIB_FULL_SRC) $(ZSTD_CLI_SRC) + $(CC) -m32 $(FLAGS) $^ -o $@$(EXT) + +## zstd-nolegacy: same scope as zstd, with removed support of legacy formats +CLEAN += zstd-nolegacy +zstd-nolegacy : LDFLAGS += $(THREAD_LD) $(ZLIBLD) $(LZMALD) $(LZ4LD) $(DEBUGFLAGS_LD) +zstd-nolegacy : CPPFLAGS += -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=0 +zstd-nolegacy : $(ZSTDLIB_CORE_SRC) $(ZDICT_SRC) $(ZSTD_CLI_OBJ) $(CC) $(FLAGS) $^ -o $@$(EXT) $(LDFLAGS) +.PHONY: zstd-nomt zstd-nomt : THREAD_CPP := zstd-nomt : THREAD_LD := zstd-nomt : THREAD_MSG := - multi-threading disabled zstd-nomt : zstd +.PHONY: zstd-nogz zstd-nogz : ZLIBCPP := zstd-nogz : ZLIBLD := zstd-nogz : ZLIB_MSG := - gzip support is disabled zstd-nogz : zstd +.PHONY: zstd-noxz zstd-noxz : LZMACPP := zstd-noxz : LZMALD := zstd-noxz : LZMA_MSG := - xz/lzma support is disabled zstd-noxz : zstd +## zstd-dll: zstd executable linked to dynamic library libzstd (must have same version) +.PHONY: zstd-dll +zstd-dll : LDFLAGS+= -L$(LIB_BINDIR) +zstd-dll : LDLIBS += -lzstd +zstd-dll : ZSTDLIB_LOCAL_SRC = xxhash.c pool.c threading.c +zstd-dll : zstd + +## zstd-pgo: zstd executable optimized with PGO. +.PHONY: zstd-pgo +zstd-pgo : LLVM_PROFDATA?=llvm-profdata +zstd-pgo : PROF_GENERATE_FLAGS=-fprofile-generate $(if $(findstring gcc,$(CC)),-fprofile-dir=.) +zstd-pgo : PROF_USE_FLAGS=-fprofile-use $(if $(findstring gcc,$(CC)),-fprofile-dir=. 
-Wno-error=missing-profile -Wno-error=coverage-mismatch) zstd-pgo : - $(MAKE) clean - $(MAKE) zstd MOREFLAGS=-fprofile-generate + $(MAKE) clean HASH_DIR=$(HASH_DIR) + $(MAKE) zstd HASH_DIR=$(HASH_DIR) MOREFLAGS="$(PROF_GENERATE_FLAGS)" ./zstd -b19i1 $(PROFILE_WITH) ./zstd -b16i1 $(PROFILE_WITH) ./zstd -b9i2 $(PROFILE_WITH) ./zstd -b $(PROFILE_WITH) ./zstd -b7i2 $(PROFILE_WITH) ./zstd -b5 $(PROFILE_WITH) - $(RM) zstd *.o $(ZSTDDECOMP_O) $(ZSTDDIR)/compress/*.o - $(MAKE) zstd MOREFLAGS=-fprofile-use +ifndef BUILD_DIR + $(RM) zstd obj/$(HASH_DIR)/zstd obj/$(HASH_DIR)/*.o +else + $(RM) zstd $(BUILD_DIR)/zstd $(BUILD_DIR)/*.o +endif + case $(CC) in *clang*) if ! [ -e default.profdata ]; then $(LLVM_PROFDATA) merge -output=default.profdata default*.profraw; fi ;; esac + $(MAKE) zstd HASH_DIR=$(HASH_DIR) MOREFLAGS="$(PROF_USE_FLAGS)" + +## zstd-small: minimal target, supporting only zstd compression and decompression. no bench. no legacy. no other format. +CLEAN += zstd-small zstd-frugal +zstd-small: CFLAGS = -Os -Wl,-s +zstd-frugal zstd-small: $(ZSTDLIB_CORE_SRC) zstdcli.c util.c timefn.c fileio.c fileio_asyncio.c + $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NOTRACE -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=0 $^ -o $@$(EXT) -# minimal target, with only zstd compression and decompression. no bench. no legacy. -zstd-small: CFLAGS = -Os -s -zstd-frugal zstd-small: $(ZSTD_FILES) zstdcli.c util.c timefn.c fileio.c - $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT $^ -o $@$(EXT) +CLEAN += zstd-decompress +zstd-decompress: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_DECOMPRESS_SRC) zstdcli.c util.c timefn.c fileio.c fileio_asyncio.c + $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NOCOMPRESS -DZSTD_NOTRACE -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=0 $^ -o $@$(EXT) -zstd-decompress: $(ZSTDCOMMON_FILES) $(ZSTDDECOMP_FILES) zstdcli.c util.c timefn.c fileio.c - $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NOCOMPRESS $^ -o $@$(EXT) +CLEAN += zstd-compress +zstd-compress: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_COMPRESS_SRC) zstdcli.c util.c timefn.c fileio.c fileio_asyncio.c + $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NODECOMPRESS -DZSTD_NOTRACE -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=0 $^ -o $@$(EXT) -zstd-compress: $(ZSTDCOMMON_FILES) $(ZSTDCOMP_FILES) zstdcli.c util.c timefn.c fileio.c - $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NODECOMPRESS $^ -o $@$(EXT) +## zstd-dictBuilder: executable supporting dictionary creation and compression (only) +CLEAN += zstd-dictBuilder +zstd-dictBuilder: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_COMPRESS_SRC) $(ZDICT_SRC) zstdcli.c util.c timefn.c fileio.c fileio_asyncio.c dibio.c + $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODECOMPRESS -DZSTD_NOTRACE $^ -o $@$(EXT) +CLEAN += zstdmt zstdmt: zstd - ln -sf zstd zstdmt + $(LN) -sf zstd zstdmt .PHONY: generate_res -generate_res: - windres/generate_res.bat +generate_res: $(RES64_FILE) $(RES32_FILE) + +ifneq (,$(filter Windows%,$(OS))) +RC ?= windres +# https://stackoverflow.com/questions/708238/how-do-i-add-an-icon-to-a-mingw-gcc-compiled-executable +$(RES64_FILE): windres/zstd.rc + $(RC) -o $@ -I ../lib -I windres -i $< -O coff -F pe-x86-64 +$(RES32_FILE): windres/zstd.rc + $(RC) -o $@ -I ../lib -I windres -i $< -O coff -F pe-i386 +endif .PHONY: clean clean: - $(MAKE) -C $(ZSTDDIR) clean - @$(RM) $(ZSTDDIR)/decompress/*.o $(ZSTDDIR)/decompress/zstd_decompress.gcda - @$(RM) core *.o tmp* result* *.gcda dictionary *.zst \ - zstd$(EXT) zstd32$(EXT) zstd-compress$(EXT) zstd-decompress$(EXT) \ - zstd-small$(EXT) 
zstd-frugal$(EXT) zstd-nolegacy$(EXT) zstd4$(EXT) \ - *.gcda default.profraw have_zlib$(EXT) + $(RM) $(CLEAN) core *.o tmp* result* dictionary *.zst \ + *.gcda default*.profraw default.profdata have_zlib + $(RM) -r obj/* @echo Cleaning completed MD2ROFF = ronn @@ -275,17 +319,41 @@ preview-man: clean-man man man ./zstdgrep.1 man ./zstdless.1 + +# Generate .h dependencies automatically + +DEPFLAGS = -MT $@ -MMD -MP -MF + +$(BUILD_DIR)/%.o : %.c $(BUILD_DIR)/%.d | $(BUILD_DIR) + @echo CC $@ + $(COMPILE.c) $(DEPFLAGS) $(BUILD_DIR)/$*.d $(OUTPUT_OPTION) $< + +$(BUILD_DIR)/%.o : %.S | $(BUILD_DIR) + @echo AS $@ + $(COMPILE.S) $(OUTPUT_OPTION) $< + +MKDIR ?= mkdir +$(BUILD_DIR): ; $(MKDIR) -p $@ + +DEPFILES := $(ZSTD_OBJ:.o=.d) +$(DEPFILES): + +include $(wildcard $(DEPFILES)) + + + #----------------------------------------------------------------------------- # make install is validated only for Linux, macOS, BSD, Hurd and Solaris targets #----------------------------------------------------------------------------- -ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku)) +ifneq (,$(filter $(INSTALL_OS_LIST),$(UNAME))) HAVE_COLORNEVER = $(shell echo a | egrep --color=never a > /dev/null 2> /dev/null && echo 1 || echo 0) EGREP_OPTIONS ?= -ifeq ($HAVE_COLORNEVER, 1) -EGREP_OPTIONS += --color=never +ifeq ($(HAVE_COLORNEVER), 1) + EGREP_OPTIONS += --color=never endif EGREP = egrep $(EGREP_OPTIONS) +AWK = awk # Print a two column output of targets and their description. To add a target description, put a # comment in the Makefile with the format "## : ". For example: @@ -293,15 +361,15 @@ EGREP = egrep $(EGREP_OPTIONS) ## list: Print all targets and their descriptions (if provided) .PHONY: list list: - @TARGETS=$$($(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null \ - | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' \ + TARGETS=$$($(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null \ + | $(AWK) -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' \ | $(EGREP) -v -e '^[^[:alnum:]]' | sort); \ { \ printf "Target Name\tDescription\n"; \ printf "%0.s-" {1..16}; printf "\t"; printf "%0.s-" {1..40}; printf "\n"; \ for target in $$TARGETS; do \ line=$$($(EGREP) "^##[[:space:]]+$$target:" $(lastword $(MAKEFILE_LIST))); \ - description=$$(echo $$line | awk '{i=index($$0,":"); print substr($$0,i+1)}' | xargs); \ + description=$$(echo $$line | $(AWK) '{i=index($$0,":"); print substr($$0,i+1)}' | xargs); \ printf "$$target\t$$description\n"; \ done \ } | column -t -s $$'\t' @@ -320,17 +388,17 @@ datarootdir ?= $(PREFIX)/share mandir ?= $(datarootdir)/man man1dir ?= $(mandir)/man1 -ifneq (,$(filter $(shell uname),OpenBSD FreeBSD NetBSD DragonFly SunOS)) -MANDIR ?= $(PREFIX)/man -MAN1DIR ?= $(MANDIR)/man1 +ifneq (,$(filter OpenBSD NetBSD DragonFly SunOS,$(UNAME))) + MANDIR ?= $(PREFIX)/man + MAN1DIR ?= $(MANDIR)/man1 else -MAN1DIR ?= $(man1dir) + MAN1DIR ?= $(man1dir) endif -ifneq (,$(filter $(shell uname),SunOS)) -INSTALL ?= ginstall +ifneq (,$(filter SunOS,$(UNAME))) + INSTALL ?= ginstall else -INSTALL ?= install + INSTALL ?= install endif INSTALL_PROGRAM ?= $(INSTALL) @@ -339,36 +407,39 @@ INSTALL_DATA ?= $(INSTALL) -m 644 INSTALL_MAN ?= $(INSTALL_DATA) .PHONY: install -install: zstd +install: + # generate zstd only if not already present + [ -e zstd ] || $(MAKE) zstd-release + [ -e $(DESTDIR)$(BINDIR) ] || $(INSTALL) -d -m 755 $(DESTDIR)$(BINDIR)/ + [ -e 
$(DESTDIR)$(MAN1DIR) ] || $(INSTALL) -d -m 755 $(DESTDIR)$(MAN1DIR)/ @echo Installing binaries - @$(INSTALL) -d -m 755 $(DESTDIR)$(BINDIR)/ $(DESTDIR)$(MAN1DIR)/ - @$(INSTALL_PROGRAM) zstd $(DESTDIR)$(BINDIR)/zstd - @ln -sf zstd $(DESTDIR)$(BINDIR)/zstdcat - @ln -sf zstd $(DESTDIR)$(BINDIR)/unzstd - @ln -sf zstd $(DESTDIR)$(BINDIR)/zstdmt - @$(INSTALL_SCRIPT) zstdless $(DESTDIR)$(BINDIR)/zstdless - @$(INSTALL_SCRIPT) zstdgrep $(DESTDIR)$(BINDIR)/zstdgrep + $(INSTALL_PROGRAM) zstd$(EXT) $(DESTDIR)$(BINDIR)/zstd$(EXT) + $(LN) -sf zstd$(EXT) $(DESTDIR)$(BINDIR)/zstdcat$(EXT) + $(LN) -sf zstd$(EXT) $(DESTDIR)$(BINDIR)/unzstd$(EXT) + $(LN) -sf zstd$(EXT) $(DESTDIR)$(BINDIR)/zstdmt$(EXT) + $(INSTALL_SCRIPT) zstdless $(DESTDIR)$(BINDIR)/zstdless + $(INSTALL_SCRIPT) zstdgrep $(DESTDIR)$(BINDIR)/zstdgrep @echo Installing man pages - @$(INSTALL_MAN) zstd.1 $(DESTDIR)$(MAN1DIR)/zstd.1 - @ln -sf zstd.1 $(DESTDIR)$(MAN1DIR)/zstdcat.1 - @ln -sf zstd.1 $(DESTDIR)$(MAN1DIR)/unzstd.1 - @$(INSTALL_MAN) zstdgrep.1 $(DESTDIR)$(MAN1DIR)/zstdgrep.1 - @$(INSTALL_MAN) zstdless.1 $(DESTDIR)$(MAN1DIR)/zstdless.1 + $(INSTALL_MAN) zstd.1 $(DESTDIR)$(MAN1DIR)/zstd.1 + $(LN) -sf zstd.1 $(DESTDIR)$(MAN1DIR)/zstdcat.1 + $(LN) -sf zstd.1 $(DESTDIR)$(MAN1DIR)/unzstd.1 + $(INSTALL_MAN) zstdgrep.1 $(DESTDIR)$(MAN1DIR)/zstdgrep.1 + $(INSTALL_MAN) zstdless.1 $(DESTDIR)$(MAN1DIR)/zstdless.1 @echo zstd installation completed .PHONY: uninstall uninstall: - @$(RM) $(DESTDIR)$(BINDIR)/zstdgrep - @$(RM) $(DESTDIR)$(BINDIR)/zstdless - @$(RM) $(DESTDIR)$(BINDIR)/zstdcat - @$(RM) $(DESTDIR)$(BINDIR)/unzstd - @$(RM) $(DESTDIR)$(BINDIR)/zstdmt - @$(RM) $(DESTDIR)$(BINDIR)/zstd - @$(RM) $(DESTDIR)$(MAN1DIR)/zstdless.1 - @$(RM) $(DESTDIR)$(MAN1DIR)/zstdgrep.1 - @$(RM) $(DESTDIR)$(MAN1DIR)/zstdcat.1 - @$(RM) $(DESTDIR)$(MAN1DIR)/unzstd.1 - @$(RM) $(DESTDIR)$(MAN1DIR)/zstd.1 + $(RM) $(DESTDIR)$(BINDIR)/zstdgrep + $(RM) $(DESTDIR)$(BINDIR)/zstdless + $(RM) $(DESTDIR)$(BINDIR)/zstdcat + $(RM) $(DESTDIR)$(BINDIR)/unzstd + $(RM) $(DESTDIR)$(BINDIR)/zstdmt + $(RM) $(DESTDIR)$(BINDIR)/zstd + $(RM) $(DESTDIR)$(MAN1DIR)/zstdless.1 + $(RM) $(DESTDIR)$(MAN1DIR)/zstdgrep.1 + $(RM) $(DESTDIR)$(MAN1DIR)/zstdcat.1 + $(RM) $(DESTDIR)$(MAN1DIR)/unzstd.1 + $(RM) $(DESTDIR)$(MAN1DIR)/zstd.1 @echo zstd programs successfully uninstalled endif diff --git a/programs/README.md b/programs/README.md index d9ef5dd2a62..2c2e94dc612 100644 --- a/programs/README.md +++ b/programs/README.md @@ -3,14 +3,14 @@ Command Line Interface for Zstandard library Command Line Interface (CLI) can be created using the `make` command without any additional parameters. 
There are however other Makefile targets that create different variations of CLI: -- `zstd` : default CLI supporting gzip-like arguments; includes dictionary builder, benchmark, and support for decompression of legacy zstd formats +- `zstd` : default CLI supporting gzip-like arguments; includes dictionary builder, benchmark, and supports decompression of legacy zstd formats - `zstd_nolegacy` : Same as `zstd` but without support for legacy zstd formats - `zstd-small` : CLI optimized for minimal size; no dictionary builder, no benchmark, and no support for legacy zstd formats - `zstd-compress` : version of CLI which can only compress into zstd format - `zstd-decompress` : version of CLI which can only decompress zstd format -#### Compilation variables +### Compilation variables `zstd` scope can be altered by modifying the following `make` variables : - __HAVE_THREAD__ : multithreading is automatically enabled when `pthread` is detected. @@ -61,6 +61,24 @@ There are however other Makefile targets that create different variations of CLI In which case, linking stage will fail if `lz4` library cannot be found. This is useful to prevent silent feature disabling. +- __ZSTD_NOBENCH__ : `zstd` cli will be compiled without its integrated benchmark module. + This can be useful to produce smaller binaries. + In this case, the corresponding unit can also be excluded from compilation target. + +- __ZSTD_NODICT__ : `zstd` cli will be compiled without support for the integrated dictionary builder. + This can be useful to produce smaller binaries. + In this case, the corresponding unit can also be excluded from compilation target. + +- __ZSTD_NOCOMPRESS__ : `zstd` cli will be compiled without support for compression. + The resulting binary will only be able to decompress files. + This can be useful to produce smaller binaries. + A corresponding `Makefile` target using this ability is `zstd-decompress`. + +- __ZSTD_NODECOMPRESS__ : `zstd` cli will be compiled without support for decompression. + The resulting binary will only be able to compress files. + This can be useful to produce smaller binaries. + A corresponding `Makefile` target using this ability is `zstd-compress`. + - __BACKTRACE__ : `zstd` can display a stack backtrace when execution generates a runtime exception. By default, this feature may be degraded/disabled on some platforms unless additional compiler directives are @@ -69,11 +87,11 @@ There are however other Makefile targets that create different variations of CLI Example : `make zstd BACKTRACE=1` -#### Aggregation of parameters +### Aggregation of parameters CLI supports aggregation of parameters i.e. `-b1`, `-e18`, and `-i1` can be joined into `-b1e18i1`. -#### Symlink shortcuts +### Symlink shortcuts It's possible to invoke `zstd` through a symlink. When the name of the symlink has a specific value, it triggers an associated behavior. - `zstdmt` : compress using all cores available on local system. @@ -86,7 +104,7 @@ When the name of the symlink has a specific value, it triggers an associated beh - `ungz`, `unxz` and `unlzma` will do the same, and will also remove source file by default (use `--keep` to preserve). -#### Dictionary builder in Command Line Interface +### Dictionary builder in Command Line Interface Zstd offers a training mode, which can be used to tune the algorithm for a selected type of data, by providing it with a few samples. 
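The CLI's training mode is a front-end for the `ZDICT` API in `lib/dictBuilder`. A rough library-level equivalent of `zstd --train` looks as follows (a sketch; the sample layout is illustrative, and the CLI layers file handling plus the `-o` and `--maxdict` options on top of this call):

```c
#include <zdict.h>

/* samplesBuffer holds all training samples concatenated back-to-back;
 * samplesSizes[i] gives the size of sample i. */
static size_t train_dictionary(void* dictBuffer, size_t dictCapacity,
                               const void* samplesBuffer,
                               const size_t* samplesSizes, unsigned nbSamples)
{
    size_t const dictSize = ZDICT_trainFromBuffer(
            dictBuffer, dictCapacity,
            samplesBuffer, samplesSizes, nbSamples);
    /* On failure the return value is an error code:
     * check it with ZDICT_isError(dictSize). */
    return dictSize;
}
```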
The result of the training is stored in a file selected with the `-o` option (default name is `dictionary`), @@ -106,79 +124,161 @@ Usage of the dictionary builder and created dictionaries with CLI: 3. Decompress with the dictionary: `zstd --decompress FILE.zst -D dictionaryName` -#### Benchmark in Command Line Interface +### Benchmark in Command Line Interface CLI includes in-memory compression benchmark module for zstd. The benchmark is conducted using given filenames. The files are read into memory and joined together. It makes benchmark more precise as it eliminates I/O overhead. Multiple filenames can be supplied, as multiple parameters, with wildcards, -or names of directories can be used as parameters with `-r` option. +or directory names can be used with `-r` option. +If no file is provided, the benchmark will use a procedurally generated "lorem ipsum" content. The benchmark measures ratio, compressed size, compression and decompression speed. One can select compression levels starting from `-b` and ending with `-e`. The `-i` parameter selects minimal time used for each of tested levels. +The benchmark can also be used to test specific parameters, +such as number of threads (`-T#`), or advanced parameters (`--zstd=#`), or dictionary compression (`-D DICTIONARY`), +and many others available on command for regular compression and decompression. + -#### Usage of Command Line Interface +### Usage of Command Line Interface The full list of options can be obtained with `-h` or `-H` parameter: ``` -Usage : - zstd [args] [FILE(s)] [-o file] - -FILE : a filename - with no FILE, or when FILE is - , read standard input -Arguments : - -# : # compression level (1-19, default: 3) - -d : decompression - -D file: use `file` as Dictionary - -o file: result stored into `file` (only if 1 input file) - -f : overwrite output without prompting and (de)compress links ---rm : remove source file(s) after successful de/compression - -k : preserve source file(s) (default) - -h/-H : display help/long help and exit - -Advanced arguments : - -V : display Version number and exit - -v : verbose mode; specify multiple times to increase verbosity - -q : suppress warnings; specify twice to suppress errors too - -c : force write to standard output, even if it is the console - -l : print information about zstd compressed files ---ultra : enable levels beyond 19, up to 22 (requires more memory) ---long : enable long distance matching (requires more memory) ---no-dictID : don't write dictID into header (dictionary compression) ---[no-]check : integrity check (default: enabled) - -r : operate recursively on directories ---format=gzip : compress files to the .gz format ---format=xz : compress files to the .xz format ---format=lzma : compress files to the .lzma format ---test : test compressed file integrity ---[no-]sparse : sparse mode (default: disabled) - -M# : Set a memory usage limit for decompression --- : All arguments after "--" are treated as files - -Dictionary builder : ---train ## : create a dictionary from a training set of files ---train-cover[=k=#,d=#,steps=#,split=#] : use the cover algorithm with optional args ---train-fastcover[=k=#,d=#,f=#,steps=#,split=#,accel=#] : use the fastcover algorithm with optional args ---train-legacy[=s=#] : use the legacy algorithm with selectivity (default: 9) - -o file : `file` is dictionary name (default: dictionary) ---maxdict=# : limit dictionary to specified size (default: 112640) ---dictID=# : force dictionary ID to specified value (default: random) - -Benchmark 
arguments : - -b# : benchmark file(s), using # compression level (default: 3) - -e# : test all compression levels from -bX to # (default: 1) - -i# : minimum evaluation time in seconds (default: 3s) - -B# : cut file into independent blocks of size # (default: no block) ---priority=rt : set process priority to real-time +*** Zstandard CLI (64-bit) v1.5.6, by Yann Collet *** + +Compress or decompress the INPUT file(s); reads from STDIN if INPUT is `-` or not provided. + +Usage: zstd [OPTIONS...] [INPUT... | -] [-o OUTPUT] + +Options: + -o OUTPUT Write output to a single file, OUTPUT. + -k, --keep Preserve INPUT file(s). [Default] + --rm Remove INPUT file(s) after successful (de)compression to file. + + -# Desired compression level, where `#` is a number between 1 and 19; + lower numbers provide faster compression, higher numbers yield + better compression ratios. [Default: 3] + + -d, --decompress Perform decompression. + -D DICT Use DICT as the dictionary for compression or decompression. + + -f, --force Disable input and output checks. Allows overwriting existing files, + receiving input from the console, printing output to STDOUT, and + operating on links, block devices, etc. Unrecognized formats will be + passed-through through as-is. + + -h Display short usage and exit. + -H, --help Display full help and exit. + -V, --version Display the program version and exit. + +Advanced options: + -c, --stdout Write to STDOUT (even if it is a console) and keep the INPUT file(s). + + -v, --verbose Enable verbose output; pass multiple times to increase verbosity. + -q, --quiet Suppress warnings; pass twice to suppress errors. + --trace LOG Log tracing information to LOG. + + --[no-]progress Forcibly show/hide the progress counter. NOTE: Any (de)compressed + output to terminal will mix with progress counter text. + + -r Operate recursively on directories. + --filelist LIST Read a list of files to operate on from LIST. + --output-dir-flat DIR Store processed files in DIR. + --output-dir-mirror DIR Store processed files in DIR, respecting original directory structure. + --[no-]asyncio Use asynchronous IO. [Default: Enabled] + + --[no-]check Add XXH64 integrity checksums during compression. [Default: Add, Validate] + If `-d` is present, ignore/validate checksums during decompression. + + -- Treat remaining arguments after `--` as files. + +Advanced compression options: + --ultra Enable levels beyond 19, up to 22; requires more memory. + --fast[=#] Use to very fast compression levels. [Default: 1] + --adapt Dynamically adapt compression level to I/O conditions. + --long[=#] Enable long distance matching with window log #. [Default: 27] + --patch-from=REF Use REF as the reference point for Zstandard's diff engine. + + -T# Spawn # compression threads. [Default: 1; pass 0 for core count.] + --single-thread Share a single thread for I/O and compression (slightly different than `-T1`). + --auto-threads={physical|logical} + Use physical/logical cores when using `-T0`. [Default: Physical] + + -B# Set job size to #. [Default: 0 (automatic)] + --rsyncable Compress using a rsync-friendly method (`-B` sets block size). + + --exclude-compressed Only compress files that are not already compressed. + + --stream-size=# Specify size of streaming input from STDIN. + --size-hint=# Optimize compression parameters for streaming input of approximately size #. + --target-compressed-block-size=# + Generate compressed blocks of approximately # size. 
+ + --no-dictID Don't write `dictID` into the header (dictionary compression only). + --[no-]compress-literals Force (un)compressed literals. + --[no-]row-match-finder Explicitly enable/disable the fast, row-based matchfinder for + the 'greedy', 'lazy', and 'lazy2' strategies. + + --format=zstd Compress files to the `.zst` format. [Default] + --[no-]mmap-dict Memory-map dictionary file rather than mallocing and loading all at once + --format=gzip Compress files to the `.gz` format. + --format=xz Compress files to the `.xz` format. + --format=lzma Compress files to the `.lzma` format. + --format=lz4 Compress files to the `.lz4` format. + +Advanced decompression options: + -l Print information about Zstandard-compressed files. + --test Test compressed file integrity. + -M# Set the memory usage limit to # megabytes. + --[no-]sparse Enable sparse mode. [Default: Enabled for files, disabled for STDOUT.] + --[no-]pass-through Pass through uncompressed files as-is. [Default: Disabled] + +Dictionary builder: + --train Create a dictionary from a training set of files. + + --train-cover[=k=#,d=#,steps=#,split=#,shrink[=#]] + Use the cover algorithm (with optional arguments). + --train-fastcover[=k=#,d=#,f=#,steps=#,split=#,accel=#,shrink[=#]] + Use the fast cover algorithm (with optional arguments). + + --train-legacy[=s=#] Use the legacy algorithm with selectivity #. [Default: 9] + -o NAME Use NAME as dictionary name. [Default: dictionary] + --maxdict=# Limit dictionary to specified size #. [Default: 112640] + --dictID=# Force dictionary ID to #. [Default: Random] + +Benchmark options: + -b# Perform benchmarking with compression level #. [Default: 3] + -e# Test all compression levels up to #; starting level is `-b#`. [Default: 1] + -i# Set the minimum evaluation to time # seconds. [Default: 3] + -B# Cut file into independent chunks of size #. [Default: No chunking] + -S Output one benchmark result per input file. [Default: Consolidated result] + -D dictionary Benchmark using dictionary + --priority=rt Set process priority to real-time. ``` -#### Restricted usage of Environment Variables -Using environment variables to set compression/decompression parameters has security implications. Therefore, -we intentionally restrict its usage. Currently, only `ZSTD_CLEVEL` is supported for setting compression level. -If the value of `ZSTD_CLEVEL` is not a valid integer, it will be ignored with a warning message. -Note that command line options will override corresponding environment variable settings. +### Passing parameters through Environment Variables +There is no "generic" way to pass "any kind of parameter" to `zstd` in a pass-through manner. +Using environment variables for this purpose has security implications. +Therefore, this avenue is intentionally restricted and only supports `ZSTD_CLEVEL` and `ZSTD_NBTHREADS`. + +`ZSTD_CLEVEL` can be used to modify the default compression level of `zstd` +(usually set to `3`) to another value between 1 and 19 (the "normal" range). + +`ZSTD_NBTHREADS` can be used to specify a number of threads +that `zstd` will use for compression, which by default is `1`. +This functionality only exists when `zstd` is compiled with multithread support. +`0` means "use as many threads as detected cpu cores on local system". +The max # of threads is capped at `ZSTDMT_NBWORKERS_MAX`, +which is either 64 in 32-bit mode, or 256 for 64-bit environments. + +This functionality can be useful when `zstd` CLI is invoked in a way that doesn't allow passing arguments. 
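As an illustration of what honoring `ZSTD_CLEVEL` involves (a sketch, not the CLI's actual parsing code), a wrapper could read the variable and fall back to the documented default on anything invalid:

```c
#include <stdlib.h>

/* Illustrative: return ZSTD_CLEVEL if it parses to a level in the normal
 * 1-19 range, otherwise the documented default of 3. */
static int compression_level_from_env(void)
{
    const char* const s = getenv("ZSTD_CLEVEL");
    if (s != NULL) {
        char* end = NULL;
        long const v = strtol(s, &end, 10);
        if (end != s && *end == '\0' && v >= 1 && v <= 19)
            return (int)v;
    }
    return 3;
}
```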
+One such scenario is `tar --zstd`. +As `ZSTD_CLEVEL` and `ZSTD_NBTHREADS` only replace the default compression level +and number of threads respectively, they can both be overridden by corresponding command line arguments: +`-#` for compression level and `-T#` for number of threads. + -#### Long distance matching mode +### Long distance matching mode The long distance matching mode, enabled with `--long`, is designed to improve the compression ratio for files with long matches at a large distance (up to the maximum window size, `128 MiB`) while still maintaining compression speed. @@ -202,12 +302,12 @@ Compression Speed vs Ratio | Decompression Speed | Method | Compression ratio | Compression speed | Decompression speed | |:-------|------------------:|-------------------------:|---------------------------:| -| `zstd -1` | `5.065` | `284.8 MB/s` | `759.3 MB/s` | +| `zstd -1` | `5.065` | `284.8 MB/s` | `759.3 MB/s` | | `zstd -5` | `5.826` | `124.9 MB/s` | `674.0 MB/s` | | `zstd -10` | `6.504` | `29.5 MB/s` | `771.3 MB/s` | | `zstd -1 --long` | `17.426` | `220.6 MB/s` | `1638.4 MB/s` | -| `zstd -5 --long` | `19.661` | `165.5 MB/s` | `1530.6 MB/s`| -| `zstd -10 --long`| `21.949` | `75.6 MB/s` | `1632.6 MB/s`| +| `zstd -5 --long` | `19.661` | `165.5 MB/s` | `1530.6 MB/s` | +| `zstd -10 --long`| `21.949` | `75.6 MB/s` | `1632.6 MB/s` | On this file, the compression ratio improves significantly with minimal impact on compression speed, and the decompression speed doubles. @@ -218,7 +318,7 @@ compression speed (for lower levels) with minimal change in compression ratio. The below table illustrates this on the [Silesia compression corpus]. -[Silesia compression corpus]: http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia +[Silesia compression corpus]: https://sun.aei.polsl.pl//~sdeor/index.php?page=silesia | Method | Compression ratio | Compression speed | Decompression speed | |:-------|------------------:|------------------:|---------------------:| @@ -230,7 +330,7 @@ The below table illustrates this on the [Silesia compression corpus]. | `zstd -10 --long`| `3.566` | `16.2 MB/s` | `415.7 MB/s` | -#### zstdgrep +### zstdgrep `zstdgrep` is a utility which makes it possible to `grep` directly a `.zst` compressed file. It's used the same way as normal `grep`, for example : diff --git a/programs/benchfn.c b/programs/benchfn.c index 0932d155de4..3e042cf38f8 100644 --- a/programs/benchfn.c +++ b/programs/benchfn.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
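The `--long` mode benchmarked above maps onto two advanced parameters of the library. A sketch of the equivalent programmatic setup (a 27-bit window matches the CLI's documented `--long` default; error handling shortened):

```c
#include <zstd.h>

/* Sketch: enable long-distance matching with a 27-bit window,
 * approximating what `zstd --long` configures internally. */
static size_t set_long_mode(ZSTD_CCtx* cctx, int level)
{
    size_t err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
    if (ZSTD_isError(err)) return err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableLongDistanceMatching, 1);
    if (ZSTD_isError(err)) return err;
    return ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 27);
}
```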
* * This source code is licensed under both the @@ -15,7 +15,6 @@ ***************************************/ #include <stdlib.h> /* malloc, free */ #include <string.h> /* memset */ -#undef NDEBUG /* assert must not be disabled */ #include <assert.h> /* assert */ #include "timefn.h" /* UTIL_time_t, UTIL_getTime */ @@ -54,6 +53,9 @@ return retValue; \ } +/* Abort execution if a condition is not met */ +#define CONTROL(c) { if (!(c)) { DEBUGOUTPUT("error: %s \n", #c); abort(); } } + /* ************************************* * Benchmarking an arbitrary function ***************************************/ @@ -68,13 +70,13 @@ int BMK_isSuccessful_runOutcome(BMK_runOutcome_t outcome) * check outcome validity first, using BMK_isValid_runResult() */ BMK_runTime_t BMK_extract_runTime(BMK_runOutcome_t outcome) { - assert(outcome.error_tag_never_ever_use_directly == 0); + CONTROL(outcome.error_tag_never_ever_use_directly == 0); return outcome.internal_never_ever_use_directly; } size_t BMK_extract_errorResult(BMK_runOutcome_t outcome) { - assert(outcome.error_tag_never_ever_use_directly != 0); + CONTROL(outcome.error_tag_never_ever_use_directly != 0); return outcome.error_result_never_ever_use_directly; } @@ -106,7 +108,6 @@ static BMK_runOutcome_t BMK_setValid_runTime(BMK_runTime_t runTime) BMK_runOutcome_t BMK_benchFunction(BMK_benchParams_t p, unsigned nbLoops) { - size_t dstSize = 0; nbLoops += !nbLoops; /* minimum nbLoops is 1 */ /* init */ @@ -116,7 +117,8 @@ BMK_runOutcome_t BMK_benchFunction(BMK_benchParams_t p, } } /* benchmark */ - { UTIL_time_t const clockStart = UTIL_getTime(); + { size_t dstSize = 0; + UTIL_time_t const clockStart = UTIL_getTime(); unsigned loopNb, blockNb; if (p.initFn != NULL) p.initFn(p.initPayload); for (loopNb = 0; loopNb < nbLoops; loopNb++) { @@ -227,9 +229,9 @@ BMK_runOutcome_t BMK_benchTimedFn(BMK_timedFnState_t* cont, cont->timeSpent_ns += (unsigned long long)loopDuration_ns; /* estimate nbLoops for next run to last approximately 1 second */ - if (loopDuration_ns > (runBudget_ns / 50)) { + if (loopDuration_ns > ((double)runBudget_ns / 50)) { double const fastestRun_ns = MIN(bestRunTime.nanoSecPerRun, newRunTime.nanoSecPerRun); - cont->nbLoops = (unsigned)(runBudget_ns / fastestRun_ns) + 1; + cont->nbLoops = (unsigned)((double)runBudget_ns / fastestRun_ns) + 1; } else { /* previous run was too short : blindly increase workload by x multiplier */ const unsigned multiplier = 10; @@ -237,7 +239,7 @@ BMK_runOutcome_t BMK_benchTimedFn(BMK_timedFnState_t* cont, cont->nbLoops *= multiplier; } - if(loopDuration_ns < runTimeMin_ns) { + if(loopDuration_ns < (double)runTimeMin_ns) { /* don't report results for which benchmark run time was too small : increased risks of rounding errors */ assert(completed == 0); continue; diff --git a/programs/benchfn.h b/programs/benchfn.h index 19e056581a5..3fc6e0d0455 100644 --- a/programs/benchfn.h +++ b/programs/benchfn.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved.
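The switch from `assert()` to `CONTROL()` above makes these extraction checks unconditional, so callers must test `BMK_isSuccessful_runOutcome()` before extracting a result. A sketch of driving the harness against a trivial copy function (field names as they appear in this file and later in `benchzstd.c`; assumes compilation alongside `programs/`, and that `dst` has capacity for `srcSize` bytes):

```c
#include <string.h>
#include "benchfn.h"

/* benchFn-compatible wrapper: process one block, return bytes produced */
static size_t copyFn(const void* src, size_t srcSize,
                     void* dst, size_t dstCapacity, void* payload)
{
    (void)dstCapacity; (void)payload;
    memcpy(dst, src, srcSize);
    return srcSize;
}

static unsigned neverFails(size_t retValue) { (void)retValue; return 0; }

/* Time copyFn over a single block; returns ns per run, or -1.0 on failure */
static double nsPerCopy(const void* src, size_t srcSize,
                        void* dst, size_t dstCapacity)
{
    BMK_benchParams_t p;
    memset(&p, 0, sizeof(p));   /* initFn and blockResults stay NULL */
    p.benchFn       = copyFn;
    p.errorFn       = neverFails;
    p.blockCount    = 1;
    p.srcBuffers    = &src;
    p.srcSizes      = &srcSize;
    p.dstBuffers    = &dst;
    p.dstCapacities = &dstCapacity;
    {   BMK_runOutcome_t const outcome = BMK_benchFunction(p, 100 /* nbLoops */);
        if (!BMK_isSuccessful_runOutcome(outcome)) return -1.0;
        return BMK_extract_runTime(outcome).nanoSecPerRun;
    }
}
```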
* * This source code is licensed under both the @@ -15,17 +15,12 @@ * or detecting and returning an error */ -#if defined (__cplusplus) -extern "C" { -#endif - #ifndef BENCH_FN_H_23876 #define BENCH_FN_H_23876 /* === Dependencies === */ #include <stddef.h> /* size_t */ - /* ==== Benchmark any function, iterated on a set of blocks ==== */ /* BMK_runTime_t: valid result return type */ @@ -123,7 +118,7 @@ BMK_runTime_t BMK_extract_runTime(BMK_runOutcome_t outcome); /* when benchmark failed, it means one invocation of `benchFn` failed. * The failure was detected by `errorFn`, operating on return values of `benchFn`. * Returns the faulty return value. - * note : this function will abort() program execution if benchmark did not failed. + * note : this function will abort() program execution if benchmark did not fail. * always check if benchmark failed first ! */ size_t BMK_extract_errorResult(BMK_runOutcome_t outcome); @@ -175,9 +170,4 @@ typedef union { } BMK_timedFnState_shell; BMK_timedFnState_t* BMK_initStatic_timedFnState(void* buffer, size_t size, unsigned total_ms, unsigned run_ms); - #endif /* BENCH_FN_H_23876 */ - -#if defined (__cplusplus) -} -#endif diff --git a/programs/benchzstd.c b/programs/benchzstd.c index 263dc08887f..f55c8697504 100644 --- a/programs/benchzstd.c +++ b/programs/benchzstd.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -8,196 +8,285 @@ * You may select, at your option, one of the above-listed licenses. */ - /* ************************************** -* Tuning parameters -****************************************/ -#ifndef BMK_TIMETEST_DEFAULT_S /* default minimum time per test */ -#define BMK_TIMETEST_DEFAULT_S 3 + * Tuning parameters + ****************************************/ +#ifndef BMK_TIMETEST_DEFAULT_S /* default minimum time per test */ +# define BMK_TIMETEST_DEFAULT_S 3 #endif - /* ************************************* -* Includes -***************************************/ -#include "platform.h" /* Large Files support */ -#include "util.h" /* UTIL_getFileSize, UTIL_sleep */ -#include <stdlib.h> /* malloc, free */ -#include <string.h> /* memset, strerror */ -#include <stdio.h> /* fprintf, fopen */ -#include <errno.h> -#include <assert.h> /* assert */ + * Includes + ***************************************/ +/* this must be included first */ +#include "platform.h" /* Large Files support, compiler specifics */ -#include "timefn.h" /* UTIL_time_t */ +/* then following system includes */ +#include <assert.h> /* assert */ +#include <errno.h> +#include <stdio.h> /* fprintf, fopen */ +#include <stdlib.h> /* malloc, free */ +#include <string.h> /* memset, strerror */ +#include "util.h" /* UTIL_getFileSize, UTIL_sleep */ +#include "../lib/common/mem.h" #include "benchfn.h" -#include "mem.h" -#define ZSTD_STATIC_LINKING_ONLY -#include "zstd.h" -#include "datagen.h" /* RDG_genBuffer */ -#include "xxhash.h" +#include "timefn.h" /* UTIL_time_t */ +#ifndef ZSTD_STATIC_LINKING_ONLY +# define ZSTD_STATIC_LINKING_ONLY +#endif +#include "../lib/zstd.h" +#include "datagen.h" /* RDG_genBuffer */ +#include "lorem.h" /* LOREM_genBuffer */ +#ifndef XXH_INLINE_ALL +# define XXH_INLINE_ALL +#endif +#include "../lib/common/xxhash.h" +#include "../lib/zstd_errors.h" #include "benchzstd.h" -#include "zstd_errors.h" - /* ************************************* -* Constants -***************************************/ + * Constants + ***************************************/ #ifndef
ZSTD_GIT_COMMIT -# define ZSTD_GIT_COMMIT_STRING "" +# define ZSTD_GIT_COMMIT_STRING "" #else -# define ZSTD_GIT_COMMIT_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_GIT_COMMIT) +# define ZSTD_GIT_COMMIT_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_GIT_COMMIT) #endif -#define TIMELOOP_MICROSEC (1*1000000ULL) /* 1 second */ -#define TIMELOOP_NANOSEC (1*1000000000ULL) /* 1 second */ -#define ACTIVEPERIOD_MICROSEC (70*TIMELOOP_MICROSEC) /* 70 seconds */ -#define COOLPERIOD_SEC 10 +#define TIMELOOP_MICROSEC (1 * 1000000ULL) /* 1 second */ +#define TIMELOOP_NANOSEC (1 * 1000000000ULL) /* 1 second */ +#define ACTIVEPERIOD_MICROSEC (70 * TIMELOOP_MICROSEC) /* 70 seconds */ +#define COOLPERIOD_SEC 10 -#define KB *(1 <<10) -#define MB *(1 <<20) -#define GB *(1U<<30) +#define KB *(1 << 10) +#define MB *(1 << 20) +#define GB *(1U << 30) #define BMK_RUNTEST_DEFAULT_MS 1000 -static const size_t maxMemory = (sizeof(size_t)==4) ? - /* 32-bit */ (2 GB - 64 MB) : - /* 64-bit */ (size_t)(1ULL << ((sizeof(size_t)*8)-31)); +static const size_t maxMemory = (sizeof(size_t) == 4) + ? + /* 32-bit */ (2 GB - 64 MB) + : + /* 64-bit */ (size_t)(1ULL << ((sizeof(size_t) * 8) - 31)); +/* ************************************* + * console display + ***************************************/ +#define DISPLAY(...) \ + { \ + fprintf(stderr, __VA_ARGS__); \ + fflush(NULL); \ + } +#define DISPLAYLEVEL(l, ...) \ + if (displayLevel >= l) { \ + DISPLAY(__VA_ARGS__); \ + } +/* 0 : no display; 1: errors; 2 : + result + interaction + warnings; 3 : + + * progression; 4 : + information */ +#define OUTPUT(...) \ + { \ + fprintf(stdout, __VA_ARGS__); \ + fflush(NULL); \ + } +#define OUTPUTLEVEL(l, ...) \ + if (displayLevel >= l) { \ + OUTPUT(__VA_ARGS__); \ + } /* ************************************* -* console display -***************************************/ -#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) -#define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); } -/* 0 : no display; 1: errors; 2 : + result + interaction + warnings; 3 : + progression; 4 : + information */ + * Exceptions + ***************************************/ +#ifndef DEBUG +# define DEBUG 0 +#endif +#define DEBUGOUTPUT(...) \ + { \ + if (DEBUG) \ + DISPLAY(__VA_ARGS__); \ + } -static const U64 g_refreshRate = SEC_TO_MICRO / 6; -static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER; +#define RETURN_ERROR_INT(errorNum, ...) \ + { \ + DEBUGOUTPUT("%s: %i: \n", __FILE__, __LINE__); \ + DISPLAYLEVEL(1, "Error %i : ", errorNum); \ + DISPLAYLEVEL(1, __VA_ARGS__); \ + DISPLAYLEVEL(1, " \n"); \ + return errorNum; \ + } -#define DISPLAYUPDATE(l, ...) { if (displayLevel>=l) { \ - if ((UTIL_clockSpanMicro(g_displayClock) > g_refreshRate) || (displayLevel>=4)) \ - { g_displayClock = UTIL_getTime(); DISPLAY(__VA_ARGS__); \ - if (displayLevel>=4) fflush(stderr); } } } +#define CHECK_Z(zf) \ + { \ + size_t const zerr = zf; \ + if (ZSTD_isError(zerr)) { \ + DEBUGOUTPUT("%s: %i: \n", __FILE__, __LINE__); \ + DISPLAY("Error : "); \ + DISPLAY("%s failed : %s", #zf, ZSTD_getErrorName(zerr)); \ + DISPLAY(" \n"); \ + exit(1); \ + } \ + } +#define RETURN_ERROR(errorNum, retType, ...) 
\ + { \ + retType r; \ + memset(&r, 0, sizeof(retType)); \ + DEBUGOUTPUT("%s: %i: \n", __FILE__, __LINE__); \ + DISPLAYLEVEL(1, "Error %i : ", errorNum); \ + DISPLAYLEVEL(1, __VA_ARGS__); \ + DISPLAYLEVEL(1, " \n"); \ + r.tag = errorNum; \ + return r; \ + } -/* ************************************* -* Exceptions -***************************************/ -#ifndef DEBUG -# define DEBUG 0 -#endif -#define DEBUGOUTPUT(...) { if (DEBUG) DISPLAY(__VA_ARGS__); } - -#define EXM_THROW_INT(errorNum, ...) { \ - DEBUGOUTPUT("%s: %i: \n", __FILE__, __LINE__); \ - DISPLAYLEVEL(1, "Error %i : ", errorNum); \ - DISPLAYLEVEL(1, __VA_ARGS__); \ - DISPLAYLEVEL(1, " \n"); \ - return errorNum; \ +static size_t uintSize(unsigned value) +{ + size_t size = 1; + while (value >= 10) { + size++; + value /= 10; + } + return size; } -#define CHECK_Z(zf) { \ - size_t const zerr = zf; \ - if (ZSTD_isError(zerr)) { \ - DEBUGOUTPUT("%s: %i: \n", __FILE__, __LINE__); \ - DISPLAY("Error : "); \ - DISPLAY("%s failed : %s", \ - #zf, ZSTD_getErrorName(zerr)); \ - DISPLAY(" \n"); \ - exit(1); \ - } \ +/* Note: presume @buffer is large enough */ +static void writeUint_varLen(char* buffer, size_t capacity, unsigned value) +{ + int endPos = (int)uintSize(value) - 1; + assert(uintSize(value) >= 1); + assert(uintSize(value) < capacity); (void)capacity; + while (endPos >= 0) { + char c = '0' + (char)(value % 10); + buffer[endPos--] = c; + value /= 10; + } } -#define RETURN_ERROR(errorNum, retType, ...) { \ - retType r; \ - memset(&r, 0, sizeof(retType)); \ - DEBUGOUTPUT("%s: %i: \n", __FILE__, __LINE__); \ - DISPLAYLEVEL(1, "Error %i : ", errorNum); \ - DISPLAYLEVEL(1, __VA_ARGS__); \ - DISPLAYLEVEL(1, " \n"); \ - r.tag = errorNum; \ - return r; \ -} +/* replacement for snprintf(), which is not supported by C89. + * sprintf() would be the supported one, but it's labelled unsafe: + * modern static analyzer will flag sprintf() as dangerous, making it unusable. 
+ * formatString_u() replaces snprintf() for the specific case where there is only one %u argument */ +static int formatString_u(char* buffer, size_t buffer_size, const char* formatString, unsigned int value) +{ + size_t const valueSize = uintSize(value); + size_t written = 0; + int i; + for (i = 0; formatString[i] != '\0' && written < buffer_size - 1; i++) { + if (formatString[i] != '%') { + buffer[written++] = formatString[i]; + continue; + } + + i++; + if (formatString[i] == 'u') { + if (written + valueSize >= buffer_size) abort(); /* buffer not large enough */ + writeUint_varLen(buffer + written, buffer_size - written, value); + written += valueSize; + } else if (formatString[i] == '%') { /* Check for escaped percent sign */ + buffer[written++] = '%'; + } else { + abort(); /* unsupported format */ + } + } + + if (written < buffer_size) { + buffer[written] = '\0'; + } else { + abort(); /* buffer not large enough */ + } + + return (int)written; +} /* ************************************* -* Benchmark Parameters -***************************************/ + * Benchmark Parameters + ***************************************/ -BMK_advancedParams_t BMK_initAdvancedParams(void) { +BMK_advancedParams_t BMK_initAdvancedParams(void) +{ BMK_advancedParams_t const res = { - BMK_both, /* mode */ + BMK_both, /* mode */ BMK_TIMETEST_DEFAULT_S, /* nbSeconds */ - 0, /* blockSize */ - 0, /* nbWorkers */ - 0, /* realTime */ - 0, /* additionalParam */ - 0, /* ldmFlag */ - 0, /* ldmMinMatch */ - 0, /* ldmHashLog */ - 0, /* ldmBuckSizeLog */ - 0, /* ldmHashRateLog */ - ZSTD_lcm_auto /* literalCompressionMode */ + 0, /* chunkSizeMax */ + 0, /* targetCBlockSize */ + 0, /* nbWorkers */ + 0, /* realTime */ + 0, /* additionalParam */ + 0, /* ldmFlag */ + 0, /* ldmMinMatch */ + 0, /* ldmHashLog */ + 0, /* ldmBuckSizeLog */ + 0, /* ldmHashRateLog */ + ZSTD_ps_auto, /* literalCompressionMode */ + 0 /* useRowMatchFinder */ }; return res; } - /* ******************************************************** -* Bench functions -**********************************************************/ -typedef struct { - const void* srcPtr; - size_t srcSize; - void* cPtr; - size_t cRoom; - size_t cSize; - void* resPtr; - size_t resSize; -} blockParam_t; - + * Bench functions + **********************************************************/ #undef MIN #undef MAX -#define MIN(a,b) ((a) < (b) ? (a) : (b)) -#define MAX(a,b) ((a) > (b) ? (a) : (b)) - -static void -BMK_initCCtx(ZSTD_CCtx* ctx, - const void* dictBuffer, size_t dictBufferSize, - int cLevel, - const ZSTD_compressionParameters* comprParams, - const BMK_advancedParams_t* adv) +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define MAX(a, b) ((a) > (b) ? 
(a) : (b)) + +static void BMK_initCCtx( + ZSTD_CCtx* ctx, + const void* dictBuffer, + size_t dictBufferSize, + int cLevel, + const ZSTD_compressionParameters* comprParams, + const BMK_advancedParams_t* adv) { ZSTD_CCtx_reset(ctx, ZSTD_reset_session_and_parameters); - if (adv->nbWorkers==1) { + if (adv->nbWorkers == 1) { CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_nbWorkers, 0)); } else { CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_nbWorkers, adv->nbWorkers)); } CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_compressionLevel, cLevel)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_enableLongDistanceMatching, adv->ldmFlag)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_useRowMatchFinder, adv->useRowMatchFinder)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_enableLongDistanceMatching, adv->ldmFlag)); CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_ldmMinMatch, adv->ldmMinMatch)); CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_ldmHashLog, adv->ldmHashLog)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_ldmBucketSizeLog, adv->ldmBucketSizeLog)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_ldmHashRateLog, adv->ldmHashRateLog)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_windowLog, (int)comprParams->windowLog)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_hashLog, (int)comprParams->hashLog)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_chainLog, (int)comprParams->chainLog)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_searchLog, (int)comprParams->searchLog)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_minMatch, (int)comprParams->minMatch)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_targetLength, (int)comprParams->targetLength)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_literalCompressionMode, (int)adv->literalCompressionMode)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_strategy, comprParams->strategy)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_ldmBucketSizeLog, adv->ldmBucketSizeLog)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_ldmHashRateLog, adv->ldmHashRateLog)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_windowLog, (int)comprParams->windowLog)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_hashLog, (int)comprParams->hashLog)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_chainLog, (int)comprParams->chainLog)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_searchLog, (int)comprParams->searchLog)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_minMatch, (int)comprParams->minMatch)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_targetLength, (int)comprParams->targetLength)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, + ZSTD_c_literalCompressionMode, + (int)adv->literalCompressionMode)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_strategy, (int)comprParams->strategy)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_targetCBlockSize, (int)adv->targetCBlockSize)); CHECK_Z(ZSTD_CCtx_loadDictionary(ctx, dictBuffer, dictBufferSize)); } -static void BMK_initDCtx(ZSTD_DCtx* dctx, - const void* dictBuffer, size_t dictBufferSize) { +static void +BMK_initDCtx(ZSTD_DCtx* dctx, const void* dictBuffer, size_t dictBufferSize) +{ CHECK_Z(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters)); CHECK_Z(ZSTD_DCtx_loadDictionary(dctx, dictBuffer, dictBufferSize)); } - typedef struct { ZSTD_CCtx* cctx; const void* dictBuffer; @@ -207,9 +296,16 @@ typedef struct { const BMK_advancedParams_t* adv; } BMK_initCCtxArgs; -static size_t local_initCCtx(void* payload) { +static size_t local_initCCtx(void* payload) +{ BMK_initCCtxArgs* ag = (BMK_initCCtxArgs*)payload; - 
BMK_initCCtx(ag->cctx, ag->dictBuffer, ag->dictBufferSize, ag->cLevel, ag->comprParams, ag->adv); + BMK_initCCtx( + ag->cctx, + ag->dictBuffer, + ag->dictBufferSize, + ag->cLevel, + ag->comprParams, + ag->adv); return 0; } @@ -219,18 +315,20 @@ typedef struct { size_t dictBufferSize; } BMK_initDCtxArgs; -static size_t local_initDCtx(void* payload) { +static size_t local_initDCtx(void* payload) +{ BMK_initDCtxArgs* ag = (BMK_initDCtxArgs*)payload; BMK_initDCtx(ag->dctx, ag->dictBuffer, ag->dictBufferSize); return 0; } - /* `addArgs` is the context */ static size_t local_defaultCompress( - const void* srcBuffer, size_t srcSize, - void* dstBuffer, size_t dstSize, - void* addArgs) + const void* srcBuffer, + size_t srcSize, + void* dstBuffer, + size_t dstSize, + void* addArgs) { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)addArgs; return ZSTD_compress2(cctx, dstBuffer, dstSize, srcBuffer, srcSize); @@ -238,18 +336,24 @@ static size_t local_defaultCompress( /* `addArgs` is the context */ static size_t local_defaultDecompress( - const void* srcBuffer, size_t srcSize, - void* dstBuffer, size_t dstCapacity, - void* addArgs) + const void* srcBuffer, + size_t srcSize, + void* dstBuffer, + size_t dstCapacity, + void* addArgs) { - size_t moreToFlush = 1; + size_t moreToFlush = 1; ZSTD_DCtx* const dctx = (ZSTD_DCtx*)addArgs; ZSTD_inBuffer in; ZSTD_outBuffer out; - in.src = srcBuffer; in.size = srcSize; in.pos = 0; - out.dst = dstBuffer; out.size = dstCapacity; out.pos = 0; + in.src = srcBuffer; + in.size = srcSize; + in.pos = 0; + out.dst = dstBuffer; + out.size = dstCapacity; + out.pos = 0; while (moreToFlush) { - if(out.pos == out.size) { + if (out.pos == out.size) { return (size_t)-ZSTD_error_dstSize_tooSmall; } moreToFlush = ZSTD_decompressStream(dctx, &out, &in); @@ -258,10 +362,8 @@ static size_t local_defaultDecompress( } } return out.pos; - } - /* ================================================================= */ /* Benchmark Zstandard, mem-to-mem scenarios */ /* ================================================================= */ @@ -285,102 +387,145 @@ static BMK_benchOutcome_t BMK_benchOutcome_error(void) return b; } -static BMK_benchOutcome_t BMK_benchOutcome_setValidResult(BMK_benchResult_t result) +static BMK_benchOutcome_t BMK_benchOutcome_setValidResult( + BMK_benchResult_t result) { BMK_benchOutcome_t b; - b.tag = 0; + b.tag = 0; b.internal_never_use_directly = result; return b; } - /* benchMem with no allocation */ -static BMK_benchOutcome_t -BMK_benchMemAdvancedNoAlloc( - const void** srcPtrs, size_t* srcSizes, - void** cPtrs, size_t* cCapacities, size_t* cSizes, - void** resPtrs, size_t* resSizes, - void** resultBufferPtr, void* compressedBuffer, - size_t maxCompressedSize, - BMK_timedFnState_t* timeStateCompress, - BMK_timedFnState_t* timeStateDecompress, - - const void* srcBuffer, size_t srcSize, - const size_t* fileSizes, unsigned nbFiles, - const int cLevel, - const ZSTD_compressionParameters* comprParams, - const void* dictBuffer, size_t dictBufferSize, - ZSTD_CCtx* cctx, ZSTD_DCtx* dctx, - int displayLevel, const char* displayName, - const BMK_advancedParams_t* adv) +static BMK_benchOutcome_t BMK_benchMemAdvancedNoAlloc( + const void** srcPtrs, + size_t* srcSizes, + void** cPtrs, + size_t* cCapacities, + size_t* cSizes, + void** resPtrs, + size_t* resSizes, + void** resultBufferPtr, + void* compressedBuffer, + size_t maxCompressedSize, + BMK_timedFnState_t* timeStateCompress, + BMK_timedFnState_t* timeStateDecompress, + + const void* srcBuffer, + size_t srcSize, + const size_t* 
fileSizes, + unsigned nbFiles, + const int cLevel, + const ZSTD_compressionParameters* comprParams, + const void* dictBuffer, + size_t dictBufferSize, + ZSTD_CCtx* cctx, + ZSTD_DCtx* dctx, + int displayLevel, + const char* displayName, + const BMK_advancedParams_t* adv) { - size_t const blockSize = ((adv->blockSize>=32 && (adv->mode != BMK_decodeOnly)) ? adv->blockSize : srcSize) + (!srcSize); /* avoid div by 0 */ + size_t const chunkSizeMax = + ((adv->chunkSizeMax >= 32 && (adv->mode != BMK_decodeOnly)) + ? adv->chunkSizeMax + : srcSize) + + (!srcSize); /* avoid div by 0 */ BMK_benchResult_t benchResult; size_t const loadedCompressedSize = srcSize; - size_t cSize = 0; - double ratio = 0.; - U32 nbBlocks; + size_t cSize = 0; + double ratio = 0.; + U32 nbChunks = 0; - assert(cctx != NULL); assert(dctx != NULL); + assert(cctx != NULL); + assert(dctx != NULL); /* init */ memset(&benchResult, 0, sizeof(benchResult)); - if (strlen(displayName)>17) displayName += strlen(displayName) - 17; /* display last 17 characters */ - if (adv->mode == BMK_decodeOnly) { /* benchmark only decompression : source must be already compressed */ + if (strlen(displayName) > 17) + displayName += + strlen(displayName) - 17; /* display last 17 characters */ + if (adv->mode == BMK_decodeOnly) { + /* benchmark only decompression : source must be already compressed */ const char* srcPtr = (const char*)srcBuffer; - U64 totalDSize64 = 0; + U64 totalDSize64 = 0; U32 fileNb; - for (fileNb=0; fileNb decodedSize) { /* size_t overflow */ + RETURN_ERROR( + 32, + BMK_benchOutcome_t, + "decompressed size is too large for local system"); + } *resultBufferPtr = malloc(decodedSize); if (!(*resultBufferPtr)) { - RETURN_ERROR(33, BMK_benchOutcome_t, "not enough memory"); + RETURN_ERROR( + 33, + BMK_benchOutcome_t, + "allocation error: not enough memory"); } - if (totalDSize64 > decodedSize) { /* size_t overflow */ - free(*resultBufferPtr); - RETURN_ERROR(32, BMK_benchOutcome_t, "original size is too large"); - } - cSize = srcSize; + cSize = srcSize; srcSize = decodedSize; - ratio = (double)srcSize / (double)cSize; + ratio = (double)srcSize / (double)cSize; } } - /* Init data blocks */ - { const char* srcPtr = (const char*)srcBuffer; - char* cPtr = (char*)compressedBuffer; - char* resPtr = (char*)(*resultBufferPtr); - U32 fileNb; - for (nbBlocks=0, fileNb=0; fileNbmode == BMK_decodeOnly) ? 1 : (U32)((remaining + (blockSize-1)) / blockSize); - U32 const blockEnd = nbBlocks + nbBlocksforThisFile; - for ( ; nbBlocksmode == BMK_decodeOnly) ? thisBlockSize : ZSTD_compressBound(thisBlockSize); - resPtrs[nbBlocks] = resPtr; - resSizes[nbBlocks] = (adv->mode == BMK_decodeOnly) ? (size_t) ZSTD_findDecompressedSize(srcPtr, thisBlockSize) : thisBlockSize; - srcPtr += thisBlockSize; - cPtr += cCapacities[nbBlocks]; - resPtr += thisBlockSize; - remaining -= thisBlockSize; + /* Init data chunks */ + { + const char* srcPtr = (const char*)srcBuffer; + char* cPtr = (char*)compressedBuffer; + char* resPtr = (char*)(*resultBufferPtr); + U32 fileNb, chunkID; + for (chunkID = 0, fileNb = 0; fileNb < nbFiles; fileNb++) { + size_t remaining = fileSizes[fileNb]; + U32 const nbChunksforThisFile = (adv->mode == BMK_decodeOnly) + ? 
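/* Annotation: in decode-only mode the bench cannot learn the output size from
 * its arguments, so it reads the decompressed sizes declared in the frame
 * headers via ZSTD_findDecompressedSize(), as used above. A hedged sketch of
 * that probe; the helper name is invented: */
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_findDecompressedSize */
#include <zstd.h>

/* Returns the total declared decompressed size, or 0 when any frame omits
 * its content size or the data is not valid zstd. */
static unsigned long long declared_dsize(const void* src, size_t srcSize)
{
    unsigned long long const r = ZSTD_findDecompressedSize(src, srcSize);
    if (r == ZSTD_CONTENTSIZE_UNKNOWN || r == ZSTD_CONTENTSIZE_ERROR) return 0;
    return r;
}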
1 + : (U32)((remaining + (chunkSizeMax - 1)) / chunkSizeMax); + U32 const chunkIdEnd = chunkID + nbChunksforThisFile; + for (; chunkID < chunkIdEnd; chunkID++) { + size_t const chunkSize = MIN(remaining, chunkSizeMax); + srcPtrs[chunkID] = srcPtr; + srcSizes[chunkID] = chunkSize; + cPtrs[chunkID] = cPtr; + cCapacities[chunkID] = (adv->mode == BMK_decodeOnly) + ? chunkSize + : ZSTD_compressBound(chunkSize); + resPtrs[chunkID] = resPtr; + resSizes[chunkID] = (adv->mode == BMK_decodeOnly) + ? (size_t)ZSTD_findDecompressedSize( + srcPtr, chunkSize) + : chunkSize; + srcPtr += chunkSize; + cPtr += cCapacities[chunkID]; + resPtr += chunkSize; + remaining -= chunkSize; if (adv->mode == BMK_decodeOnly) { - assert(nbBlocks==0); - cSizes[nbBlocks] = thisBlockSize; - benchResult.cSize = thisBlockSize; + cSizes[chunkID] = chunkSize; + benchResult.cSize = chunkSize; } } } + nbChunks = chunkID; } /* warming up `compressedBuffer` */ @@ -390,231 +535,328 @@ BMK_benchMemAdvancedNoAlloc( RDG_genBuffer(compressedBuffer, maxCompressedSize, 0.10, 0.50, 1); } + if (!UTIL_support_MT_measurements() && adv->nbWorkers > 1) { + OUTPUTLEVEL( + 2, + "Warning : time measurements may be incorrect in multithreading mode... \n") + } + /* Bench */ - { U64 const crcOrig = (adv->mode == BMK_decodeOnly) ? 0 : XXH64(srcBuffer, srcSize, 0); -# define NB_MARKS 4 + { + U64 const crcOrig = (adv->mode == BMK_decodeOnly) + ? 0 + : XXH64(srcBuffer, srcSize, 0); +#define NB_MARKS 4 const char* marks[NB_MARKS] = { " |", " /", " =", " \\" }; - U32 markNb = 0; - int compressionCompleted = (adv->mode == BMK_decodeOnly); - int decompressionCompleted = (adv->mode == BMK_compressOnly); + U32 markNb = 0; + int compressionCompleted = (adv->mode == BMK_decodeOnly); + int decompressionCompleted = (adv->mode == BMK_compressOnly); BMK_benchParams_t cbp, dbp; BMK_initCCtxArgs cctxprep; BMK_initDCtxArgs dctxprep; - cbp.benchFn = local_defaultCompress; - cbp.benchPayload = cctx; - cbp.initFn = local_initCCtx; - cbp.initPayload = &cctxprep; - cbp.errorFn = ZSTD_isError; - cbp.blockCount = nbBlocks; - cbp.srcBuffers = srcPtrs; - cbp.srcSizes = srcSizes; - cbp.dstBuffers = cPtrs; + cbp.benchFn = local_defaultCompress; /* ZSTD_compress2 */ + cbp.benchPayload = cctx; + cbp.initFn = local_initCCtx; /* BMK_initCCtx */ + cbp.initPayload = &cctxprep; + cbp.errorFn = ZSTD_isError; + cbp.blockCount = nbChunks; + cbp.srcBuffers = srcPtrs; + cbp.srcSizes = srcSizes; + cbp.dstBuffers = cPtrs; cbp.dstCapacities = cCapacities; - cbp.blockResults = cSizes; + cbp.blockResults = cSizes; - cctxprep.cctx = cctx; - cctxprep.dictBuffer = dictBuffer; + cctxprep.cctx = cctx; + cctxprep.dictBuffer = dictBuffer; cctxprep.dictBufferSize = dictBufferSize; - cctxprep.cLevel = cLevel; - cctxprep.comprParams = comprParams; - cctxprep.adv = adv; - - dbp.benchFn = local_defaultDecompress; - dbp.benchPayload = dctx; - dbp.initFn = local_initDCtx; - dbp.initPayload = &dctxprep; - dbp.errorFn = ZSTD_isError; - dbp.blockCount = nbBlocks; - dbp.srcBuffers = (const void* const *) cPtrs; - dbp.srcSizes = cSizes; - dbp.dstBuffers = resPtrs; + cctxprep.cLevel = cLevel; + cctxprep.comprParams = comprParams; + cctxprep.adv = adv; + + dbp.benchFn = local_defaultDecompress; + dbp.benchPayload = dctx; + dbp.initFn = local_initDCtx; + dbp.initPayload = &dctxprep; + dbp.errorFn = ZSTD_isError; + dbp.blockCount = nbChunks; + dbp.srcBuffers = (const void* const*)cPtrs; + dbp.srcSizes = cSizes; + dbp.dstBuffers = resPtrs; dbp.dstCapacities = resSizes; - dbp.blockResults = NULL; + dbp.blockResults = 
NULL; - dctxprep.dctx = dctx; - dctxprep.dictBuffer = dictBuffer; + dctxprep.dctx = dctx; + dctxprep.dictBuffer = dictBuffer; dctxprep.dictBufferSize = dictBufferSize; - DISPLAYLEVEL(2, "\r%70s\r", ""); /* blank line */ - DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (unsigned)srcSize); + OUTPUTLEVEL(2, "\r%70s\r", ""); /* blank line */ + assert(srcSize < UINT_MAX); + OUTPUTLEVEL( + 2, + "%2s-%-17.17s :%10u -> \r", + marks[markNb], + displayName, + (unsigned)srcSize); while (!(compressionCompleted && decompressionCompleted)) { if (!compressionCompleted) { - BMK_runOutcome_t const cOutcome = BMK_benchTimedFn( timeStateCompress, cbp); + BMK_runOutcome_t const cOutcome = + BMK_benchTimedFn(timeStateCompress, cbp); if (!BMK_isSuccessful_runOutcome(cOutcome)) { - return BMK_benchOutcome_error(); + RETURN_ERROR(30, BMK_benchOutcome_t, "compression error"); } - { BMK_runTime_t const cResult = BMK_extract_runTime(cOutcome); - cSize = cResult.sumOfReturn; - ratio = (double)srcSize / cSize; - { BMK_benchResult_t newResult; - newResult.cSpeed = (U64)((double)srcSize * TIMELOOP_NANOSEC / cResult.nanoSecPerRun); + { + BMK_runTime_t const cResult = BMK_extract_runTime(cOutcome); + cSize = cResult.sumOfReturn; + ratio = (double)srcSize / (double)cSize; + { + BMK_benchResult_t newResult; + newResult.cSpeed = + (U64)((double)srcSize * TIMELOOP_NANOSEC + / cResult.nanoSecPerRun); benchResult.cSize = cSize; if (newResult.cSpeed > benchResult.cSpeed) benchResult.cSpeed = newResult.cSpeed; - } } - - { int const ratioAccuracy = (ratio < 10.) ? 3 : 2; - DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.*f),%6.*f MB/s\r", - marks[markNb], displayName, - (unsigned)srcSize, (unsigned)cSize, - ratioAccuracy, ratio, - benchResult.cSpeed < (10 MB) ? 2 : 1, (double)benchResult.cSpeed / MB_UNIT); + } } - compressionCompleted = BMK_isCompleted_TimedFn(timeStateCompress); + + { + int const ratioDigits = 1 + (ratio < 100.) + (ratio < 10.); + assert(cSize < UINT_MAX); + OUTPUTLEVEL( + 2, + "%2s-%-17.17s :%10u ->%10u (x%5.*f), %6.*f MB/s \r", + marks[markNb], + displayName, + (unsigned)srcSize, + (unsigned)cSize, + ratioDigits, + ratio, + benchResult.cSpeed < (10 * MB_UNIT) ? 2 : 1, + (double)benchResult.cSpeed / MB_UNIT); + } + compressionCompleted = + BMK_isCompleted_TimedFn(timeStateCompress); } - if(!decompressionCompleted) { - BMK_runOutcome_t const dOutcome = BMK_benchTimedFn(timeStateDecompress, dbp); + if (!decompressionCompleted) { + BMK_runOutcome_t const dOutcome = + BMK_benchTimedFn(timeStateDecompress, dbp); - if(!BMK_isSuccessful_runOutcome(dOutcome)) { - return BMK_benchOutcome_error(); + if (!BMK_isSuccessful_runOutcome(dOutcome)) { + RETURN_ERROR(30, BMK_benchOutcome_t, "decompression error"); } - { BMK_runTime_t const dResult = BMK_extract_runTime(dOutcome); - U64 const newDSpeed = (U64)((double)srcSize * TIMELOOP_NANOSEC / dResult.nanoSecPerRun); + { + BMK_runTime_t const dResult = BMK_extract_runTime(dOutcome); + U64 const newDSpeed = + (U64)((double)srcSize * TIMELOOP_NANOSEC + / dResult.nanoSecPerRun); if (newDSpeed > benchResult.dSpeed) benchResult.dSpeed = newDSpeed; } - { int const ratioAccuracy = (ratio < 10.) ? 3 : 2; - DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.*f),%6.*f MB/s ,%6.1f MB/s \r", - marks[markNb], displayName, - (unsigned)srcSize, (unsigned)benchResult.cSize, - ratioAccuracy, ratio, - benchResult.cSpeed < (10 MB) ? 2 : 1, (double)benchResult.cSpeed / MB_UNIT, + { + int const ratioDigits = 1 + (ratio < 100.) 
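/* Annotation: the speed accounting used just above - a timed run reports
 * nanoseconds per run, speed in bytes/s is srcSize * TIMELOOP_NANOSEC /
 * nanoSecPerRun, and only the best (fastest) observation is kept across
 * iterations. Sketch, assuming TIMELOOP_NANOSEC is one second in ns and
 * MB_UNIT is 10^6, as elsewhere in this file: */
#include <stdint.h>

static uint64_t bytes_per_sec(uint64_t srcSize, double nanoSecPerRun)
{
    return (uint64_t)((double)srcSize * 1000000000.0 / nanoSecPerRun);
}
/* e.g. 10 MiB processed in 5 ms per run:
 * 10485760 * 1e9 / 5e6 = 2097152000 B/s, displayed as ~2097.2 MB/s. */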
+ (ratio < 10.); + OUTPUTLEVEL( + 2, + "%2s-%-17.17s :%10u ->%10u (x%5.*f), %6.*f MB/s, %6.1f MB/s\r", + marks[markNb], + displayName, + (unsigned)srcSize, + (unsigned)cSize, + ratioDigits, + ratio, + benchResult.cSpeed < (10 * MB_UNIT) ? 2 : 1, + (double)benchResult.cSpeed / MB_UNIT, (double)benchResult.dSpeed / MB_UNIT); } - decompressionCompleted = BMK_isCompleted_TimedFn(timeStateDecompress); + decompressionCompleted = + BMK_isCompleted_TimedFn(timeStateDecompress); } - markNb = (markNb+1) % NB_MARKS; - } /* while (!(compressionCompleted && decompressionCompleted)) */ + markNb = (markNb + 1) % NB_MARKS; + } /* while (!(compressionCompleted && decompressionCompleted)) */ /* CRC Checking */ { const BYTE* resultBuffer = (const BYTE*)(*resultBufferPtr); - U64 const crcCheck = XXH64(resultBuffer, srcSize, 0); - if ((adv->mode == BMK_both) && (crcOrig!=crcCheck)) { + U64 const crcCheck = XXH64(resultBuffer, srcSize, 0); + if ((adv->mode == BMK_both) && (crcOrig != crcCheck)) { size_t u; DISPLAY("!!! WARNING !!! %14s : Invalid Checksum : %x != %x \n", - displayName, (unsigned)crcOrig, (unsigned)crcCheck); - for (u=0; u u) break; + for (segNb = 0; segNb < nbChunks; segNb++) { + if (bacc + srcSizes[segNb] > u) + break; bacc += srcSizes[segNb]; } pos = (U32)(u - bacc); bNb = pos / (128 KB); - DISPLAY("(sample %u, block %u, pos %u) \n", segNb, bNb, pos); - { size_t const lowest = (u>5) ? 5 : u; + DISPLAY("(sample %u, chunk %u, pos %u) \n", + segNb, + bNb, + pos); + { + size_t const lowest = (u > 5) ? 5 : u; size_t n; DISPLAY("origin: "); - for (n=lowest; n>0; n--) - DISPLAY("%02X ", ((const BYTE*)srcBuffer)[u-n]); + for (n = lowest; n > 0; n--) + DISPLAY("%02X ", + ((const BYTE*)srcBuffer)[u - n]); DISPLAY(" :%02X: ", ((const BYTE*)srcBuffer)[u]); - for (n=1; n<3; n++) - DISPLAY("%02X ", ((const BYTE*)srcBuffer)[u+n]); + for (n = 1; n < 3; n++) + DISPLAY("%02X ", + ((const BYTE*)srcBuffer)[u + n]); DISPLAY(" \n"); DISPLAY("decode: "); - for (n=lowest; n>0; n++) - DISPLAY("%02X ", resultBuffer[u-n]); + for (n = lowest; n > 0; n--) + DISPLAY("%02X ", resultBuffer[u - n]); DISPLAY(" :%02X: ", resultBuffer[u]); - for (n=1; n<3; n++) - DISPLAY("%02X ", resultBuffer[u+n]); + for (n = 1; n < 3; n++) + DISPLAY("%02X ", resultBuffer[u + n]); DISPLAY(" \n"); } break; } - if (u==srcSize-1) { /* should never happen */ + if (u == srcSize - 1) { /* should never happen */ DISPLAY("no difference detected\n"); } - } - } - } /* CRC Checking */ + } /* for (u=0; umode == BMK_both) && (crcOrig!=crcCheck)) */ + } /* CRC Checking */ - if (displayLevel == 1) { /* hidden display mode -q, used by python speed benchmark */ + if (displayLevel + == 1) { /* hidden display mode -q, used by python speed benchmark */ double const cSpeed = (double)benchResult.cSpeed / MB_UNIT; double const dSpeed = (double)benchResult.dSpeed / MB_UNIT; if (adv->additionalParam) { - DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s (param=%d)\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName, adv->additionalParam); + OUTPUT("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s (param=%d)\n", + cLevel, + (int)cSize, + ratio, + cSpeed, + dSpeed, + displayName, + adv->additionalParam); } else { - DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName); + OUTPUT("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s\n", + cLevel, + (int)cSize, + ratio, + cSpeed, + dSpeed, + displayName); } } - DISPLAYLEVEL(2, "%2i#\n", cLevel); - } /* Bench */ + OUTPUTLEVEL(2, "%2i#\n", cLevel); + } /* Bench */ - 
benchResult.cMem = (1ULL << (comprParams->windowLog)) + ZSTD_sizeof_CCtx(cctx); + benchResult.cMem = + (1ULL << (comprParams->windowLog)) + ZSTD_sizeof_CCtx(cctx); return BMK_benchOutcome_setValidResult(benchResult); } -BMK_benchOutcome_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize, - void* dstBuffer, size_t dstCapacity, - const size_t* fileSizes, unsigned nbFiles, - int cLevel, const ZSTD_compressionParameters* comprParams, - const void* dictBuffer, size_t dictBufferSize, - int displayLevel, const char* displayName, const BMK_advancedParams_t* adv) +BMK_benchOutcome_t BMK_benchMemAdvanced( + const void* srcBuffer, + size_t srcSize, + void* dstBuffer, + size_t dstCapacity, + const size_t* fileSizes, + unsigned nbFiles, + int cLevel, + const ZSTD_compressionParameters* comprParams, + const void* dictBuffer, + size_t dictBufferSize, + int displayLevel, + const char* displayName, + const BMK_advancedParams_t* adv) { - int const dstParamsError = !dstBuffer ^ !dstCapacity; /* must be both NULL or none */ - - size_t const blockSize = ((adv->blockSize>=32 && (adv->mode != BMK_decodeOnly)) ? adv->blockSize : srcSize) + (!srcSize) /* avoid div by 0 */ ; - U32 const maxNbBlocks = (U32) ((srcSize + (blockSize-1)) / blockSize) + nbFiles; + int const dstParamsError = + !dstBuffer ^ !dstCapacity; /* must be both NULL or none */ - /* these are the blockTable parameters, just split up */ - const void ** const srcPtrs = (const void**)malloc(maxNbBlocks * sizeof(void*)); - size_t* const srcSizes = (size_t*)malloc(maxNbBlocks * sizeof(size_t)); + size_t const chunkSize = + ((adv->chunkSizeMax >= 32 && (adv->mode != BMK_decodeOnly)) + ? adv->chunkSizeMax + : srcSize) + + (!srcSize) /* avoid div by 0 */; + U32 const nbChunksMax = + (U32)((srcSize + (chunkSize - 1)) / chunkSize) + nbFiles; + const void** const srcPtrs = + (const void**)malloc(nbChunksMax * sizeof(void*)); + size_t* const srcSizes = (size_t*)malloc(nbChunksMax * sizeof(size_t)); - void ** const cPtrs = (void**)malloc(maxNbBlocks * sizeof(void*)); - size_t* const cSizes = (size_t*)malloc(maxNbBlocks * sizeof(size_t)); - size_t* const cCapacities = (size_t*)malloc(maxNbBlocks * sizeof(size_t)); + void** const cPtrs = (void**)malloc(nbChunksMax * sizeof(void*)); + size_t* const cSizes = (size_t*)malloc(nbChunksMax * sizeof(size_t)); + size_t* const cCapacities = (size_t*)malloc(nbChunksMax * sizeof(size_t)); - void ** const resPtrs = (void**)malloc(maxNbBlocks * sizeof(void*)); - size_t* const resSizes = (size_t*)malloc(maxNbBlocks * sizeof(size_t)); + void** const resPtrs = (void**)malloc(nbChunksMax * sizeof(void*)); + size_t* const resSizes = (size_t*)malloc(nbChunksMax * sizeof(size_t)); - BMK_timedFnState_t* timeStateCompress = BMK_createTimedFnState(adv->nbSeconds * 1000, BMK_RUNTEST_DEFAULT_MS); - BMK_timedFnState_t* timeStateDecompress = BMK_createTimedFnState(adv->nbSeconds * 1000, BMK_RUNTEST_DEFAULT_MS); + BMK_timedFnState_t* timeStateCompress = BMK_createTimedFnState( + adv->nbSeconds * 1000, BMK_RUNTEST_DEFAULT_MS); + BMK_timedFnState_t* timeStateDecompress = BMK_createTimedFnState( + adv->nbSeconds * 1000, BMK_RUNTEST_DEFAULT_MS); ZSTD_CCtx* const cctx = ZSTD_createCCtx(); ZSTD_DCtx* const dctx = ZSTD_createDCtx(); - const size_t maxCompressedSize = dstCapacity ? dstCapacity : ZSTD_compressBound(srcSize) + (maxNbBlocks * 1024); + const size_t maxCompressedSize = dstCapacity + ? dstCapacity + : ZSTD_compressBound(srcSize) + (nbChunksMax * 1024); - void* const internalDstBuffer = dstBuffer ? 
NULL : malloc(maxCompressedSize); + void* const internalDstBuffer = + dstBuffer ? NULL : malloc(maxCompressedSize); void* const compressedBuffer = dstBuffer ? dstBuffer : internalDstBuffer; - BMK_benchOutcome_t outcome = BMK_benchOutcome_error(); /* error by default */ + BMK_benchOutcome_t outcome = + BMK_benchOutcome_error(); /* error by default */ void* resultBuffer = srcSize ? malloc(srcSize) : NULL; - int allocationincomplete = !srcPtrs || !srcSizes || !cPtrs || - !cSizes || !cCapacities || !resPtrs || !resSizes || - !timeStateCompress || !timeStateDecompress || - !cctx || !dctx || - !compressedBuffer || !resultBuffer; - + int const allocationincomplete = !srcPtrs || !srcSizes || !cPtrs || !cSizes + || !cCapacities || !resPtrs || !resSizes || !timeStateCompress + || !timeStateDecompress || !cctx || !dctx || !compressedBuffer + || !resultBuffer; if (!allocationincomplete && !dstParamsError) { - outcome = BMK_benchMemAdvancedNoAlloc(srcPtrs, srcSizes, - cPtrs, cCapacities, cSizes, - resPtrs, resSizes, - &resultBuffer, - compressedBuffer, maxCompressedSize, - timeStateCompress, timeStateDecompress, - srcBuffer, srcSize, - fileSizes, nbFiles, - cLevel, comprParams, - dictBuffer, dictBufferSize, - cctx, dctx, - displayLevel, displayName, adv); + outcome = BMK_benchMemAdvancedNoAlloc( + srcPtrs, + srcSizes, + cPtrs, + cCapacities, + cSizes, + resPtrs, + resSizes, + &resultBuffer, + compressedBuffer, + maxCompressedSize, + timeStateCompress, + timeStateDecompress, + srcBuffer, + srcSize, + fileSizes, + nbFiles, + cLevel, + comprParams, + dictBuffer, + dictBufferSize, + cctx, + dctx, + displayLevel, + displayName, + adv); } /* clean up */ @@ -635,104 +877,167 @@ BMK_benchOutcome_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize, free(resPtrs); free(resSizes); - if(allocationincomplete) { - RETURN_ERROR(31, BMK_benchOutcome_t, "allocation error : not enough memory"); + if (allocationincomplete) { + RETURN_ERROR( + 31, BMK_benchOutcome_t, "allocation error : not enough memory"); } - if(dstParamsError) { + if (dstParamsError) { RETURN_ERROR(32, BMK_benchOutcome_t, "Dst parameters not coherent"); } return outcome; } -BMK_benchOutcome_t BMK_benchMem(const void* srcBuffer, size_t srcSize, - const size_t* fileSizes, unsigned nbFiles, - int cLevel, const ZSTD_compressionParameters* comprParams, - const void* dictBuffer, size_t dictBufferSize, - int displayLevel, const char* displayName) { - +BMK_benchOutcome_t BMK_benchMem( + const void* srcBuffer, + size_t srcSize, + const size_t* fileSizes, + unsigned nbFiles, + int cLevel, + const ZSTD_compressionParameters* comprParams, + const void* dictBuffer, + size_t dictBufferSize, + int displayLevel, + const char* displayName) +{ BMK_advancedParams_t const adv = BMK_initAdvancedParams(); - return BMK_benchMemAdvanced(srcBuffer, srcSize, - NULL, 0, - fileSizes, nbFiles, - cLevel, comprParams, - dictBuffer, dictBufferSize, - displayLevel, displayName, &adv); + return BMK_benchMemAdvanced( + srcBuffer, + srcSize, + NULL, + 0, + fileSizes, + nbFiles, + cLevel, + comprParams, + dictBuffer, + dictBufferSize, + displayLevel, + displayName, + &adv); } -static BMK_benchOutcome_t BMK_benchCLevel(const void* srcBuffer, size_t benchedSize, - const size_t* fileSizes, unsigned nbFiles, - int cLevel, const ZSTD_compressionParameters* comprParams, - const void* dictBuffer, size_t dictBufferSize, - int displayLevel, const char* displayName, - BMK_advancedParams_t const * const adv) +/* @return: 0 on success, !0 if error */ +static int BMK_benchCLevels( + const 
void* srcBuffer, + size_t benchedSize, + const size_t* fileSizes, + unsigned nbFiles, + int startCLevel, int endCLevel, + const ZSTD_compressionParameters* comprParams, + const void* dictBuffer, + size_t dictBufferSize, + int displayLevel, + const char* displayName, + BMK_advancedParams_t const* const adv) { + int level; const char* pch = strrchr(displayName, '\\'); /* Windows */ - if (!pch) pch = strrchr(displayName, '/'); /* Linux */ - if (pch) displayName = pch+1; + if (!pch) + pch = strrchr(displayName, '/'); /* Linux */ + if (pch) + displayName = pch + 1; + + if (endCLevel > ZSTD_maxCLevel()) { + DISPLAYLEVEL(1, "Invalid Compression Level \n"); + return 15; + } + if (endCLevel < startCLevel) { + DISPLAYLEVEL(1, "Invalid Compression Level Range \n"); + return 15; + } if (adv->realTime) { DISPLAYLEVEL(2, "Note : switching to real-time priority \n"); SET_REALTIME_PRIORITY; } - if (displayLevel == 1 && !adv->additionalParam) /* --quiet mode */ - DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n", - ZSTD_VERSION_STRING, ZSTD_GIT_COMMIT_STRING, - (unsigned)benchedSize, adv->nbSeconds, (unsigned)(adv->blockSize>>10)); - - return BMK_benchMemAdvanced(srcBuffer, benchedSize, - NULL, 0, - fileSizes, nbFiles, - cLevel, comprParams, - dictBuffer, dictBufferSize, - displayLevel, displayName, adv); + if (displayLevel == 1 && !adv->additionalParam) /* --quiet mode */ + OUTPUT("bench %s %s: input %u bytes, %u seconds, %u KB chunks\n", + ZSTD_VERSION_STRING, + ZSTD_GIT_COMMIT_STRING, + (unsigned)benchedSize, + adv->nbSeconds, + (unsigned)(adv->chunkSizeMax >> 10)); + + for (level = startCLevel; level <= endCLevel; level++) { + BMK_benchOutcome_t res = BMK_benchMemAdvanced( + srcBuffer, + benchedSize, + NULL, + 0, + fileSizes, + nbFiles, + level, + comprParams, + dictBuffer, + dictBufferSize, + displayLevel, + displayName, + adv); + if (!BMK_isSuccessful_benchOutcome(res)) return 1; + } + return 0; } -BMK_benchOutcome_t BMK_syntheticTest(int cLevel, double compressibility, - const ZSTD_compressionParameters* compressionParams, - int displayLevel, const BMK_advancedParams_t* adv) +int BMK_syntheticTest( + double compressibility, + int startingCLevel, int endCLevel, + const ZSTD_compressionParameters* compressionParams, + int displayLevel, + const BMK_advancedParams_t* adv) { - char name[20] = {0}; - size_t const benchedSize = 10000000; - void* srcBuffer; - BMK_benchOutcome_t res; - - if (cLevel > ZSTD_maxCLevel()) { - RETURN_ERROR(15, BMK_benchOutcome_t, "Invalid Compression Level"); - } + char nameBuff[20] = { 0 }; + const char* name = nameBuff; + size_t const benchedSize = adv->chunkSizeMax ? adv->chunkSizeMax : 10000000; /* Memory allocation */ - srcBuffer = malloc(benchedSize); - if (!srcBuffer) RETURN_ERROR(21, BMK_benchOutcome_t, "not enough memory"); + void* const srcBuffer = malloc(benchedSize); + if (!srcBuffer) { + DISPLAYLEVEL(1, "allocation error : not enough memory \n"); + return 16; + } /* Fill input buffer */ - RDG_genBuffer(srcBuffer, benchedSize, compressibility, 0.0, 0); + if (compressibility < 0.0) { + LOREM_genBuffer(srcBuffer, benchedSize, 0); + name = "Lorem ipsum"; + } else { + RDG_genBuffer(srcBuffer, benchedSize, compressibility, 0.0, 0); + formatString_u( + nameBuff, + sizeof(nameBuff), + "Synthetic %u%%", + (unsigned)(compressibility * 100)); + } /* Bench */ - snprintf (name, sizeof(name), "Synthetic %2u%%", (unsigned)(compressibility*100)); - res = BMK_benchCLevel(srcBuffer, benchedSize, - &benchedSize /* ? */, 1 /* ? 
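/* Annotation: usage sketch for the reworked range API above. Benchmarking a
 * single level is just startCLevel == endCLevel, which is exactly how the
 * BMK_benchFiles wrapper further down calls BMK_benchFilesAdvanced. The
 * caller below is hypothetical, and it assumes - as the setters above
 * suggest - that a zeroed ZSTD_compressionParameters leaves every cParam
 * field at its default: */
#include <string.h>   /* memset */
#include "benchzstd.h"

static int bench_levels_1_to_5(const char* fileName)
{
    BMK_advancedParams_t const adv = BMK_initAdvancedParams();
    ZSTD_compressionParameters cParams;
    memset(&cParams, 0, sizeof(cParams));   /* 0 = keep defaults */
    return BMK_benchFilesAdvanced(&fileName, 1, NULL /* no dictionary */,
                                  1 /* startCLevel */, 5 /* endCLevel */,
                                  &cParams, 2 /* displayLevel */, &adv);
}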
*/, - cLevel, compressionParams, - NULL, 0, /* dictionary */ - displayLevel, name, adv); - - /* clean up */ - free(srcBuffer); - - return res; + { int res = BMK_benchCLevels( + srcBuffer, + benchedSize, + &benchedSize, + 1, + startingCLevel, endCLevel, + compressionParams, + NULL, + 0, /* dictionary */ + displayLevel, + name, + adv); + free(srcBuffer); + return res; + } } - - static size_t BMK_findMaxMem(U64 requiredMem) { size_t const step = 64 MB; - BYTE* testmem = NULL; + BYTE* testmem = NULL; requiredMem = (((requiredMem >> 26) + 1) << 26); requiredMem += step; - if (requiredMem > maxMemory) requiredMem = maxMemory; + if (requiredMem > maxMemory) + requiredMem = maxMemory; do { testmem = (BYTE*)malloc((size_t)requiredMem); @@ -746,130 +1051,197 @@ static size_t BMK_findMaxMem(U64 requiredMem) /*! BMK_loadFiles() : * Loads `buffer` with content of files listed within `fileNamesTable`. * At most, fills `buffer` entirely. */ -static int BMK_loadFiles(void* buffer, size_t bufferSize, - size_t* fileSizes, - const char* const * fileNamesTable, unsigned nbFiles, - int displayLevel) +static int BMK_loadFiles( + void* buffer, + size_t bufferSize, + size_t* fileSizes, + const char* const* fileNamesTable, + unsigned nbFiles, + int displayLevel) { size_t pos = 0, totalSize = 0; unsigned n; - for (n=0; n bufferSize-pos) fileSize = bufferSize-pos, nbFiles=n; /* buffer too small - stop after this file */ - { size_t const readSize = fread(((char*)buffer)+pos, 1, (size_t)fileSize, f); - if (readSize != (size_t)fileSize) EXM_THROW_INT(11, "could not read %s", fileNamesTable[n]); - pos += readSize; + if (fileSize > bufferSize - pos) { + /* buffer too small - limit quantity loaded */ + fileSize = bufferSize - pos; + nbFiles = n; /* stop after this file */ + } + + { FILE* const f = fopen(filename, "rb"); + if (f == NULL) { + RETURN_ERROR_INT( + 10, "cannot open file %s", filename); + } + OUTPUTLEVEL(2, "Loading %s... 
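/* Annotation: BMK_findMaxMem (above) discovers a usable memory budget
 * empirically - round the requirement up to a 64 MB boundary, then shrink
 * by 64 MB steps until malloc() succeeds. A simplified sketch of that probe
 * loop; the real function additionally clamps to a platform maximum: */
#include <stdlib.h>

static size_t probe_max_mem(unsigned long long required)
{
    size_t const step = (size_t)64 << 20;                       /* 64 MB */
    size_t sz = (size_t)(((required >> 26) + 1) << 26) + step;  /* round up */
    void* p = NULL;
    while (p == NULL && sz > step) {
        sz -= step;          /* shrink and retry */
        p = malloc(sz);
    }
    free(p);
    return sz;               /* approximately the largest size that allocated */
}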
\r", filename); + { size_t const readSize = + fread(((char*)buffer) + pos, 1, (size_t)fileSize, f); + if (readSize != (size_t)fileSize) { + fclose(f); + RETURN_ERROR_INT( + 11, "invalid read %s", filename); + } + pos += readSize; + } + fileSizes[n] = (size_t)fileSize; + totalSize += (size_t)fileSize; + fclose(f); } - fileSizes[n] = (size_t)fileSize; - totalSize += (size_t)fileSize; - fclose(f); } - if (totalSize == 0) EXM_THROW_INT(12, "no data to bench"); + if (totalSize == 0) + RETURN_ERROR_INT(12, "no data to bench"); return 0; } -BMK_benchOutcome_t BMK_benchFilesAdvanced( - const char* const * fileNamesTable, unsigned nbFiles, - const char* dictFileName, int cLevel, - const ZSTD_compressionParameters* compressionParams, - int displayLevel, const BMK_advancedParams_t* adv) +int BMK_benchFilesAdvanced( + const char* const* fileNamesTable, + unsigned nbFiles, + const char* dictFileName, + int startCLevel, int endCLevel, + const ZSTD_compressionParameters* compressionParams, + int displayLevel, + const BMK_advancedParams_t* adv) { void* srcBuffer = NULL; size_t benchedSize; - void* dictBuffer = NULL; + void* dictBuffer = NULL; size_t dictBufferSize = 0; - size_t* fileSizes = NULL; - BMK_benchOutcome_t res; + size_t* fileSizes = NULL; + int res = 1; U64 const totalSizeToLoad = UTIL_getTotalFileSize(fileNamesTable, nbFiles); if (!nbFiles) { - RETURN_ERROR(14, BMK_benchOutcome_t, "No Files to Benchmark"); + DISPLAYLEVEL(1, "No Files to Benchmark"); + return 13; } - if (cLevel > ZSTD_maxCLevel()) { - RETURN_ERROR(15, BMK_benchOutcome_t, "Invalid Compression Level"); + if (endCLevel > ZSTD_maxCLevel()) { + DISPLAYLEVEL(1, "Invalid Compression Level"); + return 14; + } + + if (totalSizeToLoad == UTIL_FILESIZE_UNKNOWN) { + DISPLAYLEVEL(1, "Error loading files"); + return 15; } fileSizes = (size_t*)calloc(nbFiles, sizeof(size_t)); - if (!fileSizes) RETURN_ERROR(12, BMK_benchOutcome_t, "not enough memory for fileSizes"); + if (!fileSizes) { + DISPLAYLEVEL(1, "not enough memory for fileSizes"); + return 16; + } /* Load dictionary */ if (dictFileName != NULL) { U64 const dictFileSize = UTIL_getFileSize(dictFileName); if (dictFileSize == UTIL_FILESIZE_UNKNOWN) { - DISPLAYLEVEL(1, "error loading %s : %s \n", dictFileName, strerror(errno)); + DISPLAYLEVEL( + 1, + "error loading %s : %s \n", + dictFileName, + strerror(errno)); free(fileSizes); - RETURN_ERROR(9, BMK_benchOutcome_t, "benchmark aborted"); + DISPLAYLEVEL(1, "benchmark aborted"); + return 17; } if (dictFileSize > 64 MB) { free(fileSizes); - RETURN_ERROR(10, BMK_benchOutcome_t, "dictionary file %s too large", dictFileName); + DISPLAYLEVEL(1, "dictionary file %s too large", dictFileName); + return 18; } dictBufferSize = (size_t)dictFileSize; - dictBuffer = malloc(dictBufferSize); - if (dictBuffer==NULL) { + dictBuffer = malloc(dictBufferSize); + if (dictBuffer == NULL) { free(fileSizes); - RETURN_ERROR(11, BMK_benchOutcome_t, "not enough memory for dictionary (%u bytes)", - (unsigned)dictBufferSize); + DISPLAYLEVEL( + 1, + "not enough memory for dictionary (%u bytes)", + (unsigned)dictBufferSize); + return 19; } - { int const errorCode = BMK_loadFiles(dictBuffer, dictBufferSize, - fileSizes, &dictFileName /*?*/, - 1 /*?*/, displayLevel); + { + int const errorCode = BMK_loadFiles( + dictBuffer, + dictBufferSize, + fileSizes, + &dictFileName /*?*/, + 1 /*?*/, + displayLevel); if (errorCode) { - res = BMK_benchOutcome_error(); goto _cleanUp; - } } + } + } } /* Memory allocation & restrictions */ benchedSize = BMK_findMaxMem(totalSizeToLoad * 3) / 
3; - if ((U64)benchedSize > totalSizeToLoad) benchedSize = (size_t)totalSizeToLoad; + if ((U64)benchedSize > totalSizeToLoad) + benchedSize = (size_t)totalSizeToLoad; if (benchedSize < totalSizeToLoad) - DISPLAY("Not enough memory; testing %u MB only...\n", (unsigned)(benchedSize >> 20)); + DISPLAY("Not enough memory; testing %u MB only...\n", + (unsigned)(benchedSize >> 20)); srcBuffer = benchedSize ? malloc(benchedSize) : NULL; if (!srcBuffer) { free(dictBuffer); free(fileSizes); - RETURN_ERROR(12, BMK_benchOutcome_t, "not enough memory"); + DISPLAYLEVEL(1, "not enough memory for srcBuffer"); + return 20; } /* Load input buffer */ - { int const errorCode = BMK_loadFiles(srcBuffer, benchedSize, - fileSizes, fileNamesTable, nbFiles, - displayLevel); + { + int const errorCode = BMK_loadFiles( + srcBuffer, + benchedSize, + fileSizes, + fileNamesTable, + nbFiles, + displayLevel); if (errorCode) { - res = BMK_benchOutcome_error(); goto _cleanUp; - } } + } + } /* Bench */ - { char mfName[20] = {0}; - snprintf (mfName, sizeof(mfName), " %u files", nbFiles); - { const char* const displayName = (nbFiles > 1) ? mfName : fileNamesTable[0]; - res = BMK_benchCLevel(srcBuffer, benchedSize, - fileSizes, nbFiles, - cLevel, compressionParams, - dictBuffer, dictBufferSize, - displayLevel, displayName, - adv); - } } + { + char mfName[20] = { 0 }; + formatString_u(mfName, sizeof(mfName), " %u files", nbFiles); + { const char* const displayName = + (nbFiles > 1) ? mfName : fileNamesTable[0]; + res = BMK_benchCLevels( + srcBuffer, + benchedSize, + fileSizes, + nbFiles, + startCLevel, endCLevel, + compressionParams, + dictBuffer, + dictBufferSize, + displayLevel, + displayName, + adv); + } + } _cleanUp: free(srcBuffer); @@ -878,13 +1250,21 @@ BMK_benchOutcome_t BMK_benchFilesAdvanced( return res; } - -BMK_benchOutcome_t BMK_benchFiles( - const char* const * fileNamesTable, unsigned nbFiles, - const char* dictFileName, - int cLevel, const ZSTD_compressionParameters* compressionParams, - int displayLevel) +int BMK_benchFiles( + const char* const* fileNamesTable, + unsigned nbFiles, + const char* dictFileName, + int cLevel, + const ZSTD_compressionParameters* compressionParams, + int displayLevel) { BMK_advancedParams_t const adv = BMK_initAdvancedParams(); - return BMK_benchFilesAdvanced(fileNamesTable, nbFiles, dictFileName, cLevel, compressionParams, displayLevel, &adv); + return BMK_benchFilesAdvanced( + fileNamesTable, + nbFiles, + dictFileName, + cLevel, cLevel, + compressionParams, + displayLevel, + &adv); } diff --git a/programs/benchzstd.h b/programs/benchzstd.h index 2c7627713ee..d62a33c0aec 100644 --- a/programs/benchzstd.h +++ b/programs/benchzstd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the @@ -14,18 +14,13 @@ * and display progress result and final summary */ -#if defined (__cplusplus) -extern "C" { -#endif - #ifndef BENCH_ZSTD_H_3242387 #define BENCH_ZSTD_H_3242387 /* === Dependencies === */ #include <stddef.h> /* size_t */ #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */ -#include "zstd.h" /* ZSTD_compressionParameters */ - +#include "../lib/zstd.h" /* ZSTD_compressionParameters */ /* === Constants === */ @@ -81,21 +76,13 @@ BMK_benchResult_t BMK_extract_benchResult(BMK_benchOutcome_t outcome); * 2 : + result + interaction + warnings; * 3 : + information; * 4 : + debug - * @return: - * a variant, which expresses either an error, or a valid result. - * Use BMK_isSuccessful_benchOutcome() to check if function was successful. - * If yes, extract the valid result with BMK_extract_benchResult(), - * it will contain : - * .cSpeed: compression speed in bytes per second, - * .dSpeed: decompression speed in bytes per second, - * .cSize : compressed size, in bytes - * .cMem : memory budget required for the compression context + * @return: 0 on success, !0 on error */ -BMK_benchOutcome_t BMK_benchFiles( - const char* const * fileNamesTable, unsigned nbFiles, - const char* dictFileName, - int cLevel, const ZSTD_compressionParameters* compressionParams, - int displayLevel); +int BMK_benchFiles( + const char* const * fileNamesTable, unsigned nbFiles, + const char* dictFileName, + int cLevel, const ZSTD_compressionParameters* compressionParams, + int displayLevel); typedef enum { @@ -105,9 +92,10 @@ typedef enum { } BMK_mode_t; typedef struct { - BMK_mode_t mode; /* 0: all, 1: compress only 2: decode only */ + BMK_mode_t mode; /* 0: both, 1: compress only 2: decode only */ unsigned nbSeconds; /* default timing is in nbSeconds */ - size_t blockSize; /* Maximum size of each block*/ + size_t chunkSizeMax; /* Maximum size of each independent chunk */ + size_t targetCBlockSize;/* Approximative size of compressed blocks */ int nbWorkers; /* multithreading */ unsigned realTime; /* real time priority */ int additionalParam; /* used by python speed benchmark */ @@ -116,7 +104,8 @@ typedef struct { int ldmHashLog; int ldmBucketSizeLog; int ldmHashRateLog; - ZSTD_literalCompressionMode_e literalCompressionMode; + ZSTD_ParamSwitch_e literalCompressionMode; + int useRowMatchFinder; /* use row-based matchfinder if possible */ } BMK_advancedParams_t; /* returns default parameters used by nonAdvanced functions */ @@ -125,33 +114,27 @@ BMK_advancedParams_t BMK_initAdvancedParams(void); /*! BMK_benchFilesAdvanced(): * Same as BMK_benchFiles(), * with more controls, provided through advancedParams_t structure */ -BMK_benchOutcome_t BMK_benchFilesAdvanced( - const char* const * fileNamesTable, unsigned nbFiles, - const char* dictFileName, - int cLevel, const ZSTD_compressionParameters* compressionParams, - int displayLevel, const BMK_advancedParams_t* adv); +int BMK_benchFilesAdvanced( + const char* const * fileNamesTable, unsigned nbFiles, + const char* dictFileName, + int startCLevel, int endCLevel, + const ZSTD_compressionParameters* compressionParams, + int displayLevel, const BMK_advancedParams_t* adv); /*! 
BMK_syntheticTest() -- called from zstdcli */ -/* Generates a sample with datagen, using compressibility argument */ -/* cLevel - compression level to benchmark, errors if invalid - * compressibility - determines compressibility of sample - * compressionParams - basic compression Parameters - * displayLevel - see benchFiles - * adv - see advanced_Params_t - * @return: - * a variant, which expresses either an error, or a valid result. - * Use BMK_isSuccessful_benchOutcome() to check if function was successful. - * If yes, extract the valid result with BMK_extract_benchResult(), - * it will contain : - * .cSpeed: compression speed in bytes per second, - * .dSpeed: decompression speed in bytes per second, - * .cSize : compressed size, in bytes - * .cMem : memory budget required for the compression context +/* Generates a sample with datagen, using @compressibility argument + * @cLevel - compression level to benchmark, errors if invalid + * @compressibility - determines compressibility of sample, range [0.0 - 1.0] + * if @compressibility < 0.0, uses the lorem ipsum generator + * @compressionParams - basic compression Parameters + * @displayLevel - see benchFiles + * @adv - see advanced_Params_t + * @return: 0 on success, !0 on error */ -BMK_benchOutcome_t BMK_syntheticTest( - int cLevel, double compressibility, - const ZSTD_compressionParameters* compressionParams, - int displayLevel, const BMK_advancedParams_t* adv); +int BMK_syntheticTest(double compressibility, + int startingCLevel, int endCLevel, + const ZSTD_compressionParameters* compressionParams, + int displayLevel, const BMK_advancedParams_t* adv); @@ -189,8 +172,8 @@ BMK_benchOutcome_t BMK_benchMem(const void* srcBuffer, size_t srcSize, int displayLevel, const char* displayName); -/* BMK_benchMemAdvanced() : same as BMK_benchMem() - * with following additional options : +/* BMK_benchMemAdvanced() : used by Paramgrill + * same as BMK_benchMem() with following additional options : * dstBuffer - destination buffer to write compressed output in, NULL if none provided. * dstCapacity - capacity of destination buffer, give 0 if dstBuffer = NULL * adv = see advancedParams_t @@ -205,9 +188,4 @@ BMK_benchOutcome_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize, - #endif /* BENCH_ZSTD_H_3242387 */ - -#if defined (__cplusplus) -} -#endif diff --git a/programs/datagen.c b/programs/datagen.c index 026b9a1855d..ddc690bb1b7 100644 --- a/programs/datagen.c +++ b/programs/datagen.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -18,7 +18,7 @@ #include /* malloc, free */ #include /* FILE, fwrite, fprintf */ #include /* memcpy */ -#include "mem.h" /* U32 */ +#include "../lib/common/mem.h" /* U32 */ /*-************************************ @@ -55,17 +55,18 @@ static U32 RDG_rand(U32* src) return rand32 >> 5; } +typedef U32 fixedPoint_24_8; -static void RDG_fillLiteralDistrib(BYTE* ldt, double ld) +static void RDG_fillLiteralDistrib(BYTE* ldt, fixedPoint_24_8 ld) { BYTE const firstChar = (ld<=0.0) ? 0 : '('; BYTE const lastChar = (ld<=0.0) ? 255 : '}'; BYTE character = (ld<=0.0) ? 
0 : '0'; U32 u; - if (ld<=0.0) ld = 0.0; + if (ld<=0) ld = 0; for (u=0; u> 8) + 1; U32 const end = MIN ( u + weight , LTSIZE); while (u < end) ldt[u++] = character; character++; @@ -92,7 +93,8 @@ static U32 RDG_randLength(U32* seedPtr) return (RDG_rand(seedPtr) & 0x1FF) + 0xF; } -static void RDG_genBlock(void* buffer, size_t buffSize, size_t prefixSize, double matchProba, const BYTE* ldt, U32* seedPtr) +static void RDG_genBlock(void* buffer, size_t buffSize, size_t prefixSize, + double matchProba, const BYTE* ldt, U32* seedPtr) { BYTE* const buffPtr = (BYTE*)buffer; U32 const matchProba32 = (U32)(32768 * matchProba); @@ -128,13 +130,13 @@ static void RDG_genBlock(void* buffer, size_t buffSize, size_t prefixSize, doubl U32 const randOffset = RDG_rand15Bits(seedPtr) + 1; U32 const offset = repeatOffset ? prevOffset : (U32) MIN(randOffset , pos); size_t match = pos - offset; - while (pos < d) buffPtr[pos++] = buffPtr[match++]; /* correctly manages overlaps */ + while (pos < d) { buffPtr[pos++] = buffPtr[match++]; /* correctly manages overlaps */ } prevOffset = offset; } else { /* Literal (noise) */ U32 const length = RDG_randLength(seedPtr); U32 const d = (U32) MIN(pos + length, buffSize); - while (pos < d) buffPtr[pos++] = RDG_genChar(seedPtr, ldt); + while (pos < d) { buffPtr[pos++] = RDG_genChar(seedPtr, ldt); } } } } @@ -145,7 +147,7 @@ void RDG_genBuffer(void* buffer, size_t size, double matchProba, double litProba BYTE ldt[LTSIZE]; memset(ldt, '0', sizeof(ldt)); /* yes, character '0', this is intentional */ if (litProba<=0.0) litProba = matchProba / 4.5; - RDG_fillLiteralDistrib(ldt, litProba); + RDG_fillLiteralDistrib(ldt, (fixedPoint_24_8)(litProba * 256 + 0.001)); RDG_genBlock(buffer, size, 0, matchProba, ldt, &seed32); } @@ -163,7 +165,7 @@ void RDG_genStdout(unsigned long long size, double matchProba, double litProba, if (buff==NULL) { perror("datagen"); exit(1); } if (litProba<=0.0) litProba = matchProba / 4.5; memset(ldt, '0', sizeof(ldt)); /* yes, character '0', this is intentional */ - RDG_fillLiteralDistrib(ldt, litProba); + RDG_fillLiteralDistrib(ldt, (fixedPoint_24_8)(litProba * 256 + 0.001)); SET_BINARY_MODE(stdout); /* Generate initial dict */ diff --git a/programs/datagen.h b/programs/datagen.h index 2fcc980e5e7..461fb716c43 100644 --- a/programs/datagen.h +++ b/programs/datagen.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -14,6 +14,10 @@ #include /* size_t */ +#if defined (__cplusplus) +extern "C" { +#endif + void RDG_genStdout(unsigned long long size, double matchProba, double litProba, unsigned seed); void RDG_genBuffer(void* buffer, size_t size, double matchProba, double litProba, unsigned seed); /*!RDG_genBuffer @@ -27,4 +31,8 @@ void RDG_genBuffer(void* buffer, size_t size, double matchProba, double litProba Same as RDG_genBuffer, but generates data into stdout */ +#if defined (__cplusplus) +} /* extern "C" */ +#endif + #endif diff --git a/programs/dibio.c b/programs/dibio.c index 12eb3268085..dc629103ba2 100644 --- a/programs/dibio.c +++ b/programs/dibio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
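/* Annotation: datagen now carries the literal probability as 24.8 fixed
 * point - eight fractional bits, so converting from double multiplies by
 * 256, and the integer part comes back with `>> 8` (the `(ld >> 8) + 1`
 * weight above). The +0.001 nudge guards against doubles that land a hair
 * under an integer after the multiply. Worked sketch: */
#include <stdint.h>

typedef uint32_t fixedPoint_24_8_t;   /* local name, mirrors fixedPoint_24_8 */

static fixedPoint_24_8_t to_fp_24_8(double x) { return (fixedPoint_24_8_t)(x * 256 + 0.001); }
static uint32_t fp_int_part(fixedPoint_24_8_t v) { return v >> 8; }

/* to_fp_24_8(0.75) == 192, fp_int_part(192) == 0;
 * to_fp_24_8(1.50) == 384, fp_int_part(384) == 1. */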
* * This source code is licensed under both the BSD-style license (found in the @@ -27,11 +27,11 @@ #include /* memset */ #include /* fprintf, fopen, ftello64 */ #include /* errno */ -#include #include "timefn.h" /* UTIL_time_t, UTIL_clockSpanMicro, UTIL_getTime */ -#include "mem.h" /* read */ -#include "error_private.h" +#include "../lib/common/debug.h" /* assert */ +#include "../lib/common/mem.h" /* read */ +#include "../lib/zstd_errors.h" #include "dibio.h" @@ -49,6 +49,7 @@ static const size_t g_maxMemory = (sizeof(size_t) == 4) ? (2 GB - 64 MB) : ((size_t)(512 MB) << sizeof(size_t)); #define NOISELENGTH 32 +#define MAX_SAMPLES_SIZE (2 GB) /* training dataset limited to 2GB */ /*-************************************* @@ -88,6 +89,15 @@ static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER; #undef MIN #define MIN(a,b) ((a) < (b) ? (a) : (b)) +/** + Returns the size of a file. + If error returns -1. +*/ +static S64 DiB_getFileSize (const char * fileName) +{ + U64 const fileSize = UTIL_getFileSize(fileName); + return (fileSize == UTIL_FILESIZE_UNKNOWN) ? -1 : (S64)fileSize; +} /* ******************************************************** * File related operations @@ -101,47 +111,70 @@ static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER; * *bufferSizePtr is modified, it provides the amount data loaded within buffer. * sampleSizes is filled with the size of each sample. */ -static unsigned DiB_loadFiles(void* buffer, size_t* bufferSizePtr, - size_t* sampleSizes, unsigned sstSize, - const char** fileNamesTable, unsigned nbFiles, size_t targetChunkSize, - unsigned displayLevel) +static int DiB_loadFiles( + void* buffer, size_t* bufferSizePtr, + size_t* sampleSizes, int sstSize, + const char** fileNamesTable, int nbFiles, + size_t targetChunkSize, int displayLevel ) { char* const buff = (char*)buffer; - size_t pos = 0; - unsigned nbLoadedChunks = 0, fileIndex; - - for (fileIndex=0; fileIndex *bufferSizePtr-pos) break; - { size_t const readSize = fread(buff+pos, 1, toLoad, f); - if (readSize != toLoad) EXM_THROW(11, "Pb reading %s", fileName); - pos += readSize; - sampleSizes[nbLoadedChunks++] = toLoad; - remainingToLoad -= targetChunkSize; - if (nbLoadedChunks == sstSize) { /* no more space left in sampleSizes table */ - fileIndex = nbFiles; /* stop there */ + size_t totalDataLoaded = 0; + int nbSamplesLoaded = 0; + int fileIndex = 0; + FILE * f = NULL; + + assert(targetChunkSize <= SAMPLESIZE_MAX); + + while ( nbSamplesLoaded < sstSize && fileIndex < nbFiles ) { + size_t fileDataLoaded; + S64 const fileSize = DiB_getFileSize(fileNamesTable[fileIndex]); + if (fileSize <= 0) { + /* skip if zero-size or file error */ + ++fileIndex; + continue; + } + + f = fopen( fileNamesTable[fileIndex], "rb"); + if (f == NULL) + EXM_THROW(10, "zstd: dictBuilder: %s %s ", fileNamesTable[fileIndex], strerror(errno)); + DISPLAYUPDATE(2, "Loading %s... \r", fileNamesTable[fileIndex]); + + /* Load the first chunk of data from the file */ + fileDataLoaded = targetChunkSize > 0 ? 
+ (size_t)MIN(fileSize, (S64)targetChunkSize) : + (size_t)MIN(fileSize, SAMPLESIZE_MAX ); + if (totalDataLoaded + fileDataLoaded > *bufferSizePtr) + break; + if (fread( buff+totalDataLoaded, 1, fileDataLoaded, f ) != fileDataLoaded) + EXM_THROW(11, "Pb reading %s", fileNamesTable[fileIndex]); + sampleSizes[nbSamplesLoaded++] = fileDataLoaded; + totalDataLoaded += fileDataLoaded; + + /* If file-chunking is enabled, load the rest of the file as more samples */ + if (targetChunkSize > 0) { + while( (S64)fileDataLoaded < fileSize && nbSamplesLoaded < sstSize ) { + size_t const chunkSize = MIN((size_t)(fileSize-fileDataLoaded), targetChunkSize); + if (totalDataLoaded + chunkSize > *bufferSizePtr) /* buffer is full */ break; - } - if (toLoad < targetChunkSize) { - fseek(f, (long)(targetChunkSize - toLoad), SEEK_CUR); - } } } - fclose(f); + + if (fread( buff+totalDataLoaded, 1, chunkSize, f ) != chunkSize) + EXM_THROW(11, "Pb reading %s", fileNamesTable[fileIndex]); + sampleSizes[nbSamplesLoaded++] = chunkSize; + totalDataLoaded += chunkSize; + fileDataLoaded += chunkSize; + } + } + fileIndex += 1; + fclose(f); f = NULL; } + if (f != NULL) + fclose(f); + DISPLAYLEVEL(2, "\r%79s\r", ""); - *bufferSizePtr = pos; - DISPLAYLEVEL(4, "loaded : %u KB \n", (unsigned)(pos >> 10)) - return nbLoadedChunks; + DISPLAYLEVEL(4, "Loaded %d KB total training data, %d nb samples \n", + (int)(totalDataLoaded / (1 KB)), nbSamplesLoaded ); + *bufferSizePtr = totalDataLoaded; + return nbSamplesLoaded; } #define DiB_rotl32(x,r) ((x << r) | (x >> (32 - r))) @@ -164,7 +197,8 @@ static U32 DiB_rand(U32* src) static void DiB_shuffle(const char** fileNamesTable, unsigned nbFiles) { U32 seed = 0xFD2FB528; unsigned i; - assert(nbFiles >= 1); + if (nbFiles == 0) + return; for (i = nbFiles - 1; i > 0; --i) { unsigned const j = DiB_rand(&seed) % (i + 1); const char* const tmp = fileNamesTable[j]; @@ -201,7 +235,7 @@ static void DiB_fillNoise(void* buffer, size_t length) unsigned const prime1 = 2654435761U; unsigned const prime2 = 2246822519U; unsigned acc = prime1; - size_t p=0;; + size_t p=0; for (p=0; p sample buffer, and sample sizes table. */ -static fileStats DiB_fileStats(const char** fileNamesTable, unsigned nbFiles, size_t chunkSize, unsigned displayLevel) +static fileStats DiB_fileStats(const char** fileNamesTable, int nbFiles, size_t chunkSize, int displayLevel) { fileStats fs; - unsigned n; + int n; memset(&fs, 0, sizeof(fs)); + + /* We assume that if chunking is requested, the chunk size is < SAMPLESIZE_MAX */ + assert( chunkSize <= SAMPLESIZE_MAX ); + for (n=0; n 2*SAMPLESIZE_MAX); - fs.nbSamples += nbSamples; + S64 const fileSize = DiB_getFileSize(fileNamesTable[n]); + /* TODO: is there a minimum sample size? What if the file is 1-byte? */ + /* Skip empty or invalid files */ + if (fileSize <= 0) { + if (fileSize < 0) { + DISPLAYLEVEL(3, "Sample file '%s' is unreadable or stat failed, skipping...\n", + fileNamesTable[n]); + } else { + DISPLAYLEVEL(3, "Sample file '%s' has zero size, skipping...\n", + fileNamesTable[n]); + } + continue; + } + + /* the case where we are breaking up files in sample chunks */ + if (chunkSize > 0) { + /* TODO: is there a minimum sample size? Can we have a 1-byte sample? 
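/* Annotation: DiB_shuffle above is a Fisher-Yates shuffle with a hard-coded
 * seed (0xFD2FB528), so when the sample set exceeds memory, the subset that
 * ends up loaded looks random yet is identical on every run. A standalone
 * equivalent, parameterized over the RNG for the sketch: */
#include <stddef.h>

static void shuffle_names(const char** tab, unsigned n,
                          unsigned (*nextRand)(void* state), void* state)
{
    unsigned i;
    if (n == 0) return;                      /* mirrors the new nbFiles==0 guard */
    for (i = n - 1; i > 0; --i) {
        unsigned const j = nextRand(state) % (i + 1);
        const char* const tmp = tab[j];      /* swap tab[i] <-> tab[j] */
        tab[j] = tab[i];
        tab[i] = tmp;
    }
}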
*/ + fs.nbSamples += (int)((fileSize + chunkSize-1) / chunkSize); + fs.totalSizeToLoad += fileSize; + } + else { + /* the case where one file is one sample */ + if (fileSize > SAMPLESIZE_MAX) { + /* flag excessively large sample files */ + fs.oneSampleTooLarge |= (fileSize > 2*SAMPLESIZE_MAX); + + /* Limit to the first SAMPLESIZE_MAX (128kB) of the file */ + DISPLAYLEVEL(3, "Sample file '%s' is too large, limiting to %d KB\n", + fileNamesTable[n], SAMPLESIZE_MAX / (1 KB)); + } + fs.nbSamples += 1; + fs.totalSizeToLoad += MIN(fileSize, SAMPLESIZE_MAX); + } } - DISPLAYLEVEL(4, "Preparing to load : %u KB \n", (unsigned)(fs.totalSizeToLoad >> 10)); + DISPLAYLEVEL(4, "Found training data %d files, %d KB, %d samples\n", nbFiles, (int)(fs.totalSizeToLoad / (1 KB)), fs.nbSamples); return fs; } - -/*! ZDICT_trainFromBuffer_unsafe_legacy() : - Strictly Internal use only !! - Same as ZDICT_trainFromBuffer_legacy(), but does not control `samplesBuffer`. - `samplesBuffer` must be followed by noisy guard band to avoid out-of-buffer reads. - @return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - or an error code. -*/ -size_t ZDICT_trainFromBuffer_unsafe_legacy(void* dictBuffer, size_t dictBufferCapacity, - const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, - ZDICT_legacy_params_t parameters); - - -int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize, - const char** fileNamesTable, unsigned nbFiles, size_t chunkSize, +int DiB_trainFromFiles(const char* dictFileName, size_t maxDictSize, + const char** fileNamesTable, int nbFiles, size_t chunkSize, ZDICT_legacy_params_t* params, ZDICT_cover_params_t* coverParams, - ZDICT_fastCover_params_t* fastCoverParams, int optimize) + ZDICT_fastCover_params_t* fastCoverParams, int optimize, unsigned memLimit) { - unsigned const displayLevel = params ? params->zParams.notificationLevel : - coverParams ? coverParams->zParams.notificationLevel : - fastCoverParams ? fastCoverParams->zParams.notificationLevel : - 0; /* should never happen */ + fileStats fs; + size_t* sampleSizes; /* vector of sample sizes. Each sample can be up to SAMPLESIZE_MAX */ + int nbSamplesLoaded; /* nb of samples effectively loaded in srcBuffer */ + size_t loadedSize; /* total data loaded in srcBuffer for all samples */ + void* srcBuffer /* contiguous buffer with training data/samples */; void* const dictBuffer = malloc(maxDictSize); - fileStats const fs = DiB_fileStats(fileNamesTable, nbFiles, chunkSize, displayLevel); - size_t* const sampleSizes = (size_t*)malloc(fs.nbSamples * sizeof(size_t)); - size_t const memMult = params ? MEMMULT : - coverParams ? COVER_MEMMULT: - FASTCOVER_MEMMULT; - size_t const maxMem = DiB_findMaxMem(fs.totalSizeToLoad * memMult) / memMult; - size_t loadedSize = (size_t) MIN ((unsigned long long)maxMem, fs.totalSizeToLoad); - void* const srcBuffer = malloc(loadedSize+NOISELENGTH); int result = 0; + int const displayLevel = params ? params->zParams.notificationLevel : + coverParams ? coverParams->zParams.notificationLevel : + fastCoverParams ? fastCoverParams->zParams.notificationLevel : 0; + + /* Shuffle input files before we start assessing how much sample datA to load. + The purpose of the shuffle is to pick random samples when the sample + set is larger than what we can load in memory. 
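/* Annotation: sample counting with file-chunking enabled (chunkSize > 0), as
 * used above - each file contributes ceil(fileSize / chunkSize) samples via
 * the usual integer ceiling division. Sketch plus a worked case: */
static int nb_chunks(long long fileSize, long long chunkSize)
{
    return (int)((fileSize + chunkSize - 1) / chunkSize);
}
/* nb_chunks(102400, 40960) == 3 for a 100 KB file in 40 KB chunks:
 * two full 40 KB chunks plus one 20 KB tail. */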
*/ + DISPLAYLEVEL(3, "Shuffling input files\n"); + DiB_shuffle(fileNamesTable, nbFiles); + + /* Figure out how much sample data to load with how many samples */ + fs = DiB_fileStats(fileNamesTable, nbFiles, chunkSize, displayLevel); + + { + int const memMult = params ? MEMMULT : + coverParams ? COVER_MEMMULT: + FASTCOVER_MEMMULT; + size_t const maxMem = DiB_findMaxMem(fs.totalSizeToLoad * memMult) / memMult; + /* Limit the size of the training data to the free memory */ + /* Limit the size of the training data to 2GB */ + /* TODO: there is opportunity to stop DiB_fileStats() early when the data limit is reached */ + loadedSize = (size_t)MIN( MIN((S64)maxMem, fs.totalSizeToLoad), MAX_SAMPLES_SIZE ); + if (memLimit != 0) { + DISPLAYLEVEL(2, "! Warning : setting manual memory limit for dictionary training data at %u MB \n", + (unsigned)(memLimit / (1 MB))); + loadedSize = (size_t)MIN(loadedSize, memLimit); + } + srcBuffer = malloc(loadedSize+NOISELENGTH); + sampleSizes = (size_t*)malloc(fs.nbSamples * sizeof(size_t)); + } + /* Checks */ - if ((!sampleSizes) || (!srcBuffer) || (!dictBuffer)) + if ((fs.nbSamples && !sampleSizes) || (!srcBuffer) || (!dictBuffer)) EXM_THROW(12, "not enough memory for DiB_trainFiles"); /* should not happen */ if (fs.oneSampleTooLarge) { DISPLAYLEVEL(2, "! Warning : some sample(s) are very large \n"); @@ -296,36 +369,37 @@ int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize, DISPLAYLEVEL(2, "! As a consequence, only the first %u bytes of each sample are loaded \n", SAMPLESIZE_MAX); } if (fs.nbSamples < 5) { - DISPLAYLEVEL(2, "! Warning : nb of samples too low for proper processing ! \n"); - DISPLAYLEVEL(2, "! Please provide _one file per sample_. \n"); - DISPLAYLEVEL(2, "! Alternatively, split files into fixed-size blocks representative of samples, with -B# \n"); + DISPLAYLEVEL(2, "! Warning : nb of samples too low for proper processing !\n"); + DISPLAYLEVEL(2, "! Please provide _one file per sample_.\n"); + DISPLAYLEVEL(2, "! Alternatively, split file(s) into fixed-size samples, with --split=#\n"); EXM_THROW(14, "nb of samples too low"); /* we now clearly forbid this case */ } - if (fs.totalSizeToLoad < (unsigned long long)(8 * maxDictSize)) { + if (fs.totalSizeToLoad < (S64)maxDictSize * 8) { DISPLAYLEVEL(2, "! Warning : data size of samples too small for target dictionary size \n"); DISPLAYLEVEL(2, "! 
Samples should be about 100x larger than target dictionary size \n"); } /* init */ - if (loadedSize < fs.totalSizeToLoad) - DISPLAYLEVEL(1, "Not enough memory; training on %u MB only...\n", (unsigned)(loadedSize >> 20)); + if ((S64)loadedSize < fs.totalSizeToLoad) + DISPLAYLEVEL(1, "Training samples set too large (%u MB); training on %u MB only...\n", + (unsigned)(fs.totalSizeToLoad / (1 MB)), + (unsigned)(loadedSize / (1 MB))); /* Load input buffer */ - DISPLAYLEVEL(3, "Shuffling input files\n"); - DiB_shuffle(fileNamesTable, nbFiles); - - DiB_loadFiles(srcBuffer, &loadedSize, sampleSizes, fs.nbSamples, fileNamesTable, nbFiles, chunkSize, displayLevel); + nbSamplesLoaded = DiB_loadFiles( + srcBuffer, &loadedSize, sampleSizes, fs.nbSamples, fileNamesTable, + nbFiles, chunkSize, displayLevel); - { size_t dictSize; + { size_t dictSize = ZSTD_error_GENERIC; if (params) { DiB_fillNoise((char*)srcBuffer + loadedSize, NOISELENGTH); /* guard band, for end of buffer condition */ - dictSize = ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, maxDictSize, - srcBuffer, sampleSizes, fs.nbSamples, - *params); + dictSize = ZDICT_trainFromBuffer_legacy(dictBuffer, maxDictSize, + srcBuffer, sampleSizes, nbSamplesLoaded, + *params); } else if (coverParams) { if (optimize) { dictSize = ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, maxDictSize, - srcBuffer, sampleSizes, fs.nbSamples, + srcBuffer, sampleSizes, nbSamplesLoaded, coverParams); if (!ZDICT_isError(dictSize)) { unsigned splitPercentage = (unsigned)(coverParams->splitPoint * 100); @@ -334,13 +408,12 @@ int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize, } } else { dictSize = ZDICT_trainFromBuffer_cover(dictBuffer, maxDictSize, srcBuffer, - sampleSizes, fs.nbSamples, *coverParams); + sampleSizes, nbSamplesLoaded, *coverParams); } - } else { - assert(fastCoverParams != NULL); + } else if (fastCoverParams != NULL) { if (optimize) { dictSize = ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, maxDictSize, - srcBuffer, sampleSizes, fs.nbSamples, + srcBuffer, sampleSizes, nbSamplesLoaded, fastCoverParams); if (!ZDICT_isError(dictSize)) { unsigned splitPercentage = (unsigned)(fastCoverParams->splitPoint * 100); @@ -350,8 +423,10 @@ int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize, } } else { dictSize = ZDICT_trainFromBuffer_fastCover(dictBuffer, maxDictSize, srcBuffer, - sampleSizes, fs.nbSamples, *fastCoverParams); + sampleSizes, nbSamplesLoaded, *fastCoverParams); } + } else { + assert(0 /* Impossible */); } if (ZDICT_isError(dictSize)) { DISPLAYLEVEL(1, "dictionary training failed : %s \n", ZDICT_getErrorName(dictSize)); /* should not happen */ diff --git a/programs/dibio.h b/programs/dibio.h index ea163fe6afd..a96104c36d7 100644 --- a/programs/dibio.h +++ b/programs/dibio.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -19,7 +19,7 @@ * Dependencies ***************************************/ #define ZDICT_STATIC_LINKING_ONLY -#include "zdict.h" /* ZDICT_params_t */ +#include "../lib/zdict.h" /* ZDICT_params_t */ /*-************************************* @@ -31,9 +31,9 @@ `parameters` is optional and can be provided with values set to 0, meaning "default". @return : 0 == ok. Any other : error. 
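/* Annotation: the training-buffer size decided above is the result of three
 * clamps - free memory scaled down by the trainer's memory multiplier, the
 * 2 GB MAX_SAMPLES_SIZE hard cap, and the optional user-supplied memLimit.
 * Condensed sketch of that decision: */
#include <stddef.h>

static size_t training_budget(unsigned long long maxMem,   /* DiB_findMaxMem/memMult */
                              long long totalSizeToLoad,
                              unsigned long long hardCap,   /* MAX_SAMPLES_SIZE */
                              unsigned memLimit)            /* 0 = no user limit */
{
    unsigned long long v = maxMem;
    if ((unsigned long long)totalSizeToLoad < v) v = (unsigned long long)totalSizeToLoad;
    if (hardCap < v) v = hardCap;
    if (memLimit != 0 && memLimit < v) v = memLimit;
    return (size_t)v;
}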
*/ -int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize, - const char** fileNamesTable, unsigned nbFiles, size_t chunkSize, +int DiB_trainFromFiles(const char* dictFileName, size_t maxDictSize, + const char** fileNamesTable, int nbFiles, size_t chunkSize, ZDICT_legacy_params_t* params, ZDICT_cover_params_t* coverParams, - ZDICT_fastCover_params_t* fastCoverParams, int optimize); + ZDICT_fastCover_params_t* fastCoverParams, int optimize, unsigned memLimit); #endif diff --git a/programs/fileio.c b/programs/fileio.c index fba71157591..4000c5b62f4 100644 --- a/programs/fileio.c +++ b/programs/fileio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -25,11 +25,14 @@ ***************************************/ #include "platform.h" /* Large Files support, SET_BINARY_MODE */ #include "util.h" /* UTIL_getFileSize, UTIL_isRegularFile, UTIL_isSameFile */ -#include <stdio.h> /* fprintf, fopen, fread, _fileno, stdin, stdout */ +#include <stdio.h> /* fprintf, open, fdopen, fread, _fileno, stdin, stdout */ #include <stdlib.h> /* malloc, free */ #include <string.h> /* strcmp, strlen */ +#include <time.h> /* clock_t, to measure process time */ +#include <fcntl.h> /* O_WRONLY */ #include <assert.h> #include <errno.h> /* errno */ +#include <limits.h> /* INT_MAX */ #include <signal.h> #include "timefn.h" /* UTIL_getTime, UTIL_clockSpanMicro */ @@ -38,12 +41,16 @@ # include <io.h> #endif -#include "mem.h" /* U32, U64 */ #include "fileio.h" +#include "fileio_asyncio.h" +#include "fileio_common.h" + +FIO_display_prefs_t g_display_prefs = {2, FIO_ps_auto}; +UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER; #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_magicNumber, ZSTD_frameHeaderSize_max */ -#include "zstd.h" -#include "zstd_errors.h" /* ZSTD_error_frameParameter_windowTooLarge */ +#include "../lib/zstd.h" +#include "../lib/zstd_errors.h" /* ZSTD_error_frameParameter_windowTooLarge */ #if defined(ZSTD_GZCOMPRESS) || defined(ZSTD_GZDECOMPRESS) # include <zlib.h> @@ -63,70 +70,325 @@ # include <lz4.h> #endif +char const* FIO_zlibVersion(void) +{ +#if defined(ZSTD_GZCOMPRESS) || defined(ZSTD_GZDECOMPRESS) + return zlibVersion(); +#else + return "Unsupported"; +#endif +} + +char const* FIO_lz4Version(void) +{ +#if defined(ZSTD_LZ4COMPRESS) || defined(ZSTD_LZ4DECOMPRESS) + /* LZ4_versionString() added in v1.7.3 */ +# if LZ4_VERSION_NUMBER >= 10703 + return LZ4_versionString(); +# else +# define ZSTD_LZ4_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE +# define ZSTD_LZ4_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LZ4_VERSION) + return ZSTD_LZ4_VERSION_STRING; +# endif +#else + return "Unsupported"; +#endif +} + +char const* FIO_lzmaVersion(void) +{ +#if defined(ZSTD_LZMACOMPRESS) || defined(ZSTD_LZMADECOMPRESS) + return lzma_version_string(); +#else + return "Unsupported"; +#endif +} + /*-************************************* * Constants ***************************************/ -#define KB *(1<<10) -#define MB *(1<<20) -#define GB *(1U<<30) - #define ADAPT_WINDOWLOG_DEFAULT 23 /* 8 MB */ #define DICTSIZE_MAX (32 MB) /* protection against large input (attack scenario) */ #define FNSPACE 30 +/* Default file permissions 0666 (modulated by umask) */ +/* Temporary restricted file permissions are used when we're going to + * chmod/chown at the end of the operation. */ +#if !defined(_WIN32) +/* These macros aren't defined on Windows.
*/ +#define DEFAULT_FILE_PERMISSIONS (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH) +#define TEMPORARY_FILE_PERMISSIONS (S_IRUSR|S_IWUSR) +#else +#define DEFAULT_FILE_PERMISSIONS (0666) +#define TEMPORARY_FILE_PERMISSIONS (0600) +#endif -/*-************************************* -* Macros + +#ifndef ZSTD_NOCOMPRESS + +/* ************************************* +* Synchronous compression IO helpers +* Lightweight wrapper used by compression paths to manage buffered +* reads/writes without the async job machinery. ***************************************/ +typedef struct { + const FIO_prefs_t* prefs; + FILE* srcFile; + FILE* dstFile; + unsigned storedSkips; + U8* inBuffer; + size_t inCapacity; + U8* srcBuffer; + size_t srcBufferLoaded; + U8* outBuffer; + size_t outCapacity; +} FIO_SyncCompressIO; + +static void FIO_SyncCompressIO_init(FIO_SyncCompressIO* io, + const FIO_prefs_t* prefs, + size_t inCapacity, + size_t outCapacity); +static void FIO_SyncCompressIO_destroy(FIO_SyncCompressIO* io); +static void FIO_SyncCompressIO_setSrc(FIO_SyncCompressIO* io, FILE* file); +static void FIO_SyncCompressIO_clearSrc(FIO_SyncCompressIO* io); +static void FIO_SyncCompressIO_setDst(FIO_SyncCompressIO* io, FILE* file); +static int FIO_SyncCompressIO_closeDst(FIO_SyncCompressIO* io); +static size_t FIO_SyncCompressIO_fillBuffer(FIO_SyncCompressIO* io, size_t minToHave); +static void FIO_SyncCompressIO_consumeBytes(FIO_SyncCompressIO* io, size_t n); +static void FIO_SyncCompressIO_commitOut(FIO_SyncCompressIO* io, const void* buffer, size_t size); +static void FIO_SyncCompressIO_finish(FIO_SyncCompressIO* io); + + +static unsigned FIO_sparseWrite(FILE* file, + const void* buffer, size_t bufferSize, + const FIO_prefs_t* const prefs, + unsigned storedSkips) +{ + const size_t* const bufferT = (const size_t*)buffer; /* Buffer is supposed malloc'ed, hence aligned on size_t */ + size_t bufferSizeT = bufferSize / sizeof(size_t); + const size_t* const bufferTEnd = bufferT + bufferSizeT; + const size_t* ptrT = bufferT; + static const size_t segmentSizeT = (32 KB) / sizeof(size_t); /* check every 32 KB */ -struct FIO_display_prefs_s { - int displayLevel; /* 0 : no display; 1: errors; 2: + result + interaction + warnings; 3: + progression; 4: + information */ - U32 noProgress; -}; + if (prefs->testMode) return 0; /* do not output anything in test mode */ -static FIO_display_prefs_t g_display_prefs = {2, 0}; + if (!prefs->sparseFileSupport) { /* normal write */ + size_t const sizeCheck = fwrite(buffer, 1, bufferSize, file); + if (sizeCheck != bufferSize) + EXM_THROW(70, "Write error : cannot write block : %s", + strerror(errno)); + return 0; + } + + /* avoid int overflow */ + if (storedSkips > 1 GB) { + if (LONG_SEEK(file, 1 GB, SEEK_CUR) != 0) + EXM_THROW(91, "1 GB skip error (sparse file support)"); + storedSkips -= 1 GB; + } -#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) -#define DISPLAYOUT(...) fprintf(stdout, __VA_ARGS__) -#define DISPLAYLEVEL(l, ...) 
{ if (g_display_prefs.displayLevel>=l) { DISPLAY(__VA_ARGS__); } } + while (ptrT < bufferTEnd) { + size_t nb0T; -static const U64 g_refreshRate = SEC_TO_MICRO / 6; -static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER; + /* adjust last segment if < 32 KB */ + size_t seg0SizeT = segmentSizeT; + if (seg0SizeT > bufferSizeT) seg0SizeT = bufferSizeT; + bufferSizeT -= seg0SizeT; -#define READY_FOR_UPDATE() (!g_display_prefs.noProgress && UTIL_clockSpanMicro(g_displayClock) > g_refreshRate) -#define DELAY_NEXT_UPDATE() { g_displayClock = UTIL_getTime(); } -#define DISPLAYUPDATE(l, ...) { \ - if (g_display_prefs.displayLevel>=l && !g_display_prefs.noProgress) { \ - if (READY_FOR_UPDATE() || (g_display_prefs.displayLevel>=4)) { \ - DELAY_NEXT_UPDATE(); \ - DISPLAY(__VA_ARGS__); \ - if (g_display_prefs.displayLevel>=4) fflush(stderr); \ - } } } + /* count leading zeroes */ + for (nb0T=0; (nb0T < seg0SizeT) && (ptrT[nb0T] == 0); nb0T++) ; + storedSkips += (unsigned)(nb0T * sizeof(size_t)); -#undef MIN /* in case it would be already defined */ -#define MIN(a,b) ((a) < (b) ? (a) : (b)) + if (nb0T != seg0SizeT) { /* not all 0s */ + size_t const nbNon0ST = seg0SizeT - nb0T; + /* skip leading zeros */ + if (LONG_SEEK(file, storedSkips, SEEK_CUR) != 0) + EXM_THROW(92, "Sparse skip error ; try --no-sparse"); + storedSkips = 0; + /* write the rest */ + if (fwrite(ptrT + nb0T, sizeof(size_t), nbNon0ST, file) != nbNon0ST) + EXM_THROW(93, "Write error : cannot write block : %s", + strerror(errno)); + } + ptrT += seg0SizeT; + } + { static size_t const maskT = sizeof(size_t)-1; + if (bufferSize & maskT) { + /* size not multiple of sizeof(size_t) : implies end of block */ + const char* const restStart = (const char*)bufferTEnd; + const char* restPtr = restStart; + const char* const restEnd = (const char*)buffer + bufferSize; + assert(restEnd > restStart && restEnd < restStart + sizeof(size_t)); + for ( ; (restPtr < restEnd) && (*restPtr == 0); restPtr++) ; + storedSkips += (unsigned) (restPtr - restStart); + if (restPtr != restEnd) { + /* not all remaining bytes are 0 */ + size_t const restSize = (size_t)(restEnd - restPtr); + if (LONG_SEEK(file, storedSkips, SEEK_CUR) != 0) + EXM_THROW(92, "Sparse skip error ; try --no-sparse"); + if (fwrite(restPtr, 1, restSize, file) != restSize) + EXM_THROW(95, "Write error : cannot write end of decoded block : %s", + strerror(errno)); + storedSkips = 0; + } } } + + return storedSkips; +} + +static void FIO_sparseWriteEnd(const FIO_prefs_t* const prefs, FILE* file, unsigned storedSkips) +{ + if (file == NULL) return; + if (prefs->testMode) { + assert(storedSkips == 0); + return; + } + if (storedSkips>0) { + assert(prefs->sparseFileSupport > 0); /* storedSkips>0 implies sparse support is enabled */ + if (LONG_SEEK(file, storedSkips-1, SEEK_CUR) != 0) + EXM_THROW(69, "Final skip error (sparse file support)"); + /* last zero must be explicitly written, + * so that skipped ones get implicitly translated as zero by FS */ + { const char lastZeroByte[1] = { 0 }; + if (fwrite(lastZeroByte, 1, 1, file) != 1) + EXM_THROW(69, "Write error : cannot write last zero : %s", strerror(errno)); + } + } +} + +static void FIO_SyncCompressIO_init(FIO_SyncCompressIO* io, + const FIO_prefs_t* prefs, + size_t inCapacity, + size_t outCapacity) +{ + memset(io, 0, sizeof(*io)); + io->prefs = prefs; + io->inCapacity = inCapacity; + io->outCapacity = outCapacity; + io->inBuffer = (U8*)malloc(inCapacity); + if (!io->inBuffer) + EXM_THROW(101, "Allocation error : not enough memory"); + io->outBuffer = 
(U8*)malloc(outCapacity); + if (!io->outBuffer) { + free(io->inBuffer); + io->inBuffer = NULL; + EXM_THROW(101, "Allocation error : not enough memory"); + } + io->srcBuffer = io->inBuffer; + io->srcBufferLoaded = 0; +} + +static void FIO_SyncCompressIO_destroy(FIO_SyncCompressIO* io) +{ + if (!io) return; + free(io->inBuffer); + free(io->outBuffer); + io->inBuffer = NULL; + io->outBuffer = NULL; + io->srcBuffer = NULL; + io->srcBufferLoaded = 0; + io->srcFile = NULL; + io->dstFile = NULL; + io->storedSkips = 0; +} + +static void FIO_SyncCompressIO_setSrc(FIO_SyncCompressIO* io, FILE* file) +{ + io->srcFile = file; + io->srcBuffer = io->inBuffer; + io->srcBufferLoaded = 0; +} + +static void FIO_SyncCompressIO_clearSrc(FIO_SyncCompressIO* io) +{ + io->srcFile = NULL; + io->srcBuffer = io->inBuffer; + io->srcBufferLoaded = 0; +} + +static void FIO_SyncCompressIO_setDst(FIO_SyncCompressIO* io, FILE* file) +{ + io->dstFile = file; + io->storedSkips = 0; +} + +static int FIO_SyncCompressIO_closeDst(FIO_SyncCompressIO* io) +{ + int result = 0; + if (io->dstFile != NULL) { + FIO_SyncCompressIO_finish(io); + result = fclose(io->dstFile); + io->dstFile = NULL; + } + return result; +} + +static size_t FIO_SyncCompressIO_fillBuffer(FIO_SyncCompressIO* io, size_t minToHave) +{ + size_t added = 0; + if (io->srcFile == NULL) + return 0; + + if (minToHave > io->inCapacity) + minToHave = io->inCapacity; + + if (io->srcBufferLoaded >= minToHave) + return 0; + + if (io->srcBuffer != io->inBuffer) { + if (io->srcBufferLoaded > 0) + memmove(io->inBuffer, io->srcBuffer, io->srcBufferLoaded); + io->srcBuffer = io->inBuffer; + } + + while (io->srcBufferLoaded < minToHave) { + size_t const toRead = io->inCapacity - io->srcBufferLoaded; + size_t const readBytes = fread(io->inBuffer + io->srcBufferLoaded, 1, toRead, io->srcFile); + if (readBytes == 0) { + if (ferror(io->srcFile)) + EXM_THROW(37, "Read error"); + break; /* EOF */ + } + io->srcBufferLoaded += readBytes; + added += readBytes; + if (readBytes < toRead) + break; + } + + return added; +} -#define EXM_THROW(error, ...) 
\ -{ \ - DISPLAYLEVEL(1, "zstd: "); \ - DISPLAYLEVEL(5, "Error defined at %s, line %i : \n", __FILE__, __LINE__); \ - DISPLAYLEVEL(1, "error %i : ", error); \ - DISPLAYLEVEL(1, __VA_ARGS__); \ - DISPLAYLEVEL(1, " \n"); \ - exit(error); \ +static void FIO_SyncCompressIO_consumeBytes(FIO_SyncCompressIO* io, size_t n) +{ + assert(n <= io->srcBufferLoaded); + io->srcBuffer += n; + io->srcBufferLoaded -= n; + if (io->srcBufferLoaded == 0) + io->srcBuffer = io->inBuffer; } -#define CHECK_V(v, f) \ - v = f; \ - if (ZSTD_isError(v)) { \ - DISPLAYLEVEL(5, "%s \n", #f); \ - EXM_THROW(11, "%s", ZSTD_getErrorName(v)); \ +static void FIO_SyncCompressIO_commitOut(FIO_SyncCompressIO* io, const void* buffer, size_t size) +{ + if (size == 0) + return; + if (io->dstFile == NULL) { + assert(io->prefs->testMode); + return; } -#define CHECK(f) { size_t err; CHECK_V(err, f); } + io->storedSkips = FIO_sparseWrite(io->dstFile, buffer, size, io->prefs, io->storedSkips); +} + +static void FIO_SyncCompressIO_finish(FIO_SyncCompressIO* io) +{ + if (io->dstFile == NULL) + return; + FIO_sparseWriteEnd(io->prefs, io->dstFile, io->storedSkips); + io->storedSkips = 0; +} +#endif /* ZSTD_NOCOMPRESS */ /*-************************************ * Signal (Ctrl-C trapping) @@ -227,7 +489,7 @@ static void ABRThandler(int sig) { } #endif -void FIO_addAbortHandler() +void FIO_addAbortHandler(void) { #if BACKTRACE_ENABLE signal(SIGABRT, ABRThandler); @@ -238,82 +500,36 @@ void FIO_addAbortHandler() #endif } +/*-************************************* +* Parameters: FIO_ctx_t +***************************************/ -/*-************************************************************ -* Avoid fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW -***************************************************************/ -#if defined(_MSC_VER) && _MSC_VER >= 1400 -# define LONG_SEEK _fseeki64 -# define LONG_TELL _ftelli64 -#elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */ -# define LONG_SEEK fseeko -# define LONG_TELL ftello -#elif defined(__MINGW32__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) && defined(__MSVCRT__) -# define LONG_SEEK fseeko64 -# define LONG_TELL ftello64 -#elif defined(_WIN32) && !defined(__DJGPP__) -# include <windows.h> - static int LONG_SEEK(FILE* file, __int64 offset, int origin) { - LARGE_INTEGER off; - DWORD method; - off.QuadPart = offset; - if (origin == SEEK_END) - method = FILE_END; - else if (origin == SEEK_CUR) - method = FILE_CURRENT; - else - method = FILE_BEGIN; +/* typedef'd to FIO_ctx_t within fileio.h */ struct FIO_ctx_s { - if (SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)), off, NULL, method)) - return 0; - else - return -1; - } - static __int64 LONG_TELL(FILE* file) { - LARGE_INTEGER off, newOff; - off.QuadPart = 0; - newOff.QuadPart = 0; - SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)), off, &newOff, FILE_CURRENT); - return newOff.QuadPart; - } -#else -# define LONG_SEEK fseek -# define LONG_TELL ftell -#endif + /* file i/o info */ + int nbFilesTotal; + int hasStdinInput; + int hasStdoutOutput; + /* file i/o state */ + int currFileIdx; + int nbFilesProcessed; + size_t totalBytesInput; + size_t totalBytesOutput; }; -/*-************************************* -* Parameters: Typedefs -***************************************/ +static int FIO_shouldDisplayFileSummary(FIO_ctx_t const* fCtx) +{ + return fCtx->nbFilesTotal <= 1 || g_display_prefs.displayLevel >= 3; } -struct FIO_prefs_s { - - /* Algorithm preferences */ -
FIO_compressionType_t compressionType; - U32 sparseFileSupport; /* 0: no sparse allowed; 1: auto (file yes, stdout no); 2: force sparse */ - int dictIDFlag; - int checksumFlag; - int blockSize; - int overlapLog; - U32 adaptiveMode; - int rsyncable; - int minAdaptLevel; - int maxAdaptLevel; - int ldmFlag; - int ldmHashLog; - int ldmMinMatch; - int ldmBucketSizeLog; - int ldmHashRateLog; - ZSTD_literalCompressionMode_e literalCompressionMode; - - /* IO preferences */ - U32 removeSrcFile; - U32 overwrite; - - /* Computation resources preferences */ - unsigned memLimit; - int nbWorkers; -}; +static int FIO_shouldDisplayMultipleFileSummary(FIO_ctx_t const* fCtx) +{ + int const shouldDisplay = (fCtx->nbFilesProcessed >= 1 && fCtx->nbFilesTotal > 1); + assert(shouldDisplay || FIO_shouldDisplayFileSummary(fCtx) || fCtx->nbFilesProcessed == 0); + return shouldDisplay; +} /*-************************************* @@ -337,7 +553,7 @@ FIO_prefs_t* FIO_createPreferences(void) ret->removeSrcFile = 0; ret->memLimit = 0; ret->nbWorkers = 1; - ret->blockSize = 0; + ret->jobSize = 0; ret->overlapLog = FIO_OVERLAP_LOG_NOTSET; ret->adaptiveMode = 0; ret->rsyncable = 0; @@ -348,7 +564,30 @@ FIO_prefs_t* FIO_createPreferences(void) ret->ldmMinMatch = 0; ret->ldmBucketSizeLog = FIO_LDM_PARAM_NOTSET; ret->ldmHashRateLog = FIO_LDM_PARAM_NOTSET; - ret->literalCompressionMode = ZSTD_lcm_auto; + ret->streamSrcSize = 0; + ret->targetCBlockSize = 0; + ret->srcSizeHint = 0; + ret->testMode = 0; + ret->literalCompressionMode = ZSTD_ps_auto; + ret->excludeCompressedFiles = 0; + ret->allowBlockDevices = 0; + ret->asyncIO = AIO_supported(); + ret->passThrough = -1; + return ret; +} + +FIO_ctx_t* FIO_createContext(void) +{ + FIO_ctx_t* const ret = (FIO_ctx_t*)malloc(sizeof(FIO_ctx_t)); + if (!ret) EXM_THROW(21, "Allocation error : not enough memory"); + + ret->currFileIdx = 0; + ret->hasStdinInput = 0; + ret->hasStdoutOutput = 0; + ret->nbFilesTotal = 1; + ret->nbFilesProcessed = 0; + ret->totalBytesInput = 0; + ret->totalBytesOutput = 0; return ret; } @@ -357,6 +596,11 @@ void FIO_freePreferences(FIO_prefs_t* const prefs) free(prefs); } +void FIO_freeContext(FIO_ctx_t* const fCtx) +{ + free(fCtx); +} + /*-************************************* * Parameters: Display Options @@ -364,24 +608,26 @@ void FIO_freePreferences(FIO_prefs_t* const prefs) void FIO_setNotificationLevel(int level) { g_display_prefs.displayLevel=level; } -void FIO_setNoProgress(unsigned noProgress) { g_display_prefs.noProgress = noProgress; } +void FIO_setProgressSetting(FIO_progressSetting_e setting) { g_display_prefs.progressSetting = setting; } /*-************************************* * Parameters: Setters ***************************************/ +/* FIO_prefs_t functions */ + void FIO_setCompressionType(FIO_prefs_t* const prefs, FIO_compressionType_t compressionType) { prefs->compressionType = compressionType; } void FIO_overwriteMode(FIO_prefs_t* const prefs) { prefs->overwrite = 1; } -void FIO_setSparseWrite(FIO_prefs_t* const prefs, unsigned sparse) { prefs->sparseFileSupport = sparse; } +void FIO_setSparseWrite(FIO_prefs_t* const prefs, int sparse) { prefs->sparseFileSupport = sparse; } void FIO_setDictIDFlag(FIO_prefs_t* const prefs, int dictIDFlag) { prefs->dictIDFlag = dictIDFlag; } void FIO_setChecksumFlag(FIO_prefs_t* const prefs, int checksumFlag) { prefs->checksumFlag = checksumFlag; } -void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, unsigned flag) { prefs->removeSrcFile = (flag>0); } +void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, 
int flag) { prefs->removeSrcFile = (flag!=0); } void FIO_setMemLimit(FIO_prefs_t* const prefs, unsigned memLimit) { prefs->memLimit = memLimit; } @@ -392,10 +638,14 @@ void FIO_setNbWorkers(FIO_prefs_t* const prefs, int nbWorkers) { prefs->nbWorkers = nbWorkers; } -void FIO_setBlockSize(FIO_prefs_t* const prefs, int blockSize) { - if (blockSize && prefs->nbWorkers==0) +void FIO_setExcludeCompressedFile(FIO_prefs_t* const prefs, int excludeCompressedFiles) { prefs->excludeCompressedFiles = excludeCompressedFiles; } + +void FIO_setAllowBlockDevices(FIO_prefs_t* const prefs, int allowBlockDevices) { prefs->allowBlockDevices = allowBlockDevices; } + +void FIO_setJobSize(FIO_prefs_t* const prefs, int jobSize) { + if (jobSize && prefs->nbWorkers==0) DISPLAYLEVEL(2, "Setting block size is useless in single-thread mode \n"); - prefs->blockSize = blockSize; + prefs->jobSize = jobSize; } void FIO_setOverlapLog(FIO_prefs_t* const prefs, int overlapLog){ @@ -404,21 +654,41 @@ void FIO_setOverlapLog(FIO_prefs_t* const prefs, int overlapLog){ prefs->overlapLog = overlapLog; } -void FIO_setAdaptiveMode(FIO_prefs_t* const prefs, unsigned adapt) { +void FIO_setAdaptiveMode(FIO_prefs_t* const prefs, int adapt) { if ((adapt>0) && (prefs->nbWorkers==0)) EXM_THROW(1, "Adaptive mode is not compatible with single thread mode \n"); prefs->adaptiveMode = adapt; } +void FIO_setUseRowMatchFinder(FIO_prefs_t* const prefs, int useRowMatchFinder) { + prefs->useRowMatchFinder = useRowMatchFinder; +} + void FIO_setRsyncable(FIO_prefs_t* const prefs, int rsyncable) { if ((rsyncable>0) && (prefs->nbWorkers==0)) EXM_THROW(1, "Rsyncable mode is not compatible with single thread mode \n"); prefs->rsyncable = rsyncable; } +void FIO_setStreamSrcSize(FIO_prefs_t* const prefs, size_t streamSrcSize) { + prefs->streamSrcSize = streamSrcSize; +} + +void FIO_setTargetCBlockSize(FIO_prefs_t* const prefs, size_t targetCBlockSize) { + prefs->targetCBlockSize = targetCBlockSize; +} + +void FIO_setSrcSizeHint(FIO_prefs_t* const prefs, size_t srcSizeHint) { + prefs->srcSizeHint = (int)MIN((size_t)INT_MAX, srcSizeHint); +} + +void FIO_setTestMode(FIO_prefs_t* const prefs, int testMode) { + prefs->testMode = (testMode!=0); +} + void FIO_setLiteralCompressionMode( FIO_prefs_t* const prefs, - ZSTD_literalCompressionMode_e mode) { + ZSTD_ParamSwitch_e mode) { prefs->literalCompressionMode = mode; } @@ -456,45 +726,110 @@ void FIO_setLdmHashRateLog(FIO_prefs_t* const prefs, int ldmHashRateLog) { prefs->ldmHashRateLog = ldmHashRateLog; } +void FIO_setPatchFromMode(FIO_prefs_t* const prefs, int value) +{ + prefs->patchFromMode = value != 0; +} + +void FIO_setContentSize(FIO_prefs_t* const prefs, int value) +{ + prefs->contentSize = value != 0; +} + +void FIO_setAsyncIOFlag(FIO_prefs_t* const prefs, int value) { +#ifdef ZSTD_MULTITHREAD + prefs->asyncIO = value; +#else + (void) prefs; + (void) value; + DISPLAYLEVEL(2, "Note : asyncio is disabled (lack of multithreading support) \n"); +#endif +} + +void FIO_setPassThroughFlag(FIO_prefs_t* const prefs, int value) { + prefs->passThrough = (value != 0); +} + +void FIO_setMMapDict(FIO_prefs_t* const prefs, ZSTD_ParamSwitch_e value) +{ + prefs->mmapDict = value; +} + +/* FIO_ctx_t functions */ + +void FIO_setHasStdoutOutput(FIO_ctx_t* const fCtx, int value) { + fCtx->hasStdoutOutput = value; +} + +void FIO_setNbFilesTotal(FIO_ctx_t* const fCtx, int value) +{ + fCtx->nbFilesTotal = value; +} + +void FIO_determineHasStdinInput(FIO_ctx_t* const fCtx, const FileNamesTable* const filenames) { + size_t i = 0; 
+ for ( ; i < filenames->tableSize; ++i) { + if (!strcmp(stdinmark, filenames->fileNames[i])) { + fCtx->hasStdinInput = 1; + return; + } + } +} /*-************************************* * Functions ***************************************/ -/** FIO_remove() : +/** FIO_removeFile() : * @result : Unlink `fileName`, even if it's read-only */ -static int FIO_remove(const char* path) +static int FIO_removeFile(const char* path) { - if (!UTIL_isRegularFile(path)) { - DISPLAYLEVEL(2, "zstd: Refusing to remove non-regular file %s \n", path); + stat_t statbuf; + if (!UTIL_stat(path, &statbuf)) { + DISPLAYLEVEL(2, "zstd: Failed to stat %s while trying to remove it\n", path); + return 0; + } + if (!UTIL_isRegularFileStat(&statbuf)) { + DISPLAYLEVEL(2, "zstd: Refusing to remove non-regular file %s\n", path); return 0; } -#if defined(_WIN32) || defined(WIN32) +#if defined(_WIN32) /* Windows doesn't allow removing read-only files, * so try to make the file writable first */ - chmod(path, _S_IWRITE); + if (!(statbuf.st_mode & _S_IWRITE)) { + UTIL_chmod(path, &statbuf, _S_IWRITE); + } #endif return remove(path); } /** FIO_openSrcFile() : * condition : `srcFileName` must be non-NULL. + * optional: `prefs` may be NULL. * @result : FILE* to `srcFileName`, or NULL if it fails */ -static FILE* FIO_openSrcFile(const char* srcFileName) +static FILE* FIO_openSrcFile(const FIO_prefs_t* const prefs, const char* srcFileName, stat_t* statbuf) { + int allowBlockDevices = prefs != NULL ? prefs->allowBlockDevices : 0; assert(srcFileName != NULL); - if (!strcmp (srcFileName, stdinmark)) { + assert(statbuf != NULL); + + if (!strcmp(srcFileName, stdinmark)) { DISPLAYLEVEL(4,"Using stdin for input \n"); SET_BINARY_MODE(stdin); return stdin; } - if (!UTIL_fileExist(srcFileName)) { + if (!UTIL_stat(srcFileName, statbuf)) { DISPLAYLEVEL(1, "zstd: can't stat %s : %s -- ignored \n", srcFileName, strerror(errno)); return NULL; } - if (!UTIL_isRegularFile(srcFileName)) { + /* Accept regular files, FIFOs, and process substitution file descriptors */ + if (!UTIL_isRegularFileStat(statbuf) + && !UTIL_isFIFOStat(statbuf) + && !UTIL_isFileDescriptorPipe(srcFileName) /* Process substitution support */ + && !(allowBlockDevices && UTIL_isBlockDevStat(statbuf)) + ) { DISPLAYLEVEL(1, "zstd: %s is not a regular file -- ignored \n", srcFileName); return NULL; @@ -510,8 +845,13 @@ static FILE* FIO_openSrcFile(const char* srcFileName) /** FIO_openDstFile() : * condition : `dstFileName` must be non-NULL.
* @result : FILE* to `dstFileName`, or NULL if it fails */ -static FILE* FIO_openDstFile(FIO_prefs_t* const prefs, const char* srcFileName, const char* dstFileName) +static FILE* +FIO_openDstFile(FIO_ctx_t* fCtx, FIO_prefs_t* const prefs, + const char* srcFileName, const char* dstFileName, + const int mode) { + if (prefs->testMode) return NULL; /* do not open file in test mode */ + assert(dstFileName != NULL); if (!strcmp (dstFileName, stdoutmark)) { DISPLAYLEVEL(4,"Using stdout for output \n"); @@ -529,106 +869,525 @@ static FILE* FIO_openDstFile(FIO_prefs_t* const prefs, const char* srcFileName, return NULL; } - if (prefs->sparseFileSupport == 1) { - prefs->sparseFileSupport = ZSTD_SPARSE_DEFAULT; - } - if (UTIL_isRegularFile(dstFileName)) { /* Check if destination file already exists */ - FILE* const fCheck = fopen( dstFileName, "rb" ); +#if !defined(_WIN32) + /* this test does not work on Windows : + * `NUL` and `nul` are detected as regular files */ if (!strcmp(dstFileName, nulmark)) { EXM_THROW(40, "%s is unexpectedly categorized as a regular file", dstFileName); } - if (fCheck != NULL) { /* dst file exists, authorization prompt */ - fclose(fCheck); - if (!prefs->overwrite) { - if (g_display_prefs.displayLevel <= 1) { - /* No interaction possible */ - DISPLAY("zstd: %s already exists; not overwritten \n", - dstFileName); - return NULL; - } - DISPLAY("zstd: %s already exists; overwrite (y/N) ? ", +#endif + if (!prefs->overwrite) { + if (g_display_prefs.displayLevel <= 1) { + /* No interaction possible */ + DISPLAYLEVEL(1, "zstd: %s already exists; not overwritten \n", dstFileName); - { int ch = getchar(); - if ((ch!='Y') && (ch!='y')) { - DISPLAY(" not overwritten \n"); - return NULL; - } - /* flush rest of input line */ - while ((ch!=EOF) && (ch!='\n')) ch = getchar(); - } } - /* need to unlink */ - FIO_remove(dstFileName); - } } + return NULL; + } + DISPLAY("zstd: %s already exists; ", dstFileName); + if (UTIL_requireUserConfirmation("overwrite (y/n) ? ", "Not overwritten \n", "yY", fCtx->hasStdinInput)) + return NULL; + } + /* need to unlink */ + FIO_removeFile(dstFileName); + } + + { + int isDstRegFile; +#if defined(_WIN32) + /* Windows requires opening the file as a "binary" file to avoid + * mangling. This macro doesn't exist on unix. */ + const int openflags = O_WRONLY|O_CREAT|O_TRUNC|O_BINARY; + const int fd = _open(dstFileName, openflags, mode); + FILE* f = NULL; + if (fd != -1) { + f = _fdopen(fd, "wb"); + } +#else + const int openflags = O_WRONLY|O_CREAT|O_TRUNC; + const int fd = open(dstFileName, openflags, mode); + FILE* f = NULL; + if (fd != -1) { + f = fdopen(fd, "wb"); + } +#endif + + /* Check regular file after opening with O_CREAT */ + isDstRegFile = UTIL_isFdRegularFile(fd); + if (prefs->sparseFileSupport == 1) { + prefs->sparseFileSupport = ZSTD_SPARSE_DEFAULT; + if (!isDstRegFile) { + prefs->sparseFileSupport = 0; + DISPLAYLEVEL(4, "Sparse File Support is disabled when output is not a file \n"); + } + } - { FILE* const f = fopen( dstFileName, "wb" ); if (f == NULL) { - DISPLAYLEVEL(1, "zstd: %s: %s\n", dstFileName, strerror(errno)); + if (UTIL_isFileDescriptorPipe(dstFileName)) { + DISPLAYLEVEL(1, "zstd: error: no output specified (use -o or -c). \n"); + } else { + DISPLAYLEVEL(1, "zstd: %s: %s\n", dstFileName, strerror(errno)); + } } else { - chmod(dstFileName, 00600); + /* An increased buffer size can provide a significant performance + * boost on some platforms. 
Note that providing a NULL buf with a + * size that's not 0 is not defined in ANSI C, but is defined in an + * extension. There are three possibilities here: + * 1. Libc supports the extended version and everything is good. + * 2. Libc ignores the size when buf is NULL, in which case + * everything will continue as if we didn't call `setvbuf()`. + * 3. We fail the call and execution continues but a warning + * message might be shown. + * In all cases, execution continues. For now, I believe that + * this is a more cost-effective solution than managing the buffer + * allocations ourselves (will require an API change). + */ + if (setvbuf(f, NULL, _IOFBF, 1 MB)) { + DISPLAYLEVEL(2, "Warning: setvbuf failed for %s\n", dstFileName); + } } return f; } } -/*! FIO_createDictBuffer() : - * creates a buffer, pointed by `*bufferPtr`, +/* FIO_getDictFileStat() : + */ +static void FIO_getDictFileStat(const char* fileName, stat_t* dictFileStat) { + assert(dictFileStat != NULL); + if (fileName == NULL) return; + + if (!UTIL_stat(fileName, dictFileStat)) { + EXM_THROW(31, "Stat failed on dictionary file %s: %s", fileName, strerror(errno)); + } + + if (!UTIL_isRegularFileStat(dictFileStat)) { + EXM_THROW(32, "Dictionary %s must be a regular file.", fileName); + } +} + +/* FIO_setDictBufferMalloc() : + * allocates a buffer, pointed to by `dict->dictBuffer`, * loads `filename` content into it, up to DICTSIZE_MAX bytes. * @return : loaded size * if fileName==NULL, returns 0 and a NULL pointer */ -static size_t FIO_createDictBuffer(void** bufferPtr, const char* fileName) +static size_t FIO_setDictBufferMalloc(FIO_Dict_t* dict, const char* fileName, FIO_prefs_t* const prefs, stat_t* dictFileStat) { FILE* fileHandle; U64 fileSize; + void** bufferPtr = &dict->dictBuffer; assert(bufferPtr != NULL); + assert(dictFileStat != NULL); *bufferPtr = NULL; if (fileName == NULL) return 0; DISPLAYLEVEL(4,"Loading %s as dictionary \n", fileName); + fileHandle = fopen(fileName, "rb"); - if (fileHandle==NULL) EXM_THROW(31, "%s: %s", fileName, strerror(errno)); - fileSize = UTIL_getFileSize(fileName); - if (fileSize > DICTSIZE_MAX) { - EXM_THROW(32, "Dictionary file %s is too large (> %u MB)", - fileName, DICTSIZE_MAX >> 20); /* avoid extreme cases */ + if (fileHandle == NULL) { + EXM_THROW(33, "Couldn't open dictionary %s: %s", fileName, strerror(errno)); + } + + fileSize = UTIL_getFileSizeStat(dictFileStat); + { + size_t const dictSizeMax = prefs->patchFromMode ?
prefs->memLimit : DICTSIZE_MAX; + if (fileSize > dictSizeMax) { + EXM_THROW(34, "Dictionary file %s is too large (> %u bytes)", + fileName, (unsigned)dictSizeMax); /* avoid extreme cases */ + } } *bufferPtr = malloc((size_t)fileSize); if (*bufferPtr==NULL) EXM_THROW(34, "%s", strerror(errno)); { size_t const readSize = fread(*bufferPtr, 1, (size_t)fileSize, fileHandle); - if (readSize != fileSize) + if (readSize != fileSize) { EXM_THROW(35, "Error reading dictionary file %s : %s", fileName, strerror(errno)); + } } fclose(fileHandle); return (size_t)fileSize; } +#if (PLATFORM_POSIX_VERSION > 0) +#include <sys/mman.h> +static void FIO_munmap(FIO_Dict_t* dict) +{ + munmap(dict->dictBuffer, dict->dictBufferSize); + dict->dictBuffer = NULL; + dict->dictBufferSize = 0; +} +static size_t FIO_setDictBufferMMap(FIO_Dict_t* dict, const char* fileName, FIO_prefs_t* const prefs, stat_t* dictFileStat) +{ + int fileHandle; + U64 fileSize; + void** bufferPtr = &dict->dictBuffer; + + assert(bufferPtr != NULL); + assert(dictFileStat != NULL); + *bufferPtr = NULL; + if (fileName == NULL) return 0; + + DISPLAYLEVEL(4,"Loading %s as dictionary \n", fileName); + + fileHandle = open(fileName, O_RDONLY); + + if (fileHandle == -1) { + EXM_THROW(33, "Couldn't open dictionary %s: %s", fileName, strerror(errno)); + } + + fileSize = UTIL_getFileSizeStat(dictFileStat); + { + size_t const dictSizeMax = prefs->patchFromMode ? prefs->memLimit : DICTSIZE_MAX; + if (fileSize > dictSizeMax) { + EXM_THROW(34, "Dictionary file %s is too large (> %u bytes)", + fileName, (unsigned)dictSizeMax); /* avoid extreme cases */ + } + } + + *bufferPtr = mmap(NULL, (size_t)fileSize, PROT_READ, MAP_PRIVATE, fileHandle, 0); + if (*bufferPtr==MAP_FAILED) EXM_THROW(34, "%s", strerror(errno)); + + close(fileHandle); + return (size_t)fileSize; +} +#elif defined(_MSC_VER) || defined(_WIN32) +#include <windows.h> +static void FIO_munmap(FIO_Dict_t* dict) +{ + UnmapViewOfFile(dict->dictBuffer); + CloseHandle(dict->dictHandle); + dict->dictBuffer = NULL; + dict->dictBufferSize = 0; +} +static size_t FIO_setDictBufferMMap(FIO_Dict_t* dict, const char* fileName, FIO_prefs_t* const prefs, stat_t* dictFileStat) +{ + HANDLE fileHandle, mapping; + U64 fileSize; + void** bufferPtr = &dict->dictBuffer; + + assert(bufferPtr != NULL); + assert(dictFileStat != NULL); + *bufferPtr = NULL; + if (fileName == NULL) return 0; + + DISPLAYLEVEL(4,"Loading %s as dictionary \n", fileName); + + fileHandle = CreateFileA(fileName, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_READONLY, NULL); + + if (fileHandle == INVALID_HANDLE_VALUE) { + EXM_THROW(33, "Couldn't open dictionary %s: %s", fileName, strerror(errno)); + } + + fileSize = UTIL_getFileSizeStat(dictFileStat); + { + size_t const dictSizeMax = prefs->patchFromMode ?
prefs->memLimit : DICTSIZE_MAX; + if (fileSize > dictSizeMax) { + EXM_THROW(34, "Dictionary file %s is too large (> %u bytes)", + fileName, (unsigned)dictSizeMax); /* avoid extreme cases */ + } + } + + mapping = CreateFileMapping(fileHandle, NULL, PAGE_READONLY, 0, 0, NULL); + if (mapping == NULL) { + EXM_THROW(35, "Couldn't map dictionary %s: %s", fileName, strerror(errno)); + } + + *bufferPtr = MapViewOfFile(mapping, FILE_MAP_READ, 0, 0, (DWORD)fileSize); /* we can only cast to DWORD here because dictSize <= 2GB */ + if (*bufferPtr==NULL) EXM_THROW(36, "%s", strerror(errno)); + + dict->dictHandle = fileHandle; + return (size_t)fileSize; +} +#else +static size_t FIO_setDictBufferMMap(FIO_Dict_t* dict, const char* fileName, FIO_prefs_t* const prefs, stat_t* dictFileStat) +{ + return FIO_setDictBufferMalloc(dict, fileName, prefs, dictFileStat); +} +static void FIO_munmap(FIO_Dict_t* dict) { + free(dict->dictBuffer); + dict->dictBuffer = NULL; + dict->dictBufferSize = 0; +} +#endif + +static void FIO_freeDict(FIO_Dict_t* dict) { + if (dict->dictBufferType == FIO_mallocDict) { + free(dict->dictBuffer); + dict->dictBuffer = NULL; + dict->dictBufferSize = 0; + } else if (dict->dictBufferType == FIO_mmapDict) { + FIO_munmap(dict); + } else { + assert(0); /* Should not reach this case */ + } +} + +static void FIO_initDict(FIO_Dict_t* dict, const char* fileName, FIO_prefs_t* const prefs, stat_t* dictFileStat, FIO_dictBufferType_t dictBufferType) { + dict->dictBufferType = dictBufferType; + if (dict->dictBufferType == FIO_mallocDict) { + dict->dictBufferSize = FIO_setDictBufferMalloc(dict, fileName, prefs, dictFileStat); + } else if (dict->dictBufferType == FIO_mmapDict) { + dict->dictBufferSize = FIO_setDictBufferMMap(dict, fileName, prefs, dictFileStat); + } else { + assert(0); /* Should not reach this case */ + } +} + + +/* FIO_checkFilenameCollisions() : + * Checks for and warns if there are any files that would have the same output path + */ +int FIO_checkFilenameCollisions(const char** filenameTable, unsigned nbFiles) { + const char **filenameTableSorted, *prevElem, *filename; + unsigned u; + + filenameTableSorted = (const char**) malloc(sizeof(char*) * nbFiles); + if (!filenameTableSorted) { + DISPLAYLEVEL(1, "Allocation error during filename collision checking \n"); + return 1; + } + + for (u = 0; u < nbFiles; ++u) { + filename = strrchr(filenameTable[u], PATH_SEP); + if (filename == NULL) { + filenameTableSorted[u] = filenameTable[u]; + } else { + filenameTableSorted[u] = filename+1; + } + } + + qsort((void*)filenameTableSorted, nbFiles, sizeof(char*), UTIL_compareStr); + prevElem = filenameTableSorted[0]; + for (u = 1; u < nbFiles; ++u) { + if (strcmp(prevElem, filenameTableSorted[u]) == 0) { + DISPLAYLEVEL(2, "WARNING: Two files have same filename: %s\n", prevElem); + } + prevElem = filenameTableSorted[u]; + } + + free((void*)filenameTableSorted); + return 0; +} + +static const char* +extractFilename(const char* path, char separator) +{ + const char* search = strrchr(path, separator); + if (search == NULL) return path; + return search+1; +} + +/* FIO_createFilename_fromOutDir() : + * Takes a source file name and specified output directory, and + * allocates memory for and returns a pointer to final path. 
+ This function never returns an error (it may abort() in case of a problem) + */ +static char* +FIO_createFilename_fromOutDir(const char* path, const char* outDirName, const size_t suffixLen) +{ + const char* filenameStart; + char separator; + char* result; + +#if defined(_MSC_VER) || defined(__MINGW32__) || defined (__MSVCRT__) /* windows support */ + separator = '\\'; +#else + separator = '/'; +#endif + + filenameStart = extractFilename(path, separator); +#if defined(_MSC_VER) || defined(__MINGW32__) || defined (__MSVCRT__) /* windows support */ + filenameStart = extractFilename(filenameStart, '/'); /* sometimes, '/' separator is also used on Windows (mingw+msys2) */ +#endif + + result = (char*) calloc(1, strlen(outDirName) + 1 + strlen(filenameStart) + suffixLen + 1); + if (!result) { + EXM_THROW(30, "zstd: FIO_createFilename_fromOutDir: %s", strerror(errno)); + } + + memcpy(result, outDirName, strlen(outDirName)); + if (outDirName[strlen(outDirName)-1] == separator) { + memcpy(result + strlen(outDirName), filenameStart, strlen(filenameStart)); + } else { + memcpy(result + strlen(outDirName), &separator, 1); + memcpy(result + strlen(outDirName) + 1, filenameStart, strlen(filenameStart)); + } + + return result; +} + +/* FIO_highbit64() : + * gives position of highest bit. + * note : only works for v > 0 ! + */ +static unsigned FIO_highbit64(unsigned long long v) +{ + unsigned count = 0; + assert(v != 0); + v >>= 1; + while (v) { v >>= 1; count++; } + return count; +} + +static void FIO_adjustMemLimitForPatchFromMode(FIO_prefs_t* const prefs, + unsigned long long const dictSize, + unsigned long long const maxSrcFileSize) +{ + unsigned long long maxSize = MAX(prefs->memLimit, MAX(dictSize, maxSrcFileSize)); + unsigned const maxWindowSize = (1U << ZSTD_WINDOWLOG_MAX); + if (maxSize == UTIL_FILESIZE_UNKNOWN) + EXM_THROW(42, "Using --patch-from with stdin requires --stream-size"); + assert(maxSize != UTIL_FILESIZE_UNKNOWN); + if (maxSize > maxWindowSize) + EXM_THROW(42, "Can't handle files larger than %u GB\n", maxWindowSize/(1 GB)); + FIO_setMemLimit(prefs, (unsigned)maxSize); +} + +/* FIO_multiFilesConcatWarning() : + * This function handles logic when processing multiple files with -o or -c, displaying the appropriate warnings/prompts. + * Returns 1 if the console should abort, 0 if the console should proceed. + * + * If output is stdout or test mode is active, check that `--rm` is disabled. + * + * If there is just 1 file to process, zstd will proceed as usual. + * If each file gets processed into its own separate destination file, proceed as usual. + * + * When multiple files are processed into a single output, + * display a warning message, then disable --rm if it's set. + * + * If -f is specified or if output is stdout, just proceed. + * If output is set with -o, prompt for confirmation. + */ +static int FIO_multiFilesConcatWarning(const FIO_ctx_t* fCtx, FIO_prefs_t* prefs, const char* outFileName, int displayLevelCutoff) +{ + if (fCtx->hasStdoutOutput) { + if (prefs->removeSrcFile) + /* this should not happen ; hard fail, to protect user's data + * note: this should rather be an assert(), but we want to be certain that user's data will not be wiped out in case it nonetheless happens */ + EXM_THROW(43, "It's not allowed to remove input files when processed output is piped to stdout. " + "This scenario is not supposed to be possible. " + "This is a programming error.
File an issue for it to be fixed."); + } + if (prefs->testMode) { + if (prefs->removeSrcFile) + /* this should not happen ; hard fail, to protect user's data + * note: this should rather be an assert(), but we want to be certain that user's data will not be wiped out in case it nonetheless happens */ + EXM_THROW(43, "Test mode shall not remove input files! " + "This scenario is not supposed to be possible. " + "This is a programming error. File an issue for it to be fixed."); + return 0; + } + + if (fCtx->nbFilesTotal == 1) return 0; + assert(fCtx->nbFilesTotal > 1); + + if (!outFileName) return 0; + + if (fCtx->hasStdoutOutput) { + DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into stdout. \n"); + } else { + DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into a single output file: %s \n", outFileName); + } + DISPLAYLEVEL(2, "The concatenated output CANNOT regenerate original file names nor directory structure. \n"); + + /* multi-input into single output : --rm is not allowed */ + if (prefs->removeSrcFile) { + DISPLAYLEVEL(2, "Since it's a destructive operation, input files will not be removed. \n"); + prefs->removeSrcFile = 0; + } + + if (fCtx->hasStdoutOutput) return 0; + if (prefs->overwrite) return 0; + + /* multiple files concatenated into single destination file using -o without -f */ + if (g_display_prefs.displayLevel <= displayLevelCutoff) { + /* quiet mode => no prompt => fail automatically */ + DISPLAYLEVEL(1, "Concatenating multiple processed inputs into a single output loses file metadata. \n"); + DISPLAYLEVEL(1, "Aborting. \n"); + return 1; + } + /* normal mode => prompt */ + return UTIL_requireUserConfirmation("Proceed? (y/n): ", "Aborting...", "yY", fCtx->hasStdinInput); +} + +static ZSTD_inBuffer setInBuffer(const void* buf, size_t s, size_t pos) +{ + ZSTD_inBuffer i; + i.src = buf; + i.size = s; + i.pos = pos; + return i; +} + +static ZSTD_outBuffer setOutBuffer(void* buf, size_t s, size_t pos) +{ + ZSTD_outBuffer o; + o.dst = buf; + o.size = s; + o.pos = pos; + return o; +} + #ifndef ZSTD_NOCOMPRESS /* ********************************************************************** * Compression ************************************************************************/ typedef struct { - FILE* srcFile; - FILE* dstFile; - void* srcBuffer; - size_t srcBufferSize; - void* dstBuffer; - size_t dstBufferSize; + FIO_Dict_t dict; const char* dictFileName; + stat_t dictFileStat; ZSTD_CStream* cctx; + FIO_SyncCompressIO io; } cRess_t; +/** ZSTD_cycleLog() : + * condition for correct operation : hashLog > 1 */ +static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat) +{ + U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2); + assert(hashLog > 1); + return hashLog - btScale; +} + +static void FIO_adjustParamsForPatchFromMode(FIO_prefs_t* const prefs, + ZSTD_compressionParameters* comprParams, + unsigned long long const dictSize, + unsigned long long const maxSrcFileSize, + int cLevel) +{ + unsigned const fileWindowLog = FIO_highbit64(maxSrcFileSize) + 1; + ZSTD_compressionParameters const cParams = ZSTD_getCParams(cLevel, (size_t)maxSrcFileSize, (size_t)dictSize); + FIO_adjustMemLimitForPatchFromMode(prefs, dictSize, maxSrcFileSize); + if (fileWindowLog > ZSTD_WINDOWLOG_MAX) + DISPLAYLEVEL(1, "Max window log exceeded by file (compression ratio will suffer)\n"); + comprParams->windowLog = MAX(ZSTD_WINDOWLOG_MIN, MIN(ZSTD_WINDOWLOG_MAX, fileWindowLog)); + if (fileWindowLog > ZSTD_cycleLog(cParams.chainLog,
cParams.strategy)) { + if (!prefs->ldmFlag) + DISPLAYLEVEL(2, "long mode automatically triggered\n"); + FIO_setLdmFlag(prefs, 1); + } + if (cParams.strategy >= ZSTD_btopt) { + DISPLAYLEVEL(4, "[Optimal parser notes] Consider the following to improve patch size at the cost of speed:\n"); + DISPLAYLEVEL(4, "- Set a larger targetLength (e.g. --zstd=targetLength=4096)\n"); + DISPLAYLEVEL(4, "- Set a larger chainLog (e.g. --zstd=chainLog=%u)\n", ZSTD_CHAINLOG_MAX); + DISPLAYLEVEL(4, "- Set a larger LDM hashLog (e.g. --zstd=ldmHashLog=%u)\n", ZSTD_LDM_HASHLOG_MAX); + DISPLAYLEVEL(4, "- Set a smaller LDM rateLog (e.g. --zstd=ldmHashRateLog=%u)\n", ZSTD_LDM_HASHRATELOG_MIN); + DISPLAYLEVEL(4, "Also consider playing around with searchLog and hashLog\n"); + } +} + static cRess_t FIO_createCResources(FIO_prefs_t* const prefs, - const char* dictFileName, int cLevel, - U64 srcSize, - ZSTD_compressionParameters comprParams) { + const char* dictFileName, unsigned long long const maxSrcFileSize, + int cLevel, ZSTD_compressionParameters comprParams) { + int useMMap = prefs->mmapDict == ZSTD_ps_enable; + int forceNoUseMMap = prefs->mmapDict == ZSTD_ps_disable; + FIO_dictBufferType_t dictBufferType; cRess_t ress; memset(&ress, 0, sizeof(ress)); @@ -637,74 +1396,87 @@ static cRess_t FIO_createCResources(FIO_prefs_t* const prefs, if (ress.cctx == NULL) EXM_THROW(30, "allocation error (%s): can't create ZSTD_CCtx", strerror(errno)); - ress.srcBufferSize = ZSTD_CStreamInSize(); - ress.srcBuffer = malloc(ress.srcBufferSize); - ress.dstBufferSize = ZSTD_CStreamOutSize(); - ress.dstBuffer = malloc(ress.dstBufferSize); - if (!ress.srcBuffer || !ress.dstBuffer) - EXM_THROW(31, "allocation error : not enough memory"); + + FIO_getDictFileStat(dictFileName, &ress.dictFileStat); + + /* need to update memLimit before calling createDictBuffer + * because of memLimit check inside it */ + if (prefs->patchFromMode) { + U64 const dictSize = UTIL_getFileSizeStat(&ress.dictFileStat); + unsigned long long const ssSize = (unsigned long long)prefs->streamSrcSize; + useMMap |= dictSize > prefs->memLimit; + FIO_adjustParamsForPatchFromMode(prefs, &comprParams, dictSize, ssSize > 0 ? ssSize : maxSrcFileSize, cLevel); + } + + dictBufferType = (useMMap && !forceNoUseMMap) ? 
FIO_mmapDict : FIO_mallocDict; + FIO_initDict(&ress.dict, dictFileName, prefs, &ress.dictFileStat, dictBufferType); /* works with dictFileName==NULL */ + + FIO_SyncCompressIO_init(&ress.io, prefs, ZSTD_CStreamInSize(), ZSTD_CStreamOutSize()); /* Advanced parameters, including dictionary */ - { void* dictBuffer; - size_t const dictBuffSize = FIO_createDictBuffer(&dictBuffer, dictFileName); /* works with dictFileName==NULL */ - if (dictFileName && (dictBuffer==NULL)) - EXM_THROW(32, "allocation error : can't create dictBuffer"); - ress.dictFileName = dictFileName; - - if (prefs->adaptiveMode && !prefs->ldmFlag && !comprParams.windowLog) - comprParams.windowLog = ADAPT_WINDOWLOG_DEFAULT; - - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_contentSizeFlag, 1) ); /* always enable content size when available (note: supposed to be default) */ - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_dictIDFlag, prefs->dictIDFlag) ); - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_checksumFlag, prefs->checksumFlag) ); - /* compression level */ - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_compressionLevel, cLevel) ); - /* long distance matching */ - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_enableLongDistanceMatching, prefs->ldmFlag) ); - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_ldmHashLog, prefs->ldmHashLog) ); - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_ldmMinMatch, prefs->ldmMinMatch) ); - if (prefs->ldmBucketSizeLog != FIO_LDM_PARAM_NOTSET) { - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_ldmBucketSizeLog, prefs->ldmBucketSizeLog) ); - } - if (prefs->ldmHashRateLog != FIO_LDM_PARAM_NOTSET) { - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_ldmHashRateLog, prefs->ldmHashRateLog) ); - } - /* compression parameters */ - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_windowLog, (int)comprParams.windowLog) ); - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_chainLog, (int)comprParams.chainLog) ); - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_hashLog, (int)comprParams.hashLog) ); - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_searchLog, (int)comprParams.searchLog) ); - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_minMatch, (int)comprParams.minMatch) ); - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_targetLength, (int)comprParams.targetLength) ); - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_strategy, comprParams.strategy) ); - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_literalCompressionMode, (int)prefs->literalCompressionMode) ); - /* multi-threading */ + if (dictFileName && (ress.dict.dictBuffer==NULL)) + EXM_THROW(32, "allocation error : can't create dictBuffer"); + ress.dictFileName = dictFileName; + + if (prefs->adaptiveMode && !prefs->ldmFlag && !comprParams.windowLog) + comprParams.windowLog = ADAPT_WINDOWLOG_DEFAULT; + + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_contentSizeFlag, prefs->contentSize) ); /* always enable content size when available (note: supposed to be default) */ + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_dictIDFlag, prefs->dictIDFlag) ); + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_checksumFlag, prefs->checksumFlag) ); + /* compression level */ + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_compressionLevel, cLevel) ); + /* max compressed block size */ + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_targetCBlockSize, (int)prefs->targetCBlockSize) ); + /* source size hint */ + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_srcSizeHint, (int)prefs->srcSizeHint) ); + /* long distance matching */ + CHECK( 
ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_enableLongDistanceMatching, prefs->ldmFlag) ); + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_ldmHashLog, prefs->ldmHashLog) ); + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_ldmMinMatch, prefs->ldmMinMatch) ); + if (prefs->ldmBucketSizeLog != FIO_LDM_PARAM_NOTSET) { + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_ldmBucketSizeLog, prefs->ldmBucketSizeLog) ); + } + if (prefs->ldmHashRateLog != FIO_LDM_PARAM_NOTSET) { + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_ldmHashRateLog, prefs->ldmHashRateLog) ); + } + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_useRowMatchFinder, prefs->useRowMatchFinder)); + /* compression parameters */ + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_windowLog, (int)comprParams.windowLog) ); + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_chainLog, (int)comprParams.chainLog) ); + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_hashLog, (int)comprParams.hashLog) ); + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_searchLog, (int)comprParams.searchLog) ); + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_minMatch, (int)comprParams.minMatch) ); + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_targetLength, (int)comprParams.targetLength) ); + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_strategy, (int)comprParams.strategy) ); + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_literalCompressionMode, (int)prefs->literalCompressionMode) ); + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_enableDedicatedDictSearch, 1) ); + /* multi-threading */ #ifdef ZSTD_MULTITHREAD - DISPLAYLEVEL(5,"set nb workers = %u \n", prefs->nbWorkers); - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_nbWorkers, prefs->nbWorkers) ); - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_jobSize, prefs->blockSize) ); - if (prefs->overlapLog != FIO_OVERLAP_LOG_NOTSET) { - DISPLAYLEVEL(3,"set overlapLog = %u \n", prefs->overlapLog); - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_overlapLog, prefs->overlapLog) ); - } - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_rsyncable, prefs->rsyncable) ); + DISPLAYLEVEL(5,"set nb workers = %u \n", prefs->nbWorkers); + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_nbWorkers, prefs->nbWorkers) ); + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_jobSize, prefs->jobSize) ); + if (prefs->overlapLog != FIO_OVERLAP_LOG_NOTSET) { + DISPLAYLEVEL(3,"set overlapLog = %u \n", prefs->overlapLog); + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_overlapLog, prefs->overlapLog) ); + } + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_rsyncable, prefs->rsyncable) ); #endif - /* dictionary */ - CHECK( ZSTD_CCtx_setPledgedSrcSize(ress.cctx, srcSize) ); /* set the value temporarily for dictionary loading, to adapt compression parameters */ - CHECK( ZSTD_CCtx_loadDictionary(ress.cctx, dictBuffer, dictBuffSize) ); - CHECK( ZSTD_CCtx_setPledgedSrcSize(ress.cctx, ZSTD_CONTENTSIZE_UNKNOWN) ); /* reset */ - - free(dictBuffer); + /* dictionary */ + if (prefs->patchFromMode) { + CHECK( ZSTD_CCtx_refPrefix(ress.cctx, ress.dict.dictBuffer, ress.dict.dictBufferSize) ); + } else { + CHECK( ZSTD_CCtx_loadDictionary_byReference(ress.cctx, ress.dict.dictBuffer, ress.dict.dictBufferSize) ); } return ress; } -static void FIO_freeCResources(cRess_t ress) +static void FIO_freeCResources(cRess_t* const ress) { - free(ress.srcBuffer); - free(ress.dstBuffer); - ZSTD_freeCStream(ress.cctx); /* never fails */ + FIO_freeDict(&(ress->dict)); + FIO_SyncCompressIO_destroy(&ress->io); + ZSTD_freeCStream(ress->cctx); /* never fails */ } 
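The cRess_t lifecycle above is built on the FIO_SyncCompressIO helper introduced earlier in this patch. As a minimal sketch (not part of the patch; the function name syncIOLoopSketch is illustrative and the actual codec call is elided, so the bytes are simply passed through), a codec loop is expected to drive the helper like this:

static unsigned long long
syncIOLoopSketch(FIO_SyncCompressIO* io, size_t chunkSize)
{
    unsigned long long total = 0;
    for (;;) {
        /* top up the input window; the return value is the number of bytes newly read */
        (void)FIO_SyncCompressIO_fillBuffer(io, chunkSize);
        if (io->srcBufferLoaded == 0) break;   /* EOF: nothing left to process */
        {   size_t const n = MIN(chunkSize, io->srcBufferLoaded);
            /* a real codec would transform io->srcBuffer[0..n) here */
            FIO_SyncCompressIO_commitOut(io, io->srcBuffer, n);   /* sparse-aware write */
            FIO_SyncCompressIO_consumeBytes(io, n);               /* slide the input window */
            total += n;
    }   }
    FIO_SyncCompressIO_finish(io);   /* materialize any trailing sparse hole */
    return total;
}

The gzip, lzma and lz4 paths below all follow this same fill / consume / commit / finish sequence, differing only in the codec driven between the fill and the commit.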
@@ -714,9 +1486,9 @@ FIO_compressGzFrame(cRess_t* ress, const char* srcFileName, U64 const srcFileSize, int compressionLevel, U64* readsize) { + FIO_SyncCompressIO* const syncIO = &ress->io; unsigned long long inFileSize = 0, outFileSize = 0; z_stream strm; - int ret; if (compressionLevel > Z_BEST_COMPRESSION) compressionLevel = Z_BEST_COMPRESSION; @@ -725,67 +1497,76 @@ FIO_compressGzFrame(cRess_t* ress, strm.zfree = Z_NULL; strm.opaque = Z_NULL; - ret = deflateInit2(&strm, compressionLevel, Z_DEFLATED, + { int const ret = deflateInit2(&strm, compressionLevel, Z_DEFLATED, 15 /* maxWindowLogSize */ + 16 /* gzip only */, - 8, Z_DEFAULT_STRATEGY); /* see http://www.zlib.net/manual.html */ - if (ret != Z_OK) - EXM_THROW(71, "zstd: %s: deflateInit2 error %d \n", srcFileName, ret); + 8, Z_DEFAULT_STRATEGY); /* see https://www.zlib.net/manual.html */ + if (ret != Z_OK) { + EXM_THROW(71, "zstd: %s: deflateInit2 error %d \n", srcFileName, ret); + } } strm.next_in = 0; strm.avail_in = 0; - strm.next_out = (Bytef*)ress->dstBuffer; - strm.avail_out = (uInt)ress->dstBufferSize; + strm.next_out = (Bytef*)syncIO->outBuffer; + strm.avail_out = (uInt)syncIO->outCapacity; while (1) { + int ret; if (strm.avail_in == 0) { - size_t const inSize = fread(ress->srcBuffer, 1, ress->srcBufferSize, ress->srcFile); - if (inSize == 0) break; - inFileSize += inSize; - strm.next_in = (z_const unsigned char*)ress->srcBuffer; - strm.avail_in = (uInt)inSize; + size_t const added = FIO_SyncCompressIO_fillBuffer(syncIO, ZSTD_CStreamInSize()); + if (syncIO->srcBufferLoaded == 0) break; + inFileSize += added; + *readsize += added; + strm.next_in = (z_const unsigned char*)syncIO->srcBuffer; + strm.avail_in = (uInt)syncIO->srcBufferLoaded; + } + + { + size_t const availBefore = strm.avail_in; + ret = deflate(&strm, Z_NO_FLUSH); + FIO_SyncCompressIO_consumeBytes(syncIO, availBefore - strm.avail_in); } - ret = deflate(&strm, Z_NO_FLUSH); + if (ret != Z_OK) EXM_THROW(72, "zstd: %s: deflate error %d \n", srcFileName, ret); - { size_t const decompBytes = ress->dstBufferSize - strm.avail_out; - if (decompBytes) { - if (fwrite(ress->dstBuffer, 1, decompBytes, ress->dstFile) != decompBytes) - EXM_THROW(73, "Write error : cannot write to output file"); - outFileSize += decompBytes; - strm.next_out = (Bytef*)ress->dstBuffer; - strm.avail_out = (uInt)ress->dstBufferSize; - } - } - if (srcFileSize == UTIL_FILESIZE_UNKNOWN) - DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%", - (unsigned)(inFileSize>>20), - (double)outFileSize/inFileSize*100) - else - DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%", - (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20), - (double)outFileSize/inFileSize*100); - } + { size_t const cSize = (size_t)((uInt)syncIO->outCapacity - strm.avail_out); + if (cSize) { + FIO_SyncCompressIO_commitOut(syncIO, syncIO->outBuffer, cSize); + outFileSize += cSize; + strm.next_out = (Bytef*)syncIO->outBuffer; + strm.avail_out = (uInt)syncIO->outCapacity; + } } + if (srcFileSize == UTIL_FILESIZE_UNKNOWN) { + DISPLAYUPDATE_PROGRESS( + "\rRead : %u MB ==> %.2f%% ", + (unsigned)(inFileSize>>20), + (double)outFileSize/(double)inFileSize*100) + } else { + DISPLAYUPDATE_PROGRESS( + "\rRead : %u / %u MB ==> %.2f%% ", + (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20), + (double)outFileSize/(double)inFileSize*100); + } } while (1) { - ret = deflate(&strm, Z_FINISH); - { size_t const decompBytes = ress->dstBufferSize - strm.avail_out; - if (decompBytes) { - if (fwrite(ress->dstBuffer, 1, decompBytes, ress->dstFile) != decompBytes) 
- EXM_THROW(75, "Write error : %s", strerror(errno)); - outFileSize += decompBytes; - strm.next_out = (Bytef*)ress->dstBuffer; - strm.avail_out = (uInt)ress->dstBufferSize; - } } + int const ret = deflate(&strm, Z_FINISH); + { size_t const cSize = (size_t)((uInt)syncIO->outCapacity - strm.avail_out); + if (cSize) { + FIO_SyncCompressIO_commitOut(syncIO, syncIO->outBuffer, cSize); + outFileSize += cSize; + strm.next_out = (Bytef*)syncIO->outBuffer; + strm.avail_out = (uInt)syncIO->outCapacity; + } } if (ret == Z_STREAM_END) break; if (ret != Z_BUF_ERROR) EXM_THROW(77, "zstd: %s: deflate error %d \n", srcFileName, ret); } - ret = deflateEnd(&strm); - if (ret != Z_OK) - EXM_THROW(79, "zstd: %s: deflateEnd error %d \n", srcFileName, ret); + { int const ret = deflateEnd(&strm); + if (ret != Z_OK) { + EXM_THROW(79, "zstd: %s: deflateEnd error %d \n", srcFileName, ret); + } } *readsize = inFileSize; - + FIO_SyncCompressIO_finish(syncIO); return outFileSize; } #endif @@ -797,6 +1578,7 @@ FIO_compressLzmaFrame(cRess_t* ress, const char* srcFileName, U64 const srcFileSize, int compressionLevel, U64* readsize, int plain_lzma) { + FIO_SyncCompressIO* const syncIO = &ress->io; unsigned long long inFileSize = 0, outFileSize = 0; lzma_stream strm = LZMA_STREAM_INIT; lzma_action action = LZMA_RUN; @@ -808,56 +1590,62 @@ FIO_compressLzmaFrame(cRess_t* ress, if (plain_lzma) { lzma_options_lzma opt_lzma; if (lzma_lzma_preset(&opt_lzma, compressionLevel)) - EXM_THROW(71, "zstd: %s: lzma_lzma_preset error", srcFileName); + EXM_THROW(81, "zstd: %s: lzma_lzma_preset error", srcFileName); ret = lzma_alone_encoder(&strm, &opt_lzma); /* LZMA */ if (ret != LZMA_OK) - EXM_THROW(71, "zstd: %s: lzma_alone_encoder error %d", srcFileName, ret); + EXM_THROW(82, "zstd: %s: lzma_alone_encoder error %d", srcFileName, ret); } else { ret = lzma_easy_encoder(&strm, compressionLevel, LZMA_CHECK_CRC64); /* XZ */ if (ret != LZMA_OK) - EXM_THROW(71, "zstd: %s: lzma_easy_encoder error %d", srcFileName, ret); + EXM_THROW(83, "zstd: %s: lzma_easy_encoder error %d", srcFileName, ret); } + strm.next_out = (BYTE*)syncIO->outBuffer; + strm.avail_out = syncIO->outCapacity; strm.next_in = 0; strm.avail_in = 0; - strm.next_out = (BYTE*)ress->dstBuffer; - strm.avail_out = ress->dstBufferSize; while (1) { if (strm.avail_in == 0) { - size_t const inSize = fread(ress->srcBuffer, 1, ress->srcBufferSize, ress->srcFile); - if (inSize == 0) action = LZMA_FINISH; - inFileSize += inSize; - strm.next_in = (BYTE const*)ress->srcBuffer; - strm.avail_in = inSize; + size_t const added = FIO_SyncCompressIO_fillBuffer(syncIO, ZSTD_CStreamInSize()); + if (syncIO->srcBufferLoaded == 0) action = LZMA_FINISH; + inFileSize += added; + *readsize += added; + strm.next_in = (BYTE const*)syncIO->srcBuffer; + strm.avail_in = syncIO->srcBufferLoaded; } - ret = lzma_code(&strm, action); + { + size_t const availBefore = strm.avail_in; + ret = lzma_code(&strm, action); + FIO_SyncCompressIO_consumeBytes(syncIO, availBefore - strm.avail_in); + } if (ret != LZMA_OK && ret != LZMA_STREAM_END) - EXM_THROW(72, "zstd: %s: lzma_code encoding error %d", srcFileName, ret); - { size_t const compBytes = ress->dstBufferSize - strm.avail_out; + EXM_THROW(84, "zstd: %s: lzma_code encoding error %d", srcFileName, ret); + { size_t const compBytes = syncIO->outCapacity - strm.avail_out; if (compBytes) { - if (fwrite(ress->dstBuffer, 1, compBytes, ress->dstFile) != compBytes) - EXM_THROW(73, "Write error : %s", strerror(errno)); + FIO_SyncCompressIO_commitOut(syncIO, syncIO->outBuffer, 
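The xz/lzma path gets the same I/O treatment, and its four failure points, which previously all threw error code 71, now throw distinct codes 81-84, so a failure can be located from the exit status alone. The liblzma loop has the same fill/consume/commit shape; reduced to documented API against plain stdio (preset 6 and CRC64 are illustrative choices here, not the patch's configuration):

    #include <stdint.h>
    #include <stdio.h>
    #include <lzma.h>

    static int xz_copy(FILE* in, FILE* out)
    {
        uint8_t ibuf[1 << 16], obuf[1 << 16];
        lzma_stream strm = LZMA_STREAM_INIT;
        lzma_action action = LZMA_RUN;
        lzma_ret ret;
        if (lzma_easy_encoder(&strm, 6, LZMA_CHECK_CRC64) != LZMA_OK)
            return -1;
        strm.next_out = obuf; strm.avail_out = sizeof obuf;
        for (;;) {
            if (strm.avail_in == 0 && action == LZMA_RUN) {
                strm.avail_in = fread(ibuf, 1, sizeof ibuf, in);
                strm.next_in  = ibuf;
                if (strm.avail_in == 0) action = LZMA_FINISH;   /* EOF */
            }
            ret = lzma_code(&strm, action);
            if (strm.avail_out == 0 || ret == LZMA_STREAM_END) {
                fwrite(obuf, 1, sizeof obuf - strm.avail_out, out);
                strm.next_out = obuf; strm.avail_out = sizeof obuf;
            }
            if (ret == LZMA_STREAM_END) { lzma_end(&strm); return 0; }
            if (ret != LZMA_OK)         { lzma_end(&strm); return -1; }
        }
    }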
compBytes); outFileSize += compBytes; - strm.next_out = (BYTE*)ress->dstBuffer; - strm.avail_out = ress->dstBufferSize; + strm.next_out = (BYTE*)syncIO->outBuffer; + strm.avail_out = syncIO->outCapacity; } } if (srcFileSize == UTIL_FILESIZE_UNKNOWN) - DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%", + DISPLAYUPDATE_PROGRESS("\rRead : %u MB ==> %.2f%%", (unsigned)(inFileSize>>20), - (double)outFileSize/inFileSize*100) + (double)outFileSize/(double)inFileSize*100) else - DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%", + DISPLAYUPDATE_PROGRESS("\rRead : %u / %u MB ==> %.2f%%", (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20), - (double)outFileSize/inFileSize*100); + (double)outFileSize/(double)inFileSize*100); if (ret == LZMA_STREAM_END) break; } lzma_end(&strm); *readsize = inFileSize; + FIO_SyncCompressIO_finish(syncIO); + return outFileSize; } #endif @@ -877,6 +1665,7 @@ FIO_compressLz4Frame(cRess_t* ress, int compressionLevel, int checksumFlag, U64* readsize) { + FIO_SyncCompressIO* const syncIO = &ress->io; const size_t blockSize = FIO_LZ4_GetBlockSize_FromBlockId(LZ4F_max64KB); unsigned long long inFileSize = 0, outFileSize = 0; @@ -889,9 +1678,10 @@ FIO_compressLz4Frame(cRess_t* ress, memset(&prefs, 0, sizeof(prefs)); - assert(blockSize <= ress->srcBufferSize); + assert(blockSize <= syncIO->inCapacity); - prefs.autoFlush = 1; + /* autoflush off to mitigate a bug in lz4<=1.9.3 for compression level 12 */ + prefs.autoFlush = 0; prefs.compressionLevel = compressionLevel; prefs.frameInfo.blockMode = LZ4F_blockLinked; prefs.frameInfo.blockSizeID = LZ4F_max64KB; @@ -899,86 +1689,78 @@ FIO_compressLz4Frame(cRess_t* ress, #if LZ4_VERSION_NUMBER >= 10600 prefs.frameInfo.contentSize = (srcFileSize==UTIL_FILESIZE_UNKNOWN) ? 0 : srcFileSize; #endif - assert(LZ4F_compressBound(blockSize, &prefs) <= ress->dstBufferSize); + assert(LZ4F_compressBound(blockSize, &prefs) <= syncIO->outCapacity); { - size_t readSize; - size_t headerSize = LZ4F_compressBegin(ctx, ress->dstBuffer, ress->dstBufferSize, &prefs); + size_t headerSize = LZ4F_compressBegin(ctx, syncIO->outBuffer, syncIO->outCapacity, &prefs); if (LZ4F_isError(headerSize)) EXM_THROW(33, "File header generation failed : %s", LZ4F_getErrorName(headerSize)); - if (fwrite(ress->dstBuffer, 1, headerSize, ress->dstFile) != headerSize) - EXM_THROW(34, "Write error : %s (cannot write header)", strerror(errno)); + FIO_SyncCompressIO_commitOut(syncIO, syncIO->outBuffer, headerSize); outFileSize += headerSize; - /* Read first block */ - readSize = fread(ress->srcBuffer, (size_t)1, (size_t)blockSize, ress->srcFile); - inFileSize += readSize; + { + size_t const added = FIO_SyncCompressIO_fillBuffer(syncIO, blockSize); + inFileSize += added; + *readsize += added; + } - /* Main Loop */ - while (readSize>0) { - size_t const outSize = LZ4F_compressUpdate(ctx, - ress->dstBuffer, ress->dstBufferSize, - ress->srcBuffer, readSize, NULL); + while (syncIO->srcBufferLoaded) { + size_t const inSize = MIN(blockSize, syncIO->srcBufferLoaded); + size_t const outSize = LZ4F_compressUpdate(ctx, syncIO->outBuffer, syncIO->outCapacity, + syncIO->srcBuffer, inSize, NULL); if (LZ4F_isError(outSize)) EXM_THROW(35, "zstd: %s: lz4 compression failed : %s", srcFileName, LZ4F_getErrorName(outSize)); outFileSize += outSize; if (srcFileSize == UTIL_FILESIZE_UNKNOWN) { - DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%", + DISPLAYUPDATE_PROGRESS("\rRead : %u MB ==> %.2f%%", (unsigned)(inFileSize>>20), - (double)outFileSize/inFileSize*100) + (double)outFileSize/(double)inFileSize*100) } 
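Note the behavioural change buried in the lz4 hunk above: prefs.autoFlush flips from 1 to 0 to work around a bug in lz4 <= 1.9.3 at compression level 12, which means LZ4F_compressUpdate() may now buffer data internally and legitimately return 0 bytes. A minimal begin/update/end sketch using documented lz4frame.h API (lz4f_copy and its buffer sizing are illustrative, not the patch's code; LZ4F_compressBound() accounts for buffered data when autoFlush is off):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <lz4frame.h>

    static int lz4f_copy(FILE* in, FILE* out, int level)
    {
        size_t const chunkSize = 64 * 1024;
        LZ4F_preferences_t prefs;
        LZ4F_cctx* ctx;
        size_t dstCap, n, rd;
        void *src, *dst;

        memset(&prefs, 0, sizeof(prefs));
        prefs.compressionLevel = level;
        prefs.frameInfo.blockMode = LZ4F_blockLinked;
        prefs.autoFlush = 0;              /* as in the patch: lz4<=1.9.3 workaround */

        dstCap = LZ4F_compressBound(chunkSize, &prefs);  /* worst case per update */
        src = malloc(chunkSize);
        dst = malloc(dstCap);
        LZ4F_createCompressionContext(&ctx, LZ4F_VERSION);

        n = LZ4F_compressBegin(ctx, dst, dstCap, &prefs);   /* frame header */
        fwrite(dst, 1, n, out);
        while ((rd = fread(src, 1, chunkSize, in)) != 0) {
            n = LZ4F_compressUpdate(ctx, dst, dstCap, src, rd, NULL);
            fwrite(dst, 1, n, out);       /* n may be 0 with autoFlush == 0 */
        }
        n = LZ4F_compressEnd(ctx, dst, dstCap, NULL);       /* flush + end mark */
        fwrite(dst, 1, n, out);

        LZ4F_freeCompressionContext(ctx);
        free(src); free(dst);
        return 0;
    }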
else { - DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%", + DISPLAYUPDATE_PROGRESS("\rRead : %u / %u MB ==> %.2f%%", (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20), - (double)outFileSize/inFileSize*100); + (double)outFileSize/(double)inFileSize*100); } - /* Write Block */ - { size_t const sizeCheck = fwrite(ress->dstBuffer, 1, outSize, ress->dstFile); - if (sizeCheck != outSize) - EXM_THROW(36, "Write error : %s", strerror(errno)); - } + FIO_SyncCompressIO_commitOut(syncIO, syncIO->outBuffer, outSize); - /* Read next block */ - readSize = fread(ress->srcBuffer, (size_t)1, (size_t)blockSize, ress->srcFile); - inFileSize += readSize; + FIO_SyncCompressIO_consumeBytes(syncIO, inSize); + { + size_t const added = FIO_SyncCompressIO_fillBuffer(syncIO, blockSize); + inFileSize += added; + *readsize += added; + } } - if (ferror(ress->srcFile)) EXM_THROW(37, "Error reading %s ", srcFileName); - /* End of Stream mark */ - headerSize = LZ4F_compressEnd(ctx, ress->dstBuffer, ress->dstBufferSize, NULL); + headerSize = LZ4F_compressEnd(ctx, syncIO->outBuffer, syncIO->outCapacity, NULL); if (LZ4F_isError(headerSize)) EXM_THROW(38, "zstd: %s: lz4 end of file generation failed : %s", srcFileName, LZ4F_getErrorName(headerSize)); - { size_t const sizeCheck = fwrite(ress->dstBuffer, 1, headerSize, ress->dstFile); - if (sizeCheck != headerSize) - EXM_THROW(39, "Write error : %s (cannot write end of stream)", - strerror(errno)); - } + FIO_SyncCompressIO_commitOut(syncIO, syncIO->outBuffer, headerSize); outFileSize += headerSize; } - *readsize = inFileSize; LZ4F_freeCompressionContext(ctx); + FIO_SyncCompressIO_finish(syncIO); return outFileSize; } #endif - static unsigned long long -FIO_compressZstdFrame(FIO_prefs_t* const prefs, - const cRess_t* ressPtr, +FIO_compressZstdFrame(FIO_ctx_t* const fCtx, + FIO_prefs_t* const prefs, + cRess_t* ress, const char* srcFileName, U64 fileSize, int compressionLevel, U64* readsize) { - cRess_t const ress = *ressPtr; - FILE* const srcFile = ress.srcFile; - FILE* const dstFile = ress.dstFile; + FIO_SyncCompressIO* const syncIO = &ress->io; + U64 compressedfilesize = 0; ZSTD_EndDirective directive = ZSTD_e_continue; + U64 pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* stats */ ZSTD_frameProgression previous_zfp_update = { 0, 0, 0, 0, 0, 0 }; @@ -989,25 +1771,49 @@ FIO_compressZstdFrame(FIO_prefs_t* const prefs, unsigned inputPresented = 0; unsigned inputBlocked = 0; unsigned lastJobID = 0; + UTIL_time_t lastAdaptTime = UTIL_getTime(); + U64 const adaptEveryMicro = REFRESH_RATE; + + UTIL_HumanReadableSize_t const file_hrs = UTIL_makeHumanReadableSize(fileSize); DISPLAYLEVEL(6, "compression using zstd format \n"); /* init */ if (fileSize != UTIL_FILESIZE_UNKNOWN) { - CHECK(ZSTD_CCtx_setPledgedSrcSize(ress.cctx, fileSize)); + pledgedSrcSize = fileSize; + CHECK(ZSTD_CCtx_setPledgedSrcSize(ress->cctx, fileSize)); + } else if (prefs->streamSrcSize > 0) { + /* unknown source size; use the declared stream size */ + pledgedSrcSize = prefs->streamSrcSize; + CHECK( ZSTD_CCtx_setPledgedSrcSize(ress->cctx, prefs->streamSrcSize) ); + } + + { int windowLog; + UTIL_HumanReadableSize_t windowSize; + CHECK(ZSTD_CCtx_getParameter(ress->cctx, ZSTD_c_windowLog, &windowLog)); + if (windowLog == 0) { + if (prefs->ldmFlag) { + /* If long mode is set without a window size libzstd will set this size internally */ + windowLog = ZSTD_WINDOWLOG_LIMIT_DEFAULT; + } else { + const ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, fileSize, 0); + windowLog = 
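The new init block in FIO_compressZstdFrame() reports up front how much memory decompression of the produced frame will need. ZSTD_c_windowLog reads back as 0 until explicitly set, so the patch derives it from the compression level via ZSTD_getCParams(), or assumes the default limit when long mode is on. A condensed sketch assuming a known source size (report_window is illustrative; ZSTD_getCParams(), and on older libzstd also ZSTD_CCtx_getParameter(), sit behind ZSTD_STATIC_LINKING_ONLY):

    #define ZSTD_STATIC_LINKING_ONLY   /* for ZSTD_getCParams() */
    #include <stdio.h>
    #include <zstd.h>

    static void report_window(ZSTD_CCtx* cctx, int level, unsigned long long srcSize)
    {
        int windowLog = 0;
        ZSTD_CCtx_setPledgedSrcSize(cctx, srcSize);     /* declare size up front */
        ZSTD_CCtx_getParameter(cctx, ZSTD_c_windowLog, &windowLog);
        if (windowLog == 0) {                           /* 0 = "default, not set" */
            ZSTD_compressionParameters const cp = ZSTD_getCParams(level, srcSize, 0);
            windowLog = (int)cp.windowLog;
        }
        {   unsigned long long w = 1ULL << windowLog;
            if (w > srcSize) w = srcSize;   /* the window never exceeds the source */
            printf("decompression will require ~%llu B of memory\n", w);
        }
    }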
(int)cParams.windowLog; + } + } + windowSize = UTIL_makeHumanReadableSize(MAX(1ULL, MIN(1ULL << windowLog, pledgedSrcSize))); + DISPLAYLEVEL(4, "Decompression will require %.*f%s of memory\n", windowSize.precision, windowSize.value, windowSize.suffix); } - (void)srcFileName; /* Main compression loop */ do { size_t stillToFlush; /* Fill input Buffer */ - size_t const inSize = fread(ress.srcBuffer, (size_t)1, ress.srcBufferSize, srcFile); - ZSTD_inBuffer inBuff = { ress.srcBuffer, inSize, 0 }; + size_t const inSize = FIO_SyncCompressIO_fillBuffer(syncIO, ZSTD_CStreamInSize()); + ZSTD_inBuffer inBuff = setInBuffer( syncIO->srcBuffer, syncIO->srcBufferLoaded, 0 ); DISPLAYLEVEL(6, "fread %u bytes from source \n", (unsigned)inSize); *readsize += inSize; - if ((inSize == 0) || (*readsize == fileSize)) + if ((syncIO->srcBufferLoaded == 0) || (*readsize == fileSize)) directive = ZSTD_e_end; stillToFlush = 1; @@ -1015,9 +1821,10 @@ FIO_compressZstdFrame(FIO_prefs_t* const prefs, || (directive == ZSTD_e_end && stillToFlush != 0) ) { size_t const oldIPos = inBuff.pos; - ZSTD_outBuffer outBuff = { ress.dstBuffer, ress.dstBufferSize, 0 }; - size_t const toFlushNow = ZSTD_toFlushNow(ress.cctx); - CHECK_V(stillToFlush, ZSTD_compressStream2(ress.cctx, &outBuff, &inBuff, directive)); + ZSTD_outBuffer outBuff = setOutBuffer( syncIO->outBuffer, syncIO->outCapacity, 0 ); + size_t const toFlushNow = ZSTD_toFlushNow(ress->cctx); + CHECK_V(stillToFlush, ZSTD_compressStream2(ress->cctx, &outBuff, &inBuff, directive)); + FIO_SyncCompressIO_consumeBytes(syncIO, inBuff.pos - oldIPos); /* count stats */ inputPresented++; @@ -1026,134 +1833,153 @@ FIO_compressZstdFrame(FIO_prefs_t* const prefs, /* Write compressed stream */ DISPLAYLEVEL(6, "ZSTD_compress_generic(end:%u) => input pos(%u)<=(%u)size ; output generated %u bytes \n", - (unsigned)directive, (unsigned)inBuff.pos, (unsigned)inBuff.size, (unsigned)outBuff.pos); + (unsigned)directive, (unsigned)inBuff.pos, (unsigned)inBuff.size, (unsigned)outBuff.pos); if (outBuff.pos) { - size_t const sizeCheck = fwrite(ress.dstBuffer, 1, outBuff.pos, dstFile); - if (sizeCheck != outBuff.pos) - EXM_THROW(25, "Write error : %s (cannot write compressed block)", - strerror(errno)); + FIO_SyncCompressIO_commitOut(syncIO, syncIO->outBuffer, outBuff.pos); compressedfilesize += outBuff.pos; } - /* display notification; and adapt compression level */ - if (READY_FOR_UPDATE()) { - ZSTD_frameProgression const zfp = ZSTD_getFrameProgression(ress.cctx); - double const cShare = (double)zfp.produced / (zfp.consumed + !zfp.consumed/*avoid div0*/) * 100; + /* adaptive mode : statistics measurement and speed correction */ + if (prefs->adaptiveMode && UTIL_clockSpanMicro(lastAdaptTime) > adaptEveryMicro) { + ZSTD_frameProgression const zfp = ZSTD_getFrameProgression(ress->cctx); + + lastAdaptTime = UTIL_getTime(); + + /* check output speed */ + if (zfp.currentJobID > 1) { /* only possible if nbWorkers >= 1 */ + + unsigned long long newlyProduced = zfp.produced - previous_zfp_update.produced; + unsigned long long newlyFlushed = zfp.flushed - previous_zfp_update.flushed; + assert(zfp.produced >= previous_zfp_update.produced); + assert(prefs->nbWorkers >= 1); + + /* test if compression is blocked + * either because output is slow and all buffers are full + * or because input is slow and no job can start while waiting for at least one buffer to be filled. 
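Stripped of adaptation and progress reporting, the main loop above is the canonical ZSTD_compressStream2() pattern: feed ZSTD_e_continue until input is exhausted, then ZSTD_e_end until the return value (bytes still to flush) reaches 0. A self-contained sketch against plain FILE* handles rather than the patch's syncIO (compress_stream is illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    static size_t compress_stream(ZSTD_CCtx* cctx, FILE* in, FILE* out)
    {
        size_t const inCap  = ZSTD_CStreamInSize();
        size_t const outCap = ZSTD_CStreamOutSize();
        void* const ibuf = malloc(inCap);
        void* const obuf = malloc(outCap);
        size_t total = 0;
        int lastChunk = 0;
        do {
            size_t const rd = fread(ibuf, 1, inCap, in);
            ZSTD_inBuffer inB = { ibuf, rd, 0 };
            int finished;
            lastChunk = (rd < inCap);                    /* EOF */
            do {
                ZSTD_outBuffer outB = { obuf, outCap, 0 };
                size_t const remaining = ZSTD_compressStream2(cctx, &outB, &inB,
                                         lastChunk ? ZSTD_e_end : ZSTD_e_continue);
                if (ZSTD_isError(remaining)) { total = remaining; goto done; }
                fwrite(obuf, 1, outB.pos, out);
                total += outB.pos;
                /* on the last chunk, loop until the epilogue is fully flushed */
                finished = lastChunk ? (remaining == 0) : (inB.pos == inB.size);
            } while (!finished);
        } while (!lastChunk);
    done:
        free(ibuf); free(obuf);
        return total;   /* compressed size, or an error code */
    }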
+ * note : exclude starting part, since currentJobID > 1 */ + if ( (zfp.consumed == previous_zfp_update.consumed) /* no data compressed : no data available, or no more buffer to compress to, OR compression is really slow (compression of a single block is slower than update rate)*/ + && (zfp.nbActiveWorkers == 0) /* confirmed : no compression ongoing */ + ) { + DISPLAYLEVEL(6, "all buffers full : compression stopped => slow down \n") + speedChange = slower; + } - /* display progress notifications */ - if (g_display_prefs.displayLevel >= 3) { - DISPLAYUPDATE(3, "\r(L%i) Buffered :%4u MB - Consumed :%4u MB - Compressed :%4u MB => %.2f%% ", - compressionLevel, - (unsigned)((zfp.ingested - zfp.consumed) >> 20), - (unsigned)(zfp.consumed >> 20), - (unsigned)(zfp.produced >> 20), - cShare ); - } else { /* summarized notifications if == 2; */ - DISPLAYLEVEL(2, "\rRead : %u ", (unsigned)(zfp.consumed >> 20)); - if (fileSize != UTIL_FILESIZE_UNKNOWN) - DISPLAYLEVEL(2, "/ %u ", (unsigned)(fileSize >> 20)); - DISPLAYLEVEL(2, "MB ==> %2.f%% ", cShare); - DELAY_NEXT_UPDATE(); - } + previous_zfp_update = zfp; - /* adaptive mode : statistics measurement and speed correction */ - if (prefs->adaptiveMode) { - - /* check output speed */ - if (zfp.currentJobID > 1) { /* only possible if nbWorkers >= 1 */ - - unsigned long long newlyProduced = zfp.produced - previous_zfp_update.produced; - unsigned long long newlyFlushed = zfp.flushed - previous_zfp_update.flushed; - assert(zfp.produced >= previous_zfp_update.produced); - assert(prefs->nbWorkers >= 1); - - /* test if compression is blocked - * either because output is slow and all buffers are full - * or because input is slow and no job can start while waiting for at least one buffer to be filled. - * note : exclude starting part, since currentJobID > 1 */ - if ( (zfp.consumed == previous_zfp_update.consumed) /* no data compressed : no data available, or no more buffer to compress to, OR compression is really slow (compression of a single block is slower than update rate)*/ - && (zfp.nbActiveWorkers == 0) /* confirmed : no compression ongoing */ - ) { - DISPLAYLEVEL(6, "all buffers full : compression stopped => slow down \n") - speedChange = slower; - } + if ( (newlyProduced > (newlyFlushed * 9 / 8)) /* compression produces more data than output can flush (though production can be spiky, due to work unit : (N==4)*block sizes) */ + && (flushWaiting == 0) /* flush speed was never slowed by lack of production, so it's operating at max capacity */ + ) { + DISPLAYLEVEL(6, "compression faster than flush (%llu > %llu), and flushed was never slowed down by lack of production => slow down \n", newlyProduced, newlyFlushed); + speedChange = slower; + } + flushWaiting = 0; + } - previous_zfp_update = zfp; + /* course correct only if there is at least one new job completed */ + if (zfp.currentJobID > lastJobID) { + DISPLAYLEVEL(6, "compression level adaptation check \n") - if ( (newlyProduced > (newlyFlushed * 9 / 8)) /* compression produces more data than output can flush (though production can be spiky, due to work unit : (N==4)*block sizes) */ - && (flushWaiting == 0) /* flush speed was never slowed by lack of production, so it's operating at max capacity */ - ) { - DISPLAYLEVEL(6, "compression faster than flush (%llu > %llu), and flushed was never slowed down by lack of production => slow down \n", newlyProduced, newlyFlushed); + /* check input speed */ + if (zfp.currentJobID > (unsigned)(prefs->nbWorkers+1)) { /* warm up period, to fill all workers */ + if 
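The 9/8 and 33/32 factors in the adaptation tests are hysteresis margins: production must outpace flushing by more than 12.5% before the level is raised, and flush/ingest must stay within roughly 3% of production/consumption before it is lowered. Condensed, the output-side test reads as follows (names follow ZSTD_frameProgression; output_check is an illustrative reduction, not the patch's code):

    typedef enum { noChange, slower, faster } speedChange_e;

    /* output-side adaptation test; deltas are since the previous sample */
    static speedChange_e output_check(unsigned long long newlyProduced,
                                      unsigned long long newlyFlushed,
                                      unsigned nbActiveWorkers,
                                      int anyConsumed, int flushWaiting)
    {
        if (!anyConsumed && nbActiveWorkers == 0)
            return slower;   /* all buffers full: compression stalled on output */
        if (newlyProduced > newlyFlushed * 9 / 8 && !flushWaiting)
            return slower;   /* e.g. 90 MB produced vs 72 MB flushed: 72*9/8 = 81 < 90 */
        return noChange;
    }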
(inputBlocked <= 0) { + DISPLAYLEVEL(6, "input is never blocked => input is slower than ingestion \n"); speedChange = slower; + } else if (speedChange == noChange) { + unsigned long long newlyIngested = zfp.ingested - previous_zfp_correction.ingested; + unsigned long long newlyConsumed = zfp.consumed - previous_zfp_correction.consumed; + unsigned long long newlyProduced = zfp.produced - previous_zfp_correction.produced; + unsigned long long newlyFlushed = zfp.flushed - previous_zfp_correction.flushed; + previous_zfp_correction = zfp; + assert(inputPresented > 0); + DISPLAYLEVEL(6, "input blocked %u/%u(%.2f) - ingested:%u vs %u:consumed - flushed:%u vs %u:produced \n", + inputBlocked, inputPresented, (double)inputBlocked/inputPresented*100, + (unsigned)newlyIngested, (unsigned)newlyConsumed, + (unsigned)newlyFlushed, (unsigned)newlyProduced); + if ( (inputBlocked > inputPresented / 8) /* input is waiting often, because input buffers is full : compression or output too slow */ + && (newlyFlushed * 33 / 32 > newlyProduced) /* flush everything that is produced */ + && (newlyIngested * 33 / 32 > newlyConsumed) /* input speed as fast or faster than compression speed */ + ) { + DISPLAYLEVEL(6, "recommend faster as in(%llu) >= (%llu)comp(%llu) <= out(%llu) \n", + newlyIngested, newlyConsumed, newlyProduced, newlyFlushed); + speedChange = faster; + } } - flushWaiting = 0; + inputBlocked = 0; + inputPresented = 0; } - /* course correct only if there is at least one new job completed */ - if (zfp.currentJobID > lastJobID) { - DISPLAYLEVEL(6, "compression level adaptation check \n") - - /* check input speed */ - if (zfp.currentJobID > (unsigned)(prefs->nbWorkers+1)) { /* warm up period, to fill all workers */ - if (inputBlocked <= 0) { - DISPLAYLEVEL(6, "input is never blocked => input is slower than ingestion \n"); - speedChange = slower; - } else if (speedChange == noChange) { - unsigned long long newlyIngested = zfp.ingested - previous_zfp_correction.ingested; - unsigned long long newlyConsumed = zfp.consumed - previous_zfp_correction.consumed; - unsigned long long newlyProduced = zfp.produced - previous_zfp_correction.produced; - unsigned long long newlyFlushed = zfp.flushed - previous_zfp_correction.flushed; - previous_zfp_correction = zfp; - assert(inputPresented > 0); - DISPLAYLEVEL(6, "input blocked %u/%u(%.2f) - ingested:%u vs %u:consumed - flushed:%u vs %u:produced \n", - inputBlocked, inputPresented, (double)inputBlocked/inputPresented*100, - (unsigned)newlyIngested, (unsigned)newlyConsumed, - (unsigned)newlyFlushed, (unsigned)newlyProduced); - if ( (inputBlocked > inputPresented / 8) /* input is waiting often, because input buffers is full : compression or output too slow */ - && (newlyFlushed * 33 / 32 > newlyProduced) /* flush everything that is produced */ - && (newlyIngested * 33 / 32 > newlyConsumed) /* input speed as fast or faster than compression speed */ - ) { - DISPLAYLEVEL(6, "recommend faster as in(%llu) >= (%llu)comp(%llu) <= out(%llu) \n", - newlyIngested, newlyConsumed, newlyProduced, newlyFlushed); - speedChange = faster; - } - } - inputBlocked = 0; - inputPresented = 0; - } + if (speedChange == slower) { + DISPLAYLEVEL(6, "slower speed , higher compression \n") + compressionLevel ++; + if (compressionLevel > ZSTD_maxCLevel()) compressionLevel = ZSTD_maxCLevel(); + if (compressionLevel > prefs->maxAdaptLevel) compressionLevel = prefs->maxAdaptLevel; + compressionLevel += (compressionLevel == 0); /* skip 0 */ + ZSTD_CCtx_setParameter(ress->cctx, ZSTD_c_compressionLevel, 
compressionLevel); + } + if (speedChange == faster) { + DISPLAYLEVEL(6, "faster speed , lighter compression \n") + compressionLevel --; + if (compressionLevel < prefs->minAdaptLevel) compressionLevel = prefs->minAdaptLevel; + compressionLevel -= (compressionLevel == 0); /* skip 0 */ + ZSTD_CCtx_setParameter(ress->cctx, ZSTD_c_compressionLevel, compressionLevel); + } + speedChange = noChange; - if (speedChange == slower) { - DISPLAYLEVEL(6, "slower speed , higher compression \n") - compressionLevel ++; - if (compressionLevel > ZSTD_maxCLevel()) compressionLevel = ZSTD_maxCLevel(); - if (compressionLevel > prefs->maxAdaptLevel) compressionLevel = prefs->maxAdaptLevel; - compressionLevel += (compressionLevel == 0); /* skip 0 */ - ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_compressionLevel, compressionLevel); - } - if (speedChange == faster) { - DISPLAYLEVEL(6, "faster speed , lighter compression \n") - compressionLevel --; - if (compressionLevel < prefs->minAdaptLevel) compressionLevel = prefs->minAdaptLevel; - compressionLevel -= (compressionLevel == 0); /* skip 0 */ - ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_compressionLevel, compressionLevel); - } - speedChange = noChange; + lastJobID = zfp.currentJobID; + } /* if (zfp.currentJobID > lastJobID) */ + } /* if (prefs->adaptiveMode && UTIL_clockSpanMicro(lastAdaptTime) > adaptEveryMicro) */ + + /* display notification */ + if (SHOULD_DISPLAY_PROGRESS() && READY_FOR_UPDATE()) { + ZSTD_frameProgression const zfp = ZSTD_getFrameProgression(ress->cctx); + double const cShare = (double)zfp.produced / (double)(zfp.consumed + !zfp.consumed/*avoid div0*/) * 100; + UTIL_HumanReadableSize_t const buffered_hrs = UTIL_makeHumanReadableSize(zfp.ingested - zfp.consumed); + UTIL_HumanReadableSize_t const consumed_hrs = UTIL_makeHumanReadableSize(zfp.consumed); + UTIL_HumanReadableSize_t const produced_hrs = UTIL_makeHumanReadableSize(zfp.produced); - lastJobID = zfp.currentJobID; - } /* if (zfp.currentJobID > lastJobID) */ - } /* if (g_adaptiveMode) */ - } /* if (READY_FOR_UPDATE()) */ + DELAY_NEXT_UPDATE(); + + /* display progress notifications */ + DISPLAY_PROGRESS("\r%79s\r", ""); /* Clear out the current displayed line */ + if (g_display_prefs.displayLevel >= 3) { + /* Verbose progress update */ + DISPLAY_PROGRESS( + "(L%i) Buffered:%5.*f%s - Consumed:%5.*f%s - Compressed:%5.*f%s => %.2f%% ", + compressionLevel, + buffered_hrs.precision, buffered_hrs.value, buffered_hrs.suffix, + consumed_hrs.precision, consumed_hrs.value, consumed_hrs.suffix, + produced_hrs.precision, produced_hrs.value, produced_hrs.suffix, + cShare ); + } else { + /* Require level 2 or forcibly displayed progress counter for summarized updates */ + if (fCtx->nbFilesTotal > 1) { + size_t srcFileNameSize = strlen(srcFileName); + /* Ensure that the string we print is roughly the same size each time */ + if (srcFileNameSize > 18) { + const char* truncatedSrcFileName = srcFileName + srcFileNameSize - 15; + DISPLAY_PROGRESS("Compress: %u/%u files. Current: ...%s ", + fCtx->currFileIdx+1, fCtx->nbFilesTotal, truncatedSrcFileName); + } else { + DISPLAY_PROGRESS("Compress: %u/%u files. 
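For multi-file runs the progress line now keeps a roughly constant width by eliding long file names from the left. A printf-based equivalent of the DISPLAY_PROGRESS calls above (show_current is illustrative):

    #include <stdio.h>
    #include <string.h>

    static void show_current(unsigned idx, unsigned total, const char* name)
    {
        size_t const len = strlen(name);
        if (len > 18)   /* keep the last 15 chars, prefixed with "..." */
            printf("Compress: %u/%u files. Current: ...%s ", idx, total, name + len - 15);
        else
            printf("Compress: %u/%u files. Current: %*s ", idx, total, (int)(18 - len), name);
    }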
Current: %*s ", + fCtx->currFileIdx+1, fCtx->nbFilesTotal, (int)(18-srcFileNameSize), srcFileName); + } + } + DISPLAY_PROGRESS("Read:%6.*f%4s ", consumed_hrs.precision, consumed_hrs.value, consumed_hrs.suffix); + if (fileSize != UTIL_FILESIZE_UNKNOWN) + DISPLAY_PROGRESS("/%6.*f%4s", file_hrs.precision, file_hrs.value, file_hrs.suffix); + DISPLAY_PROGRESS(" ==> %2.f%%", cShare); + } + } /* if (SHOULD_DISPLAY_PROGRESS() && READY_FOR_UPDATE()) */ } /* while ((inBuff.pos != inBuff.size) */ } while (directive != ZSTD_e_end); - if (ferror(srcFile)) { - EXM_THROW(26, "Read error : I/O error"); - } if (fileSize != UTIL_FILESIZE_UNKNOWN && *readsize != fileSize) { EXM_THROW(27, "Read error : Incomplete read : %llu / %llu B", (unsigned long long)*readsize, (unsigned long long)fileSize); } + FIO_SyncCompressIO_finish(syncIO); + return compressedfilesize; } @@ -1163,8 +1989,9 @@ FIO_compressZstdFrame(FIO_prefs_t* const prefs, * 1 : missing or pb opening srcFileName */ static int -FIO_compressFilename_internal(FIO_prefs_t* const prefs, - cRess_t ress, +FIO_compressFilename_internal(FIO_ctx_t* const fCtx, + FIO_prefs_t* const prefs, + cRess_t* ress, const char* dstFileName, const char* srcFileName, int compressionLevel) { @@ -1173,18 +2000,18 @@ FIO_compressFilename_internal(FIO_prefs_t* const prefs, U64 readsize = 0; U64 compressedfilesize = 0; U64 const fileSize = UTIL_getFileSize(srcFileName); - DISPLAYLEVEL(5, "%s: %u bytes \n", srcFileName, (unsigned)fileSize); + DISPLAYLEVEL(5, "%s: %llu bytes \n", srcFileName, (unsigned long long)fileSize); /* compression format selection */ switch (prefs->compressionType) { default: case FIO_zstdCompression: - compressedfilesize = FIO_compressZstdFrame(prefs, &ress, srcFileName, fileSize, compressionLevel, &readsize); + compressedfilesize = FIO_compressZstdFrame(fCtx, prefs, ress, srcFileName, fileSize, compressionLevel, &readsize); break; case FIO_gzipCompression: #ifdef ZSTD_GZCOMPRESS - compressedfilesize = FIO_compressGzFrame(&ress, srcFileName, fileSize, compressionLevel, &readsize); + compressedfilesize = FIO_compressGzFrame(ress, srcFileName, fileSize, compressionLevel, &readsize); #else (void)compressionLevel; EXM_THROW(20, "zstd: %s: file cannot be compressed as gzip (zstd compiled without ZSTD_GZCOMPRESS) -- ignored \n", @@ -1195,7 +2022,7 @@ FIO_compressFilename_internal(FIO_prefs_t* const prefs, case FIO_xzCompression: case FIO_lzmaCompression: #ifdef ZSTD_LZMACOMPRESS - compressedfilesize = FIO_compressLzmaFrame(&ress, srcFileName, fileSize, compressionLevel, &readsize, prefs->compressionType==FIO_lzmaCompression); + compressedfilesize = FIO_compressLzmaFrame(ress, srcFileName, fileSize, compressionLevel, &readsize, prefs->compressionType==FIO_lzmaCompression); #else (void)compressionLevel; EXM_THROW(20, "zstd: %s: file cannot be compressed as xz/lzma (zstd compiled without ZSTD_LZMACOMPRESS) -- ignored \n", @@ -1205,7 +2032,7 @@ FIO_compressFilename_internal(FIO_prefs_t* const prefs, case FIO_lz4Compression: #ifdef ZSTD_LZ4COMPRESS - compressedfilesize = FIO_compressLz4Frame(&ress, srcFileName, fileSize, compressionLevel, prefs->checksumFlag, &readsize); + compressedfilesize = FIO_compressLz4Frame(ress, srcFileName, fileSize, compressionLevel, prefs->checksumFlag, &readsize); #else (void)compressionLevel; EXM_THROW(20, "zstd: %s: file cannot be compressed as lz4 (zstd compiled without ZSTD_LZ4COMPRESS) -- ignored \n", @@ -1215,12 +2042,27 @@ FIO_compressFilename_internal(FIO_prefs_t* const prefs, } /* Status */ - DISPLAYLEVEL(2, "\r%79s\r", ""); - 
DISPLAYLEVEL(2,"%-20s :%6.2f%% (%6llu => %6llu bytes, %s) \n", - srcFileName, - (double)compressedfilesize / (readsize+(!readsize)/*avoid div by zero*/) * 100, - (unsigned long long)readsize, (unsigned long long) compressedfilesize, - dstFileName); + fCtx->totalBytesInput += (size_t)readsize; + fCtx->totalBytesOutput += (size_t)compressedfilesize; + DISPLAY_PROGRESS("\r%79s\r", ""); + if (FIO_shouldDisplayFileSummary(fCtx)) { + UTIL_HumanReadableSize_t hr_isize = UTIL_makeHumanReadableSize((U64) readsize); + UTIL_HumanReadableSize_t hr_osize = UTIL_makeHumanReadableSize((U64) compressedfilesize); + if (readsize == 0) { + DISPLAY_SUMMARY("%-20s : (%6.*f%s => %6.*f%s, %s) \n", + srcFileName, + hr_isize.precision, hr_isize.value, hr_isize.suffix, + hr_osize.precision, hr_osize.value, hr_osize.suffix, + dstFileName); + } else { + DISPLAY_SUMMARY("%-20s :%6.2f%% (%6.*f%s => %6.*f%s, %s) \n", + srcFileName, + (double)compressedfilesize / (double)readsize * 100, + hr_isize.precision, hr_isize.value, hr_isize.suffix, + hr_osize.precision, hr_osize.value, hr_osize.suffix, + dstFileName); + } + } /* Elapsed Time and CPU Load */ { clock_t const cpuEnd = clock(); @@ -1236,7 +2078,7 @@ FIO_compressFilename_internal(FIO_prefs_t* const prefs, /*! FIO_compressFilename_dstFile() : - * open dstFileName, or pass-through if ress.dstFile != NULL, + * open dstFileName, or pass-through if ress.file != NULL, * then start compression with FIO_compressFilename_internal(). * Manages source removal (--rm) and file permissions transfer. * note : ress.srcFile must be != NULL, @@ -1244,199 +2086,473 @@ FIO_compressFilename_internal(FIO_prefs_t* const prefs, * @return : 0 : compression completed correctly, * 1 : pb */ -static int FIO_compressFilename_dstFile(FIO_prefs_t* const prefs, - cRess_t ress, +static int FIO_compressFilename_dstFile(FIO_ctx_t* const fCtx, + FIO_prefs_t* const prefs, + cRess_t* ress, const char* dstFileName, const char* srcFileName, + const stat_t* srcFileStat, int compressionLevel) { int closeDstFile = 0; int result; - stat_t statbuf; - int transfer_permissions = 0; + int transferStat = 0; + int dstFd = -1; - assert(ress.srcFile != NULL); + if (ress->io.dstFile == NULL) { + int dstFileInitialPermissions = DEFAULT_FILE_PERMISSIONS; + if ( strcmp (srcFileName, stdinmark) + && strcmp (dstFileName, stdoutmark) + && UTIL_isRegularFileStat(srcFileStat) ) { + transferStat = 1; + dstFileInitialPermissions = TEMPORARY_FILE_PERMISSIONS; + } - if (ress.dstFile == NULL) { closeDstFile = 1; - DISPLAYLEVEL(6, "FIO_compressFilename_dstFile: opening dst: %s", dstFileName); - ress.dstFile = FIO_openDstFile(prefs, srcFileName, dstFileName); - if (ress.dstFile==NULL) return 1; /* could not open dstFileName */ - /* Must only be added after FIO_openDstFile() succeeds. - * Otherwise we may delete the destination file if it already exists, - * and the user presses Ctrl-C when asked if they wish to overwrite. - */ + DISPLAYLEVEL(6, "FIO_compressFilename_dstFile: opening dst: %s \n", dstFileName); + { + FILE *dstFile = FIO_openDstFile(fCtx, prefs, srcFileName, dstFileName, dstFileInitialPermissions); + if (dstFile==NULL) return 1; /* could not open dstFileName */ + dstFd = fileno(dstFile); + FIO_SyncCompressIO_setDst(&ress->io, dstFile); + } + /* Must only be added after FIO_openDstFile() succeeds. 
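The destination-file handling above is now a two-step affair: regular-file outputs are created with temporary permissions, and the source's mode is transferred onto the still-open descriptor (UTIL_setFDStat) only after compression succeeds, with timestamps applied after close (UTIL_utime); on failure the partial artefact is removed. Roughly, in Linux-flavoured POSIX terms (this illustrates the idea only, not the patch's UTIL_* helpers; dstName, srcStat and ok are placeholders):

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int fd = open(dstName, O_WRONLY | O_CREAT | O_TRUNC, 0600 /* temporary */);
    /* ... compress into fd ... */
    if (ok)
        fchmod(fd, srcStat.st_mode & 07777);   /* transfer mode while still open */
    close(fd);
    if (ok) {
        struct timespec ts[2];
        ts[0] = srcStat.st_atim;               /* atime */
        ts[1] = srcStat.st_mtim;               /* mtime */
        utimensat(AT_FDCWD, dstName, ts, 0);   /* transfer timestamps after close */
    } else {
        unlink(dstName);                       /* drop the partial output */
    }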
*/ addHandler(dstFileName); - - if ( strcmp (srcFileName, stdinmark) - && UTIL_getFileStat(srcFileName, &statbuf)) - transfer_permissions = 1; } - result = FIO_compressFilename_internal(prefs, ress, dstFileName, srcFileName, compressionLevel); + result = FIO_compressFilename_internal(fCtx, prefs, ress, dstFileName, srcFileName, compressionLevel); if (closeDstFile) { - FILE* const dstFile = ress.dstFile; - ress.dstFile = NULL; - clearHandler(); - if (fclose(dstFile)) { /* error closing dstFile */ + if (transferStat) { + UTIL_setFDStat(dstFd, dstFileName, srcFileStat); + } + + DISPLAYLEVEL(6, "FIO_compressFilename_dstFile: closing dst: %s \n", dstFileName); + if (FIO_SyncCompressIO_closeDst(&ress->io)) { /* error closing file */ DISPLAYLEVEL(1, "zstd: %s: %s \n", dstFileName, strerror(errno)); result=1; } - if ( (result != 0) /* operation failure */ - && strcmp(dstFileName, nulmark) /* special case : don't remove() /dev/null */ - && strcmp(dstFileName, stdoutmark) /* special case : don't remove() stdout */ - ) { - FIO_remove(dstFileName); /* remove compression artefact; note don't do anything special if remove() fails */ - } else if ( strcmp(dstFileName, stdoutmark) - && strcmp(dstFileName, nulmark) - && transfer_permissions) { - UTIL_setFileStat(dstFileName, &statbuf); + + if (transferStat) { + UTIL_utime(dstFileName, srcFileStat); + } + + if ( (result != 0) + && strcmp(dstFileName, stdoutmark) ) { + FIO_removeFile(dstFileName); } } return result; } +/* List used to compare file extensions (used with --exclude-compressed flag) +* Different from the suffixList and should only apply to ZSTD compress operationResult +*/ +static const char *compressedFileExtensions[] = { + ZSTD_EXTENSION, + TZSTD_EXTENSION, + GZ_EXTENSION, + TGZ_EXTENSION, + LZMA_EXTENSION, + XZ_EXTENSION, + TXZ_EXTENSION, + LZ4_EXTENSION, + TLZ4_EXTENSION, + ".7z", + ".aa3", + ".aac", + ".aar", + ".ace", + ".alac", + ".ape", + ".apk", + ".apng", + ".arc", + ".archive", + ".arj", + ".ark", + ".asf", + ".avi", + ".avif", + ".ba", + ".br", + ".bz2", + ".cab", + ".cdx", + ".chm", + ".cr2", + ".divx", + ".dmg", + ".dng", + ".docm", + ".docx", + ".dotm", + ".dotx", + ".dsft", + ".ear", + ".eftx", + ".emz", + ".eot", + ".epub", + ".f4v", + ".flac", + ".flv", + ".gho", + ".gif", + ".gifv", + ".gnp", + ".iso", + ".jar", + ".jpeg", + ".jpg", + ".jxl", + ".lz", + ".lzh", + ".m4a", + ".m4v", + ".mkv", + ".mov", + ".mp2", + ".mp3", + ".mp4", + ".mpa", + ".mpc", + ".mpe", + ".mpeg", + ".mpg", + ".mpl", + ".mpv", + ".msi", + ".odp", + ".ods", + ".odt", + ".ogg", + ".ogv", + ".otp", + ".ots", + ".ott", + ".pea", + ".png", + ".pptx", + ".qt", + ".rar", + ".s7z", + ".sfx", + ".sit", + ".sitx", + ".sqx", + ".svgz", + ".swf", + ".tbz2", + ".tib", + ".tlz", + ".vob", + ".war", + ".webm", + ".webp", + ".wma", + ".wmv", + ".woff", + ".woff2", + ".wvl", + ".xlsx", + ".xpi", + ".xps", + ".zip", + ".zipx", + ".zoo", + ".zpaq", + NULL +}; /*! 
FIO_compressFilename_srcFile() : * @return : 0 : compression completed correctly, * 1 : missing or pb opening srcFileName */ static int -FIO_compressFilename_srcFile(FIO_prefs_t* const prefs, - cRess_t ress, +FIO_compressFilename_srcFile(FIO_ctx_t* const fCtx, + FIO_prefs_t* const prefs, + cRess_t* ress, const char* dstFileName, const char* srcFileName, int compressionLevel) { int result; + FILE* srcFile; + stat_t srcFileStat; + U64 fileSize = UTIL_FILESIZE_UNKNOWN; + DISPLAYLEVEL(6, "FIO_compressFilename_srcFile: %s \n", srcFileName); - /* ensure src is not a directory */ - if (UTIL_isDirectory(srcFileName)) { - DISPLAYLEVEL(1, "zstd: %s is a directory -- ignored \n", srcFileName); - return 1; + if (strcmp(srcFileName, stdinmark)) { + if (UTIL_stat(srcFileName, &srcFileStat)) { + /* failure to stat at all is handled during opening */ + + /* ensure src is not a directory */ + if (UTIL_isDirectoryStat(&srcFileStat)) { + DISPLAYLEVEL(1, "zstd: %s is a directory -- ignored \n", srcFileName); + return 1; + } + + /* ensure src is not the same as dict (if present) */ + if (ress->dictFileName != NULL && UTIL_isSameFileStat(srcFileName, ress->dictFileName, &srcFileStat, &ress->dictFileStat)) { + DISPLAYLEVEL(1, "zstd: cannot use %s as an input file and dictionary \n", srcFileName); + return 1; + } + } } - /* ensure src is not the same as dict (if present) */ - if (ress.dictFileName != NULL && UTIL_isSameFile(srcFileName, ress.dictFileName)) { - DISPLAYLEVEL(1, "zstd: cannot use %s as an input file and dictionary \n", srcFileName); - return 1; + /* Check if "srcFile" is compressed. Only done if --exclude-compressed flag is used + * YES => ZSTD will skip compression of the file and will return 0. + * NO => ZSTD will resume with compress operation. + */ + if (prefs->excludeCompressedFiles == 1 && UTIL_isCompressedFile(srcFileName, compressedFileExtensions)) { + DISPLAYLEVEL(4, "File is already compressed : %s \n", srcFileName); + return 0; } - ress.srcFile = FIO_openSrcFile(srcFileName); - if (ress.srcFile == NULL) return 1; /* srcFile could not be opened */ + srcFile = FIO_openSrcFile(prefs, srcFileName, &srcFileStat); + if (srcFile == NULL) return 1; /* srcFile could not be opened */ + + if (strcmp(srcFileName, stdinmark)) /* Stdin doesn't have stats */ + fileSize = UTIL_getFileSizeStat(&srcFileStat); + (void)fileSize; - result = FIO_compressFilename_dstFile(prefs, ress, dstFileName, srcFileName, compressionLevel); + FIO_SyncCompressIO_setSrc(&ress->io, srcFile); + result = FIO_compressFilename_dstFile( + fCtx, prefs, ress, + dstFileName, srcFileName, + &srcFileStat, compressionLevel); + FIO_SyncCompressIO_clearSrc(&ress->io); + + if (srcFile != NULL && fclose(srcFile)) { + DISPLAYLEVEL(1, "zstd: %s: %s \n", srcFileName, strerror(errno)); + return 1; + } - fclose(ress.srcFile); - ress.srcFile = NULL; - if ( prefs->removeSrcFile /* --rm */ - && result == 0 /* success */ - && strcmp(srcFileName, stdinmark) /* exception : don't erase stdin */ + if ( prefs->removeSrcFile /* --rm */ + && result == 0 /* success */ + && strcmp(srcFileName, stdinmark) /* exception : don't erase stdin */ ) { /* We must clear the handler, since after this point calling it would * delete both the source and destination files. 
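--exclude-compressed consults the extension table above through UTIL_isCompressedFile(). A minimal suffix test of the same shape (illustrative and case-sensitive; not the util.c implementation):

    #include <string.h>

    static int has_compressed_extension(const char* name, const char* const* exts)
    {
        size_t const nlen = strlen(name);
        for (; *exts != NULL; exts++) {
            size_t const elen = strlen(*exts);
            if (nlen >= elen && strcmp(name + nlen - elen, *exts) == 0)
                return 1;   /* name ends with a known compressed extension */
        }
        return 0;
    }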
*/ clearHandler(); - if (FIO_remove(srcFileName)) + if (FIO_removeFile(srcFileName)) EXM_THROW(1, "zstd: %s: %s", srcFileName, strerror(errno)); } return result; } +static const char* +checked_index(const char* options[], size_t length, size_t index) { + assert(index < length); + /* Necessary to avoid warnings since -O3 will omit the above `assert` */ + (void) length; + return options[index]; +} + +#define INDEX(options, index) checked_index((options), sizeof(options) / sizeof(char*), (size_t)(index)) -int FIO_compressFilename(FIO_prefs_t* const prefs, - const char* dstFileName, const char* srcFileName, - const char* dictFileName, int compressionLevel, - ZSTD_compressionParameters comprParams) +void FIO_displayCompressionParameters(const FIO_prefs_t* prefs) { - U64 const fileSize = UTIL_getFileSize(srcFileName); - U64 const srcSize = (fileSize == UTIL_FILESIZE_UNKNOWN) ? ZSTD_CONTENTSIZE_UNKNOWN : fileSize; + static const char* formatOptions[5] = {ZSTD_EXTENSION, GZ_EXTENSION, XZ_EXTENSION, + LZMA_EXTENSION, LZ4_EXTENSION}; + static const char* sparseOptions[3] = {" --no-sparse", "", " --sparse"}; + static const char* checkSumOptions[3] = {" --no-check", "", " --check"}; + static const char* rowMatchFinderOptions[3] = {"", " --no-row-match-finder", " --row-match-finder"}; + static const char* compressLiteralsOptions[3] = {"", " --compress-literals", " --no-compress-literals"}; + + assert(g_display_prefs.displayLevel >= 4); + + DISPLAY("--format=%s", formatOptions[prefs->compressionType]); + DISPLAY("%s", INDEX(sparseOptions, prefs->sparseFileSupport)); + DISPLAY("%s", prefs->dictIDFlag ? "" : " --no-dictID"); + DISPLAY("%s", INDEX(checkSumOptions, prefs->checksumFlag)); + DISPLAY(" --jobsize=%d", prefs->jobSize); + if (prefs->adaptiveMode) + DISPLAY(" --adapt=min=%d,max=%d", prefs->minAdaptLevel, prefs->maxAdaptLevel); + DISPLAY("%s", INDEX(rowMatchFinderOptions, prefs->useRowMatchFinder)); + DISPLAY("%s", prefs->rsyncable ? " --rsyncable" : ""); + if (prefs->streamSrcSize) + DISPLAY(" --stream-size=%u", (unsigned) prefs->streamSrcSize); + if (prefs->srcSizeHint) + DISPLAY(" --size-hint=%d", prefs->srcSizeHint); + if (prefs->targetCBlockSize) + DISPLAY(" --target-compressed-block-size=%u", (unsigned) prefs->targetCBlockSize); + DISPLAY("%s", INDEX(compressLiteralsOptions, prefs->literalCompressionMode)); + DISPLAY(" --memory=%u", prefs->memLimit ? prefs->memLimit : 128 MB); + DISPLAY(" --threads=%d", prefs->nbWorkers); + DISPLAY("%s", prefs->excludeCompressedFiles ? " --exclude-compressed" : ""); + DISPLAY(" --%scontent-size", prefs->contentSize ? "" : "no-"); + DISPLAY("\n"); +} - cRess_t const ress = FIO_createCResources(prefs, dictFileName, compressionLevel, srcSize, comprParams); - int const result = FIO_compressFilename_srcFile(prefs, ress, dstFileName, srcFileName, compressionLevel); +#undef INDEX +int FIO_compressFilename(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, const char* dstFileName, + const char* srcFileName, const char* dictFileName, + int compressionLevel, ZSTD_compressionParameters comprParams) +{ + cRess_t ress = FIO_createCResources(prefs, dictFileName, UTIL_getFileSize(srcFileName), compressionLevel, comprParams); + int const result = FIO_compressFilename_srcFile(fCtx, prefs, &ress, dstFileName, srcFileName, compressionLevel); + +#define DISPLAY_LEVEL_DEFAULT 2 - FIO_freeCResources(ress); + FIO_freeCResources(&ress); return result; } - /* FIO_determineCompressedName() : * create a destination filename for compressed srcFileName. * @return a pointer to it. 
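checked_index() exists because tri-state prefs fields (values 0/1/2) are used directly as indices into 3-entry option tables: the assert catches an out-of-range value in debug builds, and the (void)length keeps optimized builds quiet once the assert compiles away. Typical use, as in FIO_displayCompressionParameters() above:

    /* tri-state prefs (0/1/2) index 3-entry option tables */
    static const char* sparseOptions[3] = {" --no-sparse", "", " --sparse"};
    DISPLAY("%s", INDEX(sparseOptions, prefs->sparseFileSupport));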
* This function never returns an error (it may abort() in case of pb) */ static const char* -FIO_determineCompressedName(const char* srcFileName, const char* suffix) +FIO_determineCompressedName(const char* srcFileName, const char* outDirName, const char* suffix) { static size_t dfnbCapacity = 0; static char* dstFileNameBuffer = NULL; /* using static allocation : this function cannot be multi-threaded */ + char* outDirFilename = NULL; + size_t sfnSize = strlen(srcFileName); + size_t const srcSuffixLen = strlen(suffix); + + if(!strcmp(srcFileName, stdinmark)) { + return stdoutmark; + } - size_t const sfnSize = strlen(srcFileName); - size_t const suffixSize = strlen(suffix); + if (outDirName) { + outDirFilename = FIO_createFilename_fromOutDir(srcFileName, outDirName, srcSuffixLen); + sfnSize = strlen(outDirFilename); + assert(outDirFilename != NULL); + } - if (dfnbCapacity <= sfnSize+suffixSize+1) { + if (dfnbCapacity <= sfnSize+srcSuffixLen+1) { /* resize buffer for dstName */ free(dstFileNameBuffer); - dfnbCapacity = sfnSize + suffixSize + 30; + dfnbCapacity = sfnSize + srcSuffixLen + 30; dstFileNameBuffer = (char*)malloc(dfnbCapacity); if (!dstFileNameBuffer) { EXM_THROW(30, "zstd: %s", strerror(errno)); - } } + } + } assert(dstFileNameBuffer != NULL); - memcpy(dstFileNameBuffer, srcFileName, sfnSize); - memcpy(dstFileNameBuffer+sfnSize, suffix, suffixSize+1 /* Include terminating null */); + if (outDirFilename) { + memcpy(dstFileNameBuffer, outDirFilename, sfnSize); + free(outDirFilename); + } else { + memcpy(dstFileNameBuffer, srcFileName, sfnSize); + } + memcpy(dstFileNameBuffer+sfnSize, suffix, srcSuffixLen+1 /* Include terminating null */); return dstFileNameBuffer; } +static unsigned long long FIO_getLargestFileSize(const char** inFileNames, unsigned nbFiles) +{ + size_t i; + unsigned long long fileSize, maxFileSize = 0; + for (i = 0; i < nbFiles; i++) { + fileSize = UTIL_getFileSize(inFileNames[i]); + maxFileSize = fileSize > maxFileSize ? fileSize : maxFileSize; + } + return maxFileSize; +} /* FIO_compressMultipleFilenames() : * compress nbFiles files - * into one destination (outFileName) - * or into one file each (outFileName == NULL, but suffix != NULL). + * into either one destination (outFileName), + * or into one file each (outFileName == NULL, but suffix != NULL), + * or into a destination folder (specified with -O) */ -int FIO_compressMultipleFilenames(FIO_prefs_t* const prefs, - const char** inFileNamesTable, unsigned nbFiles, +int FIO_compressMultipleFilenames(FIO_ctx_t* const fCtx, + FIO_prefs_t* const prefs, + const char** inFileNamesTable, + const char* outMirroredRootDirName, + const char* outDirName, const char* outFileName, const char* suffix, const char* dictFileName, int compressionLevel, ZSTD_compressionParameters comprParams) { + int status; int error = 0; - U64 const firstFileSize = UTIL_getFileSize(inFileNamesTable[0]); - U64 const firstSrcSize = (firstFileSize == UTIL_FILESIZE_UNKNOWN) ? ZSTD_CONTENTSIZE_UNKNOWN : firstFileSize; - U64 const srcSize = (nbFiles != 1) ? 
ZSTD_CONTENTSIZE_UNKNOWN : firstSrcSize ; - cRess_t ress = FIO_createCResources(prefs, dictFileName, compressionLevel, srcSize, comprParams); + cRess_t ress = FIO_createCResources(prefs, dictFileName, + FIO_getLargestFileSize(inFileNamesTable, (unsigned)fCtx->nbFilesTotal), + compressionLevel, comprParams); /* init */ assert(outFileName != NULL || suffix != NULL); - if (outFileName != NULL) { /* output into a single destination (stdout typically) */ - ress.dstFile = FIO_openDstFile(prefs, NULL, outFileName); - if (ress.dstFile == NULL) { /* could not open outFileName */ + FILE *dstFile; + if (FIO_multiFilesConcatWarning(fCtx, prefs, outFileName, 1 /* displayLevelCutoff */)) { + FIO_freeCResources(&ress); + return 1; + } + dstFile = FIO_openDstFile(fCtx, prefs, NULL, outFileName, DEFAULT_FILE_PERMISSIONS); + if (dstFile == NULL) { /* could not open outFileName */ error = 1; } else { - unsigned u; - for (u=0; ucurrFileIdx < fCtx->nbFilesTotal; ++fCtx->currFileIdx) { + status = FIO_compressFilename_srcFile(fCtx, prefs, &ress, outFileName, inFileNamesTable[fCtx->currFileIdx], compressionLevel); + if (!status) fCtx->nbFilesProcessed++; + error |= status; + } + if (FIO_SyncCompressIO_closeDst(&ress.io)) EXM_THROW(29, "Write error (%s) : cannot properly close %s", strerror(errno), outFileName); - ress.dstFile = NULL; } } else { - unsigned u; - for (u=0; unbFilesTotal, outMirroredRootDirName); + + for (; fCtx->currFileIdx < fCtx->nbFilesTotal; ++fCtx->currFileIdx) { + const char* const srcFileName = inFileNamesTable[fCtx->currFileIdx]; + const char* dstFileName = NULL; + if (outMirroredRootDirName) { + char* validMirroredDirName = UTIL_createMirroredDestDirName(srcFileName, outMirroredRootDirName); + if (validMirroredDirName) { + dstFileName = FIO_determineCompressedName(srcFileName, validMirroredDirName, suffix); + free(validMirroredDirName); + } else { + DISPLAYLEVEL(2, "zstd: --output-dir-mirror cannot compress '%s' into '%s' \n", srcFileName, outMirroredRootDirName); + error=1; + continue; + } + } else { + dstFileName = FIO_determineCompressedName(srcFileName, outDirName, suffix); /* cannot fail */ + } + status = FIO_compressFilename_srcFile(fCtx, prefs, &ress, dstFileName, srcFileName, compressionLevel); + if (!status) fCtx->nbFilesProcessed++; + error |= status; + } + + if (outDirName) + FIO_checkFilenameCollisions(inFileNamesTable , (unsigned)fCtx->nbFilesTotal); + } + + if (FIO_shouldDisplayMultipleFileSummary(fCtx)) { + UTIL_HumanReadableSize_t hr_isize = UTIL_makeHumanReadableSize((U64) fCtx->totalBytesInput); + UTIL_HumanReadableSize_t hr_osize = UTIL_makeHumanReadableSize((U64) fCtx->totalBytesOutput); + + DISPLAY_PROGRESS("\r%79s\r", ""); + if (fCtx->totalBytesInput == 0) { + DISPLAY_SUMMARY("%3d files compressed : (%6.*f%4s => %6.*f%4s)\n", + fCtx->nbFilesProcessed, + hr_isize.precision, hr_isize.value, hr_isize.suffix, + hr_osize.precision, hr_osize.value, hr_osize.suffix); + } else { + DISPLAY_SUMMARY("%3d files compressed : %.2f%% (%6.*f%4s => %6.*f%4s)\n", + fCtx->nbFilesProcessed, + (double)fCtx->totalBytesOutput/((double)fCtx->totalBytesInput)*100, + hr_isize.precision, hr_isize.value, hr_isize.suffix, + hr_osize.precision, hr_osize.value, hr_osize.suffix); + } + } - FIO_freeCResources(ress); + FIO_freeCResources(&ress); return error; } @@ -1450,315 +2566,228 @@ int FIO_compressMultipleFilenames(FIO_prefs_t* const prefs, * Decompression ***************************************************************************/ typedef struct { - void* srcBuffer; - size_t srcBufferSize; - 
size_t srcBufferLoaded; - void* dstBuffer; - size_t dstBufferSize; + FIO_Dict_t dict; ZSTD_DStream* dctx; - FILE* dstFile; + WritePoolCtx_t *writeCtx; + ReadPoolCtx_t *readCtx; } dRess_t; static dRess_t FIO_createDResources(FIO_prefs_t* const prefs, const char* dictFileName) { + int useMMap = prefs->mmapDict == ZSTD_ps_enable; + int forceNoUseMMap = prefs->mmapDict == ZSTD_ps_disable; + stat_t statbuf; dRess_t ress; + memset(&statbuf, 0, sizeof(statbuf)); memset(&ress, 0, sizeof(ress)); + FIO_getDictFileStat(dictFileName, &statbuf); + + if (prefs->patchFromMode){ + U64 const dictSize = UTIL_getFileSizeStat(&statbuf); + useMMap |= dictSize > prefs->memLimit; + FIO_adjustMemLimitForPatchFromMode(prefs, dictSize, 0 /* just use the dict size */); + } + /* Allocation */ ress.dctx = ZSTD_createDStream(); if (ress.dctx==NULL) EXM_THROW(60, "Error: %s : can't create ZSTD_DStream", strerror(errno)); CHECK( ZSTD_DCtx_setMaxWindowSize(ress.dctx, prefs->memLimit) ); - ress.srcBufferSize = ZSTD_DStreamInSize(); - ress.srcBuffer = malloc(ress.srcBufferSize); - ress.dstBufferSize = ZSTD_DStreamOutSize(); - ress.dstBuffer = malloc(ress.dstBufferSize); - if (!ress.srcBuffer || !ress.dstBuffer) - EXM_THROW(61, "Allocation error : not enough memory"); + CHECK( ZSTD_DCtx_setParameter(ress.dctx, ZSTD_d_forceIgnoreChecksum, !prefs->checksumFlag)); /* dictionary */ - { void* dictBuffer; - size_t const dictBufferSize = FIO_createDictBuffer(&dictBuffer, dictFileName); - CHECK( ZSTD_initDStream_usingDict(ress.dctx, dictBuffer, dictBufferSize) ); - free(dictBuffer); + { + FIO_dictBufferType_t dictBufferType = (useMMap && !forceNoUseMMap) ? FIO_mmapDict : FIO_mallocDict; + FIO_initDict(&ress.dict, dictFileName, prefs, &statbuf, dictBufferType); + + CHECK(ZSTD_DCtx_reset(ress.dctx, ZSTD_reset_session_only) ); + + if (prefs->patchFromMode){ + CHECK(ZSTD_DCtx_refPrefix(ress.dctx, ress.dict.dictBuffer, ress.dict.dictBufferSize)); + } else { + CHECK(ZSTD_DCtx_loadDictionary_byReference(ress.dctx, ress.dict.dictBuffer, ress.dict.dictBufferSize)); + } } + ress.writeCtx = AIO_WritePool_create(prefs, ZSTD_DStreamOutSize()); + ress.readCtx = AIO_ReadPool_create(prefs, ZSTD_DStreamInSize()); return ress; } static void FIO_freeDResources(dRess_t ress) { + FIO_freeDict(&(ress.dict)); CHECK( ZSTD_freeDStream(ress.dctx) ); - free(ress.srcBuffer); - free(ress.dstBuffer); -} - - -/** FIO_fwriteSparse() : -* @return : storedSkips, to be provided to next call to FIO_fwriteSparse() of LZ4IO_fwriteSparseEnd() */ -static unsigned FIO_fwriteSparse(FIO_prefs_t* const prefs, FILE* file, const void* buffer, size_t bufferSize, unsigned storedSkips) -{ - const size_t* const bufferT = (const size_t*)buffer; /* Buffer is supposed malloc'ed, hence aligned on size_t */ - size_t bufferSizeT = bufferSize / sizeof(size_t); - const size_t* const bufferTEnd = bufferT + bufferSizeT; - const size_t* ptrT = bufferT; - static const size_t segmentSizeT = (32 KB) / sizeof(size_t); /* 0-test re-attempted every 32 KB */ - - if (!prefs->sparseFileSupport) { /* normal write */ - size_t const sizeCheck = fwrite(buffer, 1, bufferSize, file); - if (sizeCheck != bufferSize) - EXM_THROW(70, "Write error : %s (cannot write decoded block)", - strerror(errno)); - return 0; - } - - /* avoid int overflow */ - if (storedSkips > 1 GB) { - int const seekResult = LONG_SEEK(file, 1 GB, SEEK_CUR); - if (seekResult != 0) - EXM_THROW(71, "1 GB skip error (sparse file support)"); - storedSkips -= 1 GB; - } - - while (ptrT < bufferTEnd) { - size_t seg0SizeT = segmentSizeT; - size_t 
nb0T; - - /* count leading zeros */ - if (seg0SizeT > bufferSizeT) seg0SizeT = bufferSizeT; - bufferSizeT -= seg0SizeT; - for (nb0T=0; (nb0T < seg0SizeT) && (ptrT[nb0T] == 0); nb0T++) ; - storedSkips += (unsigned)(nb0T * sizeof(size_t)); - - if (nb0T != seg0SizeT) { /* not all 0s */ - int const seekResult = LONG_SEEK(file, storedSkips, SEEK_CUR); - if (seekResult) EXM_THROW(72, "Sparse skip error ; try --no-sparse"); - storedSkips = 0; - seg0SizeT -= nb0T; - ptrT += nb0T; - { size_t const sizeCheck = fwrite(ptrT, sizeof(size_t), seg0SizeT, file); - if (sizeCheck != seg0SizeT) - EXM_THROW(73, "Write error : cannot write decoded block"); - } } - ptrT += seg0SizeT; - } - - { static size_t const maskT = sizeof(size_t)-1; - if (bufferSize & maskT) { - /* size not multiple of sizeof(size_t) : implies end of block */ - const char* const restStart = (const char*)bufferTEnd; - const char* restPtr = restStart; - size_t restSize = bufferSize & maskT; - const char* const restEnd = restStart + restSize; - for ( ; (restPtr < restEnd) && (*restPtr == 0); restPtr++) ; - storedSkips += (unsigned) (restPtr - restStart); - if (restPtr != restEnd) { - int seekResult = LONG_SEEK(file, storedSkips, SEEK_CUR); - if (seekResult) - EXM_THROW(74, "Sparse skip error ; try --no-sparse"); - storedSkips = 0; - { size_t const sizeCheck = fwrite(restPtr, 1, (size_t)(restEnd - restPtr), file); - if (sizeCheck != (size_t)(restEnd - restPtr)) - EXM_THROW(75, "Write error : cannot write decoded end of block"); - } } } } - - return storedSkips; -} - -static void -FIO_fwriteSparseEnd(FIO_prefs_t* const prefs, FILE* file, unsigned storedSkips) -{ - if (storedSkips>0) { - assert(prefs->sparseFileSupport > 0); /* storedSkips>0 implies sparse support is enabled */ - (void)prefs; /* assert can be disabled, in which case prefs becomes unused */ - if (LONG_SEEK(file, storedSkips-1, SEEK_CUR) != 0) - EXM_THROW(69, "Final skip error (sparse file support)"); - /* last zero must be explicitly written, - * so that skipped ones get implicitly translated as zero by FS */ - { const char lastZeroByte[1] = { 0 }; - if (fwrite(lastZeroByte, 1, 1, file) != 1) - EXM_THROW(69, "Write error : cannot write last zero"); - } } + AIO_WritePool_free(ress.writeCtx); + AIO_ReadPool_free(ress.readCtx); } - -/** FIO_passThrough() : just copy input into output, for compatibility with gzip -df mode - @return : 0 (no error) */ -static int FIO_passThrough(FIO_prefs_t* const prefs, - FILE* foutput, FILE* finput, - void* buffer, size_t bufferSize, - size_t alreadyLoaded) +/* FIO_passThrough() : just copy input into output, for compatibility with gzip -df mode + * @return : 0 (no error) */ +static int FIO_passThrough(dRess_t *ress) { - size_t const blockSize = MIN(64 KB, bufferSize); - size_t readFromInput = 1; - unsigned storedSkips = 0; - - /* assumption : ress->srcBufferLoaded bytes already loaded and stored within buffer */ - { size_t const sizeCheck = fwrite(buffer, 1, alreadyLoaded, foutput); - if (sizeCheck != alreadyLoaded) { - DISPLAYLEVEL(1, "Pass-through write error \n"); - return 1; - } } - - while (readFromInput) { - readFromInput = fread(buffer, 1, blockSize, finput); - storedSkips = FIO_fwriteSparse(prefs, foutput, buffer, readFromInput, storedSkips); + size_t const blockSize = MIN(MIN(64 KB, ZSTD_DStreamInSize()), ZSTD_DStreamOutSize()); + IOJob_t *writeJob = AIO_WritePool_acquireJob(ress->writeCtx); + AIO_ReadPool_fillBuffer(ress->readCtx, blockSize); + + while(ress->readCtx->srcBufferLoaded) { + size_t writeSize; + writeSize = MIN(blockSize, 
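FIO_fwriteSparse()/FIO_fwriteSparseEnd() are deleted here because sparse writing now lives behind AIO_WritePool_sparseWriteEnd(), but the technique is unchanged: seek past runs of zeros instead of writing them, letting the filesystem create holes, and write the final zero byte explicitly so the file reaches its full length. A byte-wise simplification (the deleted code scans size_t-sized words and re-tests every 32 KB; sparse_write is illustrative):

    #include <stdio.h>

    static void sparse_write(FILE* f, const unsigned char* p, size_t n, long* skip)
    {
        size_t i = 0;
        while (i < n) {
            if (p[i] == 0) { (*skip)++; i++; continue; }        /* defer zeros */
            if (*skip) { fseek(f, *skip, SEEK_CUR); *skip = 0; }/* hole instead */
            fwrite(&p[i], 1, 1, f);
            i++;
        }
    }
    /* at end of stream, if skip > 0: seek skip-1 forward and write one 0 byte,
     * so the file length is correct even when it ends in a hole */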
ress->readCtx->srcBufferLoaded); + assert(writeSize <= writeJob->bufferSize); + memcpy(writeJob->buffer, ress->readCtx->srcBuffer, writeSize); + writeJob->usedBufferSize = writeSize; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); + AIO_ReadPool_consumeBytes(ress->readCtx, writeSize); + AIO_ReadPool_fillBuffer(ress->readCtx, blockSize); } - - FIO_fwriteSparseEnd(prefs, foutput, storedSkips); + assert(ress->readCtx->reachedEof); + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return 0; } -/* FIO_highbit64() : - * gives position of highest bit. - * note : only works for v > 0 ! - */ -static unsigned FIO_highbit64(unsigned long long v) -{ - unsigned count = 0; - assert(v != 0); - v >>= 1; - while (v) { v >>= 1; count++; } - return count; -} - /* FIO_zstdErrorHelp() : * detailed error message when requested window size is too large */ -static void FIO_zstdErrorHelp(FIO_prefs_t* const prefs, dRess_t* ress, size_t err, char const* srcFileName) +static void +FIO_zstdErrorHelp(const FIO_prefs_t* const prefs, + const dRess_t* ress, + size_t err, + const char* srcFileName) { - ZSTD_frameHeader header; + ZSTD_FrameHeader header; /* Help message only for one specific error */ if (ZSTD_getErrorCode(err) != ZSTD_error_frameParameter_windowTooLarge) return; /* Try to decode the frame header */ - err = ZSTD_getFrameHeader(&header, ress->srcBuffer, ress->srcBufferLoaded); + err = ZSTD_getFrameHeader(&header, ress->readCtx->srcBuffer, ress->readCtx->srcBufferLoaded); if (err == 0) { unsigned long long const windowSize = header.windowSize; unsigned const windowLog = FIO_highbit64(windowSize) + ((windowSize & (windowSize - 1)) != 0); assert(prefs->memLimit > 0); - DISPLAYLEVEL(1, "%s : Window size larger than maximum : %llu > %u\n", + DISPLAYLEVEL(1, "%s : Window size larger than maximum : %llu > %u \n", srcFileName, windowSize, prefs->memLimit); if (windowLog <= ZSTD_WINDOWLOG_MAX) { unsigned const windowMB = (unsigned)((windowSize >> 20) + ((windowSize & ((1 MB) - 1)) != 0)); assert(windowSize < (U64)(1ULL << 52)); /* ensure now overflow for windowMB */ - DISPLAYLEVEL(1, "%s : Use --long=%u or --memory=%uMB\n", + DISPLAYLEVEL(1, "%s : Use --long=%u or --memory=%uMB \n", srcFileName, windowLog, windowMB); return; - } - } - DISPLAYLEVEL(1, "%s : Window log larger than ZSTD_WINDOWLOG_MAX=%u; not supported\n", + } } + DISPLAYLEVEL(1, "%s : Window log larger than ZSTD_WINDOWLOG_MAX=%u; not supported \n", srcFileName, ZSTD_WINDOWLOG_MAX); } /** FIO_decompressFrame() : * @return : size of decoded zstd frame, or an error code -*/ + */ #define FIO_ERROR_FRAME_DECODING ((unsigned long long)(-2)) -static unsigned long long FIO_decompressZstdFrame( - FIO_prefs_t* const prefs, - dRess_t* ress, - FILE* finput, - const char* srcFileName, - U64 alreadyDecoded) +static unsigned long long +FIO_decompressZstdFrame(FIO_ctx_t* const fCtx, dRess_t* ress, + const FIO_prefs_t* const prefs, + const char* srcFileName, + U64 alreadyDecoded) /* for multi-frames streams */ { U64 frameSize = 0; - U32 storedSkips = 0; - - size_t const srcFileLength = strlen(srcFileName); - if (srcFileLength>20) srcFileName += srcFileLength-20; /* display last 20 characters only */ + const char* srcFName20 = srcFileName; + IOJob_t* writeJob = AIO_WritePool_acquireJob(ress->writeCtx); + assert(writeJob); + + /* display last 20 characters only when not --verbose */ + { size_t const srcFileLength = strlen(srcFileName); + if ((srcFileLength>20) && (g_display_prefs.displayLevel<3)) + srcFName20 += 
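FIO_zstdErrorHelp() turns the frame's window size into a concrete --long suggestion: FIO_highbit64() yields the position of the highest set bit, and the (windowSize & (windowSize - 1)) != 0 term rounds up when the size is not an exact power of two. For example:

    /* position of the highest set bit; requires v > 0 */
    static unsigned highbit64(unsigned long long v)
    {
        unsigned n = 0;
        while (v >>= 1) n++;
        return n;
    }
    /* windowSize = 3 MB = 2^21 + 2^20 : highbit64() = 21, and 3 MB is not a
     * power of two, so the suggested window log is 22 and windowMB is 3,
     * i.e. "Use --long=22 or --memory=3MB" */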
srcFileLength-20; + } - ZSTD_resetDStream(ress->dctx); + ZSTD_DCtx_reset(ress->dctx, ZSTD_reset_session_only); /* Header loading : ensures ZSTD_getFrameHeader() will succeed */ - { size_t const toDecode = ZSTD_FRAMEHEADERSIZE_MAX; - if (ress->srcBufferLoaded < toDecode) { - size_t const toRead = toDecode - ress->srcBufferLoaded; - void* const startPosition = (char*)ress->srcBuffer + ress->srcBufferLoaded; - ress->srcBufferLoaded += fread(startPosition, 1, toRead, finput); - } } + AIO_ReadPool_fillBuffer(ress->readCtx, ZSTD_FRAMEHEADERSIZE_MAX); /* Main decompression Loop */ while (1) { - ZSTD_inBuffer inBuff = { ress->srcBuffer, ress->srcBufferLoaded, 0 }; - ZSTD_outBuffer outBuff= { ress->dstBuffer, ress->dstBufferSize, 0 }; + ZSTD_inBuffer inBuff = setInBuffer( ress->readCtx->srcBuffer, ress->readCtx->srcBufferLoaded, 0 ); + ZSTD_outBuffer outBuff= setOutBuffer( writeJob->buffer, writeJob->bufferSize, 0 ); size_t const readSizeHint = ZSTD_decompressStream(ress->dctx, &outBuff, &inBuff); + UTIL_HumanReadableSize_t const hrs = UTIL_makeHumanReadableSize(alreadyDecoded+frameSize); if (ZSTD_isError(readSizeHint)) { DISPLAYLEVEL(1, "%s : Decoding error (36) : %s \n", srcFileName, ZSTD_getErrorName(readSizeHint)); FIO_zstdErrorHelp(prefs, ress, readSizeHint, srcFileName); + AIO_WritePool_releaseIoJob(writeJob); return FIO_ERROR_FRAME_DECODING; } /* Write block */ - storedSkips = FIO_fwriteSparse(prefs, ress->dstFile, ress->dstBuffer, outBuff.pos, storedSkips); + writeJob->usedBufferSize = outBuff.pos; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); frameSize += outBuff.pos; - DISPLAYUPDATE(2, "\r%-20.20s : %u MB... ", - srcFileName, (unsigned)((alreadyDecoded+frameSize)>>20) ); - - if (inBuff.pos > 0) { - memmove(ress->srcBuffer, (char*)ress->srcBuffer + inBuff.pos, inBuff.size - inBuff.pos); - ress->srcBufferLoaded -= inBuff.pos; + if (fCtx->nbFilesTotal > 1) { + DISPLAYUPDATE_PROGRESS( + "\rDecompress: %2u/%2u files. Current: %s : %.*f%s... ", + fCtx->currFileIdx+1, fCtx->nbFilesTotal, srcFName20, hrs.precision, hrs.value, hrs.suffix); + } else { + DISPLAYUPDATE_PROGRESS("\r%-20.20s : %.*f%s... 
", + srcFName20, hrs.precision, hrs.value, hrs.suffix); } + AIO_ReadPool_consumeBytes(ress->readCtx, inBuff.pos); + if (readSizeHint == 0) break; /* end of frame */ - if (inBuff.size != inBuff.pos) { - DISPLAYLEVEL(1, "%s : Decoding error (37) : should consume entire input \n", - srcFileName); - return FIO_ERROR_FRAME_DECODING; - } /* Fill input buffer */ - { size_t const toDecode = MIN(readSizeHint, ress->srcBufferSize); /* support large skippable frames */ - if (ress->srcBufferLoaded < toDecode) { - size_t const toRead = toDecode - ress->srcBufferLoaded; /* > 0 */ - void* const startPosition = (char*)ress->srcBuffer + ress->srcBufferLoaded; - size_t const readSize = fread(startPosition, 1, toRead, finput); + { size_t const toDecode = MIN(readSizeHint, ZSTD_DStreamInSize()); /* support large skippable frames */ + if (ress->readCtx->srcBufferLoaded < toDecode) { + size_t const readSize = AIO_ReadPool_fillBuffer(ress->readCtx, toDecode); if (readSize==0) { DISPLAYLEVEL(1, "%s : Read error (39) : premature end \n", - srcFileName); + srcFileName); + AIO_WritePool_releaseIoJob(writeJob); return FIO_ERROR_FRAME_DECODING; } - ress->srcBufferLoaded += readSize; - } } } + } } } - FIO_fwriteSparseEnd(prefs, ress->dstFile, storedSkips); + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return frameSize; } #ifdef ZSTD_GZDECOMPRESS -static unsigned long long FIO_decompressGzFrame(dRess_t* ress, - FILE* srcFile, const char* srcFileName) +static unsigned long long +FIO_decompressGzFrame(dRess_t* ress, const char* srcFileName) { unsigned long long outFileSize = 0; z_stream strm; int flush = Z_NO_FLUSH; int decodingError = 0; + IOJob_t *writeJob = NULL; strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = 0; strm.avail_in = 0; - /* see http://www.zlib.net/manual.html */ + /* see https://www.zlib.net/manual.html */ if (inflateInit2(&strm, 15 /* maxWindowLogSize */ + 16 /* gzip only */) != Z_OK) return FIO_ERROR_FRAME_DECODING; - strm.next_out = (Bytef*)ress->dstBuffer; - strm.avail_out = (uInt)ress->dstBufferSize; - strm.avail_in = (uInt)ress->srcBufferLoaded; - strm.next_in = (z_const unsigned char*)ress->srcBuffer; + writeJob = AIO_WritePool_acquireJob(ress->writeCtx); + strm.next_out = (Bytef*)writeJob->buffer; + strm.avail_out = (uInt)writeJob->bufferSize; + strm.avail_in = (uInt)ress->readCtx->srcBufferLoaded; + strm.next_in = (z_const unsigned char*)ress->readCtx->srcBuffer; for ( ; ; ) { int ret; if (strm.avail_in == 0) { - ress->srcBufferLoaded = fread(ress->srcBuffer, 1, ress->srcBufferSize, srcFile); - if (ress->srcBufferLoaded == 0) flush = Z_FINISH; - strm.next_in = (z_const unsigned char*)ress->srcBuffer; - strm.avail_in = (uInt)ress->srcBufferLoaded; + AIO_ReadPool_consumeAndRefill(ress->readCtx); + if (ress->readCtx->srcBufferLoaded == 0) flush = Z_FINISH; + strm.next_in = (z_const unsigned char*)ress->readCtx->srcBuffer; + strm.avail_in = (uInt)ress->readCtx->srcBufferLoaded; } ret = inflate(&strm, flush); if (ret == Z_BUF_ERROR) { @@ -1769,41 +2798,42 @@ static unsigned long long FIO_decompressGzFrame(dRess_t* ress, DISPLAYLEVEL(1, "zstd: %s: inflate error %d \n", srcFileName, ret); decodingError = 1; break; } - { size_t const decompBytes = ress->dstBufferSize - strm.avail_out; + { size_t const decompBytes = writeJob->bufferSize - strm.avail_out; if (decompBytes) { - if (fwrite(ress->dstBuffer, 1, decompBytes, ress->dstFile) != decompBytes) { - DISPLAYLEVEL(1, "zstd: %s \n", strerror(errno)); - decodingError = 1; break; - } + 
writeJob->usedBufferSize = decompBytes; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += decompBytes; - strm.next_out = (Bytef*)ress->dstBuffer; - strm.avail_out = (uInt)ress->dstBufferSize; + strm.next_out = (Bytef*)writeJob->buffer; + strm.avail_out = (uInt)writeJob->bufferSize; } } if (ret == Z_STREAM_END) break; } - if (strm.avail_in > 0) - memmove(ress->srcBuffer, strm.next_in, strm.avail_in); - ress->srcBufferLoaded = strm.avail_in; + AIO_ReadPool_consumeBytes(ress->readCtx, ress->readCtx->srcBufferLoaded - strm.avail_in); + if ( (inflateEnd(&strm) != Z_OK) /* release resources ; error detected */ && (decodingError==0) ) { DISPLAYLEVEL(1, "zstd: %s: inflateEnd error \n", srcFileName); decodingError = 1; } + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return decodingError ? FIO_ERROR_FRAME_DECODING : outFileSize; } #endif - #ifdef ZSTD_LZMADECOMPRESS -static unsigned long long FIO_decompressLzmaFrame(dRess_t* ress, FILE* srcFile, const char* srcFileName, int plain_lzma) +static unsigned long long +FIO_decompressLzmaFrame(dRess_t* ress, + const char* srcFileName, int plain_lzma) { unsigned long long outFileSize = 0; lzma_stream strm = LZMA_STREAM_INIT; lzma_action action = LZMA_RUN; lzma_ret initRet; int decodingError = 0; + IOJob_t *writeJob = NULL; strm.next_in = 0; strm.avail_in = 0; @@ -1820,18 +2850,19 @@ static unsigned long long FIO_decompressLzmaFrame(dRess_t* ress, FILE* srcFile, return FIO_ERROR_FRAME_DECODING; } - strm.next_out = (BYTE*)ress->dstBuffer; - strm.avail_out = ress->dstBufferSize; - strm.next_in = (BYTE const*)ress->srcBuffer; - strm.avail_in = ress->srcBufferLoaded; + writeJob = AIO_WritePool_acquireJob(ress->writeCtx); + strm.next_out = (BYTE*)writeJob->buffer; + strm.avail_out = writeJob->bufferSize; + strm.next_in = (BYTE const*)ress->readCtx->srcBuffer; + strm.avail_in = ress->readCtx->srcBufferLoaded; for ( ; ; ) { lzma_ret ret; if (strm.avail_in == 0) { - ress->srcBufferLoaded = fread(ress->srcBuffer, 1, ress->srcBufferSize, srcFile); - if (ress->srcBufferLoaded == 0) action = LZMA_FINISH; - strm.next_in = (BYTE const*)ress->srcBuffer; - strm.avail_in = ress->srcBufferLoaded; + AIO_ReadPool_consumeAndRefill(ress->readCtx); + if (ress->readCtx->srcBufferLoaded == 0) action = LZMA_FINISH; + strm.next_in = (BYTE const*)ress->readCtx->srcBuffer; + strm.avail_in = ress->readCtx->srcBufferLoaded; } ret = lzma_code(&strm, action); @@ -1844,103 +2875,90 @@ static unsigned long long FIO_decompressLzmaFrame(dRess_t* ress, FILE* srcFile, srcFileName, ret); decodingError = 1; break; } - { size_t const decompBytes = ress->dstBufferSize - strm.avail_out; + { size_t const decompBytes = writeJob->bufferSize - strm.avail_out; if (decompBytes) { - if (fwrite(ress->dstBuffer, 1, decompBytes, ress->dstFile) != decompBytes) { - DISPLAYLEVEL(1, "zstd: %s \n", strerror(errno)); - decodingError = 1; break; - } + writeJob->usedBufferSize = decompBytes; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += decompBytes; - strm.next_out = (BYTE*)ress->dstBuffer; - strm.avail_out = ress->dstBufferSize; + strm.next_out = (BYTE*)writeJob->buffer; + strm.avail_out = writeJob->bufferSize; } } if (ret == LZMA_STREAM_END) break; } - if (strm.avail_in > 0) - memmove(ress->srcBuffer, strm.next_in, strm.avail_in); - ress->srcBufferLoaded = strm.avail_in; + AIO_ReadPool_consumeBytes(ress->readCtx, ress->readCtx->srcBufferLoaded - strm.avail_in); lzma_end(&strm); + AIO_WritePool_releaseIoJob(writeJob); + 
AIO_WritePool_sparseWriteEnd(ress->writeCtx); return decodingError ? FIO_ERROR_FRAME_DECODING : outFileSize; } #endif #ifdef ZSTD_LZ4DECOMPRESS -static unsigned long long FIO_decompressLz4Frame(dRess_t* ress, - FILE* srcFile, const char* srcFileName) +static unsigned long long +FIO_decompressLz4Frame(dRess_t* ress, const char* srcFileName) { unsigned long long filesize = 0; - LZ4F_errorCode_t nextToLoad; + LZ4F_errorCode_t nextToLoad = 4; LZ4F_decompressionContext_t dCtx; LZ4F_errorCode_t const errorCode = LZ4F_createDecompressionContext(&dCtx, LZ4F_VERSION); int decodingError = 0; + IOJob_t *writeJob = NULL; if (LZ4F_isError(errorCode)) { DISPLAYLEVEL(1, "zstd: failed to create lz4 decompression context \n"); return FIO_ERROR_FRAME_DECODING; } - /* Init feed with magic number (already consumed from FILE* sFile) */ - { size_t inSize = 4; - size_t outSize= 0; - MEM_writeLE32(ress->srcBuffer, LZ4_MAGICNUMBER); - nextToLoad = LZ4F_decompress(dCtx, ress->dstBuffer, &outSize, ress->srcBuffer, &inSize, NULL); - if (LZ4F_isError(nextToLoad)) { - DISPLAYLEVEL(1, "zstd: %s: lz4 header error : %s \n", - srcFileName, LZ4F_getErrorName(nextToLoad)); - LZ4F_freeDecompressionContext(dCtx); - return FIO_ERROR_FRAME_DECODING; - } } + writeJob = AIO_WritePool_acquireJob(ress->writeCtx); /* Main Loop */ for (;nextToLoad;) { - size_t readSize; size_t pos = 0; - size_t decodedBytes = ress->dstBufferSize; + size_t decodedBytes = writeJob->bufferSize; + int fullBufferDecoded = 0; /* Read input */ - if (nextToLoad > ress->srcBufferSize) nextToLoad = ress->srcBufferSize; - readSize = fread(ress->srcBuffer, 1, nextToLoad, srcFile); - if (!readSize) break; /* reached end of file or stream */ + AIO_ReadPool_fillBuffer(ress->readCtx, nextToLoad); + if(!ress->readCtx->srcBufferLoaded) break; /* reached end of file */ - while ((pos < readSize) || (decodedBytes == ress->dstBufferSize)) { /* still to read, or still to flush */ + while ((pos < ress->readCtx->srcBufferLoaded) || fullBufferDecoded) { /* still to read, or still to flush */ /* Decode Input (at least partially) */ - size_t remaining = readSize - pos; - decodedBytes = ress->dstBufferSize; - nextToLoad = LZ4F_decompress(dCtx, ress->dstBuffer, &decodedBytes, (char*)(ress->srcBuffer)+pos, &remaining, NULL); + size_t remaining = ress->readCtx->srcBufferLoaded - pos; + decodedBytes = writeJob->bufferSize; + nextToLoad = LZ4F_decompress(dCtx, writeJob->buffer, &decodedBytes, (char*)(ress->readCtx->srcBuffer)+pos, + &remaining, NULL); if (LZ4F_isError(nextToLoad)) { DISPLAYLEVEL(1, "zstd: %s: lz4 decompression error : %s \n", srcFileName, LZ4F_getErrorName(nextToLoad)); decodingError = 1; nextToLoad = 0; break; } pos += remaining; + assert(pos <= ress->readCtx->srcBufferLoaded); + fullBufferDecoded = decodedBytes == writeJob->bufferSize; /* Write Block */ if (decodedBytes) { - if (fwrite(ress->dstBuffer, 1, decodedBytes, ress->dstFile) != decodedBytes) { - DISPLAYLEVEL(1, "zstd: %s \n", strerror(errno)); - decodingError = 1; nextToLoad = 0; break; - } + UTIL_HumanReadableSize_t hrs; + writeJob->usedBufferSize = decodedBytes; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); filesize += decodedBytes; - DISPLAYUPDATE(2, "\rDecompressed : %u MB ", (unsigned)(filesize>>20)); + hrs = UTIL_makeHumanReadableSize(filesize); + DISPLAYUPDATE_PROGRESS("\rDecompressed : %.*f%s ", hrs.precision, hrs.value, hrs.suffix); } if (!nextToLoad) break; } + AIO_ReadPool_consumeBytes(ress->readCtx, pos); } - /* can be out because readSize == 0, which could be an fread() error */ - if 
(ferror(srcFile)) { - DISPLAYLEVEL(1, "zstd: %s: read error \n", srcFileName); - decodingError=1; - } - if (nextToLoad!=0) { DISPLAYLEVEL(1, "zstd: %s: unfinished lz4 stream \n", srcFileName); decodingError=1; } LZ4F_freeDecompressionContext(dCtx); - ress->srcBufferLoaded = 0; /* LZ4F will reach exact frame boundary */ + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return decodingError ? FIO_ERROR_FRAME_DECODING : filesize; } @@ -1954,22 +2972,31 @@ static unsigned long long FIO_decompressLz4Frame(dRess_t* ress, * @return : 0 : OK * 1 : error */ -static int FIO_decompressFrames(FIO_prefs_t* const prefs, dRess_t ress, FILE* srcFile, - const char* dstFileName, const char* srcFileName) +static int FIO_decompressFrames(FIO_ctx_t* const fCtx, + dRess_t ress, const FIO_prefs_t* const prefs, + const char* dstFileName, const char* srcFileName) { unsigned readSomething = 0; unsigned long long filesize = 0; - assert(srcFile != NULL); + int passThrough = prefs->passThrough; + + if (passThrough == -1) { + /* If pass-through mode is not explicitly enabled or disabled, + * default to the legacy behavior of enabling it if we are writing + * to stdout with the overwrite flag enabled. + */ + passThrough = prefs->overwrite && !strcmp(dstFileName, stdoutmark); + } + assert(passThrough == 0 || passThrough == 1); /* for each frame */ for ( ; ; ) { /* check magic number -> version */ size_t const toRead = 4; - const BYTE* const buf = (const BYTE*)ress.srcBuffer; - if (ress.srcBufferLoaded < toRead) /* load up to 4 bytes for header */ - ress.srcBufferLoaded += fread((char*)ress.srcBuffer + ress.srcBufferLoaded, - (size_t)1, toRead - ress.srcBufferLoaded, srcFile); - if (ress.srcBufferLoaded==0) { + const BYTE* buf; + AIO_ReadPool_fillBuffer(ress.readCtx, toRead); + buf = (const BYTE*)ress.readCtx->srcBuffer; + if (ress.readCtx->srcBufferLoaded==0) { if (readSomething==0) { /* srcFile is empty (which is invalid) */ DISPLAYLEVEL(1, "zstd: %s: unexpected end of file \n", srcFileName); return 1; @@ -1977,17 +3004,20 @@ static int FIO_decompressFrames(FIO_prefs_t* const prefs, dRess_t ress, FILE* sr break; /* no more input */ } readSomething = 1; /* there is at least 1 byte in srcFile */ - if (ress.srcBufferLoaded < toRead) { + if (ress.readCtx->srcBufferLoaded < toRead) { /* not enough input to check magic number */ + if (passThrough) { + return FIO_passThrough(&ress); + } DISPLAYLEVEL(1, "zstd: %s: unknown header \n", srcFileName); return 1; } - if (ZSTD_isFrame(buf, ress.srcBufferLoaded)) { - unsigned long long const frameSize = FIO_decompressZstdFrame(prefs, &ress, srcFile, srcFileName, filesize); + if (ZSTD_isFrame(buf, ress.readCtx->srcBufferLoaded)) { + unsigned long long const frameSize = FIO_decompressZstdFrame(fCtx, &ress, prefs, srcFileName, filesize); if (frameSize == FIO_ERROR_FRAME_DECODING) return 1; filesize += frameSize; } else if (buf[0] == 31 && buf[1] == 139) { /* gz magic number */ #ifdef ZSTD_GZDECOMPRESS - unsigned long long const frameSize = FIO_decompressGzFrame(&ress, srcFile, srcFileName); + unsigned long long const frameSize = FIO_decompressGzFrame(&ress, srcFileName); if (frameSize == FIO_ERROR_FRAME_DECODING) return 1; filesize += frameSize; #else @@ -1997,7 +3027,7 @@ static int FIO_decompressFrames(FIO_prefs_t* const prefs, dRess_t ress, FILE* sr } else if ((buf[0] == 0xFD && buf[1] == 0x37) /* xz magic number */ || (buf[0] == 0x5D && buf[1] == 0x00)) { /* lzma header (no magic number) */ #ifdef ZSTD_LZMADECOMPRESS - unsigned long long const 
frameSize = FIO_decompressLzmaFrame(&ress, srcFile, srcFileName, buf[0] != 0xFD); + unsigned long long const frameSize = FIO_decompressLzmaFrame(&ress, srcFileName, buf[0] != 0xFD); if (frameSize == FIO_ERROR_FRAME_DECODING) return 1; filesize += frameSize; #else @@ -2006,85 +3036,93 @@ static int FIO_decompressFrames(FIO_prefs_t* const prefs, dRess_t ress, FILE* sr #endif } else if (MEM_readLE32(buf) == LZ4_MAGICNUMBER) { #ifdef ZSTD_LZ4DECOMPRESS - unsigned long long const frameSize = FIO_decompressLz4Frame(&ress, srcFile, srcFileName); + unsigned long long const frameSize = FIO_decompressLz4Frame(&ress, srcFileName); if (frameSize == FIO_ERROR_FRAME_DECODING) return 1; filesize += frameSize; #else DISPLAYLEVEL(1, "zstd: %s: lz4 file cannot be uncompressed (zstd compiled without HAVE_LZ4) -- ignored \n", srcFileName); return 1; #endif - } else if ((prefs->overwrite) && !strcmp (dstFileName, stdoutmark)) { /* pass-through mode */ - return FIO_passThrough(prefs, - ress.dstFile, srcFile, - ress.srcBuffer, ress.srcBufferSize, - ress.srcBufferLoaded); + } else if (passThrough) { + return FIO_passThrough(&ress); } else { DISPLAYLEVEL(1, "zstd: %s: unsupported format \n", srcFileName); return 1; } } /* for each frame */ /* Final Status */ - DISPLAYLEVEL(2, "\r%79s\r", ""); - DISPLAYLEVEL(2, "%-20s: %llu bytes \n", srcFileName, filesize); + fCtx->totalBytesOutput += (size_t)filesize; + DISPLAY_PROGRESS("\r%79s\r", ""); + if (FIO_shouldDisplayFileSummary(fCtx)) + DISPLAY_SUMMARY("%-20s: %llu bytes \n", srcFileName, filesize); return 0; } /** FIO_decompressDstFile() : - open `dstFileName`, - or path-through if ress.dstFile is already != 0, + open `dstFileName`, or pass-through if writeCtx's file is already != 0, then start decompression process (FIO_decompressFrames()). @return : 0 : OK 1 : operation aborted */ -static int FIO_decompressDstFile(FIO_prefs_t* const prefs, - dRess_t ress, FILE* srcFile, - const char* dstFileName, const char* srcFileName) +static int FIO_decompressDstFile(FIO_ctx_t* const fCtx, + FIO_prefs_t* const prefs, + dRess_t ress, + const char* dstFileName, + const char* srcFileName, + const stat_t* srcFileStat) { int result; - stat_t statbuf; - int transfer_permissions = 0; int releaseDstFile = 0; + int transferStat = 0; + int dstFd = 0; + + if ((AIO_WritePool_getFile(ress.writeCtx) == NULL) && (prefs->testMode == 0)) { + FILE *dstFile; + int dstFilePermissions = DEFAULT_FILE_PERMISSIONS; + if ( strcmp(srcFileName, stdinmark) /* special case : don't transfer permissions from stdin */ + && strcmp(dstFileName, stdoutmark) + && UTIL_isRegularFileStat(srcFileStat) ) { + transferStat = 1; + dstFilePermissions = TEMPORARY_FILE_PERMISSIONS; + } - if (ress.dstFile == NULL) { releaseDstFile = 1; - ress.dstFile = FIO_openDstFile(prefs, srcFileName, dstFileName); - if (ress.dstFile==0) return 1; + dstFile = FIO_openDstFile(fCtx, prefs, srcFileName, dstFileName, dstFilePermissions); + if (dstFile==NULL) return 1; + dstFd = fileno(dstFile); + AIO_WritePool_setFile(ress.writeCtx, dstFile); /* Must only be added after FIO_openDstFile() succeeds. * Otherwise we may delete the destination file if it already exists, * and the user presses Ctrl-C when asked if they wish to overwrite. 
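     * In short, the sequence implemented by this function is (an illustrative
     * summary of the code below, not a separate API):
     *   1) FIO_openDstFile()       - may prompt, and may replace an existing dst
     *   2) addHandler(dstFileName) - from here on, Ctrl-C removes the partial dst
     *   3) clearHandler()          - once the output is complete (see below)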
*/ addHandler(dstFileName); - - if ( strcmp(srcFileName, stdinmark) /* special case : don't transfer permissions from stdin */ - && UTIL_getFileStat(srcFileName, &statbuf) ) - transfer_permissions = 1; } - - result = FIO_decompressFrames(prefs, ress, srcFile, dstFileName, srcFileName); + result = FIO_decompressFrames(fCtx, ress, prefs, dstFileName, srcFileName); if (releaseDstFile) { - FILE* const dstFile = ress.dstFile; clearHandler(); - ress.dstFile = NULL; - if (fclose(dstFile)) { + + if (transferStat) { + UTIL_setFDStat(dstFd, dstFileName, srcFileStat); + } + + if (AIO_WritePool_closeFile(ress.writeCtx)) { DISPLAYLEVEL(1, "zstd: %s: %s \n", dstFileName, strerror(errno)); result = 1; } + if (transferStat) { + UTIL_utime(dstFileName, srcFileStat); + } + if ( (result != 0) /* operation failure */ - && strcmp(dstFileName, nulmark) /* special case : don't remove() /dev/null (#316) */ && strcmp(dstFileName, stdoutmark) /* special case : don't remove() stdout */ ) { - FIO_remove(dstFileName); /* remove decompression artefact; note: don't do anything special if remove() fails */ - } else { /* operation success */ - if ( strcmp(dstFileName, stdoutmark) /* special case : don't chmod stdout */ - && strcmp(dstFileName, nulmark) /* special case : don't chmod /dev/null */ - && transfer_permissions ) /* file permissions correctly extracted from src */ - UTIL_setFileStat(dstFileName, &statbuf); /* transfer file permissions from src into dst */ + FIO_removeFile(dstFileName); /* remove decompression artefact; note: don't do anything special if remove() fails */ } } @@ -2097,21 +3135,37 @@ static int FIO_decompressDstFile(FIO_prefs_t* const prefs, @return : 0 : OK 1 : error */ -static int FIO_decompressSrcFile(FIO_prefs_t* const prefs, dRess_t ress, const char* dstFileName, const char* srcFileName) +static int FIO_decompressSrcFile(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, dRess_t ress, const char* dstFileName, const char* srcFileName) { FILE* srcFile; + stat_t srcFileStat; int result; + U64 fileSize = UTIL_FILESIZE_UNKNOWN; if (UTIL_isDirectory(srcFileName)) { DISPLAYLEVEL(1, "zstd: %s is a directory -- ignored \n", srcFileName); return 1; } - srcFile = FIO_openSrcFile(srcFileName); + srcFile = FIO_openSrcFile(prefs, srcFileName, &srcFileStat); if (srcFile==NULL) return 1; - ress.srcBufferLoaded = 0; - result = FIO_decompressDstFile(prefs, ress, srcFile, dstFileName, srcFileName); + /* Don't use AsyncIO for small files */ + if (strcmp(srcFileName, stdinmark)) /* Stdin doesn't have stats */ + fileSize = UTIL_getFileSizeStat(&srcFileStat); + if(fileSize != UTIL_FILESIZE_UNKNOWN && fileSize < ZSTD_BLOCKSIZE_MAX * 3) { + AIO_ReadPool_setAsync(ress.readCtx, 0); + AIO_WritePool_setAsync(ress.writeCtx, 0); + } else { + AIO_ReadPool_setAsync(ress.readCtx, 1); + AIO_WritePool_setAsync(ress.writeCtx, 1); + } + + AIO_ReadPool_setFile(ress.readCtx, srcFile); + + result = FIO_decompressDstFile(fCtx, prefs, ress, dstFileName, srcFileName, &srcFileStat); + + AIO_ReadPool_setFile(ress.readCtx, NULL); /* Close file */ if (fclose(srcFile)) { @@ -2125,7 +3179,7 @@ static int FIO_decompressSrcFile(FIO_prefs_t* const prefs, dRess_t ress, const c * delete both the source and destination files. 
*/ clearHandler(); - if (FIO_remove(srcFileName)) { + if (FIO_removeFile(srcFileName)) { /* failed to remove src file */ DISPLAYLEVEL(1, "zstd: %s: %s \n", srcFileName, strerror(errno)); return 1; @@ -2135,123 +3189,213 @@ static int FIO_decompressSrcFile(FIO_prefs_t* const prefs, dRess_t ress, const c -int FIO_decompressFilename(FIO_prefs_t* const prefs, +int FIO_decompressFilename(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, const char* dstFileName, const char* srcFileName, const char* dictFileName) { dRess_t const ress = FIO_createDResources(prefs, dictFileName); - int const decodingError = FIO_decompressSrcFile(prefs, ress, dstFileName, srcFileName); + int const decodingError = FIO_decompressSrcFile(fCtx, prefs, ress, dstFileName, srcFileName); + + FIO_freeDResources(ress); return decodingError; } +static const char *suffixList[] = { + ZSTD_EXTENSION, + TZSTD_EXTENSION, +#ifndef ZSTD_NODECOMPRESS + ZSTD_ALT_EXTENSION, +#endif +#ifdef ZSTD_GZDECOMPRESS + GZ_EXTENSION, + TGZ_EXTENSION, +#endif +#ifdef ZSTD_LZMADECOMPRESS + LZMA_EXTENSION, + XZ_EXTENSION, + TXZ_EXTENSION, +#endif +#ifdef ZSTD_LZ4DECOMPRESS + LZ4_EXTENSION, + TLZ4_EXTENSION, +#endif + NULL +}; + +static const char *suffixListStr = + ZSTD_EXTENSION "/" TZSTD_EXTENSION +#ifdef ZSTD_GZDECOMPRESS + "/" GZ_EXTENSION "/" TGZ_EXTENSION +#endif +#ifdef ZSTD_LZMADECOMPRESS + "/" LZMA_EXTENSION "/" XZ_EXTENSION "/" TXZ_EXTENSION +#endif +#ifdef ZSTD_LZ4DECOMPRESS + "/" LZ4_EXTENSION "/" TLZ4_EXTENSION +#endif +; /* FIO_determineDstName() : * create a destination filename from a srcFileName. * @return a pointer to it. * @return == NULL if there is an error */ static const char* -FIO_determineDstName(const char* srcFileName) +FIO_determineDstName(const char* srcFileName, const char* outDirName) { static size_t dfnbCapacity = 0; static char* dstFileNameBuffer = NULL; /* using static allocation : this function cannot be multi-threaded */ + size_t dstFileNameEndPos; + char* outDirFilename = NULL; + const char* dstSuffix = ""; + size_t dstSuffixLen = 0; - size_t const sfnSize = strlen(srcFileName); - size_t suffixSize; - const char* const suffixPtr = strrchr(srcFileName, '.'); - if (suffixPtr == NULL) { - DISPLAYLEVEL(1, "zstd: %s: unknown suffix -- ignored \n", - srcFileName); - return NULL; + size_t sfnSize = strlen(srcFileName); + + size_t srcSuffixLen; + const char* const srcSuffix = strrchr(srcFileName, '.'); + + if(!strcmp(srcFileName, stdinmark)) { + return stdoutmark; } - suffixSize = strlen(suffixPtr); - - /* check suffix is authorized */ - if (sfnSize <= suffixSize - || ( strcmp(suffixPtr, ZSTD_EXTENSION) - #ifdef ZSTD_GZDECOMPRESS - && strcmp(suffixPtr, GZ_EXTENSION) - #endif - #ifdef ZSTD_LZMADECOMPRESS - && strcmp(suffixPtr, XZ_EXTENSION) - && strcmp(suffixPtr, LZMA_EXTENSION) - #endif - #ifdef ZSTD_LZ4DECOMPRESS - && strcmp(suffixPtr, LZ4_EXTENSION) - #endif - ) ) { - const char* suffixlist = ZSTD_EXTENSION - #ifdef ZSTD_GZDECOMPRESS - "/" GZ_EXTENSION - #endif - #ifdef ZSTD_LZMADECOMPRESS - "/" XZ_EXTENSION "/" LZMA_EXTENSION - #endif - #ifdef ZSTD_LZ4DECOMPRESS - "/" LZ4_EXTENSION - #endif - ; - DISPLAYLEVEL(1, "zstd: %s: unknown suffix (%s expected) -- ignored \n", - srcFileName, suffixlist); + + if (srcSuffix == NULL) { + DISPLAYLEVEL(1, + "zstd: %s: unknown suffix (%s expected). " + "Can't derive the output file name. " + "Specify it with -o dstFileName. 
Ignoring.\n", + srcFileName, suffixListStr); return NULL; } + srcSuffixLen = strlen(srcSuffix); + + { + const char** matchedSuffixPtr; + for (matchedSuffixPtr = suffixList; *matchedSuffixPtr != NULL; matchedSuffixPtr++) { + if (!strcmp(*matchedSuffixPtr, srcSuffix)) { + break; + } + } + + /* check suffix is authorized */ + if (sfnSize <= srcSuffixLen || *matchedSuffixPtr == NULL) { + DISPLAYLEVEL(1, + "zstd: %s: unknown suffix (%s expected). " + "Can't derive the output file name. " + "Specify it with -o dstFileName. Ignoring.\n", + srcFileName, suffixListStr); + return NULL; + } + + if ((*matchedSuffixPtr)[1] == 't') { + dstSuffix = ".tar"; + dstSuffixLen = strlen(dstSuffix); + } + } + + if (outDirName) { + outDirFilename = FIO_createFilename_fromOutDir(srcFileName, outDirName, 0); + sfnSize = strlen(outDirFilename); + assert(outDirFilename != NULL); + } - /* allocate enough space to write dstFilename into it */ - if (dfnbCapacity+suffixSize <= sfnSize+1) { + if (dfnbCapacity+srcSuffixLen <= sfnSize+1+dstSuffixLen) { + /* allocate enough space to write dstFilename into it */ free(dstFileNameBuffer); dfnbCapacity = sfnSize + 20; dstFileNameBuffer = (char*)malloc(dfnbCapacity); if (dstFileNameBuffer==NULL) - EXM_THROW(74, "%s : not enough memory for dstFileName", strerror(errno)); + EXM_THROW(74, "%s : not enough memory for dstFileName", + strerror(errno)); } /* return dst name == src name truncated from suffix */ assert(dstFileNameBuffer != NULL); - memcpy(dstFileNameBuffer, srcFileName, sfnSize - suffixSize); - dstFileNameBuffer[sfnSize-suffixSize] = '\0'; + dstFileNameEndPos = sfnSize - srcSuffixLen; + if (outDirFilename) { + memcpy(dstFileNameBuffer, outDirFilename, dstFileNameEndPos); + free(outDirFilename); + } else { + memcpy(dstFileNameBuffer, srcFileName, dstFileNameEndPos); + } + + /* The short tar extensions tzst, tgz, txz and tlz4 files should have "tar" + * extension on decompression. Also writes terminating null. 
*/
+    strcpy(dstFileNameBuffer + dstFileNameEndPos, dstSuffix);
     return dstFileNameBuffer;

     /* note : dstFileNameBuffer memory is not going to be free */
 }

-
 int
-FIO_decompressMultipleFilenames(FIO_prefs_t* const prefs,
-                                const char* srcNamesTable[], unsigned nbFiles,
-                                const char* outFileName,
+FIO_decompressMultipleFilenames(FIO_ctx_t* const fCtx,
+                                FIO_prefs_t* const prefs,
+                                const char** srcNamesTable,
+                                const char* outMirroredRootDirName,
+                                const char* outDirName, const char* outFileName,
                                 const char* dictFileName)
 {
+    int status;
     int error = 0;
     dRess_t ress = FIO_createDResources(prefs, dictFileName);

     if (outFileName) {
-        unsigned u;
-        ress.dstFile = FIO_openDstFile(prefs, NULL, outFileName);
-        if (ress.dstFile == 0) EXM_THROW(71, "cannot open %s", outFileName);
-        for (u=0; u<nbFiles; u++)
-            error |= FIO_decompressSrcFile(prefs, ress, outFileName, srcNamesTable[u]);
-        if (fclose(ress.dstFile))
-            EXM_THROW(72, "Write error : %s : cannot properly close output file", strerror(errno));
+        if (!prefs->testMode) {
+            FILE* dstFile = FIO_openDstFile(fCtx, prefs, NULL, outFileName, DEFAULT_FILE_PERMISSIONS);
+            if (dstFile == 0) EXM_THROW(19, "cannot open %s", outFileName);
+            AIO_WritePool_setFile(ress.writeCtx, dstFile);
+        }
+        for (; fCtx->currFileIdx < fCtx->nbFilesTotal; fCtx->currFileIdx++) {
+            status = FIO_decompressSrcFile(fCtx, prefs, ress, outFileName, srcNamesTable[fCtx->currFileIdx]);
+            if (!status) fCtx->nbFilesProcessed++;
+            error |= status;
+        }
+        if ((!prefs->testMode) && (AIO_WritePool_closeFile(ress.writeCtx)))
+            EXM_THROW(72, "Write error : %s : cannot properly close output file", strerror(errno));
     } else {
-        unsigned u;
-        for (u=0; u<nbFiles; u++) {   /* create dstFileName */
-            const char* const srcFileName = srcNamesTable[u];
-            const char* const dstFileName = FIO_determineDstName(srcFileName);
+        if (outMirroredRootDirName)
+            UTIL_mirrorSourceFilesDirectories(srcNamesTable, (unsigned)fCtx->nbFilesTotal, outMirroredRootDirName);
+
+        for (; fCtx->currFileIdx < fCtx->nbFilesTotal; fCtx->currFileIdx++) {   /* create dstFileName */
+            const char* const srcFileName = srcNamesTable[fCtx->currFileIdx];
+            const char* dstFileName = NULL;
+            if (outMirroredRootDirName) {
+                char* validMirroredDirName = UTIL_createMirroredDestDirName(srcFileName, outMirroredRootDirName);
+                if (validMirroredDirName) {
+                    dstFileName = FIO_determineDstName(srcFileName, validMirroredDirName);
+                    free(validMirroredDirName);
+                } else {
+                    DISPLAYLEVEL(2, "zstd: --output-dir-mirror cannot decompress '%s' into '%s'\n", srcFileName, outMirroredRootDirName);
+                }
+            } else {
+                dstFileName = FIO_determineDstName(srcFileName, outDirName);
+            }
             if (dstFileName == NULL) { error=1; continue; }
-
-            error |= FIO_decompressSrcFile(prefs, ress, dstFileName, srcFileName);
+            status = FIO_decompressSrcFile(fCtx, prefs, ress, dstFileName, srcFileName);
+            if (!status) fCtx->nbFilesProcessed++;
+            error |= status;
         }
+        if (outDirName)
+            FIO_checkFilenameCollisions(srcNamesTable , (unsigned)fCtx->nbFilesTotal);
+    }
+
+    if (FIO_shouldDisplayMultipleFileSummary(fCtx)) {
+        DISPLAY_PROGRESS("\r%79s\r", "");
+        DISPLAY_SUMMARY("%d files decompressed : %6llu bytes total \n",
+                        fCtx->nbFilesProcessed, (unsigned long long)fCtx->totalBytesOutput);
     }

     FIO_freeDResources(ress);
     return error;
 }

-
-
 /* **************************************************************************
 *  .zst file info (--list command)
 ***************************************************************************/

@@ -2264,7 +3408,9 @@ typedef struct {
     int numSkippableFrames;
     int decompUnavailable;
     int usesCheck;
+    BYTE checksum[4];
     U32 nbFiles;
+    unsigned dictID;
 } fileInfo_t;

 typedef enum {
@@ -2272,7 +3418,7 @@ typedef enum {
   info_frame_error=1,
   info_not_zstd=2,
   info_file_error=3,
-  info_truncated_input=4,
+  info_truncated_input=4
 } InfoError;

 #define ERROR_IF(c,n,...) 
{ \ @@ -2290,7 +3436,7 @@ FIO_analyzeFrames(fileInfo_t* info, FILE* const srcFile) for ( ; ; ) { BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; size_t const numBytesRead = fread(headerBuffer, 1, sizeof(headerBuffer), srcFile); - if (numBytesRead < ZSTD_FRAMEHEADERSIZE_MIN) { + if (numBytesRead < ZSTD_FRAMEHEADERSIZE_MIN(ZSTD_f_zstd1)) { if ( feof(srcFile) && (numBytesRead == 0) && (info->compressedSize > 0) @@ -2309,7 +3455,7 @@ FIO_analyzeFrames(fileInfo_t* info, FILE* const srcFile) { U32 const magicNumber = MEM_readLE32(headerBuffer); /* Zstandard frame */ if (magicNumber == ZSTD_MAGICNUMBER) { - ZSTD_frameHeader header; + ZSTD_FrameHeader header; U64 const frameContentSize = ZSTD_getFrameContentSize(headerBuffer, numBytesRead); if ( frameContentSize == ZSTD_CONTENTSIZE_ERROR || frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN ) { @@ -2319,6 +3465,12 @@ FIO_analyzeFrames(fileInfo_t* info, FILE* const srcFile) } ERROR_IF(ZSTD_getFrameHeader(&header, headerBuffer, numBytesRead) != 0, info_frame_error, "Error: could not decode frame header"); + if (info->dictID != 0 && info->dictID != header.dictID) { + DISPLAY("WARNING: File contains multiple frames with different dictionary IDs. Showing dictID 0 instead"); + info->dictID = 0; + } else { + info->dictID = header.dictID; + } info->windowSize = header.windowSize; /* move to the end of the frame header */ { size_t const headerSize = ZSTD_frameHeaderSize(headerBuffer, numBytesRead); @@ -2351,8 +3503,8 @@ FIO_analyzeFrames(fileInfo_t* info, FILE* const srcFile) int const contentChecksumFlag = (frameHeaderDescriptor & (1 << 2)) >> 2; if (contentChecksumFlag) { info->usesCheck = 1; - ERROR_IF(fseek(srcFile, 4, SEEK_CUR) != 0, - info_frame_error, "Error: could not skip past checksum"); + ERROR_IF(fread(info->checksum, 1, 4, srcFile) != 4, + info_frame_error, "Error: could not read checksum"); } } info->numActualFrames++; } @@ -2378,10 +3530,11 @@ static InfoError getFileInfo_fileConfirmed(fileInfo_t* info, const char* inFileName) { InfoError status; - FILE* const srcFile = FIO_openSrcFile(inFileName); + stat_t srcFileStat; + FILE* const srcFile = FIO_openSrcFile(NULL, inFileName, &srcFileStat); ERROR_IF(srcFile == NULL, info_file_error, "Error: could not open source file %s", inFileName); - info->compressedSize = UTIL_getFileSize(inFileName); + info->compressedSize = UTIL_getFileSizeStat(&srcFileStat); status = FIO_analyzeFrames(info, srcFile); fclose(srcFile); @@ -2406,25 +3559,24 @@ getFileInfo(fileInfo_t* info, const char* srcFileName) static void displayInfo(const char* inFileName, const fileInfo_t* info, int displayLevel) { - unsigned const unit = info->compressedSize < (1 MB) ? (1 KB) : (1 MB); - const char* const unitStr = info->compressedSize < (1 MB) ? "KB" : "MB"; - double const windowSizeUnit = (double)info->windowSize / unit; - double const compressedSizeUnit = (double)info->compressedSize / unit; - double const decompressedSizeUnit = (double)info->decompressedSize / unit; - double const ratio = (info->compressedSize == 0) ? 0 : ((double)info->decompressedSize)/info->compressedSize; + UTIL_HumanReadableSize_t const window_hrs = UTIL_makeHumanReadableSize(info->windowSize); + UTIL_HumanReadableSize_t const compressed_hrs = UTIL_makeHumanReadableSize(info->compressedSize); + UTIL_HumanReadableSize_t const decompressed_hrs = UTIL_makeHumanReadableSize(info->decompressedSize); + double const ratio = (info->compressedSize == 0) ? 0 : ((double)info->decompressedSize)/(double)info->compressedSize; const char* const checkString = (info->usesCheck ? 
"XXH64" : "None"); if (displayLevel <= 2) { if (!info->decompUnavailable) { - DISPLAYOUT("%6d %5d %7.2f %2s %9.2f %2s %5.3f %5s %s\n", + DISPLAYOUT("%6d %5d %6.*f%4s %8.*f%4s %5.3f %5s %s\n", info->numSkippableFrames + info->numActualFrames, info->numSkippableFrames, - compressedSizeUnit, unitStr, decompressedSizeUnit, unitStr, + compressed_hrs.precision, compressed_hrs.value, compressed_hrs.suffix, + decompressed_hrs.precision, decompressed_hrs.value, decompressed_hrs.suffix, ratio, checkString, inFileName); } else { - DISPLAYOUT("%6d %5d %7.2f %2s %5s %s\n", + DISPLAYOUT("%6d %5d %6.*f%4s %5s %s\n", info->numSkippableFrames + info->numActualFrames, info->numSkippableFrames, - compressedSizeUnit, unitStr, + compressed_hrs.precision, compressed_hrs.value, compressed_hrs.suffix, checkString, inFileName); } } else { @@ -2432,19 +3584,29 @@ displayInfo(const char* inFileName, const fileInfo_t* info, int displayLevel) DISPLAYOUT("# Zstandard Frames: %d\n", info->numActualFrames); if (info->numSkippableFrames) DISPLAYOUT("# Skippable Frames: %d\n", info->numSkippableFrames); - DISPLAYOUT("Window Size: %.2f %2s (%llu B)\n", - windowSizeUnit, unitStr, + DISPLAYOUT("DictID: %u\n", info->dictID); + DISPLAYOUT("Window Size: %.*f%s (%llu B)\n", + window_hrs.precision, window_hrs.value, window_hrs.suffix, (unsigned long long)info->windowSize); - DISPLAYOUT("Compressed Size: %.2f %2s (%llu B)\n", - compressedSizeUnit, unitStr, + DISPLAYOUT("Compressed Size: %.*f%s (%llu B)\n", + compressed_hrs.precision, compressed_hrs.value, compressed_hrs.suffix, (unsigned long long)info->compressedSize); if (!info->decompUnavailable) { - DISPLAYOUT("Decompressed Size: %.2f %2s (%llu B)\n", - decompressedSizeUnit, unitStr, + DISPLAYOUT("Decompressed Size: %.*f%s (%llu B)\n", + decompressed_hrs.precision, decompressed_hrs.value, decompressed_hrs.suffix, (unsigned long long)info->decompressedSize); DISPLAYOUT("Ratio: %.4f\n", ratio); } - DISPLAYOUT("Check: %s\n", checkString); + + if (info->usesCheck && info->numActualFrames == 1) { + DISPLAYOUT("Check: %s %02x%02x%02x%02x\n", checkString, + info->checksum[3], info->checksum[2], + info->checksum[1], info->checksum[0] + ); + } else { + DISPLAYOUT("Check: %s\n", checkString); + } + DISPLAYOUT("\n"); } } @@ -2494,7 +3656,7 @@ FIO_listFile(fileInfo_t* total, const char* inFileName, int displayLevel) displayInfo(inFileName, &info, displayLevel); *total = FIO_addFInfo(*total, info); assert(error == info_success || error == info_frame_error); - return error; + return (int)error; } } @@ -2508,7 +3670,7 @@ int FIO_listMultipleFiles(unsigned numFiles, const char** filenameTable, int dis } } if (numFiles == 0) { - if (!IS_CONSOLE(stdin)) { + if (!UTIL_isConsole(stdin)) { DISPLAYLEVEL(1, "zstd: --list does not support reading from standard input \n"); } DISPLAYLEVEL(1, "No files given \n"); @@ -2528,24 +3690,23 @@ int FIO_listMultipleFiles(unsigned numFiles, const char** filenameTable, int dis error |= FIO_listFile(&total, filenameTable[u], displayLevel); } } if (numFiles > 1 && displayLevel <= 2) { /* display total */ - unsigned const unit = total.compressedSize < (1 MB) ? (1 KB) : (1 MB); - const char* const unitStr = total.compressedSize < (1 MB) ? "KB" : "MB"; - double const compressedSizeUnit = (double)total.compressedSize / unit; - double const decompressedSizeUnit = (double)total.decompressedSize / unit; - double const ratio = (total.compressedSize == 0) ? 
0 : ((double)total.decompressedSize)/total.compressedSize; + UTIL_HumanReadableSize_t const compressed_hrs = UTIL_makeHumanReadableSize(total.compressedSize); + UTIL_HumanReadableSize_t const decompressed_hrs = UTIL_makeHumanReadableSize(total.decompressedSize); + double const ratio = (total.compressedSize == 0) ? 0 : ((double)total.decompressedSize)/(double)total.compressedSize; const char* const checkString = (total.usesCheck ? "XXH64" : ""); DISPLAYOUT("----------------------------------------------------------------- \n"); if (total.decompUnavailable) { - DISPLAYOUT("%6d %5d %7.2f %2s %5s %u files\n", + DISPLAYOUT("%6d %5d %6.*f%4s %5s %u files\n", total.numSkippableFrames + total.numActualFrames, total.numSkippableFrames, - compressedSizeUnit, unitStr, + compressed_hrs.precision, compressed_hrs.value, compressed_hrs.suffix, checkString, (unsigned)total.nbFiles); } else { - DISPLAYOUT("%6d %5d %7.2f %2s %9.2f %2s %5.3f %5s %u files\n", + DISPLAYOUT("%6d %5d %6.*f%4s %8.*f%4s %5.3f %5s %u files\n", total.numSkippableFrames + total.numActualFrames, total.numSkippableFrames, - compressedSizeUnit, unitStr, decompressedSizeUnit, unitStr, + compressed_hrs.precision, compressed_hrs.value, compressed_hrs.suffix, + decompressed_hrs.precision, decompressed_hrs.value, decompressed_hrs.suffix, ratio, checkString, (unsigned)total.nbFiles); } } return error; diff --git a/programs/fileio.h b/programs/fileio.h index e46633752f4..5d7334ef5f0 100644 --- a/programs/fileio.h +++ b/programs/fileio.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -12,13 +12,10 @@ #ifndef FILEIO_H_23981798732 #define FILEIO_H_23981798732 +#include "fileio_types.h" +#include "util.h" /* FileNamesTable */ #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */ -#include "zstd.h" /* ZSTD_* */ - -#if defined (__cplusplus) -extern "C" { -#endif - +#include "../lib/zstd.h" /* ZSTD_* */ /* ************************************* * Special i/o constants @@ -26,38 +23,54 @@ extern "C" { #define stdinmark "/*stdin*\\" #define stdoutmark "/*stdout*\\" #ifdef _WIN32 -# define nulmark "nul" +# define nulmark "NUL" #else # define nulmark "/dev/null" #endif + +/** + * We test whether the extension we found starts with 't', and if so, we append + * ".tar" to the end of the output name. 
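+ * e.g. TGZ_EXTENSION ".tgz" and TXZ_EXTENSION ".txz" both yield a ".tar"
+ * output name, while a plain GZ_EXTENSION ".gz" suffix is simply stripped.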
+ */ #define LZMA_EXTENSION ".lzma" #define XZ_EXTENSION ".xz" +#define TXZ_EXTENSION ".txz" + #define GZ_EXTENSION ".gz" +#define TGZ_EXTENSION ".tgz" + #define ZSTD_EXTENSION ".zst" +#define TZSTD_EXTENSION ".tzst" +#define ZSTD_ALT_EXTENSION ".zstd" /* allow decompression of .zstd files */ + #define LZ4_EXTENSION ".lz4" +#define TLZ4_EXTENSION ".tlz4" /*-************************************* * Types ***************************************/ -typedef enum { FIO_zstdCompression, FIO_gzipCompression, FIO_xzCompression, FIO_lzmaCompression, FIO_lz4Compression } FIO_compressionType_t; - -typedef struct FIO_prefs_s FIO_prefs_t; - FIO_prefs_t* FIO_createPreferences(void); void FIO_freePreferences(FIO_prefs_t* const prefs); -typedef struct FIO_display_prefs_s FIO_display_prefs_t; +/* Mutable struct containing relevant context and state regarding (de)compression with respect to file I/O */ +typedef struct FIO_ctx_s FIO_ctx_t; + +FIO_ctx_t* FIO_createContext(void); +void FIO_freeContext(FIO_ctx_t* const fCtx); + /*-************************************* * Parameters ***************************************/ +/* FIO_prefs_t functions */ void FIO_setCompressionType(FIO_prefs_t* const prefs, FIO_compressionType_t compressionType); void FIO_overwriteMode(FIO_prefs_t* const prefs); -void FIO_setAdaptiveMode(FIO_prefs_t* const prefs, unsigned adapt); +void FIO_setAdaptiveMode(FIO_prefs_t* const prefs, int adapt); void FIO_setAdaptMin(FIO_prefs_t* const prefs, int minCLevel); void FIO_setAdaptMax(FIO_prefs_t* const prefs, int maxCLevel); -void FIO_setBlockSize(FIO_prefs_t* const prefs, int blockSize); +void FIO_setUseRowMatchFinder(FIO_prefs_t* const prefs, int useRowMatchFinder); +void FIO_setJobSize(FIO_prefs_t* const prefs, int jobSize); void FIO_setChecksumFlag(FIO_prefs_t* const prefs, int checksumFlag); void FIO_setDictIDFlag(FIO_prefs_t* const prefs, int dictIDFlag); void FIO_setLdmBucketSizeLog(FIO_prefs_t* const prefs, int ldmBucketSizeLog); @@ -68,28 +81,46 @@ void FIO_setLdmMinMatch(FIO_prefs_t* const prefs, int ldmMinMatch); void FIO_setMemLimit(FIO_prefs_t* const prefs, unsigned memLimit); void FIO_setNbWorkers(FIO_prefs_t* const prefs, int nbWorkers); void FIO_setOverlapLog(FIO_prefs_t* const prefs, int overlapLog); -void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, unsigned flag); -void FIO_setSparseWrite(FIO_prefs_t* const prefs, unsigned sparse); /**< 0: no sparse; 1: disable on stdout; 2: always enabled */ +void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, int flag); +void FIO_setSparseWrite(FIO_prefs_t* const prefs, int sparse); /**< 0: no sparse; 1: disable on stdout; 2: always enabled */ void FIO_setRsyncable(FIO_prefs_t* const prefs, int rsyncable); +void FIO_setStreamSrcSize(FIO_prefs_t* const prefs, size_t streamSrcSize); +void FIO_setTargetCBlockSize(FIO_prefs_t* const prefs, size_t targetCBlockSize); +void FIO_setSrcSizeHint(FIO_prefs_t* const prefs, size_t srcSizeHint); +void FIO_setTestMode(FIO_prefs_t* const prefs, int testMode); void FIO_setLiteralCompressionMode( FIO_prefs_t* const prefs, - ZSTD_literalCompressionMode_e mode); + ZSTD_ParamSwitch_e mode); -void FIO_setNoProgress(unsigned noProgress); +void FIO_setProgressSetting(FIO_progressSetting_e progressSetting); void FIO_setNotificationLevel(int level); +void FIO_setExcludeCompressedFile(FIO_prefs_t* const prefs, int excludeCompressedFiles); +void FIO_setAllowBlockDevices(FIO_prefs_t* const prefs, int allowBlockDevices); +void FIO_setPatchFromMode(FIO_prefs_t* const prefs, int value); +void 
FIO_setContentSize(FIO_prefs_t* const prefs, int value); +void FIO_displayCompressionParameters(const FIO_prefs_t* prefs); +void FIO_setAsyncIOFlag(FIO_prefs_t* const prefs, int value); +void FIO_setPassThroughFlag(FIO_prefs_t* const prefs, int value); +void FIO_setMMapDict(FIO_prefs_t* const prefs, ZSTD_ParamSwitch_e value); + +/* FIO_ctx_t functions */ +void FIO_setNbFilesTotal(FIO_ctx_t* const fCtx, int value); +void FIO_setHasStdoutOutput(FIO_ctx_t* const fCtx, int value); +void FIO_determineHasStdinInput(FIO_ctx_t* const fCtx, const FileNamesTable* const filenames); /*-************************************* * Single File functions ***************************************/ /** FIO_compressFilename() : - @return : 0 == ok; 1 == pb with src file. */ -int FIO_compressFilename (FIO_prefs_t* const prefs, - const char* outfilename, const char* infilename, const char* dictFileName, - int compressionLevel, ZSTD_compressionParameters comprParams); + * @return : 0 == ok; 1 == pb with src file. */ +int FIO_compressFilename (FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, + const char* outfilename, const char* infilename, + const char* dictFileName, int compressionLevel, + ZSTD_compressionParameters comprParams); /** FIO_decompressFilename() : - @return : 0 == ok; 1 == pb with src file. */ -int FIO_decompressFilename (FIO_prefs_t* const prefs, + * @return : 0 == ok; 1 == pb with src file. */ +int FIO_decompressFilename (FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, const char* outfilename, const char* infilename, const char* dictFileName); int FIO_listMultipleFiles(unsigned numFiles, const char** filenameTable, int displayLevel); @@ -99,20 +130,32 @@ int FIO_listMultipleFiles(unsigned numFiles, const char** filenameTable, int dis * Multiple File functions ***************************************/ /** FIO_compressMultipleFilenames() : - @return : nb of missing files */ -int FIO_compressMultipleFilenames(FIO_prefs_t* const prefs, - const char** srcNamesTable, unsigned nbFiles, + * @return : nb of missing files */ +int FIO_compressMultipleFilenames(FIO_ctx_t* const fCtx, + FIO_prefs_t* const prefs, + const char** inFileNamesTable, + const char* outMirroredDirName, + const char* outDirName, const char* outFileName, const char* suffix, const char* dictFileName, int compressionLevel, ZSTD_compressionParameters comprParams); /** FIO_decompressMultipleFilenames() : - @return : nb of missing or skipped files */ -int FIO_decompressMultipleFilenames(FIO_prefs_t* const prefs, - const char** srcNamesTable, unsigned nbFiles, + * @return : nb of missing or skipped files */ +int FIO_decompressMultipleFilenames(FIO_ctx_t* const fCtx, + FIO_prefs_t* const prefs, + const char** srcNamesTable, + const char* outMirroredDirName, + const char* outDirName, const char* outFileName, const char* dictFileName); +/* FIO_checkFilenameCollisions() : + * Checks for and warns if there are any files that would have the same output path + */ +int FIO_checkFilenameCollisions(const char** filenameTable, unsigned nbFiles); + + /*-************************************* * Advanced stuff (should actually be hosted elsewhere) @@ -121,10 +164,8 @@ int FIO_decompressMultipleFilenames(FIO_prefs_t* const prefs, /* custom crash signal handler */ void FIO_addAbortHandler(void); - - -#if defined (__cplusplus) -} -#endif +char const* FIO_zlibVersion(void); +char const* FIO_lz4Version(void); +char const* FIO_lzmaVersion(void); #endif /* FILEIO_H_23981798732 */ diff --git a/programs/fileio_asyncio.c b/programs/fileio_asyncio.c new file mode 100644 
index 00000000000..42a47201656
--- /dev/null
+++ b/programs/fileio_asyncio.c
@@ -0,0 +1,663 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "platform.h"
+#include <stdio.h>    /* fprintf, open, fdopen, fread, _fileno, stdin, stdout */
+#include <stdlib.h>   /* malloc, free */
+#include <assert.h>
+#include <errno.h>    /* errno */
+
+#if defined (_MSC_VER)
+#  include <sys/stat.h>
+#  include <io.h>
+#endif
+
+#include "fileio_asyncio.h"
+#include "fileio_common.h"
+
+/* **********************************************************************
+ *  Sparse write
+ ************************************************************************/
+
+/** AIO_fwriteSparse() :
+*  @return : storedSkips,
+*            argument for next call to AIO_fwriteSparse() or AIO_fwriteSparseEnd() */
+static unsigned
+AIO_fwriteSparse(FILE* file,
+                 const void* buffer, size_t bufferSize,
+                 const FIO_prefs_t* const prefs,
+                 unsigned storedSkips)
+{
+    const size_t* const bufferT = (const size_t*)buffer;   /* Buffer is supposed malloc'ed, hence aligned on size_t */
+    size_t bufferSizeT = bufferSize / sizeof(size_t);
+    const size_t* const bufferTEnd = bufferT + bufferSizeT;
+    const size_t* ptrT = bufferT;
+    static const size_t segmentSizeT = (32 KB) / sizeof(size_t);   /* check every 32 KB */
+
+    if (prefs->testMode) return 0;  /* do not output anything in test mode */
+
+    if (!prefs->sparseFileSupport) {  /* normal write */
+        size_t const sizeCheck = fwrite(buffer, 1, bufferSize, file);
+        if (sizeCheck != bufferSize)
+            EXM_THROW(70, "Write error : cannot write block : %s",
+                            strerror(errno));
+        return 0;
+    }
+
+    /* avoid int overflow */
+    if (storedSkips > 1 GB) {
+        if (LONG_SEEK(file, 1 GB, SEEK_CUR) != 0)
+            EXM_THROW(91, "1 GB skip error (sparse file support)");
+        storedSkips -= 1 GB;
+    }
+
+    while (ptrT < bufferTEnd) {
+        size_t nb0T;
+
+        /* adjust last segment if < 32 KB */
+        size_t seg0SizeT = segmentSizeT;
+        if (seg0SizeT > bufferSizeT) seg0SizeT = bufferSizeT;
+        bufferSizeT -= seg0SizeT;
+
+        /* count leading zeroes */
+        for (nb0T=0; (nb0T < seg0SizeT) && (ptrT[nb0T] == 0); nb0T++) ;
+        storedSkips += (unsigned)(nb0T * sizeof(size_t));
+
+        if (nb0T != seg0SizeT) {   /* not all 0s */
+            size_t const nbNon0ST = seg0SizeT - nb0T;
+            /* skip leading zeros */
+            if (LONG_SEEK(file, storedSkips, SEEK_CUR) != 0)
+                EXM_THROW(92, "Sparse skip error ; try --no-sparse");
+            storedSkips = 0;
+            /* write the rest */
+            if (fwrite(ptrT + nb0T, sizeof(size_t), nbNon0ST, file) != nbNon0ST)
+                EXM_THROW(93, "Write error : cannot write block : %s",
+                            strerror(errno));
+        }
+        ptrT += seg0SizeT;
+    }
+
+    {   static size_t const maskT = sizeof(size_t)-1;
+        if (bufferSize & maskT) {
+            /* size not multiple of sizeof(size_t) : implies end of block */
+            const char* const restStart = (const char*)bufferTEnd;
+            const char* restPtr = restStart;
+            const char* const restEnd = (const char*)buffer + bufferSize;
+            assert(restEnd > restStart && restEnd < restStart + sizeof(size_t));
+            for ( ; (restPtr < restEnd) && (*restPtr == 0); restPtr++) ;
+            storedSkips += (unsigned) (restPtr - restStart);
+            if (restPtr != restEnd) {
+                /* not all remaining bytes are 0 */
+                size_t const restSize = (size_t)(restEnd - restPtr);
+                if (LONG_SEEK(file, storedSkips, SEEK_CUR) != 0)
+                    EXM_THROW(92, "Sparse skip error ; 
try --no-sparse"); + if (fwrite(restPtr, 1, restSize, file) != restSize) + EXM_THROW(95, "Write error : cannot write end of decoded block : %s", + strerror(errno)); + storedSkips = 0; + } } } + + return storedSkips; +} + +static void +AIO_fwriteSparseEnd(const FIO_prefs_t* const prefs, FILE* file, unsigned storedSkips) +{ + if (prefs->testMode) assert(storedSkips == 0); + if (storedSkips>0) { + assert(prefs->sparseFileSupport > 0); /* storedSkips>0 implies sparse support is enabled */ + (void)prefs; /* assert can be disabled, in which case prefs becomes unused */ + if (LONG_SEEK(file, storedSkips-1, SEEK_CUR) != 0) + EXM_THROW(69, "Final skip error (sparse file support)"); + /* last zero must be explicitly written, + * so that skipped ones get implicitly translated as zero by FS */ + { const char lastZeroByte[1] = { 0 }; + if (fwrite(lastZeroByte, 1, 1, file) != 1) + EXM_THROW(69, "Write error : cannot write last zero : %s", strerror(errno)); + } } +} + + +/* ********************************************************************** + * AsyncIO functionality + ************************************************************************/ + +/* AIO_supported: + * Returns 1 if AsyncIO is supported on the system, 0 otherwise. */ +int AIO_supported(void) { +#ifdef ZSTD_MULTITHREAD + return 1; +#else + return 0; +#endif +} + +/* *********************************** + * Generic IoPool implementation + *************************************/ + +static IOJob_t *AIO_IOPool_createIoJob(IOPoolCtx_t *ctx, size_t bufferSize) { + IOJob_t* const job = (IOJob_t*) malloc(sizeof(IOJob_t)); + void* const buffer = malloc(bufferSize); + if(!job || !buffer) + EXM_THROW(101, "Allocation error : not enough memory"); + job->buffer = buffer; + job->bufferSize = bufferSize; + job->usedBufferSize = 0; + job->file = NULL; + job->ctx = ctx; + job->offset = 0; + return job; +} + + +/* AIO_IOPool_createThreadPool: + * Creates a thread pool and a mutex for threaded IO pool. + * Displays warning if asyncio is requested but MT isn't available. */ +static void AIO_IOPool_createThreadPool(IOPoolCtx_t* ctx, const FIO_prefs_t* prefs) { + ctx->threadPool = NULL; + ctx->threadPoolActive = 0; + if(prefs->asyncIO) { + if (ZSTD_pthread_mutex_init(&ctx->ioJobsMutex, NULL)) + EXM_THROW(102,"Failed creating ioJobsMutex mutex"); + /* We want MAX_IO_JOBS-2 queue items because we need to always have 1 free buffer to + * decompress into and 1 buffer that's actively written to disk and owned by the writing thread. */ + assert(MAX_IO_JOBS >= 2); + ctx->threadPool = POOL_create(1, MAX_IO_JOBS - 2); + ctx->threadPoolActive = 1; + if (!ctx->threadPool) + EXM_THROW(104, "Failed creating I/O thread pool"); + } +} + +/* AIO_IOPool_init: + * Allocates and sets and a new I/O thread pool including its included availableJobs. */ +static void AIO_IOPool_init(IOPoolCtx_t* ctx, const FIO_prefs_t* prefs, POOL_function poolFunction, size_t bufferSize) { + int i; + AIO_IOPool_createThreadPool(ctx, prefs); + ctx->prefs = prefs; + ctx->poolFunction = poolFunction; + ctx->totalIoJobs = ctx->threadPool ? MAX_IO_JOBS : 2; + ctx->availableJobsCount = ctx->totalIoJobs; + for(i=0; i < ctx->availableJobsCount; i++) { + ctx->availableJobs[i] = AIO_IOPool_createIoJob(ctx, bufferSize); + } + ctx->jobBufferSize = bufferSize; + ctx->file = NULL; +} + + +/* AIO_IOPool_threadPoolActive: + * Check if current operation uses thread pool. + * Note that in some cases we have a thread pool initialized but choose not to use it. 
*/
+static int AIO_IOPool_threadPoolActive(IOPoolCtx_t* ctx) {
+    return ctx->threadPool && ctx->threadPoolActive;
+}
+
+
+/* AIO_IOPool_lockJobsMutex:
+ * Locks the IO jobs mutex if threading is active */
+static void AIO_IOPool_lockJobsMutex(IOPoolCtx_t* ctx) {
+    if(AIO_IOPool_threadPoolActive(ctx))
+        ZSTD_pthread_mutex_lock(&ctx->ioJobsMutex);
+}
+
+/* AIO_IOPool_unlockJobsMutex:
+ * Unlocks the IO jobs mutex if threading is active */
+static void AIO_IOPool_unlockJobsMutex(IOPoolCtx_t* ctx) {
+    if(AIO_IOPool_threadPoolActive(ctx))
+        ZSTD_pthread_mutex_unlock(&ctx->ioJobsMutex);
+}
+
+/* AIO_IOPool_releaseIoJob:
+ * Releases an acquired job back to the pool. Doesn't execute the job. */
+static void AIO_IOPool_releaseIoJob(IOJob_t* job) {
+    IOPoolCtx_t* const ctx = (IOPoolCtx_t *) job->ctx;
+    AIO_IOPool_lockJobsMutex(ctx);
+    assert(ctx->availableJobsCount < ctx->totalIoJobs);
+    ctx->availableJobs[ctx->availableJobsCount++] = job;
+    AIO_IOPool_unlockJobsMutex(ctx);
+}
+
+/* AIO_IOPool_join:
+ * Waits for all tasks in the pool to finish executing. */
+static void AIO_IOPool_join(IOPoolCtx_t* ctx) {
+    if(AIO_IOPool_threadPoolActive(ctx))
+        POOL_joinJobs(ctx->threadPool);
+}
+
+/* AIO_IOPool_setThreaded:
+ * Allows (de)activating threaded mode, to be used when the expected overhead
+ * of threading costs more than the expected gains. */
+static void AIO_IOPool_setThreaded(IOPoolCtx_t* ctx, int threaded) {
+    assert(threaded == 0 || threaded == 1);
+    assert(ctx != NULL);
+    if(ctx->threadPoolActive != threaded) {
+        AIO_IOPool_join(ctx);
+        ctx->threadPoolActive = threaded;
+    }
+}
+
+/* AIO_IOPool_destroy:
+ * Releases a previously allocated IO thread pool. Makes sure all tasks are done and released. */
+static void AIO_IOPool_destroy(IOPoolCtx_t* ctx) {
+    int i;
+    if(ctx->threadPool) {
+        /* Make sure we finish all tasks and then free the resources */
+        AIO_IOPool_join(ctx);
+        /* Make sure we are not leaking availableJobs */
+        assert(ctx->availableJobsCount == ctx->totalIoJobs);
+        POOL_free(ctx->threadPool);
+        ZSTD_pthread_mutex_destroy(&ctx->ioJobsMutex);
+    }
+    assert(ctx->file == NULL);
+    for(i=0; i<ctx->availableJobsCount; i++) {
+        IOJob_t* job = (IOJob_t*) ctx->availableJobs[i];
+        free(job->buffer);
+        free(job);
+    }
+}
+
+/* AIO_IOPool_acquireJob:
+ * Returns an available io job to be used for a future io. */
+static IOJob_t* AIO_IOPool_acquireJob(IOPoolCtx_t* ctx) {
+    IOJob_t* job;
+    assert(ctx->file != NULL || ctx->prefs->testMode);
+    AIO_IOPool_lockJobsMutex(ctx);
+    assert(ctx->availableJobsCount > 0);
+    job = (IOJob_t*) ctx->availableJobs[--ctx->availableJobsCount];
+    AIO_IOPool_unlockJobsMutex(ctx);
+    job->usedBufferSize = 0;
+    job->file = ctx->file;
+    job->offset = 0;
+    return job;
+}
+
+
+/* AIO_IOPool_setFile:
+ * Sets the destination file for future IO jobs in the pool.
+ * Requires completion of all queued jobs and release of all otherwise acquired jobs. */
+static void AIO_IOPool_setFile(IOPoolCtx_t* ctx, FILE* file) {
+    assert(ctx!=NULL);
+    AIO_IOPool_join(ctx);
+    assert(ctx->availableJobsCount == ctx->totalIoJobs);
+    ctx->file = file;
+}
+
+static FILE* AIO_IOPool_getFile(const IOPoolCtx_t* ctx) {
+    return ctx->file;
+}
+
+/* AIO_IOPool_enqueueJob:
+ * Enqueues an io job for execution.
+ * The queued job shouldn't be used directly after queueing it. 
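+ * A producer that wants to keep writing therefore swaps in a fresh job right
+ * away, exactly as AIO_WritePool_enqueueAndReacquireWriteJob() below does:
+ *   AIO_IOPool_enqueueJob(*job);
+ *   *job = AIO_IOPool_acquireJob((IOPoolCtx_t*)(*job)->ctx);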
+
+/* AIO_IOPool_enqueueJob:
+ * Enqueues an io job for execution.
+ * The queued job shouldn't be used directly after queueing it. */
+static void AIO_IOPool_enqueueJob(IOJob_t* job) {
+    IOPoolCtx_t* const ctx = (IOPoolCtx_t *)job->ctx;
+    if(AIO_IOPool_threadPoolActive(ctx))
+        POOL_add(ctx->threadPool, ctx->poolFunction, job);
+    else
+        ctx->poolFunction(job);
+}
+
+/* ***********************************
+ *  WritePool implementation
+ *************************************/
+
+/* AIO_WritePool_acquireJob:
+ * Returns an available write job to be used for a future write. */
+IOJob_t* AIO_WritePool_acquireJob(WritePoolCtx_t* ctx) {
+    return AIO_IOPool_acquireJob(&ctx->base);
+}
+
+/* AIO_WritePool_enqueueAndReacquireWriteJob:
+ * Queues a write job for execution and acquires a new one.
+ * On return, `job` points to the newly acquired job.
+ * Make sure to set `usedBufferSize` to the wanted length before the call.
+ * The queued job shouldn't be used directly after queueing it. */
+void AIO_WritePool_enqueueAndReacquireWriteJob(IOJob_t **job) {
+    AIO_IOPool_enqueueJob(*job);
+    *job = AIO_IOPool_acquireJob((IOPoolCtx_t *)(*job)->ctx);
+}
+
+/* AIO_WritePool_sparseWriteEnd:
+ * Ends sparse writes to the current file.
+ * Blocks on completion of all current write jobs before executing. */
+void AIO_WritePool_sparseWriteEnd(WritePoolCtx_t* ctx) {
+    assert(ctx != NULL);
+    AIO_IOPool_join(&ctx->base);
+    AIO_fwriteSparseEnd(ctx->base.prefs, ctx->base.file, ctx->storedSkips);
+    ctx->storedSkips = 0;
+}
+
+/* AIO_WritePool_setFile:
+ * Sets the destination file for future writes in the pool.
+ * Requires completion of all queued write jobs and release of all otherwise acquired jobs.
+ * Also requires ending of sparse write if a previous file was used in sparse mode. */
+void AIO_WritePool_setFile(WritePoolCtx_t* ctx, FILE* file) {
+    AIO_IOPool_setFile(&ctx->base, file);
+    assert(ctx->storedSkips == 0);
+}
+
+/* AIO_WritePool_getFile:
+ * Returns the file the writePool is currently set to write to. */
+FILE* AIO_WritePool_getFile(const WritePoolCtx_t* ctx) {
+    return AIO_IOPool_getFile(&ctx->base);
+}
+
+/* AIO_WritePool_releaseIoJob:
+ * Releases an acquired job back to the pool. Doesn't execute the job. */
+void AIO_WritePool_releaseIoJob(IOJob_t* job) {
+    AIO_IOPool_releaseIoJob(job);
+}
+
+/* AIO_WritePool_closeFile:
+ * Ends sparse write, closes the writePool's current file, and sets the file to NULL.
+ * Requires completion of all queued write jobs and release of all otherwise acquired jobs. */
+int AIO_WritePool_closeFile(WritePoolCtx_t* ctx) {
+    FILE* const dstFile = ctx->base.file;
+    assert(dstFile!=NULL || ctx->base.prefs->testMode!=0);
+    AIO_WritePool_sparseWriteEnd(ctx);
+    AIO_IOPool_setFile(&ctx->base, NULL);
+    return fclose(dstFile);
+}
+
+/* AIO_WritePool_executeWriteJob:
+ * Executes a write job synchronously. Can be used as a function for a thread pool. */
+static void AIO_WritePool_executeWriteJob(void* opaque){
+    IOJob_t* const job = (IOJob_t*) opaque;
+    WritePoolCtx_t* const ctx = (WritePoolCtx_t*) job->ctx;
+    ctx->storedSkips = AIO_fwriteSparse(job->file, job->buffer, job->usedBufferSize, ctx->base.prefs, ctx->storedSkips);
+    AIO_IOPool_releaseIoJob(job);
+}
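Taken together, acquire / enqueue-and-reacquire / release give callers a simple double-buffered write loop. A hedged usage sketch (the AIO_* names are the real entry points; the producer functions are hypothetical):

    /* Illustrative write loop: fill the job's buffer, hand it off, and
     * immediately receive a fresh job to fill next. */
    IOJob_t* job = AIO_WritePool_acquireJob(writeCtx);
    while (produceMoreOutput()) {                        /* hypothetical producer */
        job->usedBufferSize = fillBuffer(job->buffer, job->bufferSize);
        AIO_WritePool_enqueueAndReacquireWriteJob(&job); /* job now points to a fresh job */
    }
    AIO_WritePool_releaseIoJob(job);    /* return the last, unused job */
    AIO_WritePool_closeFile(writeCtx);  /* joins pending writes and ends sparse mode */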
+
+/* AIO_WritePool_create:
+ * Allocates and initializes a new write pool, including its jobs. */
+WritePoolCtx_t* AIO_WritePool_create(const FIO_prefs_t* prefs, size_t bufferSize) {
+    WritePoolCtx_t* const ctx = (WritePoolCtx_t*) malloc(sizeof(WritePoolCtx_t));
+    if(!ctx) EXM_THROW(100, "Allocation error : not enough memory");
+    AIO_IOPool_init(&ctx->base, prefs, AIO_WritePool_executeWriteJob, bufferSize);
+    ctx->storedSkips = 0;
+    return ctx;
+}
+
+/* AIO_WritePool_free:
+ * Frees and releases a writePool and its resources. Closes the destination file if needed. */
+void AIO_WritePool_free(WritePoolCtx_t* ctx) {
+    /* Make sure we finish all tasks and then free the resources */
+    if(AIO_WritePool_getFile(ctx))
+        AIO_WritePool_closeFile(ctx);
+    AIO_IOPool_destroy(&ctx->base);
+    assert(ctx->storedSkips==0);
+    free(ctx);
+}
+
+/* AIO_WritePool_setAsync:
+ * Allows (de)activating async mode, to be used when the expected overhead
+ * of asyncio costs more than the expected gains. */
+void AIO_WritePool_setAsync(WritePoolCtx_t* ctx, int async) {
+    AIO_IOPool_setThreaded(&ctx->base, async);
+}
+
+
+/* ***********************************
+ *  ReadPool implementation
+ *************************************/
+static void AIO_ReadPool_releaseAllCompletedJobs(ReadPoolCtx_t* ctx) {
+    int i;
+    for(i=0; i < ctx->completedJobsCount; i++) {
+        IOJob_t* job = (IOJob_t*) ctx->completedJobs[i];
+        AIO_IOPool_releaseIoJob(job);
+    }
+    ctx->completedJobsCount = 0;
+}
+
+static void AIO_ReadPool_addJobToCompleted(IOJob_t* job) {
+    ReadPoolCtx_t* const ctx = (ReadPoolCtx_t *)job->ctx;
+    AIO_IOPool_lockJobsMutex(&ctx->base);
+    assert(ctx->completedJobsCount < MAX_IO_JOBS);
+    ctx->completedJobs[ctx->completedJobsCount++] = job;
+    if(AIO_IOPool_threadPoolActive(&ctx->base)) {
+        ZSTD_pthread_cond_signal(&ctx->jobCompletedCond);
+    }
+    AIO_IOPool_unlockJobsMutex(&ctx->base);
+}
+
+/* AIO_ReadPool_findNextWaitingOffsetCompletedJob_locked:
+ * Looks through the completed jobs for a job matching the waitingOnOffset and returns it;
+ * if no such job is found, returns NULL.
+ * IMPORTANT: assumes ioJobsMutex is locked. */
+static IOJob_t* AIO_ReadPool_findNextWaitingOffsetCompletedJob_locked(ReadPoolCtx_t* ctx) {
+    IOJob_t *job = NULL;
+    int i;
+    /* This implementation goes through all completed jobs and looks for the one matching the next offset.
+     * While not strictly needed for a single threaded reader implementation (as in such a case we could expect
+     * reads to be completed in order) this implementation was chosen as it better fits other asyncio
+     * interfaces (such as io_uring) that do not provide promises regarding order of completion. */
+    for (i=0; i < ctx->completedJobsCount; i++) {
+        job = (IOJob_t *) ctx->completedJobs[i];
+        if (job->offset == ctx->waitingOnOffset) {
+            ctx->completedJobs[i] = ctx->completedJobs[--ctx->completedJobsCount];
+            return job;
+        }
+    }
+    return NULL;
+}
+
+/* AIO_ReadPool_numReadsInFlight:
+ * Returns the number of IO read jobs currently in flight. */
+static size_t AIO_ReadPool_numReadsInFlight(ReadPoolCtx_t* ctx) {
+    const int jobsHeld = (ctx->currentJobHeld==NULL ? 0 : 1);
+    return (size_t)(ctx->base.totalIoJobs - (ctx->base.availableJobsCount + ctx->completedJobsCount + jobsHeld));
+}
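A short worked trace may help here: suppose 64 KiB jobs are enqueued at offsets 0, 65536 and 131072, and the backend happens to complete them out of order. The matching loop above only ever hands buffers back in offset order (trace illustrative):

    /* completed set     waitingOnOffset   action
     * {65536}           0                 no match -> wait on jobCompletedCond
     * {65536, 0}        0                 match -> return job@0,     advance to 65536
     * {65536}           65536             match -> return job@65536, advance to 131072 */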
+
+/* AIO_ReadPool_getNextCompletedJob:
+ * Returns a completed IOJob_t for the next read in line based on waitingOnOffset and advances waitingOnOffset.
+ * May block. */
+static IOJob_t* AIO_ReadPool_getNextCompletedJob(ReadPoolCtx_t* ctx) {
+    IOJob_t *job = NULL;
+    AIO_IOPool_lockJobsMutex(&ctx->base);
+
+    job = AIO_ReadPool_findNextWaitingOffsetCompletedJob_locked(ctx);
+
+    /* As long as we didn't find the job matching the next read, and we have some reads in flight, continue waiting */
+    while (!job && (AIO_ReadPool_numReadsInFlight(ctx) > 0)) {
+        assert(ctx->base.threadPool != NULL);  /* we shouldn't be here if we work in sync mode */
+        ZSTD_pthread_cond_wait(&ctx->jobCompletedCond, &ctx->base.ioJobsMutex);
+        job = AIO_ReadPool_findNextWaitingOffsetCompletedJob_locked(ctx);
+    }
+
+    if(job) {
+        assert(job->offset == ctx->waitingOnOffset);
+        ctx->waitingOnOffset += job->usedBufferSize;
+    }
+
+    AIO_IOPool_unlockJobsMutex(&ctx->base);
+    return job;
+}
+
+
+/* AIO_ReadPool_executeReadJob:
+ * Executes a read job synchronously. Can be used as a function for a thread pool. */
+static void AIO_ReadPool_executeReadJob(void* opaque){
+    IOJob_t* const job = (IOJob_t*) opaque;
+    ReadPoolCtx_t* const ctx = (ReadPoolCtx_t *)job->ctx;
+    if(ctx->reachedEof) {
+        job->usedBufferSize = 0;
+        AIO_ReadPool_addJobToCompleted(job);
+        return;
+    }
+    job->usedBufferSize = fread(job->buffer, 1, job->bufferSize, job->file);
+    if(job->usedBufferSize < job->bufferSize) {
+        if(ferror(job->file)) {
+            EXM_THROW(37, "Read error");
+        } else if(feof(job->file)) {
+            ctx->reachedEof = 1;
+        } else {
+            EXM_THROW(37, "Unexpected short read");
+        }
+    }
+    AIO_ReadPool_addJobToCompleted(job);
+}
+
+static void AIO_ReadPool_enqueueRead(ReadPoolCtx_t* ctx) {
+    IOJob_t* const job = AIO_IOPool_acquireJob(&ctx->base);
+    job->offset = ctx->nextReadOffset;
+    ctx->nextReadOffset += job->bufferSize;
+    AIO_IOPool_enqueueJob(job);
+}
+
+static void AIO_ReadPool_startReading(ReadPoolCtx_t* ctx) {
+    while(ctx->base.availableJobsCount) {
+        AIO_ReadPool_enqueueRead(ctx);
+    }
+}
+
+/* AIO_ReadPool_setFile:
+ * Sets the source file for future reads in the pool. Initiates reading immediately if file is not NULL.
+ * Waits for all currently enqueued tasks to complete if a previous file was set. */
+void AIO_ReadPool_setFile(ReadPoolCtx_t* ctx, FILE* file) {
+    assert(ctx!=NULL);
+    AIO_IOPool_join(&ctx->base);
+    AIO_ReadPool_releaseAllCompletedJobs(ctx);
+    if (ctx->currentJobHeld) {
+        AIO_IOPool_releaseIoJob((IOJob_t *)ctx->currentJobHeld);
+        ctx->currentJobHeld = NULL;
+    }
+    AIO_IOPool_setFile(&ctx->base, file);
+    ctx->nextReadOffset = 0;
+    ctx->waitingOnOffset = 0;
+    ctx->srcBuffer = ctx->coalesceBuffer;
+    ctx->srcBufferLoaded = 0;
+    ctx->reachedEof = 0;
+    if(file != NULL)
+        AIO_ReadPool_startReading(ctx);
+}
+
+/* AIO_ReadPool_create:
+ * Allocates and initializes a new readPool, including its jobs.
+ * bufferSize should be set to the maximal buffer we want to read at a time; it will also be used
+ * as our basic read size. */
+ReadPoolCtx_t* AIO_ReadPool_create(const FIO_prefs_t* prefs, size_t bufferSize) {
+    ReadPoolCtx_t* const ctx = (ReadPoolCtx_t*) malloc(sizeof(ReadPoolCtx_t));
+    if(!ctx) EXM_THROW(100, "Allocation error : not enough memory");
+    AIO_IOPool_init(&ctx->base, prefs, AIO_ReadPool_executeReadJob, bufferSize);
+
+    ctx->coalesceBuffer = (U8*) malloc(bufferSize * 2);
+    if(!ctx->coalesceBuffer) EXM_THROW(100, "Allocation error : not enough memory");
+    ctx->srcBuffer = ctx->coalesceBuffer;
+    ctx->srcBufferLoaded = 0;
+    ctx->completedJobsCount = 0;
+    ctx->currentJobHeld = NULL;
+
+    if(ctx->base.threadPool)
+        if (ZSTD_pthread_cond_init(&ctx->jobCompletedCond, NULL))
+            EXM_THROW(103,"Failed creating jobCompletedCond cond");
+
+    return ctx;
+}
+
+/* AIO_ReadPool_free:
+ * Frees and releases a readPool and its resources. Closes the source file. */
+void AIO_ReadPool_free(ReadPoolCtx_t* ctx) {
+    if(AIO_ReadPool_getFile(ctx))
+        AIO_ReadPool_closeFile(ctx);
+    if(ctx->base.threadPool)
+        ZSTD_pthread_cond_destroy(&ctx->jobCompletedCond);
+    AIO_IOPool_destroy(&ctx->base);
+    free(ctx->coalesceBuffer);
+    free(ctx);
+}
+
+/* AIO_ReadPool_consumeBytes:
+ * Consumes bytes from srcBuffer's beginning and updates srcBufferLoaded accordingly. */
+void AIO_ReadPool_consumeBytes(ReadPoolCtx_t* ctx, size_t n) {
+    assert(n <= ctx->srcBufferLoaded);
+    ctx->srcBufferLoaded -= n;
+    ctx->srcBuffer += n;
+}
+
+/* AIO_ReadPool_releaseCurrentHeldAndGetNext:
+ * Releases the currently held job and gets the next one; returns NULL if no next job is available. */
+static IOJob_t* AIO_ReadPool_releaseCurrentHeldAndGetNext(ReadPoolCtx_t* ctx) {
+    if (ctx->currentJobHeld) {
+        AIO_IOPool_releaseIoJob((IOJob_t *)ctx->currentJobHeld);
+        ctx->currentJobHeld = NULL;
+        AIO_ReadPool_enqueueRead(ctx);
+    }
+    ctx->currentJobHeld = AIO_ReadPool_getNextCompletedJob(ctx);
+    return (IOJob_t*) ctx->currentJobHeld;
+}
+
+/* AIO_ReadPool_fillBuffer:
+ * Tries to fill the buffer with at least n or jobBufferSize bytes (whichever is smaller).
+ * Returns when srcBuffer has at least the expected number of bytes loaded, or when we've reached the end of the file.
+ * Return value is the number of bytes added to the buffer.
+ * Note that srcBuffer might have up to 2 times jobBufferSize bytes. */
+size_t AIO_ReadPool_fillBuffer(ReadPoolCtx_t* ctx, size_t n) {
+    IOJob_t *job;
+    int useCoalesce = 0;
+    if(n > ctx->base.jobBufferSize)
+        n = ctx->base.jobBufferSize;
+
+    /* We are good, don't read anything */
+    if (ctx->srcBufferLoaded >= n)
+        return 0;
+
+    /* We still have bytes loaded, but not enough to satisfy caller. We need to get the next job
+     * and coalesce the remaining bytes with the next job's buffer */
+    if (ctx->srcBufferLoaded > 0) {
+        useCoalesce = 1;
+        memcpy(ctx->coalesceBuffer, ctx->srcBuffer, ctx->srcBufferLoaded);
+        ctx->srcBuffer = ctx->coalesceBuffer;
+    }
+
+    /* Read the next chunk */
+    job = AIO_ReadPool_releaseCurrentHeldAndGetNext(ctx);
+    if(!job)
+        return 0;
+    if(useCoalesce) {
+        assert(ctx->srcBufferLoaded + job->usedBufferSize <= 2*ctx->base.jobBufferSize);
+        memcpy(ctx->coalesceBuffer + ctx->srcBufferLoaded, job->buffer, job->usedBufferSize);
+        ctx->srcBufferLoaded += job->usedBufferSize;
+    }
+    else {
+        ctx->srcBuffer = (U8 *) job->buffer;
+        ctx->srcBufferLoaded = job->usedBufferSize;
+    }
+    return job->usedBufferSize;
+}
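On the consuming side, a decompression loop built on these primitives alternates fill and consume. A minimal hedged sketch (function names real, the parse step hypothetical):

    /* Illustrative read loop: keep bytes visible in srcBuffer, consume
     * what was parsed, stop once EOF leaves the buffer empty. */
    AIO_ReadPool_setFile(readCtx, srcFile);
    for (;;) {
        AIO_ReadPool_fillBuffer(readCtx, toRead);       /* blocks until loaded or EOF */
        if (readCtx->srcBufferLoaded == 0) break;       /* end of file */
        {   size_t const consumed = parseChunk(readCtx->srcBuffer,   /* hypothetical */
                                               readCtx->srcBufferLoaded);
            AIO_ReadPool_consumeBytes(readCtx, consumed);
        }
    }
    AIO_ReadPool_closeFile(readCtx);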
+
+/* AIO_ReadPool_consumeAndRefill:
+ * Consumes the current buffer and refills it with bufferSize bytes. */
+size_t AIO_ReadPool_consumeAndRefill(ReadPoolCtx_t* ctx) {
+    AIO_ReadPool_consumeBytes(ctx, ctx->srcBufferLoaded);
+    return AIO_ReadPool_fillBuffer(ctx, ctx->base.jobBufferSize);
+}
+
+/* AIO_ReadPool_getFile:
+ * Returns the current file set for the read pool. */
+FILE* AIO_ReadPool_getFile(const ReadPoolCtx_t* ctx) {
+    return AIO_IOPool_getFile(&ctx->base);
+}
+
+/* AIO_ReadPool_closeFile:
+ * Closes the currently set file. Waits for all currently enqueued tasks to complete and resets state. */
+int AIO_ReadPool_closeFile(ReadPoolCtx_t* ctx) {
+    FILE* const file = AIO_ReadPool_getFile(ctx);
+    AIO_ReadPool_setFile(ctx, NULL);
+    return fclose(file);
+}
+
+/* AIO_ReadPool_setAsync:
+ * Allows (de)activating async mode, to be used when the expected overhead
+ * of asyncio costs more than the expected gains. */
+void AIO_ReadPool_setAsync(ReadPoolCtx_t* ctx, int async) {
+    AIO_IOPool_setThreaded(&ctx->base, async);
+}
diff --git a/programs/fileio_asyncio.h b/programs/fileio_asyncio.h
new file mode 100644
index 00000000000..d4980ef5b01
--- /dev/null
+++ b/programs/fileio_asyncio.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+ /*
+  * FileIO AsyncIO exposes read/write IO pools that allow doing IO asynchronously.
+  * The current implementation relies on having one thread that reads and one that
+  * writes.
+  * Each IO pool supports up to `MAX_IO_JOBS` jobs that can be enqueued for work, but
+  * are performed serially by the appropriate worker thread.
+  * Most systems expose better primitives to perform asynchronous IO, such as
+  * io_uring on newer Linux systems. The API is built in such a way that in the
+  * future we could replace the threads with better solutions when available.
+  */
+
+#ifndef ZSTD_FILEIO_ASYNCIO_H
+#define ZSTD_FILEIO_ASYNCIO_H
+
+#include "../lib/common/mem.h"     /* U32, U64 */
+#include "fileio_types.h"
+#include "platform.h"
+#include "util.h"
+#include "../lib/common/pool.h"
+#include "../lib/common/threading.h"
+
+#define MAX_IO_JOBS (10)
+
+typedef struct {
+    /* These struct fields should be set only on creation and not changed afterwards */
+    POOL_ctx* threadPool;
+    int threadPoolActive;
+    int totalIoJobs;
+    const FIO_prefs_t* prefs;
+    POOL_function poolFunction;
+
+    /* Controls the file we currently write to, make changes only by using provided utility functions */
+    FILE* file;
+
+    /* The jobs and availableJobsCount fields are accessed by both the main and worker threads and should
+     * only be mutated after locking the mutex */
+    ZSTD_pthread_mutex_t ioJobsMutex;
+    void* availableJobs[MAX_IO_JOBS];
+    int availableJobsCount;
+    size_t jobBufferSize;
+} IOPoolCtx_t;
+
+typedef struct {
+    IOPoolCtx_t base;
+
+    /* State regarding the currently read file */
+    int reachedEof;
+    U64 nextReadOffset;
+    U64 waitingOnOffset;
+
+    /* We may hold an IOJob object as needed if we actively expose its buffer. */
+    void *currentJobHeld;
+
+    /* Coalesce buffer is used to join two buffers in case where we need to read more bytes than left in
+     * the first of them. Shouldn't be accessed from outside of utility functions. */
+    U8 *coalesceBuffer;
+
+    /* Read buffer can be used by consumer code, take care when copying this pointer aside as it might
+     * change when consuming / refilling buffer. */
+    U8 *srcBuffer;
+    size_t srcBufferLoaded;
+
+    /* We need to know what tasks completed so we can use their buffers when their time comes.
+     * Should only be accessed after locking base.ioJobsMutex . */
+    void* completedJobs[MAX_IO_JOBS];
+    int completedJobsCount;
+    ZSTD_pthread_cond_t jobCompletedCond;
+} ReadPoolCtx_t;
+
+typedef struct {
+    IOPoolCtx_t base;
+    unsigned storedSkips;
+} WritePoolCtx_t;
+
+typedef struct {
+    /* These fields are automatically set and shouldn't be changed by non WritePool code. */
+    void *ctx;
+    FILE* file;
+    void *buffer;
+    size_t bufferSize;
+
+    /* This field should be changed before a job is queued for execution and should contain the number
+     * of bytes to write from the buffer. */
+    size_t usedBufferSize;
+    U64 offset;
+} IOJob_t;
+
+/* AIO_supported:
+ * Returns 1 if AsyncIO is supported on the system, 0 otherwise. */
+int AIO_supported(void);
+
+
+/* AIO_WritePool_releaseIoJob:
+ * Releases an acquired job back to the pool. Doesn't execute the job. */
+void AIO_WritePool_releaseIoJob(IOJob_t *job);
+
+/* AIO_WritePool_acquireJob:
+ * Returns an available write job to be used for a future write. */
+IOJob_t* AIO_WritePool_acquireJob(WritePoolCtx_t *ctx);
+
+/* AIO_WritePool_enqueueAndReacquireWriteJob:
+ * Enqueues a write job for execution and acquires a new one.
+ * On return, `job` points to the newly acquired job.
+ * Make sure to set `usedBufferSize` to the wanted length before the call.
+ * The queued job shouldn't be used directly after queueing it. */
+void AIO_WritePool_enqueueAndReacquireWriteJob(IOJob_t **job);
+
+/* AIO_WritePool_sparseWriteEnd:
+ * Ends sparse writes to the current file.
+ * Blocks on completion of all current write jobs before executing. */
+void AIO_WritePool_sparseWriteEnd(WritePoolCtx_t *ctx);
+
+/* AIO_WritePool_setFile:
+ * Sets the destination file for future writes in the pool.
+ * Requires completion of all queued write jobs and release of all otherwise acquired jobs.
+ * Also requires ending of sparse write if a previous file was used in sparse mode. */
+void AIO_WritePool_setFile(WritePoolCtx_t *ctx, FILE* file);
+
+/* AIO_WritePool_getFile:
+ * Returns the file the writePool is currently set to write to. */
+FILE* AIO_WritePool_getFile(const WritePoolCtx_t* ctx);
+
+/* AIO_WritePool_closeFile:
+ * Ends sparse write, closes the writePool's current file, and sets the file to NULL.
+ * Requires completion of all queued write jobs and release of all otherwise acquired jobs. */
+int AIO_WritePool_closeFile(WritePoolCtx_t *ctx);
+
+/* AIO_WritePool_create:
+ * Allocates and initializes a new write pool, including its jobs.
+ * bufferSize should be set to the maximal buffer we want to write to at a time. */
+WritePoolCtx_t* AIO_WritePool_create(const FIO_prefs_t* prefs, size_t bufferSize);
+
+/* AIO_WritePool_free:
+ * Frees and releases a writePool and its resources. Closes the destination file. */
+void AIO_WritePool_free(WritePoolCtx_t* ctx);
+
+/* AIO_WritePool_setAsync:
+ * Allows (de)activating async mode, to be used when the expected overhead
+ * of asyncio costs more than the expected gains. */
+void AIO_WritePool_setAsync(WritePoolCtx_t* ctx, int async);
+
+/* AIO_ReadPool_create:
+ * Allocates and initializes a new readPool, including its jobs.
+ * bufferSize should be set to the maximal buffer we want to read at a time; it will also be used
+ * as our basic read size. */
+ReadPoolCtx_t* AIO_ReadPool_create(const FIO_prefs_t* prefs, size_t bufferSize);
+
+/* AIO_ReadPool_free:
+ * Frees and releases a readPool and its resources. Closes the source file. */
+void AIO_ReadPool_free(ReadPoolCtx_t* ctx);
+
+/* AIO_ReadPool_setAsync:
+ * Allows (de)activating async mode, to be used when the expected overhead
+ * of asyncio costs more than the expected gains. */
+void AIO_ReadPool_setAsync(ReadPoolCtx_t* ctx, int async);
+
+/* AIO_ReadPool_consumeBytes:
+ * Consumes bytes from srcBuffer's beginning and updates srcBufferLoaded accordingly. */
+void AIO_ReadPool_consumeBytes(ReadPoolCtx_t *ctx, size_t n);
+
+/* AIO_ReadPool_fillBuffer:
+ * Makes sure the buffer has at least n bytes loaded (as long as n is not bigger than the initialized bufferSize).
+ * Returns when srcBuffer has at least n bytes loaded, or when we've reached the end of the file.
+ * Return value is the number of bytes added to the buffer.
+ * Note that srcBuffer might have up to 2 times bufferSize bytes. */
+size_t AIO_ReadPool_fillBuffer(ReadPoolCtx_t *ctx, size_t n);
+
+/* AIO_ReadPool_consumeAndRefill:
+ * Consumes the current buffer and refills it with bufferSize bytes. */
+size_t AIO_ReadPool_consumeAndRefill(ReadPoolCtx_t *ctx);
+
+/* AIO_ReadPool_setFile:
+ * Sets the source file for future reads in the pool. Initiates reading immediately if file is not NULL.
+ * Waits for all currently enqueued tasks to complete if a previous file was set. */
+void AIO_ReadPool_setFile(ReadPoolCtx_t *ctx, FILE* file);
+
+/* AIO_ReadPool_getFile:
+ * Returns the current file set for the read pool. */
+FILE* AIO_ReadPool_getFile(const ReadPoolCtx_t *ctx);
+
+/* AIO_ReadPool_closeFile:
+ * Closes the currently set file. Waits for all currently enqueued tasks to complete and resets state. */
+int AIO_ReadPool_closeFile(ReadPoolCtx_t *ctx);
+
+#endif /* ZSTD_FILEIO_ASYNCIO_H */
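Since both pools expose a setAsync switch, a caller can make the threading decision per file; asynchronous mode only pays off once a file is comfortably larger than one job buffer. A hedged sketch of such a policy (the threshold and function are illustrative, not part of the patch):

    /* Hypothetical policy: disable async IO for tiny inputs, where thread
     * hand-off overhead exceeds any overlap gains. */
    static void configurePools(ReadPoolCtx_t* rp, WritePoolCtx_t* wp,
                               U64 srcSize, size_t jobBufferSize) {
        int const useAsync = AIO_supported() && (srcSize > 4 * (U64)jobBufferSize);
        AIO_ReadPool_setAsync(rp, useAsync);
        AIO_WritePool_setAsync(wp, useAsync);
    }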
diff --git a/programs/fileio_common.h b/programs/fileio_common.h
new file mode 100644
index 00000000000..8aa70edea81
--- /dev/null
+++ b/programs/fileio_common.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_FILEIO_COMMON_H
+#define ZSTD_FILEIO_COMMON_H
+
+#include "../lib/common/mem.h"     /* U32, U64 */
+#include "fileio_types.h"
+#include "platform.h"
+#include "timefn.h"                /* UTIL_getTime, UTIL_clockSpanMicro */
+
+/*-*************************************
+*  Macros
+***************************************/
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+#undef MAX
+#define MAX(a,b) ((a)>(b) ? (a) : (b))
+#undef MIN  /* in case it would be already defined */
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+
+extern FIO_display_prefs_t g_display_prefs;
+
+#define DISPLAY_F(f, ...) fprintf((f), __VA_ARGS__)
+#define DISPLAYOUT(...)   DISPLAY_F(stdout, __VA_ARGS__)
+#define DISPLAY(...)      DISPLAY_F(stderr, __VA_ARGS__)
+#define DISPLAYLEVEL(l, ...) { if (g_display_prefs.displayLevel>=l) { DISPLAY(__VA_ARGS__); } }
+
+extern UTIL_time_t g_displayClock;
+
+#define REFRESH_RATE ((U64)(SEC_TO_MICRO / 6))
+#define READY_FOR_UPDATE() (UTIL_clockSpanMicro(g_displayClock) > REFRESH_RATE || g_display_prefs.displayLevel >= 4)
+#define DELAY_NEXT_UPDATE() { g_displayClock = UTIL_getTime(); }
+#define DISPLAYUPDATE(l, ...) {                                                               \
+        if (g_display_prefs.displayLevel>=l && (g_display_prefs.progressSetting != FIO_ps_never)) { \
+            if (READY_FOR_UPDATE()) {                                                         \
+                DELAY_NEXT_UPDATE();                                                          \
+                DISPLAY(__VA_ARGS__);                                                         \
+                if (g_display_prefs.displayLevel>=4) fflush(stderr);                          \
+    }   }   }
+
+#define SHOULD_DISPLAY_SUMMARY() \
+    (g_display_prefs.displayLevel >= 2 || g_display_prefs.progressSetting == FIO_ps_always)
+#define SHOULD_DISPLAY_PROGRESS() \
+    (g_display_prefs.progressSetting != FIO_ps_never && SHOULD_DISPLAY_SUMMARY())
+#define DISPLAY_PROGRESS(...)        { if (SHOULD_DISPLAY_PROGRESS()) { DISPLAYLEVEL(1, __VA_ARGS__); }}
+#define DISPLAYUPDATE_PROGRESS(...)  { if (SHOULD_DISPLAY_PROGRESS()) { DISPLAYUPDATE(1, __VA_ARGS__); }}
+#define DISPLAY_SUMMARY(...)         { if (SHOULD_DISPLAY_SUMMARY()) { DISPLAYLEVEL(1, __VA_ARGS__); } }
+
+#define EXM_THROW(error, ...)                                                 \
+{                                                                             \
+    DISPLAYLEVEL(1, "zstd: ");                                                \
+    DISPLAYLEVEL(5, "Error defined at %s, line %i : \n", __FILE__, __LINE__); \
+    DISPLAYLEVEL(1, "error %i : ", error);                                    \
+    DISPLAYLEVEL(1, __VA_ARGS__);                                             \
+    DISPLAYLEVEL(1, " \n");                                                   \
+    exit(error);                                                              \
+}
+
+#define CHECK_V(v, f)                                \
+    v = f;                                           \
+    if (ZSTD_isError(v)) {                           \
+        DISPLAYLEVEL(5, "%s \n", #f);                \
+        EXM_THROW(11, "%s", ZSTD_getErrorName(v));   \
+    }
+#define CHECK(f) { size_t err; CHECK_V(err, f); }
+
+
+/* Avoid fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW */
+#if defined(LIBC_NO_FSEEKO)
+/* Some older libc implementations don't include these functions (e.g. Bionic < 24) */
+#  define LONG_SEEK fseek
+#  define LONG_TELL ftell
+#elif defined(_MSC_VER) && _MSC_VER >= 1400
+#  define LONG_SEEK _fseeki64
+#  define LONG_TELL _ftelli64
+#elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */
+#  define LONG_SEEK fseeko
+#  define LONG_TELL ftello
+#elif defined(__MINGW32__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) && defined(__MSVCRT__)
+#  define LONG_SEEK fseeko64
+#  define LONG_TELL ftello64
+#elif defined(_WIN32) && !defined(__DJGPP__)
+#  include <windows.h>
+   static int LONG_SEEK(FILE* file, __int64 offset, int origin) {
+       LARGE_INTEGER off;
+       DWORD method;
+       off.QuadPart = offset;
+       if (origin == SEEK_END)
+           method = FILE_END;
+       else if (origin == SEEK_CUR)
+           method = FILE_CURRENT;
+       else
+           method = FILE_BEGIN;
+
+       if (SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)), off, NULL, method))
+           return 0;
+       else
+           return -1;
+   }
+   static __int64 LONG_TELL(FILE* file) {
+       LARGE_INTEGER off, newOff;
+       off.QuadPart = 0;
+       newOff.QuadPart = 0;
+       SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)), off, &newOff, FILE_CURRENT);
+       return newOff.QuadPart;
+   }
+#else
+#  define LONG_SEEK fseek
+#  define LONG_TELL ftell
+#endif
+
+#endif /* ZSTD_FILEIO_COMMON_H */
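The point of the LONG_SEEK/LONG_TELL indirection is that plain fseek()/ftell() traffic in `long`, which is 32-bit on Windows and many 32-bit Unix targets, so offsets at or beyond 2 GiB overflow. A hedged sketch of the kind of call site this enables (offset value illustrative):

    /* Skipping past a 4 GiB region: a plain fseek() with a 32-bit long
     * cannot represent this offset; LONG_SEEK resolves to _fseeki64,
     * fseeko, fseeko64 or SetFilePointerEx as appropriate. */
    {   long long const bigSkip = 1LL << 32;   /* 4 GiB, illustrative */
        if (LONG_SEEK(f, bigSkip, SEEK_CUR) != 0)
            EXM_THROW(69, "cannot seek past 4 GiB : %s", strerror(errno));
    }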
diff --git a/programs/fileio_types.h b/programs/fileio_types.h
new file mode 100644
index 00000000000..9bbb5154979
--- /dev/null
+++ b/programs/fileio_types.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef FILEIO_TYPES_HEADER
+#define FILEIO_TYPES_HEADER
+
+#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_compressionParameters */
+#include "../lib/zstd.h"           /* ZSTD_* */
+
+/*-*************************************
+*  Parameters: FIO_prefs_t
+***************************************/
+
+typedef struct FIO_display_prefs_s FIO_display_prefs_t;
+
+typedef enum { FIO_ps_auto, FIO_ps_never, FIO_ps_always } FIO_progressSetting_e;
+
+struct FIO_display_prefs_s {
+    int displayLevel;   /* 0 : no display;  1: errors;  2: + result + interaction + warnings;  3: + progression;  4: + information */
+    FIO_progressSetting_e progressSetting;
+};
+
+
+typedef enum { FIO_zstdCompression, FIO_gzipCompression, FIO_xzCompression, FIO_lzmaCompression, FIO_lz4Compression } FIO_compressionType_t;
+
+typedef struct FIO_prefs_s {
+
+    /* Algorithm preferences */
+    FIO_compressionType_t compressionType;
+    int sparseFileSupport;   /* 0: no sparse allowed; 1: auto (file yes, stdout no); 2: force sparse */
+    int dictIDFlag;
+    int checksumFlag;
+    int jobSize;
+    int overlapLog;
+    int adaptiveMode;
+    int useRowMatchFinder;
+    int rsyncable;
+    int minAdaptLevel;
+    int maxAdaptLevel;
+    int ldmFlag;
+    int ldmHashLog;
+    int ldmMinMatch;
+    int ldmBucketSizeLog;
+    int ldmHashRateLog;
+    size_t streamSrcSize;
+    size_t targetCBlockSize;
+    int srcSizeHint;
+    int testMode;
+    ZSTD_ParamSwitch_e literalCompressionMode;
+
+    /* IO preferences */
+    int removeSrcFile;
+    int overwrite;
+    int asyncIO;
+
+    /* Computation resources preferences */
+    unsigned memLimit;
+    int nbWorkers;
+
+    int excludeCompressedFiles;
+    int patchFromMode;
+    int contentSize;
+    int allowBlockDevices;
+    int passThrough;
+    ZSTD_ParamSwitch_e mmapDict;
+} FIO_prefs_t;
+
+typedef enum {FIO_mallocDict, FIO_mmapDict} FIO_dictBufferType_t;
+
+typedef struct {
+    void* dictBuffer;
+    size_t dictBufferSize;
+    FIO_dictBufferType_t dictBufferType;
+#if defined(_MSC_VER) || defined(_WIN32)
+    HANDLE dictHandle;
+#endif
+} FIO_Dict_t;
+
+#endif /* FILEIO_TYPES_HEADER */
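FIO_prefs_t is a plain bag of knobs that the fileio layer reads; the two fields this patch series leans on are sparseFileSupport and asyncIO. A hedged initialization sketch (zero-init plus the relevant fields; style illustrative, not the CLI's actual setup code):

    /* Illustrative: sparse in auto mode, async IO when the build supports it. */
    FIO_prefs_t prefs;
    memset(&prefs, 0, sizeof(prefs));
    prefs.sparseFileSupport = 1;        /* 1 == auto : sparse for files, not stdout */
    prefs.asyncIO = AIO_supported();    /* 1 only in ZSTD_MULTITHREAD builds */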
diff --git a/programs/lorem.c b/programs/lorem.c
new file mode 100644
index 00000000000..79030c92f32
--- /dev/null
+++ b/programs/lorem.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* Implementation notes:
+ *
+ * This is a very simple lorem ipsum generator
+ * which features a static list of words
+ * and prints them one after another randomly
+ * with a fake sentence / paragraph structure.
+ *
+ * The goal is to generate a printable text
+ * that can be used to fake a text compression scenario.
+ * The resulting compression / ratio curve of the lorem ipsum generator
+ * is more satisfying than the previous statistical generator,
+ * which was initially designed for entropy compression,
+ * and lacks a regularity more representative of text.
+ *
+ * The compression ratio achievable on the generated lorem ipsum
+ * is still a bit too good, presumably because the dictionary is a bit too
+ * small. It would be possible to create some more complex scheme, notably by
+ * enlarging the dictionary with a word generator, and adding grammatical rules
+ * (composition) and syntax rules. But that's probably overkill for the intended
+ * goal.
+ */
+
+#include "lorem.h"
+#include <assert.h>
+#include <limits.h>   /* INT_MAX */
+#include <string.h>   /* memcpy */
+
+#define WORD_MAX_SIZE 20
+
+/* Define the word pool */
+static const char* kWords[] = {
+    "lorem",        "ipsum",      "dolor",       "sit",          "amet",
+    "consectetur",  "adipiscing", "elit",        "sed",          "do",
+    "eiusmod",      "tempor",     "incididunt",  "ut",           "labore",
+    "et",           "dolore",     "magna",       "aliqua",       "dis",
+    "lectus",       "vestibulum", "mattis",      "ullamcorper",  "velit",
+    "commodo",      "a",          "lacus",       "arcu",         "magnis",
+    "parturient",   "montes",     "nascetur",    "ridiculus",    "mus",
+    "mauris",       "nulla",      "malesuada",   "pellentesque", "eget",
+    "gravida",      "in",         "dictum",      "non",          "erat",
+    "nam",          "voluptat",   "maecenas",    "blandit",      "aliquam",
+    "etiam",        "enim",       "lobortis",    "scelerisque",  "fermentum",
+    "dui",          "faucibus",   "ornare",      "at",           "elementum",
+    "eu",           "facilisis",  "odio",        "morbi",        "quis",
+    "eros",         "donec",      "ac",          "orci",         "purus",
+    "turpis",       "cursus",     "leo",         "vel",          "porta",
+    "consequat",    "interdum",   "varius",      "vulputate",    "aliquet",
+    "pharetra",     "nunc",       "auctor",      "urna",         "id",
+    "metus",        "viverra",    "nibh",        "cras",         "mi",
+    "unde",         "omnis",      "iste",        "natus",        "error",
+    "perspiciatis", "voluptatem", "accusantium", "doloremque",   "laudantium",
+    "totam",        "rem",        "aperiam",     "eaque",        "ipsa",
+    "quae",         "ab",         "illo",        "inventore",    "veritatis",
+    "quasi",        "architecto", "beatae",      "vitae",        "dicta",
+    "sunt",         "explicabo",  "nemo",        "ipsam",        "quia",
+    "voluptas",     "aspernatur", "aut",         "odit",         "fugit",
+    "consequuntur", "magni",      "dolores",     "eos",          "qui",
+    "ratione",      "sequi",      "nesciunt",    "neque",        "porro",
+    "quisquam",     "est",        "dolorem",     "adipisci",     "numquam",
+    "eius",         "modi",       "tempora",     "incidunt",     "magnam",
+    "quaerat",      "ad",         "minima",      "veniam",       "nostrum",
+    "ullam",        "corporis",   "suscipit",    "laboriosam",   "nisi",
+    "aliquid",      "ex",         "ea",          "commodi",      "consequatur",
+    "autem",        "eum",        "iure",        "voluptate",    "esse",
+    "quam",         "nihil",      "molestiae",   "illum",        "fugiat",
+    "quo",          "pariatur",   "vero",        "accusamus",    "iusto",
+    "dignissimos",  "ducimus",    "blanditiis",  "praesentium",  "voluptatum",
+    "deleniti",     "atque",      "corrupti",    "quos",         "quas",
+    "molestias",    "excepturi",  "sint",        "occaecati",    "cupiditate",
+    "provident",    "similique",  "culpa",       "officia",      "deserunt",
+    "mollitia",     "animi",      "laborum",     "dolorum",      "fuga",
+    "harum",        "quidem",     "rerum",       "facilis",      "expedita",
+    "distinctio",   "libero",     "tempore",     "cum",          "soluta",
+    "nobis",        "eligendi",   "optio",       "cumque",       "impedit",
+    "minus",        "quod",       "maxime",      "placeat",      "facere",
+    "possimus",     "assumenda",  "repellendus", "temporibus",   "quibusdam",
+    "officiis",     "debitis",    "saepe",       "eveniet",      "voluptates",
+    "repudiandae",  "recusandae", "itaque",      "earum",        "hic",
+    "tenetur",      "sapiente",   "delectus",    "reiciendis",   "cillum",
+    "maiores",      "alias",      "perferendis", "doloribus",    "asperiores",
+    "repellat",     "minim",      "nostrud",     "exercitation", "ullamco",
+    "laboris",      "aliquip",    "duis",        "aute",         "irure",
+};
+static const unsigned kNbWords = sizeof(kWords) / sizeof(kWords[0]);
+
+/* simple 1-dimension distribution, based on word length; favors small words
+ */
+static const int kWeights[]    = { 0, 8, 6, 4, 3, 2 };
+static const size_t kNbWeights =
sizeof(kWeights) / sizeof(kWeights[0]); + +#define DISTRIB_SIZE_MAX 650 +static int g_distrib[DISTRIB_SIZE_MAX] = { 0 }; +static unsigned g_distribCount = 0; + +static void countFreqs( + const char* words[], + size_t nbWords, + const int* weights, + size_t nbWeights) +{ + unsigned total = 0; + size_t w; + for (w = 0; w < nbWords; w++) { + size_t len = strlen(words[w]); + int lmax; + if (len >= nbWeights) + len = nbWeights - 1; + lmax = weights[len]; + total += (unsigned)lmax; + } + g_distribCount = total; + assert(g_distribCount <= DISTRIB_SIZE_MAX); +} + +static void init_word_distrib( + const char* words[], + size_t nbWords, + const int* weights, + size_t nbWeights) +{ + size_t w, d = 0; + countFreqs(words, nbWords, weights, nbWeights); + for (w = 0; w < nbWords; w++) { + size_t len = strlen(words[w]); + int l, lmax; + if (len >= nbWeights) + len = nbWeights - 1; + lmax = weights[len]; + for (l = 0; l < lmax; l++) { + g_distrib[d++] = (int)w; + } + } +} + +/* Note: this unit only works when invoked sequentially. + * No concurrent access is allowed */ +static char* g_ptr = NULL; +static size_t g_nbChars = 0; +static size_t g_maxChars = 10000000; +static unsigned g_randRoot = 0; + +#define RDG_rotl32(x, r) ((x << r) | (x >> (32 - r))) +static unsigned LOREM_rand(unsigned range) +{ + static const unsigned prime1 = 2654435761U; + static const unsigned prime2 = 2246822519U; + unsigned rand32 = g_randRoot; + rand32 *= prime1; + rand32 ^= prime2; + rand32 = RDG_rotl32(rand32, 13); + g_randRoot = rand32; + return (unsigned)(((unsigned long long)rand32 * range) >> 32); +} + +static void writeLastCharacters(void) +{ + size_t lastChars = g_maxChars - g_nbChars; + assert(g_maxChars >= g_nbChars); + if (lastChars == 0) + return; + g_ptr[g_nbChars++] = '.'; + if (lastChars > 2) { + memset(g_ptr + g_nbChars, ' ', lastChars - 2); + } + if (lastChars > 1) { + g_ptr[g_maxChars - 1] = '\n'; + } + g_nbChars = g_maxChars; +} + +static void generateWord(const char* word, const char* separator, int upCase) +{ + size_t const len = strlen(word) + strlen(separator); + if (g_nbChars + len > g_maxChars) { + writeLastCharacters(); + return; + } + memcpy(g_ptr + g_nbChars, word, strlen(word)); + if (upCase) { + static const char toUp = 'A' - 'a'; + g_ptr[g_nbChars] = (char)(g_ptr[g_nbChars] + toUp); + } + g_nbChars += strlen(word); + memcpy(g_ptr + g_nbChars, separator, strlen(separator)); + g_nbChars += strlen(separator); +} + +static int about(unsigned target) +{ + return (int)(LOREM_rand(target) + LOREM_rand(target) + 1); +} + +/* Function to generate a random sentence */ +static void generateSentence(int nbWords) +{ + int commaPos = about(9); + int comma2 = commaPos + about(7); + int qmark = (LOREM_rand(11) == 7); + const char* endSep = qmark ? "? " : ". 
"; + int i; + for (i = 0; i < nbWords; i++) { + int const wordID = g_distrib[LOREM_rand(g_distribCount)]; + const char* const word = kWords[wordID]; + const char* sep = " "; + if (i == commaPos) + sep = ", "; + if (i == comma2) + sep = ", "; + if (i == nbWords - 1) + sep = endSep; + generateWord(word, sep, i == 0); + } +} + +static void generateParagraph(int nbSentences) +{ + int i; + for (i = 0; i < nbSentences; i++) { + int wordsPerSentence = about(11); + generateSentence(wordsPerSentence); + } + if (g_nbChars < g_maxChars) { + g_ptr[g_nbChars++] = '\n'; + } + if (g_nbChars < g_maxChars) { + g_ptr[g_nbChars++] = '\n'; + } +} + +/* It's "common" for lorem ipsum generators to start with the same first + * pre-defined sentence */ +static void generateFirstSentence(void) +{ + int i; + for (i = 0; i < 18; i++) { + const char* word = kWords[i]; + const char* separator = " "; + if (i == 4) + separator = ", "; + if (i == 7) + separator = ", "; + generateWord(word, separator, i == 0); + } + generateWord(kWords[18], ". ", 0); +} + +size_t +LOREM_genBlock(void* buffer, size_t size, unsigned seed, int first, int fill) +{ + g_ptr = (char*)buffer; + assert(size < INT_MAX); + g_maxChars = size; + g_nbChars = 0; + g_randRoot = seed; + if (g_distribCount == 0) { + init_word_distrib(kWords, kNbWords, kWeights, kNbWeights); + } + + if (first) { + generateFirstSentence(); + } + while (g_nbChars < g_maxChars) { + int sentencePerParagraph = about(7); + generateParagraph(sentencePerParagraph); + if (!fill) + break; /* only generate one paragraph in not-fill mode */ + } + g_ptr = NULL; + return g_nbChars; +} + +void LOREM_genBuffer(void* buffer, size_t size, unsigned seed) +{ + LOREM_genBlock(buffer, size, seed, 1, 1); +} diff --git a/programs/lorem.h b/programs/lorem.h new file mode 100644 index 00000000000..4a87f8748a5 --- /dev/null +++ b/programs/lorem.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/* lorem ipsum generator */ + +#include /* size_t */ + +/* + * LOREM_genBuffer(): + * Generate @size bytes of compressible data using lorem ipsum generator + * into provided @buffer. + */ +void LOREM_genBuffer(void* buffer, size_t size, unsigned seed); + +/* + * LOREM_genBlock(): + * Similar to LOREM_genBuffer, with additional controls : + * - @first : generate the first sentence + * - @fill : fill the entire @buffer, + * if ==0: generate one paragraph at most. + * @return : nb of bytes generated into @buffer. + */ +size_t LOREM_genBlock(void* buffer, size_t size, + unsigned seed, + int first, int fill); diff --git a/programs/platform.h b/programs/platform.h index 38ded872743..e2cc1c3e65f 100644 --- a/programs/platform.h +++ b/programs/platform.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -11,17 +11,12 @@ #ifndef PLATFORM_H_MODULE #define PLATFORM_H_MODULE -#if defined (__cplusplus) -extern "C" { -#endif - - - /* ************************************** * Compiler Options ****************************************/ #if defined(_MSC_VER) # define _CRT_SECURE_NO_WARNINGS /* Disable Visual Studio warning messages for fopen, strncpy, strerror */ +# define _CRT_NONSTDC_NO_WARNINGS /* Disable C4996 complaining about posix function names */ # if (_MSC_VER <= 1800) /* 1800 == Visual Studio 2013 */ # define _CRT_SECURE_NO_DEPRECATE /* VS2005 - must be declared before and */ # define snprintf sprintf_s /* snprintf unsupported by Visual <= 2013 */ @@ -32,12 +27,12 @@ extern "C" { /* ************************************** * Detect 64-bit OS -* http://nadeausoftware.com/articles/2012/02/c_c_tip_how_detect_processor_type_using_compiler_predefined_macros +* https://nadeausoftware.com/articles/2012/02/c_c_tip_how_detect_processor_type_using_compiler_predefined_macros ****************************************/ #if defined __ia64 || defined _M_IA64 /* Intel Itanium */ \ || defined __powerpc64__ || defined __ppc64__ || defined __PPC64__ /* POWER 64-bit */ \ || (defined __sparc && (defined __sparcv9 || defined __sparc_v9__ || defined __arch64__)) || defined __sparc64__ /* SPARC 64-bit */ \ - || defined __x86_64__s || defined _M_X64 /* x86 64-bit */ \ + || defined __x86_64__ || defined _M_X64 /* x86 64-bit */ \ || defined __arm64__ || defined __aarch64__ || defined __ARM64_ARCH_8__ /* ARM 64-bit */ \ || (defined __mips && (__mips == 64 || __mips == 4 || __mips == 3)) /* MIPS 64-bit */ \ || defined _LP64 || defined __LP64__ /* NetBSD, OpenBSD */ || defined __64BIT__ /* AIX */ || defined _ADDR64 /* Cray */ \ @@ -73,13 +68,12 @@ extern "C" { ***************************************************************/ #ifndef PLATFORM_POSIX_VERSION -# if (defined(__APPLE__) && defined(__MACH__)) || defined(__SVR4) || defined(_AIX) || defined(__hpux) /* POSIX.1-2001 (SUSv3) conformant */ \ - || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) /* BSD distros */ +# if (defined(__APPLE__) && defined(__MACH__)) || defined(__SVR4) || defined(_AIX) || defined(__hpux) /* POSIX.1-2001 (SUSv3) conformant */ /* exception rule : force posix version to 200112L, * note: it's better to use unistd.h's _POSIX_VERSION whenever possible */ # define PLATFORM_POSIX_VERSION 200112L -/* try to determine posix version through official unistd.h's _POSIX_VERSION (http://pubs.opengroup.org/onlinepubs/7908799/xsh/unistd.h.html). +/* try to determine posix version through official unistd.h's _POSIX_VERSION (https://pubs.opengroup.org/onlinepubs/7908799/xsh/unistd.h.html). * note : there is no simple way to know in advance if is present or not on target system, * Posix specification mandates its presence and its content, but target system must respect this spec. 
 * It's necessary to _not_ #include <unistd.h> whenever target OS is not unix-like
@@ -88,11 +82,11 @@ extern "C" {
  */
 #  elif !defined(_WIN32) \
      && ( defined(__unix__) || defined(__unix) \
-       || defined(__midipix__) || defined(__VMS) || defined(__HAIKU__) )
+       || defined(_QNX_SOURCE) || defined(__midipix__) || defined(__VMS) || defined(__HAIKU__) )
 
-#    if defined(__linux__) || defined(__linux)
+#    if defined(__linux__) || defined(__linux) || defined(__CYGWIN__)
 #      ifndef _POSIX_C_SOURCE
-#        define _POSIX_C_SOURCE 200112L  /* feature test macro : https://www.gnu.org/software/libc/manual/html_node/Feature-Test-Macros.html */
+#        define _POSIX_C_SOURCE 200809L  /* feature test macro : https://www.gnu.org/software/libc/manual/html_node/Feature-Test-Macros.html */
 #      endif
 #    endif
 #    include <unistd.h>   /* declares _POSIX_VERSION */
@@ -102,6 +96,12 @@ extern "C" {
 #      define PLATFORM_POSIX_VERSION 1
 #    endif
 
+#    ifdef __UCLIBC__
+#      ifndef __USE_MISC
+#        define __USE_MISC   /* enable st_mtim on uclibc */
+#      endif
+#    endif
+
 #  else   /* non-unix target platform (like Windows) */
 #    define PLATFORM_POSIX_VERSION 0
 #  endif
@@ -109,26 +109,49 @@ extern "C" {
 
 #endif   /* PLATFORM_POSIX_VERSION */
 
+#if PLATFORM_POSIX_VERSION > 1
+   /* glibc < 2.26 may not expose struct timespec def without this.
+    * See issue #1920. */
+#  ifndef _ATFILE_SOURCE
+#    define _ATFILE_SOURCE
+#  endif
+#endif
+
+
 /*-*********************************************
 *  Detect if isatty() and fileno() are available
+*
+*  Note: Use UTIL_isConsole() for the zstd CLI
+*  instead, as it allows faking the console for
+*  testing.
 ************************************************/
 #if (defined(__linux__) && (PLATFORM_POSIX_VERSION > 1)) \
  || (PLATFORM_POSIX_VERSION >= 200112L) \
- || defined(__DJGPP__) \
- || defined(__MSYS__)
+ || defined(__DJGPP__)
+#  include <unistd.h>   /* isatty */
+#  include <stdio.h>    /* fileno */
 #  define IS_CONSOLE(stdStream) isatty(fileno(stdStream))
-#elif defined(MSDOS) || defined(OS2) || defined(__CYGWIN__)
+#elif defined(MSDOS) || defined(OS2)
 #  include <io.h>       /* _isatty */
 #  define IS_CONSOLE(stdStream) _isatty(_fileno(stdStream))
-#elif defined(WIN32) || defined(_WIN32)
+#elif defined(_WIN32)
 #  include <io.h>       /* _isatty */
 #  include <windows.h>  /* DeviceIoControl, HANDLE, FSCTL_SET_SPARSE */
 #  include <stdio.h>    /* FILE */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
 static __inline int IS_CONSOLE(FILE* stdStream) {
     DWORD dummy;
     return _isatty(_fileno(stdStream)) && GetConsoleMode((HANDLE)_get_osfhandle(_fileno(stdStream)), &dummy);
 }
+
+#if defined (__cplusplus)
+}
+#endif
+
 #else
 #  define IS_CONSOLE(stdStream) 0
 #endif
@@ -137,7 +160,7 @@ static __inline int IS_CONSOLE(FILE* stdStream) {
 /******************************
 *  OS-specific IO behaviors
 ******************************/
-#if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(_WIN32)
+#if defined(MSDOS) || defined(OS2) || defined(_WIN32)
 #  include <fcntl.h>    /* _O_BINARY */
 #  include <io.h>       /* _setmode, _fileno, _get_osfhandle */
 #  if !defined(__DJGPP__)
@@ -176,13 +199,13 @@ static __inline int IS_CONSOLE(FILE* stdStream) {
 
 #ifndef ZSTD_SETPRIORITY_SUPPORT
-   /* mandates presence of <sys/resource.h> and support for setpriority() : http://man7.org/linux/man-pages/man2/setpriority.2.html */
+   /* mandates presence of <sys/resource.h> and support for setpriority() : https://man7.org/linux/man-pages/man2/setpriority.2.html */
 #  define ZSTD_SETPRIORITY_SUPPORT (PLATFORM_POSIX_VERSION >= 200112L)
 #endif
 
 #ifndef ZSTD_NANOSLEEP_SUPPORT
-   /* mandates support of nanosleep() within <time.h> : http://man7.org/linux/man-pages/man2/nanosleep.2.html */
+   /* mandates support of nanosleep() within <time.h> : https://man7.org/linux/man-pages/man2/nanosleep.2.html */
 #  if (defined(__linux__) && (PLATFORM_POSIX_VERSION >= 199309L)) \
    || (PLATFORM_POSIX_VERSION >= 200112L)
 #    define ZSTD_NANOSLEEP_SUPPORT 1
@@ -191,9 +214,4 @@ static __inline int IS_CONSOLE(FILE* stdStream) {
 #  endif
 #endif
 
-
-#if defined (__cplusplus)
-}
-#endif
-
 #endif /* PLATFORM_H_MODULE */
diff --git a/programs/timefn.c b/programs/timefn.c
index 096e1910bf4..d7f330974d3 100644
--- a/programs/timefn.c
+++ b/programs/timefn.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
@@ -12,7 +12,8 @@
 
 /* === Dependencies === */
 #include "timefn.h"
-
+#include "platform.h"   /* set _POSIX_C_SOURCE */
+#include <time.h>       /* CLOCK_MONOTONIC, TIME_UTC */
 
 /*-****************************************
 *  Time functions
@@ -20,46 +21,37 @@
 
 #if defined(_WIN32)   /* Windows */
 
+#include <windows.h>  /* LARGE_INTEGER */
 #include <stdlib.h>   /* abort */
 #include <stdio.h>    /* perror */
 
-UTIL_time_t UTIL_getTime(void) { UTIL_time_t x; QueryPerformanceCounter(&x); return x; }
-
-PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd)
+UTIL_time_t UTIL_getTime(void)
 {
     static LARGE_INTEGER ticksPerSecond;
+    static double nsFactor = 1.0;
     static int init = 0;
     if (!init) {
         if (!QueryPerformanceFrequency(&ticksPerSecond)) {
             perror("timefn::QueryPerformanceFrequency");
             abort();
         }
+        nsFactor = 1000000000.0 / (double)ticksPerSecond.QuadPart;
         init = 1;
     }
-    return 1000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart;
-}
-
-PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd)
-{
-    static LARGE_INTEGER ticksPerSecond;
-    static int init = 0;
-    if (!init) {
-        if (!QueryPerformanceFrequency(&ticksPerSecond)) {
-            perror("timefn::QueryPerformanceFrequency");
-            abort();
-        }
-        init = 1;
+    {   UTIL_time_t r;
+        LARGE_INTEGER x;
+        QueryPerformanceCounter(&x);
+        r.t = (PTime)((double)x.QuadPart * nsFactor);
+        return r;
     }
-    return 1000000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart;
 }
 
-
 #elif defined(__APPLE__) && defined(__MACH__)
 
-UTIL_time_t UTIL_getTime(void) { return mach_absolute_time(); }
+#include <mach/mach_time.h>   /* mach_timebase_info_data_t, mach_timebase_info, mach_absolute_time */
 
-PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd)
+UTIL_time_t UTIL_getTime(void)
 {
     static mach_timebase_info_data_t rate;
     static int init = 0;
@@ -67,24 +59,41 @@
         mach_timebase_info(&rate);
         init = 1;
     }
-    return (((clockEnd - clockStart) * (PTime)rate.numer) / ((PTime)rate.denom))/1000ULL;
+    {   UTIL_time_t r;
+        r.t = mach_absolute_time() * (PTime)rate.numer / (PTime)rate.denom;
+        return r;
+    }
 }
 
-PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd)
-{
-    static mach_timebase_info_data_t rate;
-    static int init = 0;
-    if (!init) {
-        mach_timebase_info(&rate);
-        init = 1;
-    }
-    return ((clockEnd - clockStart) * (PTime)rate.numer) / ((PTime)rate.denom);
-}
-
+/* POSIX.1-2001 (optional) */
+#elif defined(CLOCK_MONOTONIC)
+
+#include <stdlib.h>   /* abort */
+#include <stdio.h>    /* perror */
+
+UTIL_time_t UTIL_getTime(void)
+{
+    /* time must be initialized, otherwise it may fail msan test.
+     * No good reason, likely a limitation of timespec_get() for some target */
+    struct timespec time = { 0, 0 };
+    if (clock_gettime(CLOCK_MONOTONIC, &time) != 0) {
+        perror("timefn::clock_gettime(CLOCK_MONOTONIC)");
+        abort();
+    }
+    {   UTIL_time_t r;
+        r.t = (PTime)time.tv_sec * 1000000000ULL + (PTime)time.tv_nsec;
+        return r;
+    }
+}
 
+/* C11 requires support of timespec_get().
+ * However, FreeBSD 11 claims C11 compliance while lacking timespec_get().
+ * Double confirm timespec_get() support by checking the definition of TIME_UTC.
+ * However, some versions of Android manage to simultaneously define TIME_UTC
+ * and lack timespec_get() support... */
 #elif (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */) \
-    && defined(TIME_UTC) /* C11 requires timespec_get, but FreeBSD 11 lacks it, while still claiming C11 compliance */
+    && defined(TIME_UTC) && !defined(__ANDROID__)
 
 #include <stdlib.h>   /* abort */
 #include <stdio.h>    /* perror */
@@ -93,65 +102,49 @@
 UTIL_time_t UTIL_getTime(void)
 {
     /* time must be initialized, otherwise it may fail msan test.
      * No good reason, likely a limitation of timespec_get() for some target */
-    UTIL_time_t time = UTIL_TIME_INITIALIZER;
+    struct timespec time = { 0, 0 };
     if (timespec_get(&time, TIME_UTC) != TIME_UTC) {
-        perror("timefn::timespec_get");
+        perror("timefn::timespec_get(TIME_UTC)");
         abort();
     }
-    return time;
-}
-
-static UTIL_time_t UTIL_getSpanTime(UTIL_time_t begin, UTIL_time_t end)
-{
-    UTIL_time_t diff;
-    if (end.tv_nsec < begin.tv_nsec) {
-        diff.tv_sec = (end.tv_sec - 1) - begin.tv_sec;
-        diff.tv_nsec = (end.tv_nsec + 1000000000ULL) - begin.tv_nsec;
-    } else {
-        diff.tv_sec = end.tv_sec - begin.tv_sec;
-        diff.tv_nsec = end.tv_nsec - begin.tv_nsec;
+    {   UTIL_time_t r;
+        r.t = (PTime)time.tv_sec * 1000000000ULL + (PTime)time.tv_nsec;
+        return r;
     }
-    return diff;
 }
 
-PTime UTIL_getSpanTimeMicro(UTIL_time_t begin, UTIL_time_t end)
-{
-    UTIL_time_t const diff = UTIL_getSpanTime(begin, end);
-    PTime micro = 0;
-    micro += 1000000ULL * diff.tv_sec;
-    micro += diff.tv_nsec / 1000ULL;
-    return micro;
-}
 
-PTime UTIL_getSpanTimeNano(UTIL_time_t begin, UTIL_time_t end)
+#else   /* relies on standard C90 (note : clock_t produces wrong measurements for multi-threaded workloads) */
+
+UTIL_time_t UTIL_getTime(void)
 {
-    UTIL_time_t const diff = UTIL_getSpanTime(begin, end);
-    PTime nano = 0;
-    nano += 1000000000ULL * diff.tv_sec;
-    nano += diff.tv_nsec;
-    return nano;
+    UTIL_time_t r;
+    r.t = (PTime)clock() * 1000000000ULL / CLOCKS_PER_SEC;
+    return r;
 }
-
-
-#else   /* relies on standard C90 (note : clock_t measurements can be wrong when using multi-threading) */
-
-UTIL_time_t UTIL_getTime(void) { return clock(); }
-PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; }
-PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; }
+#define TIME_MT_MEASUREMENTS_NOT_SUPPORTED
 
 #endif
 
+/* ==== Common functions, valid for all time API ==== */
+PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd)
+{
+    return clockEnd.t - clockStart.t;
+}
+
+PTime UTIL_getSpanTimeMicro(UTIL_time_t begin, UTIL_time_t end)
+{
+    return UTIL_getSpanTimeNano(begin, end) / 1000ULL;
+}
 
-/* returns time span in microseconds */
 PTime UTIL_clockSpanMicro(UTIL_time_t clockStart )
{
     UTIL_time_t const clockEnd = UTIL_getTime();
     return UTIL_getSpanTimeMicro(clockStart, clockEnd);
 }
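With every backend now normalized to a single nanosecond counter inside UTIL_time_t, span computation collapses to one subtraction, and measurement code follows one pattern everywhere. A minimal hedged sketch (the workload function is hypothetical):

    /* Illustrative measurement: wait for a fresh tick (helps on coarse
     * clocks), run the workload, then read the elapsed span. */
    UTIL_time_t start;
    UTIL_waitForNextTick();
    start = UTIL_getTime();
    runWorkload();                                  /* hypothetical function under test */
    {   PTime const elapsedNs = UTIL_clockSpanNano(start);
        printf("%llu ns\n", (unsigned long long)elapsedNs);
    }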
 
-/* returns time span in microseconds */
 PTime UTIL_clockSpanNano(UTIL_time_t clockStart )
 {
     UTIL_time_t const clockEnd = UTIL_getTime();
@@ -166,3 +159,12 @@ void UTIL_waitForNextTick(void)
         clockEnd = UTIL_getTime();
     } while (UTIL_getSpanTimeNano(clockStart, clockEnd) == 0);
 }
+
+int UTIL_support_MT_measurements(void)
+{
+#  if defined(TIME_MT_MEASUREMENTS_NOT_SUPPORTED)
+    return 0;
+#  else
+    return 1;
+#  endif
+}
diff --git a/programs/timefn.h b/programs/timefn.h
index d1ddd31b1c0..80f72e228a3 100644
--- a/programs/timefn.h
+++ b/programs/timefn.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
@@ -11,79 +11,49 @@
 #ifndef TIME_FN_H_MODULE_287987
 #define TIME_FN_H_MODULE_287987
 
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
 /*-****************************************
-*  Dependencies
-******************************************/
-#include <sys/types.h>    /* utime */
-#if defined(_MSC_VER)
-#  include <sys/utime.h>  /* utime */
-#else
-#  include <utime.h>      /* utime */
-#endif
-#include <time.h>         /* clock_t, clock, CLOCKS_PER_SEC */
-
-
-
-/*-****************************************
-*  Local Types
+*  Types
 ******************************************/
 #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-#  include <stdint.h>
+#  if defined(_AIX)
+#    include <inttypes.h>
+#  else
+#    include <stdint.h>   /* uint64_t */
+#  endif
    typedef uint64_t PTime;   /* Precise Time */
 #else
    typedef unsigned long long PTime;   /* does not support compilers without long long support */
 #endif
 
+/* UTIL_time_t contains a nanosecond time counter.
+ * The absolute value is not meaningful.
+ * It's only valid to compute the difference between 2 measurements. */
+typedef struct { PTime t; } UTIL_time_t;
+#define UTIL_TIME_INITIALIZER { 0 }
 
 /*-****************************************
 *  Time functions
 ******************************************/
-#if defined(_WIN32)   /* Windows */
-
-    #include <windows.h>   /* LARGE_INTEGER */
-    typedef LARGE_INTEGER UTIL_time_t;
-    #define UTIL_TIME_INITIALIZER { { 0, 0 } }
-
-#elif defined(__APPLE__) && defined(__MACH__)
-
-    #include <mach/mach_time.h>
-    typedef PTime UTIL_time_t;
-    #define UTIL_TIME_INITIALIZER 0
-
-#elif (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */) \
-    && defined(TIME_UTC) /* C11 requires timespec_get, but FreeBSD 11 lacks it, while still claiming C11 compliance */
-
-    typedef struct timespec UTIL_time_t;
-    #define UTIL_TIME_INITIALIZER { 0, 0 }
-
-#else   /* relies on standard C90 (note : clock_t measurements can be wrong when using multi-threading) */
-
-    typedef clock_t UTIL_time_t;
-    #define UTIL_TIME_INITIALIZER 0
-
-#endif
 
+UTIL_time_t UTIL_getTime(void);
 
+/* Timer resolution can be low on some platforms.
+ * To improve accuracy, it's recommended to wait for a new tick
+ * before starting benchmark measurements */
+void UTIL_waitForNextTick(void);
 
+/* tells if timefn will return correct time measurements
+ * in presence of multi-threaded workload.
+ * note : this is not the case if only C90 clock_t measurements are available */
+int UTIL_support_MT_measurements(void);
 
-UTIL_time_t UTIL_getTime(void);
-PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd);
 PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd);
-
-#define SEC_TO_MICRO ((PTime)1000000)
-PTime UTIL_clockSpanMicro(UTIL_time_t clockStart);
 PTime UTIL_clockSpanNano(UTIL_time_t clockStart);
-void UTIL_waitForNextTick(void);
-
+PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd);
+PTime UTIL_clockSpanMicro(UTIL_time_t clockStart);
 
-#if defined (__cplusplus)
-}
-#endif
+#define SEC_TO_MICRO ((PTime)1000000)   /* nb of microseconds in a second */
 
 #endif /* TIME_FN_H_MODULE_287987 */
diff --git a/programs/util.c b/programs/util.c
index 6190bca5027..652530b1223 100644
--- a/programs/util.c
+++ b/programs/util.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
@@ -8,159 +8,915 @@
 * You may select, at your option, one of the above-listed licenses.
 */
 
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
 /*-****************************************
 *  Dependencies
 ******************************************/
 #include "util.h"       /* note : ensure that platform.h is included first ! */
+#include <stdlib.h>     /* malloc, realloc, free */
+#include <stdio.h>      /* fprintf */
+#include <time.h>       /* clock_t, clock, CLOCKS_PER_SEC, nanosleep */
 #include <errno.h>
 #include <assert.h>
+#if defined(__FreeBSD__)
+#include <sys/param.h>  /* __FreeBSD_version */
+#endif /* #ifdef __FreeBSD__ */
+
+#if defined(_WIN32)
+#  include <sys/utime.h>  /* utime */
+#  include <io.h>         /* _chmod */
+#  define ZSTD_USE_UTIMENSAT 0
+#else
+#  include <unistd.h>     /* chown, stat */
+#  include <sys/stat.h>   /* utimensat, st_mtime */
+#  if (PLATFORM_POSIX_VERSION >= 200809L && defined(st_mtime)) \
+      || (defined(__FreeBSD__) && __FreeBSD_version >= 1100056)
+#    define ZSTD_USE_UTIMENSAT 1
+#  else
+#    define ZSTD_USE_UTIMENSAT 0
+#  endif
+#  if ZSTD_USE_UTIMENSAT
+#    include <fcntl.h>    /* AT_FDCWD */
+#  else
+#    include <utime.h>    /* utime */
+#  endif
+#endif
+
+#if defined(_MSC_VER) || defined(__MINGW32__) || defined (__MSVCRT__)
+#include <direct.h>       /* needed for _mkdir in windows */
+#endif
+
+#if defined(__linux__) || (PLATFORM_POSIX_VERSION >= 200112L)  /* opendir, readdir require POSIX.1-2001 */
+#  include <dirent.h>     /* opendir, readdir */
+#  include <string.h>     /* strerror, memcpy */
+#endif /* #ifdef _WIN32 */
+
+/*-****************************************
+*  Internal Macros
+******************************************/
+
+/* CONTROL is almost like an assert(), but is never disabled.
+ * It's designed for failures that may happen rarely,
+ * but we don't want to maintain a specific error code path for them,
+ * such as a malloc() returning NULL for example.
+ * Since it's always active, this macro can trigger side effects.
+ */
+#define CONTROL(c)  {         \
+    if (!(c)) {               \
+        UTIL_DISPLAYLEVEL(1, "Error : %s, %i : %s",  \
+                          __FILE__, __LINE__, #c);   \
+        exit(1);              \
+}   }
+
+/* console log */
+#define UTIL_DISPLAY(...)         fprintf(stderr, __VA_ARGS__)
+#define UTIL_DISPLAYLEVEL(l, ...) { if (g_utilDisplayLevel>=l) { UTIL_DISPLAY(__VA_ARGS__); } }
+
+static int g_traceDepth = 0;
+int g_traceFileStat = 0;
+
+#define UTIL_TRACE_CALL(...)
\ + { \ + if (g_traceFileStat) { \ + UTIL_DISPLAY("Trace:FileStat: %*s> ", g_traceDepth, ""); \ + UTIL_DISPLAY(__VA_ARGS__); \ + UTIL_DISPLAY("\n"); \ + ++g_traceDepth; \ + } \ + } -int UTIL_fileExist(const char* filename) +#define UTIL_TRACE_RET(ret) \ + { \ + if (g_traceFileStat) { \ + --g_traceDepth; \ + UTIL_DISPLAY("Trace:FileStat: %*s< %d\n", g_traceDepth, "", (ret)); \ + } \ + } + +/* A modified version of realloc(). + * If UTIL_realloc() fails the original block is freed. + */ +UTIL_STATIC void* UTIL_realloc(void *ptr, size_t size) { - stat_t statbuf; + void *newptr = realloc(ptr, size); + if (newptr) return newptr; + free(ptr); + return NULL; +} + +#if defined(_MSC_VER) + #define chmod _chmod +#endif + +#ifndef ZSTD_HAVE_FCHMOD +#if PLATFORM_POSIX_VERSION >= 199309L +#define ZSTD_HAVE_FCHMOD +#endif +#endif + +#ifndef ZSTD_HAVE_FCHOWN +#if PLATFORM_POSIX_VERSION >= 200809L +#define ZSTD_HAVE_FCHOWN +#endif +#endif + +/*-**************************************** +* Console log +******************************************/ +int g_utilDisplayLevel; + +int UTIL_requireUserConfirmation(const char* prompt, const char* abortMsg, + const char* acceptableLetters, int hasStdinInput) { + int ch, result; + + if (hasStdinInput) { + UTIL_DISPLAY("stdin is an input - not proceeding.\n"); + return 1; + } + + UTIL_DISPLAY("%s", prompt); + ch = getchar(); + result = 0; + if (strchr(acceptableLetters, ch) == NULL) { + UTIL_DISPLAY("%s \n", abortMsg); + result = 1; + } + /* flush the rest */ + while ((ch!=EOF) && (ch!='\n')) + ch = getchar(); + return result; +} + + +/*-************************************* +* Constants +***************************************/ +#define KB * (1 << 10) +#define LIST_SIZE_INCREASE (8 KB) +#define MAX_FILE_OF_FILE_NAMES_SIZE (1<<20)*50 + + +/*-************************************* +* Functions +***************************************/ + +void UTIL_traceFileStat(void) +{ + g_traceFileStat = 1; +} + +int UTIL_fstat(const int fd, const char* filename, stat_t* statbuf) +{ + int ret; + UTIL_TRACE_CALL("UTIL_stat(%d, %s)", fd, filename); #if defined(_MSC_VER) - int const stat_error = _stat64(filename, &statbuf); + if (fd >= 0) { + ret = !_fstat64(fd, statbuf); + } else { + ret = !_stat64(filename, statbuf); + } +#elif defined(__MINGW32__) && defined (__MSVCRT__) + if (fd >= 0) { + ret = !_fstati64(fd, statbuf); + } else { + ret = !_stati64(filename, statbuf); + } #else - int const stat_error = stat(filename, &statbuf); + if (fd >= 0) { + ret = !fstat(fd, statbuf); + } else { + ret = !stat(filename, statbuf); + } #endif - return !stat_error; + UTIL_TRACE_RET(ret); + return ret; +} + +int UTIL_stat(const char* filename, stat_t* statbuf) +{ + return UTIL_fstat(-1, filename, statbuf); +} + +int UTIL_isFdRegularFile(int fd) +{ + stat_t statbuf; + int ret; + UTIL_TRACE_CALL("UTIL_isFdRegularFile(%d)", fd); + ret = fd >= 0 && UTIL_fstat(fd, "", &statbuf) && UTIL_isRegularFileStat(&statbuf); + UTIL_TRACE_RET(ret); + return ret; } int UTIL_isRegularFile(const char* infilename) { stat_t statbuf; - return UTIL_getFileStat(infilename, &statbuf); /* Only need to know whether it is a regular file */ + int ret; + UTIL_TRACE_CALL("UTIL_isRegularFile(%s)", infilename); + ret = UTIL_stat(infilename, &statbuf) && UTIL_isRegularFileStat(&statbuf); + UTIL_TRACE_RET(ret); + return ret; } -int UTIL_getFileStat(const char* infilename, stat_t *statbuf) +int UTIL_isRegularFileStat(const stat_t* statbuf) { - int r; #if defined(_MSC_VER) - r = _stat64(infilename, statbuf); - if (r || !(statbuf->st_mode & 
S_IFREG)) return 0; /* No good... */ + return (statbuf->st_mode & S_IFREG) != 0; #else - r = stat(infilename, statbuf); - if (r || !S_ISREG(statbuf->st_mode)) return 0; /* No good... */ + return S_ISREG(statbuf->st_mode) != 0; #endif - return 1; } -int UTIL_setFileStat(const char *filename, stat_t *statbuf) +/* like chmod, but avoid changing permission of /dev/null */ +int UTIL_chmod(char const* filename, const stat_t* statbuf, mode_t permissions) +{ + return UTIL_fchmod(-1, filename, statbuf, permissions); +} + +int UTIL_fchmod(const int fd, char const* filename, const stat_t* statbuf, mode_t permissions) +{ + stat_t localStatBuf; + UTIL_TRACE_CALL("UTIL_chmod(%s, %#4o)", filename, (unsigned)permissions); + if (statbuf == NULL) { + if (!UTIL_fstat(fd, filename, &localStatBuf)) { + UTIL_TRACE_RET(0); + return 0; + } + statbuf = &localStatBuf; + } + if (!UTIL_isRegularFileStat(statbuf)) { + UTIL_TRACE_RET(0); + return 0; /* pretend success, but don't change anything */ + } +#ifdef ZSTD_HAVE_FCHMOD + if (fd >= 0) { + int ret; + UTIL_TRACE_CALL("fchmod"); + ret = fchmod(fd, permissions); + UTIL_TRACE_RET(ret); + UTIL_TRACE_RET(ret); + return ret; + } else +#endif + { + int ret; + UTIL_TRACE_CALL("chmod"); + ret = chmod(filename, permissions); + UTIL_TRACE_RET(ret); + UTIL_TRACE_RET(ret); + return ret; + } +} + +/* set access and modification times */ +int UTIL_utime(const char* filename, const stat_t *statbuf) +{ + int ret; + UTIL_TRACE_CALL("UTIL_utime(%s)", filename); + /* We check that st_mtime is a macro here in order to give us confidence + * that struct stat has a struct timespec st_mtim member. We need this + * check because there are some platforms that claim to be POSIX 2008 + * compliant but which do not have st_mtim... */ + /* FreeBSD has implemented POSIX 2008 for a long time but still only + * advertises support for POSIX 2001. They have a version macro that + * lets us safely gate them in. + * See https://docs.freebsd.org/en/books/porters-handbook/versions/. + */ +#if ZSTD_USE_UTIMENSAT + { + /* (atime, mtime) */ + struct timespec timebuf[2] = { {0, UTIME_NOW} }; + timebuf[1] = statbuf->st_mtim; + ret = utimensat(AT_FDCWD, filename, timebuf, 0); + } +#else + { + struct utimbuf timebuf; + timebuf.actime = time(NULL); + timebuf.modtime = statbuf->st_mtime; + ret = utime(filename, &timebuf); + } +#endif + errno = 0; + UTIL_TRACE_RET(ret); + return ret; +} + +int UTIL_setFileStat(const char *filename, const stat_t *statbuf) +{ + return UTIL_setFDStat(-1, filename, statbuf); +} + +int UTIL_setFDStat(const int fd, const char *filename, const stat_t *statbuf) { int res = 0; - struct utimbuf timebuf; + stat_t curStatBuf; + UTIL_TRACE_CALL("UTIL_setFileStat(%d, %s)", fd, filename); - if (!UTIL_isRegularFile(filename)) + if (!UTIL_fstat(fd, filename, &curStatBuf) || !UTIL_isRegularFileStat(&curStatBuf)) { + UTIL_TRACE_RET(-1); return -1; + } - timebuf.actime = time(NULL); - timebuf.modtime = statbuf->st_mtime; - res += utime(filename, &timebuf); /* set access and modification times */ + /* Mimic gzip's behavior: + * + * "Change the group first, then the permissions, then the owner. + * That way, the permissions will be correct on systems that allow + * users to give away files, without introducing a security hole. + * Security depends on permissions not containing the setuid or + * setgid bits." 
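To illustrate the intended call pattern for these stat helpers (a sketch under the assumption of a hypothetical copyFileContent() helper; not code from this patch):

    #include "util.h"

    extern int copyFileContent(const char* src, const char* dst); /* hypothetical */

    static int copyWithMetadata(const char* src, const char* dst)
    {
        stat_t srcStat;
        if (!UTIL_stat(src, &srcStat)) return -1;      /* capture source metadata */
        if (copyFileContent(src, dst) != 0) return -1; /* write destination data */
        if (UTIL_setFileStat(dst, &srcStat) != 0) return -1; /* group, perms, owner */
        return UTIL_utime(dst, &srcStat);              /* atime/mtime; -1 on error */
    }
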
*/ #if !defined(_WIN32) - res += chown(filename, statbuf->st_uid, statbuf->st_gid); /* Copy ownership */ +#ifdef ZSTD_HAVE_FCHOWN + if (fd >= 0) { + res += fchown(fd, -1, statbuf->st_gid); /* Apply group ownership */ + } else +#endif + { + res += chown(filename, -1, statbuf->st_gid); /* Apply group ownership */ + } #endif - res += chmod(filename, statbuf->st_mode & 07777); /* Copy file permissions */ + res += UTIL_fchmod(fd, filename, &curStatBuf, statbuf->st_mode & 0777); /* Copy file permissions */ + +#if !defined(_WIN32) +#ifdef ZSTD_HAVE_FCHOWN + if (fd >= 0) { + res += fchown(fd, statbuf->st_uid, -1); /* Apply user ownership */ + } else +#endif + { + res += chown(filename, statbuf->st_uid, -1); /* Apply user ownership */ + } +#endif errno = 0; + UTIL_TRACE_RET(-res); return -res; /* number of errors is returned */ } -U32 UTIL_isDirectory(const char* infilename) +int UTIL_isDirectory(const char* infilename) { - int r; stat_t statbuf; + int ret; + UTIL_TRACE_CALL("UTIL_isDirectory(%s)", infilename); + ret = UTIL_stat(infilename, &statbuf) && UTIL_isDirectoryStat(&statbuf); + UTIL_TRACE_RET(ret); + return ret; +} + +int UTIL_isDirectoryStat(const stat_t* statbuf) +{ + int ret; + UTIL_TRACE_CALL("UTIL_isDirectoryStat()"); #if defined(_MSC_VER) - r = _stat64(infilename, &statbuf); - if (!r && (statbuf.st_mode & _S_IFDIR)) return 1; + ret = (statbuf->st_mode & _S_IFDIR) != 0; #else - r = stat(infilename, &statbuf); - if (!r && S_ISDIR(statbuf.st_mode)) return 1; + ret = S_ISDIR(statbuf->st_mode) != 0; #endif - return 0; + UTIL_TRACE_RET(ret); + return ret; +} + +int UTIL_compareStr(const void *p1, const void *p2) { + return strcmp(* (char * const *) p1, * (char * const *) p2); } -int UTIL_isSameFile(const char* file1, const char* file2) +int UTIL_isSameFile(const char* fName1, const char* fName2) { -#if defined(_MSC_VER) + int ret; + assert(fName1 != NULL); assert(fName2 != NULL); + UTIL_TRACE_CALL("UTIL_isSameFile(%s, %s)", fName1, fName2); +#if defined(_MSC_VER) || defined(_WIN32) + /* note : Visual does not support file identification by inode. + * inode does not work on Windows, even with a posix layer, like msys2. + * The following work-around is limited to detecting exact name repetition only, + * aka `filename` is considered different from `subdir/../filename` */ + ret = !strcmp(fName1, fName2); +#else + { stat_t file1Stat; + stat_t file2Stat; + ret = UTIL_stat(fName1, &file1Stat) + && UTIL_stat(fName2, &file2Stat) + && UTIL_isSameFileStat(fName1, fName2, &file1Stat, &file2Stat); + } +#endif + UTIL_TRACE_RET(ret); + return ret; +} + +int UTIL_isSameFileStat( + const char* fName1, const char* fName2, + const stat_t* file1Stat, const stat_t* file2Stat) +{ + int ret; + assert(fName1 != NULL); assert(fName2 != NULL); + UTIL_TRACE_CALL("UTIL_isSameFileStat(%s, %s)", fName1, fName2); +#if defined(_MSC_VER) || defined(_WIN32) /* note : Visual does not support file identification by inode. + * inode does not work on Windows, even with a posix layer, like msys2. 
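One typical use of this same-file check, sketched here with made-up names (on Windows the comparison degrades to exact-name matching, per the surrounding note):

    #include <stdio.h>
    #include "util.h"

    /* refuse to write output over its own input */
    static int checkDistinctFiles(const char* srcName, const char* dstName)
    {
        if (UTIL_isSameFile(srcName, dstName)) {
            fprintf(stderr, "error: '%s' and '%s' are the same file\n",
                    srcName, dstName);
            return 1;
        }
        return 0;
    }
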
* The following work-around is limited to detecting exact name repetition only, * aka `filename` is considered different from `subdir/../filename` */ - return !strcmp(file1, file2); + (void)file1Stat; + (void)file2Stat; + ret = !strcmp(fName1, fName2); #else - stat_t file1Stat; - stat_t file2Stat; - return UTIL_getFileStat(file1, &file1Stat) - && UTIL_getFileStat(file2, &file2Stat) - && (file1Stat.st_dev == file2Stat.st_dev) - && (file1Stat.st_ino == file2Stat.st_ino); + { + ret = (file1Stat->st_dev == file2Stat->st_dev) + && (file1Stat->st_ino == file2Stat->st_ino); + } #endif + UTIL_TRACE_RET(ret); + return ret; } -U32 UTIL_isLink(const char* infilename) +/* UTIL_isFIFO : distinguish named pipes */ +int UTIL_isFIFO(const char* infilename) { + UTIL_TRACE_CALL("UTIL_isFIFO(%s)", infilename); /* macro guards, as defined in : https://linux.die.net/man/2/lstat */ #if PLATFORM_POSIX_VERSION >= 200112L - int r; - stat_t statbuf; - r = lstat(infilename, &statbuf); - if (!r && S_ISLNK(statbuf.st_mode)) return 1; + { + stat_t statbuf; + if (UTIL_stat(infilename, &statbuf) && UTIL_isFIFOStat(&statbuf)) { + UTIL_TRACE_RET(1); + return 1; + } + } +#endif + (void)infilename; + UTIL_TRACE_RET(0); + return 0; +} + +/* UTIL_isFIFOStat : distinguish named pipes */ +int UTIL_isFIFOStat(const stat_t* statbuf) +{ +/* macro guards, as defined in : https://linux.die.net/man/2/lstat */ +#if PLATFORM_POSIX_VERSION >= 200112L + if (S_ISFIFO(statbuf->st_mode)) return 1; +#endif + (void)statbuf; + return 0; +} + +/* process substitution */ +int UTIL_isFileDescriptorPipe(const char* filename) +{ + UTIL_TRACE_CALL("UTIL_isFileDescriptorPipe(%s)", filename); + /* Check if the filename is a /dev/fd/ path which indicates a file descriptor */ + if (filename[0] == '/' && strncmp(filename, "/dev/fd/", 8) == 0) { + UTIL_TRACE_RET(1); + return 1; + } + + /* Check for alternative process substitution formats on different systems */ + if (filename[0] == '/' && strncmp(filename, "/proc/self/fd/", 14) == 0) { + UTIL_TRACE_RET(1); + return 1; + } + + UTIL_TRACE_RET(0); + return 0; /* Not recognized as a file descriptor pipe */ +} + +/* UTIL_isBlockDevStat : distinguish block devices */ +int UTIL_isBlockDevStat(const stat_t* statbuf) +{ +/* macro guards, as defined in : https://linux.die.net/man/2/lstat */ +#if PLATFORM_POSIX_VERSION >= 200112L + if (S_ISBLK(statbuf->st_mode)) return 1; +#endif + (void)statbuf; + return 0; +} + +int UTIL_isLink(const char* infilename) +{ + UTIL_TRACE_CALL("UTIL_isLink(%s)", infilename); +/* macro guards, as defined in : https://linux.die.net/man/2/lstat */ +#if PLATFORM_POSIX_VERSION >= 200112L + { + stat_t statbuf; + int const r = lstat(infilename, &statbuf); + if (!r && S_ISLNK(statbuf.st_mode)) { + UTIL_TRACE_RET(1); + return 1; + } + } #endif (void)infilename; + UTIL_TRACE_RET(0); return 0; } +static int g_fakeStdinIsConsole = 0; +static int g_fakeStderrIsConsole = 0; +static int g_fakeStdoutIsConsole = 0; + +int UTIL_isConsole(FILE* file) +{ + int ret; + UTIL_TRACE_CALL("UTIL_isConsole(%d)", fileno(file)); + if (file == stdin && g_fakeStdinIsConsole) + ret = 1; + else if (file == stderr && g_fakeStderrIsConsole) + ret = 1; + else if (file == stdout && g_fakeStdoutIsConsole) + ret = 1; + else + ret = IS_CONSOLE(file); + UTIL_TRACE_RET(ret); + return ret; +} + +void UTIL_fakeStdinIsConsole(void) +{ + g_fakeStdinIsConsole = 1; +} +void UTIL_fakeStdoutIsConsole(void) +{ + g_fakeStdoutIsConsole = 1; +} +void UTIL_fakeStderrIsConsole(void) +{ + g_fakeStderrIsConsole = 1; +} + U64 UTIL_getFileSize(const
char* infilename) { - if (!UTIL_isRegularFile(infilename)) return UTIL_FILESIZE_UNKNOWN; - { int r; + stat_t statbuf; + UTIL_TRACE_CALL("UTIL_getFileSize(%s)", infilename); + if (!UTIL_stat(infilename, &statbuf)) { + UTIL_TRACE_RET(-1); + return UTIL_FILESIZE_UNKNOWN; + } + { + U64 const size = UTIL_getFileSizeStat(&statbuf); + UTIL_TRACE_RET((int)size); + return size; + } +} + +U64 UTIL_getFileSizeStat(const stat_t* statbuf) +{ + if (!UTIL_isRegularFileStat(statbuf)) return UTIL_FILESIZE_UNKNOWN; #if defined(_MSC_VER) - struct __stat64 statbuf; - r = _stat64(infilename, &statbuf); - if (r || !(statbuf.st_mode & S_IFREG)) return UTIL_FILESIZE_UNKNOWN; + if (!(statbuf->st_mode & S_IFREG)) return UTIL_FILESIZE_UNKNOWN; #elif defined(__MINGW32__) && defined (__MSVCRT__) - struct _stati64 statbuf; - r = _stati64(infilename, &statbuf); - if (r || !(statbuf.st_mode & S_IFREG)) return UTIL_FILESIZE_UNKNOWN; + if (!(statbuf->st_mode & S_IFREG)) return UTIL_FILESIZE_UNKNOWN; #else - struct stat statbuf; - r = stat(infilename, &statbuf); - if (r || !S_ISREG(statbuf.st_mode)) return UTIL_FILESIZE_UNKNOWN; + if (!S_ISREG(statbuf->st_mode)) return UTIL_FILESIZE_UNKNOWN; #endif - return (U64)statbuf.st_size; - } + return (U64)statbuf->st_size; } +UTIL_HumanReadableSize_t UTIL_makeHumanReadableSize(U64 size) +{ + UTIL_HumanReadableSize_t hrs; + + if (g_utilDisplayLevel > 3) { + /* In verbose mode, do not scale sizes down, except in the case of + * values that exceed the integral precision of a double. */ + if (size >= (1ull << 53)) { + hrs.value = (double)size / (1ull << 20); + hrs.suffix = " MiB"; + /* At worst, a double representation of a maximal size will be + * accurate to better than tens of kilobytes. */ + hrs.precision = 2; + } else { + hrs.value = (double)size; + hrs.suffix = " B"; + hrs.precision = 0; + } + } else { + /* In regular mode, scale sizes down and use suffixes. 
*/ + if (size >= (1ull << 60)) { + hrs.value = (double)size / (1ull << 60); + hrs.suffix = " EiB"; + } else if (size >= (1ull << 50)) { + hrs.value = (double)size / (1ull << 50); + hrs.suffix = " PiB"; + } else if (size >= (1ull << 40)) { + hrs.value = (double)size / (1ull << 40); + hrs.suffix = " TiB"; + } else if (size >= (1ull << 30)) { + hrs.value = (double)size / (1ull << 30); + hrs.suffix = " GiB"; + } else if (size >= (1ull << 20)) { + hrs.value = (double)size / (1ull << 20); + hrs.suffix = " MiB"; + } else if (size >= (1ull << 10)) { + hrs.value = (double)size / (1ull << 10); + hrs.suffix = " KiB"; + } else { + hrs.value = (double)size; + hrs.suffix = " B"; + } + + if (hrs.value >= 100 || (U64)hrs.value == size) { + hrs.precision = 0; + } else if (hrs.value >= 10) { + hrs.precision = 1; + } else if (hrs.value > 1) { + hrs.precision = 2; + } else { + hrs.precision = 3; + } + } + + return hrs; +} -U64 UTIL_getTotalFileSize(const char* const * const fileNamesTable, unsigned nbFiles) +U64 UTIL_getTotalFileSize(const char* const * fileNamesTable, unsigned nbFiles) { U64 total = 0; - int error = 0; unsigned n; + UTIL_TRACE_CALL("UTIL_getTotalFileSize(%u)", nbFiles); for (n=0; n 0) { + totalRead += bytesRead; + + /* If buffer is nearly full, expand it */ + if (bufSize - totalRead < 1 KB) { + if (bufSize >= MAX_FILE_OF_FILE_NAMES_SIZE) { + /* Too large, abort */ + free(buf); + return NULL; + } + + { size_t newBufSize = bufSize * 2; + if (newBufSize > MAX_FILE_OF_FILE_NAMES_SIZE) + newBufSize = MAX_FILE_OF_FILE_NAMES_SIZE; + + { char* newBuf = (char*)realloc(buf, newBufSize); + if (newBuf == NULL) { + free(buf); + return NULL; + } + + buf = newBuf; + bufSize = newBufSize; + } } } + } + + /* Add null terminator to the end */ + buf[totalRead] = '\0'; + *totalReadPtr = totalRead; + + return buf; +} + +/* Process a buffer containing multiple lines and count the number of lines */ +static size_t UTIL_processLines(char* buffer, size_t bufferSize) +{ + size_t lineCount = 0; + size_t i = 0; + + /* Convert newlines to null terminators and count lines */ + while (i < bufferSize) { + if (buffer[i] == '\n') { + buffer[i] = '\0'; /* Replace newlines with null terminators */ + lineCount++; + } + i++; + } + + /* Count the last line if it doesn't end with a newline */ + if (bufferSize > 0 && (i == 0 || buffer[i-1] != '\0')) { + lineCount++; + } + + return lineCount; +} + +/* Create an array of pointers to the lines in a buffer */ +static const char** UTIL_createLinePointers(char* buffer, size_t numLines, size_t bufferSize) +{ + size_t lineIndex = 0; + size_t pos = 0; + void* const bufferPtrs = malloc(numLines * sizeof(const char**)); + const char** const linePointers = (const char**)bufferPtrs; + if (bufferPtrs == NULL) return NULL; + + while (lineIndex < numLines && pos < bufferSize) { + size_t len = 0; + linePointers[lineIndex++] = buffer+pos; + + /* Find the next null terminator, being careful not to go past the buffer */ + while ((pos + len < bufferSize) && buffer[pos + len] != '\0') { + len++; + } + + /* Move past this string and its null terminator */ + pos += len; + if (pos < bufferSize) pos++; /* Skip the null terminator if we're not at buffer end */ + } + + /* Verify we processed the expected number of lines */ + if (lineIndex != numLines) { + /* Something went wrong - we didn't find as many lines as expected */ + free(bufferPtrs); + return NULL; + } + + return linePointers; +} + +FileNamesTable* +UTIL_createFileNamesTable_fromFileList(const char* fileList) +{ + stat_t statbuf; + char* buffer = 
NULL; + size_t numLines = 0; + size_t bufferSize = 0; + + /* Check if the input is a valid file */ + if (!UTIL_stat(fileList, &statbuf)) { + return NULL; + } + + /* Check if the input is a supported type */ + if (!UTIL_isRegularFileStat(&statbuf) && + !UTIL_isFIFOStat(&statbuf) && + !UTIL_isFileDescriptorPipe(fileList)) { + return NULL; + } + + /* Open the input file */ + { FILE* const inFile = fopen(fileList, "rb"); + if (inFile == NULL) return NULL; + + /* Read the file content */ + buffer = UTIL_readFileContent(inFile, &bufferSize); + fclose(inFile); + } + + if (buffer == NULL) return NULL; + + /* Process lines */ + numLines = UTIL_processLines(buffer, bufferSize); + if (numLines == 0) { + free(buffer); + return NULL; + } + + /* Create line pointers */ + { const char** linePointers = UTIL_createLinePointers(buffer, numLines, bufferSize); + if (linePointers == NULL) { + free(buffer); + return NULL; + } + + /* Create the final table */ + return UTIL_assembleFileNamesTable(linePointers, numLines, buffer); + } +} + + +static FileNamesTable* +UTIL_assembleFileNamesTable2(const char** filenames, size_t tableSize, size_t tableCapacity, char* buf) +{ + FileNamesTable* const table = (FileNamesTable*) malloc(sizeof(*table)); + CONTROL(table != NULL); + table->fileNames = filenames; + table->buf = buf; + table->tableSize = tableSize; + table->tableCapacity = tableCapacity; + return table; +} + +FileNamesTable* +UTIL_assembleFileNamesTable(const char** filenames, size_t tableSize, char* buf) +{ + return UTIL_assembleFileNamesTable2(filenames, tableSize, tableSize, buf); +} + +void UTIL_freeFileNamesTable(FileNamesTable* table) +{ + if (table==NULL) return; + free((void*)table->fileNames); + free(table->buf); + free(table); +} + +FileNamesTable* UTIL_allocateFileNamesTable(size_t tableSize) +{ + const char** const fnTable = (const char**)malloc(tableSize * sizeof(*fnTable)); + FileNamesTable* fnt; + if (fnTable==NULL) return NULL; + fnt = UTIL_assembleFileNamesTable(fnTable, tableSize, NULL); + fnt->tableSize = 0; /* the table is empty */ + return fnt; +} + +int UTIL_searchFileNamesTable(FileNamesTable* table, char const* name) { + size_t i; + for(i=0 ;i < table->tableSize; i++) { + if(!strcmp(table->fileNames[i], name)) { + return (int)i; + } + } + return -1; +} + +void UTIL_refFilename(FileNamesTable* fnt, const char* filename) +{ + assert(fnt->tableSize < fnt->tableCapacity); + fnt->fileNames[fnt->tableSize] = filename; + fnt->tableSize++; +} + +static size_t getTotalTableSize(FileNamesTable* table) +{ + size_t fnb, totalSize = 0; + for(fnb = 0 ; fnb < table->tableSize && table->fileNames[fnb] ; ++fnb) { + totalSize += strlen(table->fileNames[fnb]) + 1; /* +1 to add '\0' at the end of each fileName */ + } + return totalSize; +} + +FileNamesTable* +UTIL_mergeFileNamesTable(FileNamesTable* table1, FileNamesTable* table2) +{ + unsigned newTableIdx = 0; + size_t pos = 0; + size_t newTotalTableSize; + char* buf; + + FileNamesTable* const newTable = UTIL_assembleFileNamesTable(NULL, 0, NULL); + CONTROL( newTable != NULL ); + + newTotalTableSize = getTotalTableSize(table1) + getTotalTableSize(table2); + + buf = (char*) calloc(newTotalTableSize, sizeof(*buf)); + CONTROL ( buf != NULL ); + + newTable->buf = buf; + newTable->tableSize = table1->tableSize + table2->tableSize; + newTable->fileNames = (const char **) calloc(newTable->tableSize, sizeof(*(newTable->fileNames))); + CONTROL ( newTable->fileNames != NULL ); + + { unsigned idx1; + for( idx1=0 ; (idx1 < table1->tableSize) && table1->fileNames[idx1] 
&& (pos < newTotalTableSize); ++idx1, ++newTableIdx) { + size_t const curLen = strlen(table1->fileNames[idx1]); + memcpy(buf+pos, table1->fileNames[idx1], curLen); + assert(newTableIdx <= newTable->tableSize); + newTable->fileNames[newTableIdx] = buf+pos; + pos += curLen+1; + } } + + { unsigned idx2; + for( idx2=0 ; (idx2 < table2->tableSize) && table2->fileNames[idx2] && (pos < newTotalTableSize) ; ++idx2, ++newTableIdx) { + size_t const curLen = strlen(table2->fileNames[idx2]); + memcpy(buf+pos, table2->fileNames[idx2], curLen); + assert(newTableIdx < newTable->tableSize); + newTable->fileNames[newTableIdx] = buf+pos; + pos += curLen+1; + } } + assert(pos <= newTotalTableSize); + newTable->tableSize = newTableIdx; + + UTIL_freeFileNamesTable(table1); + UTIL_freeFileNamesTable(table2); + + return newTable; } #ifdef _WIN32 -int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char** bufEnd, int followLinks) +static int UTIL_prepareFileList(const char* dirName, + char** bufStart, size_t* pos, + char** bufEnd, int followLinks) { char* path; - int dirLength, fnameLength, pathLength, nbFiles = 0; + size_t dirLength, pathLength; + int nbFiles = 0; WIN32_FIND_DATAA cFile; HANDLE hFile; - dirLength = (int)strlen(dirName); + dirLength = strlen(dirName); path = (char*) malloc(dirLength + 3); if (!path) return 0; @@ -172,12 +928,13 @@ int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char hFile=FindFirstFileA(path, &cFile); if (hFile == INVALID_HANDLE_VALUE) { UTIL_DISPLAYLEVEL(1, "Cannot open directory '%s'\n", dirName); + free(path); return 0; } free(path); do { - fnameLength = (int)strlen(cFile.cFileName); + size_t const fnameLength = strlen(cFile.cFileName); path = (char*) malloc(dirLength + fnameLength + 2); if (!path) { FindClose(hFile); return 0; } memcpy(path, dirName, dirLength); @@ -205,8 +962,7 @@ int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char memcpy(*bufStart + *pos, path, pathLength+1 /* include final \0 */); *pos += pathLength + 1; nbFiles++; - } - } + } } free(path); } while (FindNextFileA(hFile, &cFile)); @@ -216,24 +972,28 @@ int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char #elif defined(__linux__) || (PLATFORM_POSIX_VERSION >= 200112L) /* opendir, readdir require POSIX.1-2001 */ -int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char** bufEnd, int followLinks) +static int UTIL_prepareFileList(const char *dirName, + char** bufStart, size_t* pos, + char** bufEnd, int followLinks) { - DIR *dir; - struct dirent *entry; - char* path; - int dirLength, fnameLength, pathLength, nbFiles = 0; + DIR* dir; + struct dirent * entry; + size_t dirLength; + int nbFiles = 0; if (!(dir = opendir(dirName))) { UTIL_DISPLAYLEVEL(1, "Cannot open directory '%s': %s\n", dirName, strerror(errno)); return 0; } - dirLength = (int)strlen(dirName); + dirLength = strlen(dirName); errno = 0; while ((entry = readdir(dir)) != NULL) { + char* path; + size_t fnameLength, pathLength; if (strcmp (entry->d_name, "..") == 0 || strcmp (entry->d_name, ".") == 0) continue; - fnameLength = (int)strlen(entry->d_name); + fnameLength = strlen(entry->d_name); path = (char*) malloc(dirLength + fnameLength + 2); if (!path) { closedir(dir); return 0; } memcpy(path, dirName, dirLength); @@ -245,6 +1005,7 @@ int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char if (!followLinks && UTIL_isLink(path)) { UTIL_DISPLAYLEVEL(2, "Warning : %s is a symbolic link, ignoring\n", 
path); + free(path); continue; } @@ -254,22 +1015,25 @@ int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char } else { if (*bufStart + *pos + pathLength >= *bufEnd) { ptrdiff_t newListSize = (*bufEnd - *bufStart) + LIST_SIZE_INCREASE; - *bufStart = (char*)UTIL_realloc(*bufStart, newListSize); - *bufEnd = *bufStart + newListSize; - if (*bufStart == NULL) { free(path); closedir(dir); return 0; } + assert(newListSize >= 0); + *bufStart = (char*)UTIL_realloc(*bufStart, (size_t)newListSize); + if (*bufStart != NULL) { + *bufEnd = *bufStart + newListSize; + } else { + free(path); closedir(dir); return 0; + } } if (*bufStart + *pos + pathLength < *bufEnd) { memcpy(*bufStart + *pos, path, pathLength + 1); /* with final \0 */ *pos += pathLength + 1; nbFiles++; - } - } + } } free(path); errno = 0; /* clear errno after UTIL_isDirectory, UTIL_prepareFileList */ } if (errno != 0) { - UTIL_DISPLAYLEVEL(1, "readdir(%s) error: %s\n", dirName, strerror(errno)); + UTIL_DISPLAYLEVEL(1, "readdir(%s) error: %s \n", dirName, strerror(errno)); free(*bufStart); *bufStart = NULL; } @@ -279,81 +1043,407 @@ int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char #else -int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char** bufEnd, int followLinks) +static int UTIL_prepareFileList(const char *dirName, + char** bufStart, size_t* pos, + char** bufEnd, int followLinks) { (void)bufStart; (void)bufEnd; (void)pos; (void)followLinks; - UTIL_DISPLAYLEVEL(1, "Directory %s ignored (compiled without _WIN32 or _POSIX_C_SOURCE)\n", dirName); + UTIL_DISPLAYLEVEL(1, "Directory %s ignored (compiled without _WIN32 or _POSIX_C_SOURCE) \n", dirName); return 0; } #endif /* #ifdef _WIN32 */ -/* - * UTIL_createFileList - takes a list of files and directories (params: inputNames, inputNamesNb), scans directories, - * and returns a new list of files (params: return value, allocatedBuffer, allocatedNamesNb). - * After finishing usage of the list the structures should be freed with UTIL_freeFileList(params: return value, allocatedBuffer) - * In case of error UTIL_createFileList returns NULL and UTIL_freeFileList should not be called. 
- */ -const char** -UTIL_createFileList(const char **inputNames, unsigned inputNamesNb, - char** allocatedBuffer, unsigned* allocatedNamesNb, - int followLinks) +int UTIL_isCompressedFile(const char *inputName, const char *extensionList[]) { - size_t pos; - unsigned i, nbFiles; - char* buf = (char*)malloc(LIST_SIZE_INCREASE); - char* bufend = buf + LIST_SIZE_INCREASE; - const char** fileTable; + const char* ext = UTIL_getFileExtension(inputName); + while(*extensionList!=NULL) + { + const int isCompressedExtension = strcmp(ext,*extensionList); + if(isCompressedExtension==0) + return 1; + ++extensionList; + } + return 0; +} - if (!buf) return NULL; +/*Utility function to get file extension from file */ +const char* UTIL_getFileExtension(const char* infilename) +{ + const char* extension = strrchr(infilename, '.'); + if(!extension || extension==infilename) return ""; + return extension; +} - for (i=0, pos=0, nbFiles=0; i= bufend) { - ptrdiff_t newListSize = (bufend - buf) + LIST_SIZE_INCREASE; - buf = (char*)UTIL_realloc(buf, newListSize); - bufend = buf + newListSize; - if (!buf) return NULL; - } - if (buf + pos + len < bufend) { - memcpy(buf+pos, inputNames[i], len+1); /* with final \0 */ - pos += len + 1; - nbFiles++; - } - } else { - nbFiles += UTIL_prepareFileList(inputNames[i], &buf, &pos, &bufend, followLinks); - if (buf == NULL) return NULL; - } } +static int pathnameHas2Dots(const char *pathname) +{ + /* We need to figure out whether any ".." present in the path is a whole + * path token, which is the case if it is bordered on both sides by either + * the beginning/end of the path or by a directory separator. + */ + const char *needle = pathname; + while (1) { + needle = strstr(needle, ".."); + + if (needle == NULL) { + return 0; + } - if (nbFiles == 0) { free(buf); return NULL; } + if ((needle == pathname || needle[-1] == PATH_SEP) + && (needle[2] == '\0' || needle[2] == PATH_SEP)) { + return 1; + } + + /* increment so we search for the next match */ + needle++; + }; + return 0; +} - fileTable = (const char**)malloc((nbFiles+1) * sizeof(const char*)); - if (!fileTable) { free(buf); return NULL; } +static int isFileNameValidForMirroredOutput(const char *filename) +{ + return !pathnameHas2Dots(filename); +} - for (i=0, pos=0; i bufend) { free(buf); free((void*)fileTable); return NULL; } +static int makeDir(const char *dir, mode_t mode) +{ +#if defined(_MSC_VER) || defined(__MINGW32__) || defined (__MSVCRT__) + int ret = _mkdir(dir); + (void) mode; +#else + int ret = mkdir(dir, mode); +#endif + if (ret != 0) { + if (errno == EEXIST) + return 0; + UTIL_DISPLAY("zstd: failed to create DIR %s: %s\n", dir, strerror(errno)); + } + return ret; +} - *allocatedBuffer = buf; - *allocatedNamesNb = nbFiles; +/* this function requires a mutable input string */ +static void convertPathnameToDirName(char *pathname) +{ + size_t len = 0; + char* pos = NULL; + /* get dir name from pathname similar to 'dirname()' */ + assert(pathname != NULL); + + /* remove trailing '/' chars */ + len = strlen(pathname); + assert(len > 0); + while (pathname[len] == PATH_SEP) { + pathname[len] = '\0'; + len--; + } + if (len == 0) return; + + /* if input is a single file, return '.' instead. i.e. + * "xyz/abc/file.txt" => "xyz/abc" + "./file.txt" => "." + "file.txt" => "." 
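An illustrative sketch of how these path helpers combine (the mapping shown is inferred from the trimming and dirname logic above; the names are made up):

    #include <stdio.h>
    #include <stdlib.h>
    #include "util.h"

    static void showMirroredDir(void)
    {
        /* "dir1/dir2/file.txt" mirrored under root "out" is expected to
         * map to directory "out/dir1/dir2"; a source name containing a
         * ".." path component yields NULL instead */
        char* const dstDir = UTIL_createMirroredDestDirName("dir1/dir2/file.txt", "out");
        if (dstDir != NULL) {
            printf("mirrored output dir: %s\n", dstDir);
            free(dstDir);   /* caller owns the returned buffer */
        }
    }
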
+ */ + pos = strrchr(pathname, PATH_SEP); + if (pos == NULL) { + pathname[0] = '.'; + pathname[1] = '\0'; + } else { + *pos = '\0'; + } +} - return fileTable; +/* pathname must be valid */ +static const char* trimLeadingRootChar(const char *pathname) +{ + assert(pathname != NULL); + if (pathname[0] == PATH_SEP) + return pathname + 1; + return pathname; } +/* pathname must be valid */ +static const char* trimLeadingCurrentDirConst(const char *pathname) +{ + assert(pathname != NULL); + if ((pathname[0] == '.') && (pathname[1] == PATH_SEP)) + return pathname + 2; + return pathname; +} -/*-**************************************** -* Console log -******************************************/ -int g_utilDisplayLevel; +static char* +trimLeadingCurrentDir(char *pathname) +{ + /* 'union charunion' can do const-cast without compiler warning */ + union charunion { + char *chr; + const char* cchr; + } ptr; + ptr.cchr = trimLeadingCurrentDirConst(pathname); + return ptr.chr; +} + +/* remove leading './' or '/' chars here */ +static const char * trimPath(const char *pathname) +{ + return trimLeadingRootChar( + trimLeadingCurrentDirConst(pathname)); +} +static char* mallocAndJoin2Dir(const char *dir1, const char *dir2) +{ + assert(dir1 != NULL && dir2 != NULL); + { const size_t dir1Size = strlen(dir1); + const size_t dir2Size = strlen(dir2); + char *outDirBuffer, *buffer; + + outDirBuffer = (char *) malloc(dir1Size + dir2Size + 2); + CONTROL(outDirBuffer != NULL); + + memcpy(outDirBuffer, dir1, dir1Size); + outDirBuffer[dir1Size] = '\0'; + + buffer = outDirBuffer + dir1Size; + if (dir1Size > 0 && *(buffer - 1) != PATH_SEP) { + *buffer = PATH_SEP; + buffer++; + } + memcpy(buffer, dir2, dir2Size); + buffer[dir2Size] = '\0'; + + return outDirBuffer; + } +} + +/* this function will return NULL if input srcFileName is not valid name for mirrored output path */ +char* UTIL_createMirroredDestDirName(const char* srcFileName, const char* outDirRootName) +{ + char* pathname = NULL; + if (!isFileNameValidForMirroredOutput(srcFileName)) + return NULL; + + pathname = mallocAndJoin2Dir(outDirRootName, trimPath(srcFileName)); + + convertPathnameToDirName(pathname); + return pathname; +} + +static int +mirrorSrcDir(char* srcDirName, const char* outDirName) +{ + mode_t srcMode; + int status = 0; + char* newDir = mallocAndJoin2Dir(outDirName, trimPath(srcDirName)); + if (!newDir) + return -ENOMEM; + + srcMode = getDirMode(srcDirName); + status = makeDir(newDir, srcMode); + free(newDir); + return status; +} + +static int +mirrorSrcDirRecursive(char* srcDirName, const char* outDirName) +{ + int status = 0; + char* pp = trimLeadingCurrentDir(srcDirName); + char* sp = NULL; + + while ((sp = strchr(pp, PATH_SEP)) != NULL) { + if (sp != pp) { + *sp = '\0'; + status = mirrorSrcDir(srcDirName, outDirName); + if (status != 0) + return status; + *sp = PATH_SEP; + } + pp = sp + 1; + } + status = mirrorSrcDir(srcDirName, outDirName); + return status; +} + +static void +makeMirroredDestDirsWithSameSrcDirMode(char** srcDirNames, unsigned nbFile, const char* outDirName) +{ + unsigned int i = 0; + for (i = 0; i < nbFile; i++) + mirrorSrcDirRecursive(srcDirNames[i], outDirName); +} + +static int +firstIsParentOrSameDirOfSecond(const char* firstDir, const char* secondDir) +{ + size_t firstDirLen = strlen(firstDir), + secondDirLen = strlen(secondDir); + return firstDirLen <= secondDirLen && + (secondDir[firstDirLen] == PATH_SEP || secondDir[firstDirLen] == '\0') && + 0 == strncmp(firstDir, secondDir, firstDirLen); +} + +static int 
compareDir(const void* pathname1, const void* pathname2) { + /* sort them after removing the leading '/' or './' */ + const char* s1 = trimPath(*(char * const *) pathname1); + const char* s2 = trimPath(*(char * const *) pathname2); + return strcmp(s1, s2); +} + +static void +makeUniqueMirroredDestDirs(char** srcDirNames, unsigned nbFile, const char* outDirName) +{ + unsigned int i = 0, uniqueDirNr = 0; + char** uniqueDirNames = NULL; + + if (nbFile == 0) + return; + + uniqueDirNames = (char** ) malloc(nbFile * sizeof (char *)); + CONTROL(uniqueDirNames != NULL); + + /* if dirs are "a/b/c" and "a/b/c/d", we only need to create "a/b/c/d", + * which also covers "a/b/c" */ + qsort((void *)srcDirNames, nbFile, sizeof(char*), compareDir); + + uniqueDirNr = 1; + uniqueDirNames[uniqueDirNr - 1] = srcDirNames[0]; + for (i = 1; i < nbFile; i++) { + char* prevDirName = srcDirNames[i - 1]; + char* currDirName = srcDirNames[i]; + + /* note: we always compare trimmed path, i.e.: + * src dir of "./foo" and "/foo" will be both saved into: + * "outDirName/foo/" */ + if (!firstIsParentOrSameDirOfSecond(trimPath(prevDirName), + trimPath(currDirName))) + uniqueDirNr++; + + /* we need to maintain original src dir name instead of trimmed + * dir, so we can retrieve the original src dir's mode_t */ + uniqueDirNames[uniqueDirNr - 1] = currDirName; + } + + makeMirroredDestDirsWithSameSrcDirMode(uniqueDirNames, uniqueDirNr, outDirName); + + free(uniqueDirNames); +} + +static void +makeMirroredDestDirs(char** srcFileNames, unsigned nbFile, const char* outDirName) +{ + unsigned int i = 0; + for (i = 0; i < nbFile; ++i) + convertPathnameToDirName(srcFileNames[i]); + makeUniqueMirroredDestDirs(srcFileNames, nbFile, outDirName); +} + +void UTIL_mirrorSourceFilesDirectories(const char** inFileNames, unsigned int nbFile, const char* outDirName) +{ + unsigned int i = 0, validFilenamesNr = 0; + char** srcFileNames = (char **) malloc(nbFile * sizeof (char *)); + CONTROL(srcFileNames != NULL); + + /* check that input filenames are valid */ + for (i = 0; i < nbFile; ++i) { + if (isFileNameValidForMirroredOutput(inFileNames[i])) { + char* fname = STRDUP(inFileNames[i]); + CONTROL(fname != NULL); + srcFileNames[validFilenamesNr++] = fname; + } + } + + if (validFilenamesNr > 0) { + makeDir(outDirName, DIR_DEFAULT_MODE); + makeMirroredDestDirs(srcFileNames, validFilenamesNr, outDirName); + } + + for (i = 0; i < validFilenamesNr; i++) + free(srcFileNames[i]); + free(srcFileNames); +} + +FileNamesTable* +UTIL_createExpandedFNT(const char* const* inputNames, size_t nbIfns, int followLinks) +{ + unsigned nbFiles; + char* buf = (char*)malloc(LIST_SIZE_INCREASE); + char* bufend = buf + LIST_SIZE_INCREASE; + + if (!buf) return NULL; + + { size_t ifnNb, pos; + for (ifnNb=0, pos=0, nbFiles=0; ifnNb<nbIfns; ifnNb++) { + if (!UTIL_isDirectory(inputNames[ifnNb])) { + size_t const len = strlen(inputNames[ifnNb]); + if (buf + pos + len >= bufend) { + ptrdiff_t newListSize = (bufend - buf) + LIST_SIZE_INCREASE; + assert(newListSize >= 0); + buf = (char*)UTIL_realloc(buf, (size_t)newListSize); + if (!buf) return NULL; + bufend = buf + newListSize; + } + if (buf + pos + len < bufend) { + memcpy(buf+pos, inputNames[ifnNb], len+1); /* including final \0 */ + pos += len + 1; + nbFiles++; + } + } else { + nbFiles += (unsigned)UTIL_prepareFileList(inputNames[ifnNb], &buf, &pos, &bufend, followLinks); + if (buf == NULL) return NULL; + } } } + + /* note : even if nbFiles==0, function returns a valid, though empty, FileNamesTable* object */ + + { size_t ifnNb, pos; + size_t const fntCapacity = nbFiles + 1; /* minimum 1, allows adding one reference, typically stdin */ + const char** const fileNamesTable = (const
char**)malloc(fntCapacity * sizeof(*fileNamesTable)); + if (!fileNamesTable) { free(buf); return NULL; } + + for (ifnNb = 0, pos = 0; ifnNb < nbFiles; ifnNb++) { + fileNamesTable[ifnNb] = buf + pos; + if (buf + pos > bufend) { free(buf); free((void*)fileNamesTable); return NULL; } + pos += strlen(fileNamesTable[ifnNb]) + 1; + } + return UTIL_assembleFileNamesTable2(fileNamesTable, nbFiles, fntCapacity, buf); + } +} + + +void UTIL_expandFNT(FileNamesTable** fnt, int followLinks) +{ + FileNamesTable* const newFNT = UTIL_createExpandedFNT((*fnt)->fileNames, (*fnt)->tableSize, followLinks); + CONTROL(newFNT != NULL); + UTIL_freeFileNamesTable(*fnt); + *fnt = newFNT; +} + +FileNamesTable* UTIL_createFNT_fromROTable(const char** filenames, size_t nbFilenames) +{ + size_t const sizeof_FNTable = nbFilenames * sizeof(*filenames); + const char** const newFNTable = (const char**)malloc(sizeof_FNTable); + if (newFNTable==NULL) return NULL; + memcpy((void*)newFNTable, filenames, sizeof_FNTable); /* void* : mitigate a Visual compiler bug or limitation */ + return UTIL_assembleFileNamesTable(newFNTable, nbFilenames, NULL); +} /*-**************************************** -* count the number of physical cores +* count the number of cores ******************************************/ #if defined(_WIN32) || defined(WIN32) @@ -362,10 +1452,26 @@ int g_utilDisplayLevel; typedef BOOL(WINAPI* LPFN_GLPI)(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION, PDWORD); -int UTIL_countPhysicalCores(void) +DWORD CountSetBits(ULONG_PTR bitMask) { - static int numPhysicalCores = 0; - if (numPhysicalCores != 0) return numPhysicalCores; + DWORD LSHIFT = sizeof(ULONG_PTR)*8 - 1; + DWORD bitSetCount = 0; + ULONG_PTR bitTest = (ULONG_PTR)1 << LSHIFT; + DWORD i; + + for (i = 0; i <= LSHIFT; ++i) + { + bitSetCount += ((bitMask & bitTest)?1:0); + bitTest/=2; + } + + return bitSetCount; +} + +int UTIL_countCores(int logical) +{ + static int numCores = 0; + if (numCores != 0) return numCores; { LPFN_GLPI glpi; BOOL done = FALSE; @@ -374,8 +1480,13 @@ int UTIL_countPhysicalCores(void) DWORD returnLength = 0; size_t byteOffset = 0; - glpi = (LPFN_GLPI)GetProcAddress(GetModuleHandle(TEXT("kernel32")), - "GetLogicalProcessorInformation"); +#if defined(_MSC_VER) +/* Visual Studio does not like the following cast */ +# pragma warning( disable : 4054 ) /* conversion from function ptr to data ptr */ +# pragma warning( disable : 4055 ) /* conversion from data ptr to function ptr */ +#endif + glpi = (LPFN_GLPI)(void*)GetProcAddress(GetModuleHandle(TEXT("kernel32")), + "GetLogicalProcessorInformation"); if (glpi == NULL) { goto failed; @@ -399,15 +1510,17 @@ int UTIL_countPhysicalCores(void) } } else { done = TRUE; - } - } + } } ptr = buffer; while (byteOffset + sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION) <= returnLength) { if (ptr->Relationship == RelationProcessorCore) { - numPhysicalCores++; + if (logical) + numCores += CountSetBits(ptr->ProcessorMask); + else + numCores++; } ptr++; @@ -416,17 +1529,17 @@ int UTIL_countPhysicalCores(void) free(buffer); - return numPhysicalCores; + return numCores; } failed: /* try to fall back on GetSystemInfo */ { SYSTEM_INFO sysinfo; GetSystemInfo(&sysinfo); - numPhysicalCores = sysinfo.dwNumberOfProcessors; - if (numPhysicalCores == 0) numPhysicalCores = 1; /* just in case */ + numCores = sysinfo.dwNumberOfProcessors; + if (numCores == 0) numCores = 1; /* just in case */ } - return numPhysicalCores; + return numCores; } #elif defined(__APPLE__) @@ -435,24 +1548,24 @@ int UTIL_countPhysicalCores(void) /* Use 
apple-provided syscall * see: man 3 sysctl */ -int UTIL_countPhysicalCores(void) +int UTIL_countCores(int logical) { - static S32 numPhysicalCores = 0; /* apple specifies int32_t */ - if (numPhysicalCores != 0) return numPhysicalCores; + static S32 numCores = 0; /* apple specifies int32_t */ + if (numCores != 0) return numCores; { size_t size = sizeof(S32); - int const ret = sysctlbyname("hw.physicalcpu", &numPhysicalCores, &size, NULL, 0); + int const ret = sysctlbyname(logical ? "hw.logicalcpu" : "hw.physicalcpu", &numCores, &size, NULL, 0); if (ret != 0) { if (errno == ENOENT) { /* entry not present, fall back on 1 */ - numPhysicalCores = 1; + numCores = 1; } else { - perror("zstd: can't get number of physical cpus"); + perror("zstd: can't get number of cpus"); exit(1); } } - return numPhysicalCores; + return numCores; } } @@ -461,16 +1574,16 @@ int UTIL_countPhysicalCores(void) /* parse /proc/cpuinfo * siblings / cpu cores should give hyperthreading ratio * otherwise fall back on sysconf */ -int UTIL_countPhysicalCores(void) +int UTIL_countCores(int logical) { - static int numPhysicalCores = 0; + static int numCores = 0; - if (numPhysicalCores != 0) return numPhysicalCores; + if (numCores != 0) return numCores; - numPhysicalCores = (int)sysconf(_SC_NPROCESSORS_ONLN); - if (numPhysicalCores == -1) { + numCores = (int)sysconf(_SC_NPROCESSORS_ONLN); + if (numCores == -1) { /* value not queryable, fall back on 1 */ - return numPhysicalCores = 1; + return numCores = 1; } /* try to determine if there's hyperthreading */ @@ -484,7 +1597,7 @@ int UTIL_countPhysicalCores(void) if (cpuinfo == NULL) { /* fall back on the sysconf value */ - return numPhysicalCores; + return numCores; } /* assume the cpu cores/siblings values will be constant across all @@ -493,7 +1606,7 @@ int UTIL_countPhysicalCores(void) if (fgets(buff, BUF_SIZE, cpuinfo) != NULL) { if (strncmp(buff, "siblings", 8) == 0) { const char* const sep = strchr(buff, ':'); - if (*sep == '\0') { + if (sep == NULL || *sep == '\0') { /* formatting was broken? */ goto failed; } @@ -502,7 +1615,7 @@ int UTIL_countPhysicalCores(void) } if (strncmp(buff, "cpu cores", 9) == 0) { const char* const sep = strchr(buff, ':'); - if (*sep == '\0') { + if (sep == NULL || *sep == '\0') { /* formatting was broken? 
*/ goto failed; } @@ -512,77 +1625,107 @@ int UTIL_countPhysicalCores(void) } else if (ferror(cpuinfo)) { /* fall back on the sysconf value */ goto failed; - } - } - if (siblings && cpu_cores) { + } } + if (siblings && cpu_cores && siblings > cpu_cores) { ratio = siblings / cpu_cores; } + + if (ratio && numCores > ratio && !logical) { + numCores = numCores / ratio; + } + failed: fclose(cpuinfo); - return numPhysicalCores = numPhysicalCores / ratio; + return numCores; } } #elif defined(__FreeBSD__) -#include #include /* Use physical core sysctl when available * see: man 4 smp, man 3 sysctl */ -int UTIL_countPhysicalCores(void) +int UTIL_countCores(int logical) { - static int numPhysicalCores = 0; /* freebsd sysctl is native int sized */ - if (numPhysicalCores != 0) return numPhysicalCores; + static int numCores = 0; /* freebsd sysctl is native int sized */ +#if __FreeBSD_version >= 1300008 + static int perCore = 1; +#endif + if (numCores != 0) return numCores; #if __FreeBSD_version >= 1300008 - { size_t size = sizeof(numPhysicalCores); - int ret = sysctlbyname("kern.smp.cores", &numPhysicalCores, &size, NULL, 0); - if (ret == 0) return numPhysicalCores; + { size_t size = sizeof(numCores); + int ret = sysctlbyname("kern.smp.cores", &numCores, &size, NULL, 0); + if (ret == 0) { + if (logical) { + ret = sysctlbyname("kern.smp.threads_per_core", &perCore, &size, NULL, 0); + /* default to physical cores if logical cannot be read */ + if (ret == 0) + numCores *= perCore; + } + + return numCores; + } if (errno != ENOENT) { - perror("zstd: can't get number of physical cpus"); + perror("zstd: can't get number of cpus"); exit(1); } /* sysctl not present, fall through to older sysconf method */ } +#else + /* suppress unused parameter warning */ + (void) logical; #endif - numPhysicalCores = (int)sysconf(_SC_NPROCESSORS_ONLN); - if (numPhysicalCores == -1) { + numCores = (int)sysconf(_SC_NPROCESSORS_ONLN); + if (numCores == -1) { /* value not queryable, fall back on 1 */ - numPhysicalCores = 1; + numCores = 1; } - return numPhysicalCores; + return numCores; } -#elif defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) +#elif defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__CYGWIN__) /* Use POSIX sysconf * see: man 3 sysconf */ -int UTIL_countPhysicalCores(void) +int UTIL_countCores(int logical) { - static int numPhysicalCores = 0; + static int numCores = 0; - if (numPhysicalCores != 0) return numPhysicalCores; + /* suppress unused parameter warning */ + (void)logical; - numPhysicalCores = (int)sysconf(_SC_NPROCESSORS_ONLN); - if (numPhysicalCores == -1) { + if (numCores != 0) return numCores; + + numCores = (int)sysconf(_SC_NPROCESSORS_ONLN); + if (numCores == -1) { /* value not queryable, fall back on 1 */ - return numPhysicalCores = 1; + return numCores = 1; } - return numPhysicalCores; + return numCores; } #else -int UTIL_countPhysicalCores(void) +int UTIL_countCores(int logical) { + /* suppress unused parameter warning */ + (void)logical; + /* assume 1 */ return 1; } #endif -#if defined (__cplusplus) +int UTIL_countPhysicalCores(void) +{ + return UTIL_countCores(0); +} + +int UTIL_countLogicalCores(void) +{ + return UTIL_countCores(1); } -#endif diff --git a/programs/util.h b/programs/util.h index d6e5bb550ec..65e12633a67 100644 --- a/programs/util.h +++ b/programs/util.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -11,45 +11,35 @@ #ifndef UTIL_H_MODULE #define UTIL_H_MODULE -#if defined (__cplusplus) -extern "C" { -#endif - - /*-**************************************** * Dependencies ******************************************/ #include "platform.h" /* PLATFORM_POSIX_VERSION, ZSTD_NANOSLEEP_SUPPORT, ZSTD_SETPRIORITY_SUPPORT */ -#include /* malloc, realloc, free */ #include /* size_t, ptrdiff_t */ -#include /* fprintf */ +#include /* FILE */ #include /* stat, utime */ #include /* stat, chmod */ -#if defined(_MSC_VER) -# include /* utime */ -# include /* _chmod */ -#else -# include /* chown, stat */ -# include /* utime */ +#include "../lib/common/mem.h" /* U64 */ +#if !(defined(_MSC_VER) || defined(__MINGW32__) || defined (__MSVCRT__)) +#include #endif -#include /* clock_t, clock, CLOCKS_PER_SEC, nanosleep */ -#include "mem.h" /* U32, U64 */ - /*-************************************************************ -* Avoid fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW +* Fix fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW ***************************************************************/ -#if defined(_MSC_VER) && (_MSC_VER >= 1400) -# define UTIL_fseek _fseeki64 +#if defined(LIBC_NO_FSEEKO) +/* Some older libc implementations don't include these functions (e.g. Bionic < 24) */ +# define UTIL_fseek fseek +#elif defined(_MSC_VER) && (_MSC_VER >= 1400) +# define UTIL_fseek _fseeki64 #elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */ # define UTIL_fseek fseeko #elif defined(__MINGW32__) && defined(__MSVCRT__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) -# define UTIL_fseek fseeko64 +# define UTIL_fseek fseeko64 #else -# define UTIL_fseek fseek +# define UTIL_fseek fseek #endif - /*-************************************************* * Sleep & priority functions: Windows - Posix - others ***************************************************/ @@ -74,19 +64,13 @@ extern "C" { # define SET_REALTIME_PRIORITY /* disabled */ # endif -#else /* unknown non-unix operating systen */ +#else /* unknown non-unix operating system */ # define UTIL_sleep(s) /* disabled */ # define UTIL_sleepMilli(milli) /* disabled */ # define SET_REALTIME_PRIORITY /* disabled */ #endif -/*-************************************* -* Constants -***************************************/ -#define LIST_SIZE_INCREASE (8*1024) - - /*-**************************************** * Compiler specifics ******************************************/ @@ -104,79 +88,275 @@ extern "C" { #endif +#if defined (__cplusplus) +extern "C" { +#endif + /*-**************************************** * Console log ******************************************/ extern int g_utilDisplayLevel; -#define UTIL_DISPLAY(...) fprintf(stderr, __VA_ARGS__) -#define UTIL_DISPLAYLEVEL(l, ...) { if (g_utilDisplayLevel>=l) { UTIL_DISPLAY(__VA_ARGS__); } } + +/** + * Displays a message prompt and returns success (0) if first character from stdin + * matches any from acceptableLetters. Otherwise, returns failure (1) and displays abortMsg. + * If any of the inputs are stdin itself, then automatically return failure (1). 
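A sketch of a typical call site for this prompt helper (prompt text, abort message, and accepted letters are illustrative choices, not taken from the patch):

    #include "util.h"

    /* guard a destructive action behind a y/N prompt;
     * stdinIsDataInput should be non-zero when stdin already carries data */
    static int confirmOverwrite(int stdinIsDataInput)
    {
        /* returns 0 when the user typed 'y' or 'Y', 1 otherwise */
        return UTIL_requireUserConfirmation(
                    "Overwrite destination? (y/N) ",
                    "Not overwritten.",
                    "yY",
                    stdinIsDataInput);
    }
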
+ */ +int UTIL_requireUserConfirmation(const char* prompt, const char* abortMsg, const char* acceptableLetters, int hasStdinInput); /*-**************************************** * File functions ******************************************/ #if defined(_MSC_VER) - #define chmod _chmod typedef struct __stat64 stat_t; + typedef int mode_t; +#elif defined(__MINGW32__) && defined (__MSVCRT__) + typedef struct _stati64 stat_t; #else typedef struct stat stat_t; #endif +#if defined(_MSC_VER) || defined(__MINGW32__) || defined (__MSVCRT__) /* windows support */ +#define PATH_SEP '\\' +#define STRDUP(s) _strdup(s) +#else +#define PATH_SEP '/' +#define STRDUP(s) strdup(s) +#endif + -int UTIL_fileExist(const char* filename); +/** + * Calls platform's equivalent of stat() on filename and writes info to statbuf. + * Returns success (1) or failure (0). + * + * UTIL_fstat() is like UTIL_stat() but takes an optional fd that refers to the + * file in question. It turns out that this can be meaningfully faster. If fd is + * -1, behaves just like UTIL_stat() (i.e., falls back to using the filename). + */ +int UTIL_stat(const char* filename, stat_t* statbuf); +int UTIL_fstat(const int fd, const char* filename, stat_t* statbuf); + +/** + * Instead of getting a file's stats, this updates them with the info in the + * provided stat_t. Currently sets owner, group, atime, and mtime. Will only + * update this info for regular files. + * + * UTIL_setFDStat() also takes an fd, and will preferentially use that to + * indicate which file to modify, If fd is -1, it will fall back to using the + * filename. + */ +int UTIL_setFileStat(const char* filename, const stat_t* statbuf); +int UTIL_setFDStat(const int fd, const char* filename, const stat_t* statbuf); + +/** + * Set atime to now and mtime to the st_mtim in statbuf. + * + * Directly wraps utime() or utimensat(). Returns -1 on error. + * Does not validate filename is valid. + */ +int UTIL_utime(const char* filename, const stat_t *statbuf); + +/* + * These helpers operate on a pre-populated stat_t, i.e., the result of + * calling one of the above functions. + */ + +int UTIL_isRegularFileStat(const stat_t* statbuf); +int UTIL_isDirectoryStat(const stat_t* statbuf); +int UTIL_isFIFOStat(const stat_t* statbuf); +int UTIL_isBlockDevStat(const stat_t* statbuf); +U64 UTIL_getFileSizeStat(const stat_t* statbuf); + +/** + * Like chmod(), but only modifies regular files. Provided statbuf may be NULL, + * in which case this function will stat() the file internally, in order to + * check whether it should be modified. + * + * If fd is -1, fd is ignored and the filename is used. + */ +int UTIL_chmod(char const* filename, const stat_t* statbuf, mode_t permissions); +int UTIL_fchmod(const int fd, char const* filename, const stat_t* statbuf, mode_t permissions); + +/* + * In the absence of a pre-existing stat result on the file in question, these + * functions will do a stat() call internally and then use that result to + * compute the needed information. 
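The intended pattern for the *Stat variants is to stat once and then query the result several times, as in this sketch (illustrative only):

    #include <stdio.h>
    #include "util.h"

    /* one stat() call, several cheap queries on the result */
    static void describeEntry(const char* name)
    {
        stat_t sb;
        if (!UTIL_stat(name, &sb)) return;   /* cannot stat */
        if (UTIL_isDirectoryStat(&sb)) {
            printf("%s: directory\n", name);
        } else if (UTIL_isRegularFileStat(&sb)) {
            printf("%s: %llu bytes\n", name,
                   (unsigned long long)UTIL_getFileSizeStat(&sb));
        }
    }
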
+ */ + +int UTIL_isFdRegularFile(int fd); int UTIL_isRegularFile(const char* infilename); -int UTIL_setFileStat(const char* filename, stat_t* statbuf); -U32 UTIL_isDirectory(const char* infilename); -int UTIL_getFileStat(const char* infilename, stat_t* statbuf); +int UTIL_isDirectory(const char* infilename); int UTIL_isSameFile(const char* file1, const char* file2); +int UTIL_isSameFileStat(const char* file1, const char* file2, const stat_t* file1Stat, const stat_t* file2Stat); +int UTIL_isCompressedFile(const char* infilename, const char *extensionList[]); +int UTIL_isLink(const char* infilename); +int UTIL_isFIFO(const char* infilename); +int UTIL_isFileDescriptorPipe(const char* filename); + +/** + * Returns whether the given file descriptor is a console. + * Allows faking whether stdin/stdout/stderr is a console + * using UTIL_fake*IsConsole(). + */ +int UTIL_isConsole(FILE* file); + +/** + * Pretends that stdin/stdout/stderr is a console for testing. + */ +void UTIL_fakeStdinIsConsole(void); +void UTIL_fakeStdoutIsConsole(void); +void UTIL_fakeStderrIsConsole(void); + +/** + * Emit traces for functions that read or modify file metadata. + */ +void UTIL_traceFileStat(void); -U32 UTIL_isLink(const char* infilename); #define UTIL_FILESIZE_UNKNOWN ((U64)(-1)) U64 UTIL_getFileSize(const char* infilename); +U64 UTIL_getTotalFileSize(const char* const * fileNamesTable, unsigned nbFiles); + +/** + * Takes @size in bytes, + * and prepares the components to pretty-print it in a scaled way. + * The components in the returned struct should be passed in + * precision, value, suffix order to a "%.*f%s" format string. + * Output policy depends on @g_utilDisplayLevel: + * in verbose mode (@g_utilDisplayLevel >= 4), + * sizes are not scaled down. + */ +typedef struct { + double value; + int precision; + const char* suffix; +} UTIL_HumanReadableSize_t; -U64 UTIL_getTotalFileSize(const char* const * const fileNamesTable, unsigned nbFiles); +UTIL_HumanReadableSize_t UTIL_makeHumanReadableSize(U64 size); -/* - * A modified version of realloc(). - * If UTIL_realloc() fails the original block is freed. -*/ -UTIL_STATIC void* UTIL_realloc(void *ptr, size_t size) -{ - void *newptr = realloc(ptr, size); - if (newptr) return newptr; - free(ptr); - return NULL; -} +int UTIL_compareStr(const void *p1, const void *p2); +const char* UTIL_getFileExtension(const char* infilename); +void UTIL_mirrorSourceFilesDirectories(const char** fileNamesTable, unsigned int nbFiles, const char *outDirName); +char* UTIL_createMirroredDestDirName(const char* srcFileName, const char* outDirRootName); + + + +/*-**************************************** + * Lists of Filenames + ******************************************/ + +typedef struct +{ const char** fileNames; + char* buf; /* fileNames are stored in this buffer (or are read-only) */ + size_t tableSize; /* nb of fileNames */ + size_t tableCapacity; +} FileNamesTable; + +/*! UTIL_createFileNamesTable_fromFileList() : + * read filenames from @inputFileName, and store them into returned object. + * @return : a FileNamesTable*, or NULL in case of error (ex: @inputFileName doesn't exist). + * Note: inputFileSize must be less than 50MB + */ +FileNamesTable* +UTIL_createFileNamesTable_fromFileList(const char* inputFileName); +
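A minimal usage sketch for the file-list reader (the name "filelist.txt" is illustrative):

    #include <stdio.h>
    #include "util.h"

    static void printFileList(void)
    {
        FileNamesTable* const fnt = UTIL_createFileNamesTable_fromFileList("filelist.txt");
        if (fnt == NULL) return;       /* missing file, unsupported type, or empty */
        {   size_t n;
            for (n = 0; n < fnt->tableSize; n++)
                printf("%s\n", fnt->fileNames[n]);
        }
        UTIL_freeFileNamesTable(fnt);  /* frees the table and its backing buffer */
    }
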
+ * @return : resulting FileNamesTable* object. + */ +FileNamesTable* +UTIL_assembleFileNamesTable(const char** filenames, size_t tableSize, char* buf); + +/*! UTIL_freeFileNamesTable() : + * This function is compatible with NULL argument and never fails. + */ +void UTIL_freeFileNamesTable(FileNamesTable* table); + +/*! UTIL_mergeFileNamesTable(): + * @return : FileNamesTable*, concatenation of @table1 and @table2 + * note: @table1 and @table2 are consumed (freed) by this operation + */ +FileNamesTable* +UTIL_mergeFileNamesTable(FileNamesTable* table1, FileNamesTable* table2); + + +/*! UTIL_expandFNT() : + * read names from @fnt, and expand those corresponding to directories + * update @fnt, now containing only file names, + * note : in case of error, @fnt[0] is NULL + */ +void UTIL_expandFNT(FileNamesTable** fnt, int followLinks); -int UTIL_prepareFileList(const char* dirName, char** bufStart, size_t* pos, char** bufEnd, int followLinks); +/*! UTIL_createFNT_fromROTable() : + * copy the @filenames pointer table inside the returned object. + * The names themselves are still stored in their original buffer, which must outlive the object. + * @return : a FileNamesTable* object, + * or NULL in case of error + */ +FileNamesTable* +UTIL_createFNT_fromROTable(const char** filenames, size_t nbFilenames); + +/*! UTIL_allocateFileNamesTable() : + * Allocates a table of const char*, to insert read-only names later on. + * The created FileNamesTable* doesn't hold a buffer. + * @return : FileNamesTable*, or NULL, if allocation fails. + */ +FileNamesTable* UTIL_allocateFileNamesTable(size_t tableSize); + +/*! UTIL_searchFileNamesTable() : + * Searches through entries in FileNamesTable for a specific name. + * @return : index of entry if found or -1 if not found + */ +int UTIL_searchFileNamesTable(FileNamesTable* table, char const* name); + +/*! UTIL_refFilename() : + * Add a reference to read-only name into @fnt table. + * As @filename is only referenced, its lifetime must outlive @fnt. + * Internal table must be large enough to reference a new member, + * otherwise it's UB (protected by an `assert()`). + */ +void UTIL_refFilename(FileNamesTable* fnt, const char* filename); + + +/* UTIL_createExpandedFNT() is only active if UTIL_HAS_CREATEFILELIST is defined. + * Otherwise, UTIL_createExpandedFNT() is a shell function which does nothing + * apart from displaying a warning message. + */ #ifdef _WIN32 # define UTIL_HAS_CREATEFILELIST #elif defined(__linux__) || (PLATFORM_POSIX_VERSION >= 200112L) /* opendir, readdir require POSIX.1-2001 */ # define UTIL_HAS_CREATEFILELIST -# include <dirent.h> /* opendir, readdir */ -# include <string.h> /* strerror, memcpy */ +# define UTIL_HAS_MIRRORFILELIST #else -#endif /* #ifdef _WIN32 */ + /* do not define UTIL_HAS_CREATEFILELIST */ +#endif -/* - * UTIL_createFileList - takes a list of files and directories (params: inputNames, inputNamesNb), scans directories, - * and returns a new list of files (params: return value, allocatedBuffer, allocatedNamesNb). - * After finishing usage of the list the structures should be freed with UTIL_freeFileList(params: return value, allocatedBuffer) - * In case of error UTIL_createFileList returns NULL and UTIL_freeFileList should not be called.
- */ -const char** -UTIL_createFileList(const char **inputNames, unsigned inputNamesNb, - char** allocatedBuffer, unsigned* allocatedNamesNb, - int followLinks); - -UTIL_STATIC void UTIL_freeFileList(const char** filenameTable, char* allocatedBuffer) -{ - if (allocatedBuffer) free(allocatedBuffer); - if (filenameTable) free((void*)filenameTable); -} +/*! UTIL_createExpandedFNT() : + * read names from @filenames, and expand those corresponding to directories. + * links are followed or not depending on @followLinks directive. + * @return : an expanded FileNamesTable*, where each name is a file + * or NULL in case of error + */ +FileNamesTable* +UTIL_createExpandedFNT(const char* const* filenames, size_t nbFilenames, int followLinks); + +#if defined(_WIN32) +DWORD CountSetBits(ULONG_PTR bitMask); +#endif + +/*-**************************************** + * System + ******************************************/ + +int UTIL_countCores(int logical); int UTIL_countPhysicalCores(void); +int UTIL_countLogicalCores(void); + #if defined (__cplusplus) } #endif diff --git a/programs/windres/generate_res.bat b/programs/windres/generate_res.bat deleted file mode 100644 index 7ff9aef5102..00000000000 --- a/programs/windres/generate_res.bat +++ /dev/null @@ -1,11 +0,0 @@ -@echo off -REM http://stackoverflow.com/questions/708238/how-do-i-add-an-icon-to-a-mingw-gcc-compiled-executable - -where /q windres.exe -IF ERRORLEVEL 1 ( - ECHO The windres.exe is missing. Ensure it is installed and placed in your PATH. - EXIT /B -) ELSE ( - windres.exe -I ../lib -I windres -i windres/zstd.rc -O coff -F pe-x86-64 -o windres/zstd64.res - windres.exe -I ../lib -I windres -i windres/zstd.rc -O coff -F pe-i386 -o windres/zstd32.res -) diff --git a/programs/windres/verrsrc.h b/programs/windres/verrsrc.h index e282add025a..61b1f3ddc7e 100644 --- a/programs/windres/verrsrc.h +++ b/programs/windres/verrsrc.h @@ -1,3 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ /* minimal set of defines required to generate zstd.res from zstd.rc */ #define VS_VERSION_INFO 1 diff --git a/programs/windres/zstd.rc b/programs/windres/zstd.rc index f5e404730d2..a2118c2df10 100644 --- a/programs/windres/zstd.rc +++ b/programs/windres/zstd.rc @@ -32,11 +32,11 @@ BEGIN BEGIN BLOCK "040904B0" BEGIN - VALUE "CompanyName", "Yann Collet, Facebook, Inc." + VALUE "CompanyName", "Meta Platforms, Inc." VALUE "FileDescription", "Zstandard - Fast and efficient compression algorithm" VALUE "FileVersion", ZSTD_VERSION_STRING VALUE "InternalName", "zstd.exe" - VALUE "LegalCopyright", "Copyright (c) 2013-present, Yann Collet, Facebook, Inc." + VALUE "LegalCopyright", "Copyright (c) Meta Platforms, Inc. and affiliates." 
VALUE "OriginalFilename", "zstd.exe" VALUE "ProductName", "Zstandard" VALUE "ProductVersion", ZSTD_VERSION_STRING diff --git a/programs/windres/zstd32.res b/programs/windres/zstd32.res index 84349925492..9984215b543 100644 Binary files a/programs/windres/zstd32.res and b/programs/windres/zstd32.res differ diff --git a/programs/windres/zstd64.res b/programs/windres/zstd64.res index da859810fc8..615f3894430 100644 Binary files a/programs/windres/zstd64.res and b/programs/windres/zstd64.res differ diff --git a/programs/zstd.1 b/programs/zstd.1 index cb4e1271a0f..e49a182d506 100644 --- a/programs/zstd.1 +++ b/programs/zstd.1 @@ -1,5 +1,5 @@ . -.TH "ZSTD" "1" "December 2018" "zstd 1.3.8" "User Commands" +.TH "ZSTD" "1" "February 2025" "zstd 1.5.7" "User Commands" . .SH "NAME" \fBzstd\fR \- zstd, zstdmt, unzstd, zstdcat \- Compress or decompress \.zst files @@ -17,10 +17,10 @@ \fBzstdcat\fR is equivalent to \fBzstd \-dcf\fR . .SH "DESCRIPTION" -\fBzstd\fR is a fast lossless compression algorithm and data compression tool, with command line syntax similar to \fBgzip (1)\fR and \fBxz (1)\fR\. It is based on the \fBLZ77\fR family, with further FSE & huff0 entropy stages\. \fBzstd\fR offers highly configurable compression speed, with fast modes at > 200 MB/s per core, and strong modes nearing lzma compression ratios\. It also features a very fast decoder, with speeds > 500 MB/s per core\. +\fBzstd\fR is a fast lossless compression algorithm and data compression tool, with command line syntax similar to \fBgzip\fR(1) and \fBxz\fR(1)\. It is based on the \fBLZ77\fR family, with further FSE & huff0 entropy stages\. \fBzstd\fR offers highly configurable compression speed, from fast modes at > 200 MB/s per core, to strong modes with excellent compression ratios\. It also features a very fast decoder, with speeds > 500 MB/s per core, which remains roughly stable at all compression settings\. . .P -\fBzstd\fR command line syntax is generally similar to gzip, but features the following differences : +\fBzstd\fR command line syntax is generally similar to gzip, but features the following few differences: . .IP "\(bu" 4 Source files are preserved by default\. It\'s possible to remove them automatically by using the \fB\-\-rm\fR command\. @@ -29,15 +29,18 @@ Source files are preserved by default\. It\'s possible to remove them automatica When compressing a single file, \fBzstd\fR displays progress notifications and result summary by default\. Use \fB\-q\fR to turn them off\. . .IP "\(bu" 4 -\fBzstd\fR does not accept input from console, but it properly accepts \fBstdin\fR when it\'s not the console\. +\fBzstd\fR displays a short help page when command line is an error\. Use \fB\-q\fR to turn it off\. . .IP "\(bu" 4 -\fBzstd\fR displays a short help page when command line is an error\. Use \fB\-q\fR to turn it off\. +\fBzstd\fR does not accept input from console, though it does accept \fBstdin\fR when it\'s not the console\. +. +.IP "\(bu" 4 +\fBzstd\fR does not store the input\'s filename or attributes, only its contents\. . .IP "" 0 . .P -\fBzstd\fR compresses or decompresses each \fIfile\fR according to the selected operation mode\. If no \fIfiles\fR are given or \fIfile\fR is \fB\-\fR, \fBzstd\fR reads from standard input and writes the processed data to standard output\. \fBzstd\fR will refuse to write compressed data to standard output if it is a terminal : it will display an error message and skip the \fIfile\fR\. 
Similarly, \fBzstd\fR will refuse to read compressed data from standard input if it is a terminal\. +\fBzstd\fR processes each \fIfile\fR according to the selected operation mode\. If no \fIfiles\fR are given or \fIfile\fR is \fB\-\fR, \fBzstd\fR reads from standard input and writes the processed data to standard output\. \fBzstd\fR will refuse to write compressed data to standard output if it is a terminal: it will display an error message and skip the file\. Similarly, \fBzstd\fR will refuse to read compressed data from standard input if it is a terminal\. . .P Unless \fB\-\-stdout\fR or \fB\-o\fR is specified, \fIfiles\fR are written to a new file whose name is derived from the source \fIfile\fR name: @@ -50,12 +53,12 @@ When decompressing, the \fB\.zst\fR suffix is removed from the source filename t . .IP "" 0 . -.SS "Concatenation with \.zst files" -It is possible to concatenate \fB\.zst\fR files as is\. \fBzstd\fR will decompress such files as if they were a single \fB\.zst\fR file\. +.SS "Concatenation with \.zst Files" +It is possible to concatenate multiple \fB\.zst\fR files\. \fBzstd\fR will decompress such an agglomerated file as if it were a single \fB\.zst\fR file\. . .SH "OPTIONS" . -.SS "Integer suffixes and special values" +.SS "Integer Suffixes and Special Values" In most places where an integer argument is expected, an optional suffix is supported to easily indicate large integers\. There must be no space between the integer and the suffix\. . .TP @@ -66,7 +69,7 @@ Multiply the integer by 1,024 (2^10)\. \fBKi\fR, \fBK\fR, and \fBKB\fR are accep \fBMiB\fR Multiply the integer by 1,048,576 (2^20)\. \fBMi\fR, \fBM\fR, and \fBMB\fR are accepted as synonyms for \fBMiB\fR\. . -.SS "Operation mode" +.SS "Operation Mode" If multiple operation mode options are given, the last one takes effect\. . .TP @@ -79,244 +82,209 @@ Decompress\. . .TP \fB\-t\fR, \fB\-\-test\fR -Test the integrity of compressed \fIfiles\fR\. This option is equivalent to \fB\-\-decompress \-\-stdout\fR except that the decompressed data is discarded instead of being written to standard output\. No files are created or removed\. +Test the integrity of compressed \fIfiles\fR\. This option is equivalent to \fB\-\-decompress \-\-stdout > /dev/null\fR: the decompressed data is discarded and checksummed for errors\. No files are created or removed\. . .TP \fB\-b#\fR -Benchmark file(s) using compression level # +Benchmark file(s) using compression level \fI#\fR\. See \fIBENCHMARK\fR below for a description of this operation\. . .TP -\fB\-\-train FILEs\fR -Use FILEs as a training set to create a dictionary\. The training set should contain a lot of small files (> 100)\. +\fB\-\-train FILES\fR +Use \fIFILES\fR as a training set to create a dictionary\. The training set should contain a lot of small files (> 100)\. See \fIDICTIONARY BUILDER\fR below for a description of this operation\. . .TP \fB\-l\fR, \fB\-\-list\fR -Display information related to a zstd compressed file, such as size, ratio, and checksum\. Some of these fields may not be available\. This command can be augmented with the \fB\-v\fR modifier\. +Display information related to a zstd compressed file, such as size, ratio, and checksum\. Some of these fields may not be available\. This command\'s output can be augmented with the \fB\-v\fR modifier\. . -.SS "Operation modifiers" +.SS "Operation Modifiers" . -.TP -\fB\-#\fR -\fB#\fR compression level [1\-19] (default: 3) +.IP "\(bu" 4 +\fB\-#\fR: selects \fB#\fR compression level [1\-19] (default: 3)\.
Higher compression levels \fIgenerally\fR produce higher compression ratio at the expense of speed and memory\. A rough rule of thumb is that compression speed is expected to be divided by 2 every 2 levels\. Technically, each level is mapped to a set of advanced parameters (that can also be modified individually, see below)\. Because the compressor\'s behavior highly depends on the content to compress, there\'s no guarantee of a smooth progression from one level to another\. . -.TP -\fB\-\-fast[=#]\fR -switch to ultra\-fast compression levels\. If \fB=#\fR is not present, it defaults to \fB1\fR\. The higher the value, the faster the compression speed, at the cost of some compression ratio\. This setting overwrites compression level if one was set previously\. Similarly, if a compression level is set after \fB\-\-fast\fR, it overrides it\. +.IP "\(bu" 4 +\fB\-\-ultra\fR: unlocks high compression levels 20+ (maximum 22), using a lot more memory\. Note that decompression will also require more memory when using these levels\. . -.TP -\fB\-\-ultra\fR -unlocks high compression levels 20+ (maximum 22), using a lot more memory\. Note that decompression will also require more memory when using these levels\. +.IP "\(bu" 4 +\fB\-\-fast[=#]\fR: switch to ultra\-fast compression levels\. If \fB=#\fR is not present, it defaults to \fB1\fR\. The higher the value, the faster the compression speed, at the cost of some compression ratio\. This setting overwrites compression level if one was set previously\. Similarly, if a compression level is set after \fB\-\-fast\fR, it overrides it\. . -.TP -\fB\-\-long[=#]\fR -enables long distance matching with \fB#\fR \fBwindowLog\fR, if not \fB#\fR is not present it defaults to \fB27\fR\. This increases the window size (\fBwindowLog\fR) and memory usage for both the compressor and decompressor\. This setting is designed to improve the compression ratio for files with long matches at a large distance\. +.IP "\(bu" 4 +\fB\-T#\fR, \fB\-\-threads=#\fR: Compress using \fB#\fR working threads (default: between 1 and 4 depending on physical CPU cores; see \fBZSTD_NBTHREADS\fR below)\. If \fB#\fR is 0, attempt to detect and use the number of physical CPU cores\. In all cases, the nb of threads is capped to \fBZSTDMT_NBWORKERS_MAX\fR, which is either 64 in 32\-bit mode, or 256 for 64\-bit environments\. This modifier does nothing if \fBzstd\fR is compiled without multithread support\. . -.IP -Note: If \fBwindowLog\fR is set to larger than 27, \fB\-\-long=windowLog\fR or \fB\-\-memory=windowSize\fR needs to be passed to the decompressor\. +.IP "\(bu" 4 +\fB\-\-single\-thread\fR: Use a single thread for both I/O and compression\. As compression is serialized with I/O, this can be slightly slower\. Single\-thread mode features significantly lower memory usage, which can be useful for systems with limited amount of memory, such as 32\-bit systems\. . -.TP -\fB\-T#\fR, \fB\-\-threads=#\fR -Compress using \fB#\fR working threads (default: 1)\. If \fB#\fR is 0, attempt to detect and use the number of physical CPU cores\. In all cases, the nb of threads is capped to ZSTDMT_NBTHREADS_MAX==200\. This modifier does nothing if \fBzstd\fR is compiled without multithread support\. +.IP +Note 1: this mode is the only available one when multithread support is disabled\. . -.TP -\fB\-\-single\-thread\fR -Does not spawn a thread for compression, use a single thread for both I/O and compression\. In this mode, compression is serialized with I/O, which is slightly slower\. 
(This is different from \fB\-T1\fR, which spawns 1 compression thread in parallel of I/O)\. This mode is the only one available when multithread support is disabled\. Single\-thread mode features lower memory usage\. Final compressed result is slightly different from \fB\-T1\fR\. +.IP +Note 2: this mode is different from \fB\-T1\fR, which spawns 1 compression thread in parallel with I/O\. Final compressed result is also slightly different from \fB\-T1\fR\. . -.TP -\fB\-\-adapt[=min=#,max=#]\fR -\fBzstd\fR will dynamically adapt compression level to perceived I/O conditions\. Compression level adaptation can be observed live by using command \fB\-v\fR\. Adaptation can be constrained between supplied \fBmin\fR and \fBmax\fR levels\. The feature works when combined with multi\-threading and \fB\-\-long\fR mode\. It does not work with \fB\-\-single\-thread\fR\. It sets window size to 8 MB by default (can be changed manually, see \fBwlog\fR)\. Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible\. \fInote\fR : at the time of this writing, \fB\-\-adapt\fR can remain stuck at low speed when combined with multiple worker threads (>=2)\. +.IP "\(bu" 4 +\fB\-\-auto\-threads={physical,logical} (default: physical)\fR: When using a default amount of threads via \fB\-T0\fR, choose the default based on the number of detected physical or logical cores\. . -.TP -\fB\-\-rsyncable\fR -\fBzstd\fR will periodically synchronize the compression state to make the compressed file more rsync\-friendly\. There is a negligible impact to compression ratio, and the faster compression levels will see a small compression speed hit\. This feature does not work with \fB\-\-single\-thread\fR\. You probably don\'t want to use it with long range mode, since it will decrease the effectiveness of the synchronization points, but your milage may vary\. +.IP "\(bu" 4 +\fB\-\-adapt[=min=#,max=#]\fR: \fBzstd\fR will dynamically adapt compression level to perceived I/O conditions\. Compression level adaptation can be observed live by using command \fB\-v\fR\. Adaptation can be constrained between supplied \fBmin\fR and \fBmax\fR levels\. The feature works when combined with multi\-threading and \fB\-\-long\fR mode\. It does not work with \fB\-\-single\-thread\fR\. It sets window size to 8 MiB by default (can be changed manually, see \fBwlog\fR)\. Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible\. . -.TP -\fB\-D file\fR -use \fBfile\fR as Dictionary to compress or decompress FILE(s) +.IP +\fINote\fR: at the time of this writing, \fB\-\-adapt\fR can remain stuck at low speed when combined with multiple worker threads (>=2)\. . -.TP -\fB\-\-no\-dictID\fR -do not store dictionary ID within frame header (dictionary compression)\. The decoder will have to rely on implicit knowledge about which dictionary to use, it won\'t be able to check if it\'s correct\. +.IP "\(bu" 4 +\fB\-\-long[=#]\fR: enables long distance matching with \fB#\fR \fBwindowLog\fR, if \fB#\fR is not present it defaults to \fB27\fR\. This increases the window size (\fBwindowLog\fR) and memory usage for both the compressor and decompressor\. This setting is designed to improve the compression ratio for files with long matches at a large distance\. . -.TP -\fB\-o file\fR -save result into \fBfile\fR (only possible with a single \fIINPUT\-FILE\fR) +.IP +Note: If \fBwindowLog\fR is set to larger than 27, \fB\-\-long=windowLog\fR or \fB\-\-memory=windowSize\fR needs to be passed to the decompressor\. . 
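For readers driving the library directly, the `--long` behavior documented above maps onto public parameters from `zstd.h`. A minimal sketch (stable libzstd API; error handling elided, every call actually returns a `size_t` checkable with `ZSTD_isError()`):

```c
#include <zstd.h>

/* Sketch: library-side equivalent of `--long=30`. */
void use_long_mode(ZSTD_CCtx* cctx, ZSTD_DCtx* dctx)
{
    /* Compressor: enable long-distance matching and raise the window. */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableLongDistanceMatching, 1);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 30);

    /* Decompressor: windows above the 27-bit default must be allowed
     * explicitly, mirroring `--long=30` / `--memory=1GB` on the CLI. */
    ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 30);
}
```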
-.TP -\fB\-f\fR, \fB\-\-force\fR -overwrite output without prompting, and (de)compress symbolic links +.IP "\(bu" 4 +\fB\-\-max\fR: set advanced parameters to maximum compression\. warning: this setting is very slow and uses a lot of resources\. It\'s inappropriate for 32\-bit mode and therefore disabled in this mode\. . -.TP -\fB\-c\fR, \fB\-\-stdout\fR -force write to standard output, even if it is the console +.IP "\(bu" 4 +\fB\-D DICT\fR: use \fBDICT\fR as Dictionary to compress or decompress FILE(s) . -.TP -\fB\-\-[no\-]sparse\fR -enable / disable sparse FS support, to make files with many zeroes smaller on disk\. Creating sparse files may save disk space and speed up decompression by reducing the amount of disk I/O\. default: enabled when output is into a file, and disabled when output is stdout\. This setting overrides default and can force sparse mode over stdout\. +.IP "\(bu" 4 +\fB\-\-patch\-from FILE\fR: Specify the file to be used as a reference point for zstd\'s diff engine\. This is effectively dictionary compression with some convenient parameter selection, namely that \fIwindowSize\fR > \fIsrcSize\fR\. . -.TP -\fB\-\-rm\fR -remove source file(s) after successful compression or decompression +.IP +Note: cannot use both this and \fB\-D\fR together\. . -.TP -\fB\-k\fR, \fB\-\-keep\fR -keep source file(s) after successful compression or decompression\. This is the default behavior\. +.IP +Note: \fB\-\-long\fR mode will be automatically activated if \fIchainLog\fR < \fIfileLog\fR (\fIfileLog\fR being the \fIwindowLog\fR required to cover the whole file)\. You can also manually force it\. . -.TP -\fB\-r\fR -operate recursively on directories +.IP +Note: up to level 15, you can use \fB\-\-patch\-from\fR in \fB\-\-single\-thread\fR mode to improve compression ratio marginally at the cost of speed\. Using \'\-\-single\-thread\' above level 15 will lead to lower compression ratios\. . -.TP -\fB\-\-format=FORMAT\fR -compress and decompress in other formats\. If compiled with support, zstd can compress to or decompress from other compression algorithm formats\. Possibly available options are \fBzstd\fR, \fBgzip\fR, \fBxz\fR, \fBlzma\fR, and \fBlz4\fR\. If no such format is provided, \fBzstd\fR is the default\. +.IP +Note: for level 19, you can get increased compression ratio at the cost of speed by specifying \fB\-\-zstd=targetLength=\fR to be something large (i\.e\. 4096), and by setting a large \fB\-\-zstd=chainLog=\fR\. . -.TP -\fB\-h\fR/\fB\-H\fR, \fB\-\-help\fR -display help/long help and exit +.IP "\(bu" 4 +\fB\-\-rsyncable\fR: \fBzstd\fR will periodically synchronize the compression state to make the compressed file more rsync\-friendly\. There is a negligible impact to compression ratio, and a potential impact to compression speed, perceptible at higher speeds, for example when combining \fB\-\-rsyncable\fR with many parallel worker threads\. This feature does not work with \fB\-\-single\-thread\fR\. You probably don\'t want to use it with long range mode, since it will decrease the effectiveness of the synchronization points, but your mileage may vary\. . -.TP -\fB\-V\fR, \fB\-\-version\fR -display version number and exit\. Advanced : \fB\-vV\fR also displays supported formats\. \fB\-vvV\fR also displays POSIX support\. +.IP "\(bu" 4 +\fB\-C\fR, \fB\-\-[no\-]check\fR: add integrity check computed from uncompressed data (default: enabled) . 
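As a point of reference, the `--check` / `--no-check` toggle just described corresponds to a single frame parameter in the library; a hedged sketch using the stable `zstd.h` API:

```c
#include <zstd.h>

/* Sketch: `-C` / `--no-check` equivalent. When enabled, a 32-bit
 * checksum (derived from XXH64 of the uncompressed data, per RFC 8878)
 * is appended to each frame and verified during decompression. */
void set_integrity_check(ZSTD_CCtx* cctx, int enable)
{
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, enable);
}
```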
-.TP -\fB\-v\fR -verbose mode +.IP "\(bu" 4 +\fB\-\-[no\-]content\-size\fR: enable / disable whether or not the original size of the file is placed in the header of the compressed file\. The default option is \fB\-\-content\-size\fR (meaning that the original size will be placed in the header)\. . -.TP -\fB\-q\fR, \fB\-\-quiet\fR -suppress warnings, interactivity, and notifications\. specify twice to suppress errors too\. +.IP "\(bu" 4 +\fB\-\-no\-dictID\fR: do not store dictionary ID within frame header (dictionary compression)\. The decoder will have to rely on implicit knowledge about which dictionary to use, it won\'t be able to check if it\'s correct\. . -.TP -\fB\-C\fR, \fB\-\-[no\-]check\fR -add integrity check computed from uncompressed data (default: enabled) +.IP "\(bu" 4 +\fB\-M#\fR, \fB\-\-memory=#\fR: Set a memory usage limit\. By default, \fBzstd\fR uses 128 MiB for decompression as the maximum amount of memory the decompressor is allowed to use, but you can override this manually if need be in either direction (i\.e\. you can increase or decrease it)\. . -.TP -\fB\-\-\fR -All arguments after \fB\-\-\fR are treated as files +.IP +This is also used during compression when used with \fB\-\-patch\-from=\fR\. In this case, this parameter overrides the maximum size allowed for a dictionary (128 MiB)\. . -.SH "DICTIONARY BUILDER" -\fBzstd\fR offers \fIdictionary\fR compression, which greatly improves efficiency on small files and messages\. It\'s possible to train \fBzstd\fR with a set of samples, the result of which is saved into a file called a \fBdictionary\fR\. Then during compression and decompression, reference the same dictionary, using command \fB\-D dictionaryFileName\fR\. Compression of small files similar to the sample set will be greatly improved\. +.IP +Additionally, this can be used to limit memory for dictionary training\. This parameter overrides the default limit of 2 GiB\. zstd will load training samples up to the memory limit and ignore the rest\. . -.TP -\fB\-\-train FILEs\fR -Use FILEs as training set to create a dictionary\. The training set should contain a lot of small files (> 100), and weight typically 100x the target dictionary size (for example, 10 MB for a 100 KB dictionary)\. +.IP "\(bu" 4 +\fB\-\-stream\-size=#\fR: Sets the pledged source size of input coming from a stream\. This value must be exact, as it will be included in the produced frame header\. Incorrect stream sizes will cause an error\. This information will be used to better optimize compression parameters, resulting in better and potentially faster compression, especially for smaller source sizes\. . -.IP -Supports multithreading if \fBzstd\fR is compiled with threading support\. Additional parameters can be specified with \fB\-\-train\-fastcover\fR\. The legacy dictionary builder can be accessed with \fB\-\-train\-legacy\fR\. The cover dictionary builder can be accessed with \fB\-\-train\-cover\fR\. Equivalent to \fB\-\-train\-fastcover=d=8,steps=4\fR\. +.IP "\(bu" 4 +\fB\-\-size\-hint=#\fR: When handling input from a stream, \fBzstd\fR must guess how large the source size will be when optimizing compression parameters\. If the stream size is relatively small, this guess may be a poor one, resulting in a higher compression ratio than expected\. This feature allows for controlling the guess when needed\. Exact guesses result in better compression ratios\. Overestimates result in slightly degraded compression ratios, while underestimates may result in significant degradation\. .
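The `--stream-size` modifier above corresponds to pledging the source size on the compression context; a small sketch against the stable `zstd.h` API (as the option requires, the pledged value must be exact):

```c
#include <zstd.h>

/* Sketch: `--stream-size=#` equivalent. The pledged size is written
 * into the frame header and also guides parameter selection; a
 * mismatch is reported as an error at end of frame. */
size_t pledge_stream_size(ZSTD_CCtx* cctx, unsigned long long exactSrcSize)
{
    return ZSTD_CCtx_setPledgedSrcSize(cctx, exactSrcSize);
}
```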
-.TP -\fB\-o file\fR -Dictionary saved into \fBfile\fR (default name: dictionary)\. +.IP "\(bu" 4 +\fB\-\-target\-compressed\-block\-size=#\fR: Attempt to produce compressed blocks of approximately this size\. This will split larger blocks in order to approach this target\. This feature is notably useful for improved latency, when the receiver can leverage receiving early incomplete data\. This parameter defines a loose target: compressed blocks will target this size "on average", but individual blocks can still be larger or smaller\. Enabling this feature can decrease compression speed by up to ~10% at level 1\. Higher levels will see smaller relative speed regression, becoming invisible at higher settings\. . -.TP -\fB\-\-maxdict=#\fR -Limit dictionary to specified size (default: 112640)\. +.IP "\(bu" 4 +\fB\-f\fR, \fB\-\-force\fR: disable input and output checks\. Allows overwriting existing files, input from console, output to stdout, operating on links, block devices, etc\. During decompression and when the output destination is stdout, pass\-through unrecognized formats as\-is\. . -.TP -\fB\-#\fR -Use \fB#\fR compression level during training (optional)\. Will generate statistics more tuned for selected compression level, resulting in a \fIsmall\fR compression ratio improvement for this level\. +.IP "\(bu" 4 +\fB\-c\fR, \fB\-\-stdout\fR: write to standard output (even if it is the console); keep original files (disable \fB\-\-rm\fR)\. . -.TP -\fB\-B#\fR -Split input files in blocks of size # (default: no split) +.IP "\(bu" 4 +\fB\-o FILE\fR: save result into \fBFILE\fR\. Note that this operation is in conflict with \fB\-c\fR\. If both operations are present on the command line, the last expressed one wins\. . -.TP -\fB\-\-dictID=#\fR -A dictionary ID is a locally unique ID that a decoder can use to verify it is using the right dictionary\. By default, zstd will create a 4\-bytes random number ID\. It\'s possible to give a precise number instead\. Short numbers have an advantage : an ID < 256 will only need 1 byte in the compressed frame header, and an ID < 65536 will only need 2 bytes\. This compares favorably to 4 bytes default\. However, it\'s up to the dictionary manager to not assign twice the same ID to 2 different dictionaries\. +.IP "\(bu" 4 +\fB\-\-[no\-]sparse\fR: enable / disable sparse FS support, to make files with many zeroes smaller on disk\. Creating sparse files may save disk space and speed up decompression by reducing the amount of disk I/O\. default: enabled when output is into a file, and disabled when output is stdout\. This setting overrides default and can force sparse mode over stdout\. . -.TP -\fB\-\-train\-cover[=k#,d=#,steps=#,split=#]\fR -Select parameters for the default dictionary builder algorithm named cover\. If \fId\fR is not specified, then it tries \fId\fR = 6 and \fId\fR = 8\. If \fIk\fR is not specified, then it tries \fIsteps\fR values in the range [50, 2000]\. If \fIsteps\fR is not specified, then the default value of 40 is used\. If \fIsplit\fR is not specified or split <= 0, then the default value of 100 is used\. Requires that \fId\fR <= \fIk\fR\. +.IP "\(bu" 4 +\fB\-\-[no\-]pass\-through\fR enable / disable passing through uncompressed files as\-is\. During decompression when pass\-through is enabled, unrecognized formats will be copied as\-is from the input to the output\. By default, pass\-through will occur when the output destination is stdout and the force (\fB\-f\fR) option is set\. . 
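The `--target-compressed-block-size` modifier described above also has a library-side counterpart; a sketch, assuming a libzstd recent enough to expose `ZSTD_c_targetCBlockSize` in the stable API (promoted to stable in v1.5.6, per the upstream release notes):

```c
#include <zstd.h>

/* Sketch: aim for ~16 KiB compressed blocks, e.g. to reduce latency
 * when a receiver can start consuming early, incomplete data. The
 * target is loose: individual blocks may still be larger or smaller. */
void set_target_block_size(ZSTD_CCtx* cctx)
{
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, 16 * 1024);
}
```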
-.IP -Selects segments of size \fIk\fR with highest score to put in the dictionary\. The score of a segment is computed by the sum of the frequencies of all the subsegments of size \fId\fR\. Generally \fId\fR should be in the range [6, 8], occasionally up to 16, but the algorithm will run faster with d <= \fI8\fR\. Good values for \fIk\fR vary widely based on the input data, but a safe range is [2 * \fId\fR, 2000]\. If \fIsplit\fR is 100, all input samples are used for both training and testing to find optimal \fId\fR and \fIk\fR to build dictionary\. Supports multithreading if \fBzstd\fR is compiled with threading support\. +.IP "\(bu" 4 +\fB\-\-rm\fR: remove source file(s) after successful compression or decompression\. This command is silently ignored if output is \fBstdout\fR\. If used in combination with \fB\-o\fR, triggers a confirmation prompt (which can be silenced with \fB\-f\fR), as this is a destructive operation\. . -.IP -Examples: +.IP "\(bu" 4 +\fB\-k\fR, \fB\-\-keep\fR: keep source file(s) after successful compression or decompression\. This is the default behavior\. . -.IP -\fBzstd \-\-train\-cover FILEs\fR +.IP "\(bu" 4 +\fB\-r\fR: operate recursively on directories\. It selects all files in the named directory and all its subdirectories\. This can be useful both to reduce command line typing, and to circumvent shell expansion limitations, when there are a lot of files and naming breaks the maximum size of a command line\. . -.IP -\fBzstd \-\-train\-cover=k=50,d=8 FILEs\fR +.IP "\(bu" 4 +\fB\-\-filelist FILE\fR read a list of files to process as content from \fBFILE\fR\. Format is compatible with \fBls\fR output, with one file per line\. . -.IP -\fBzstd \-\-train\-cover=d=8,steps=500 FILEs\fR +.IP "\(bu" 4 +\fB\-\-output\-dir\-flat DIR\fR: resulting files are stored into target \fBDIR\fR directory, instead of same directory as origin file\. Be aware that this command can introduce name collision issues, if multiple files, from different directories, end up having the same name\. Collision resolution ensures first file with a given name will be present in \fBDIR\fR, while in combination with \fB\-f\fR, the last file will be present instead\. . -.IP -\fBzstd \-\-train\-cover=k=50 FILEs\fR +.IP "\(bu" 4 +\fB\-\-output\-dir\-mirror DIR\fR: similar to \fB\-\-output\-dir\-flat\fR, the output files are stored underneath target \fBDIR\fR directory, but this option will replicate input directory hierarchy into output \fBDIR\fR\. . .IP -\fBzstd \-\-train\-cover=k=50,split=60 FILEs\fR +If input directory contains "\.\.", the files in this directory will be ignored\. If input directory is an absolute directory (i\.e\. "/var/tmp/abc"), it will be stored into the "output\-dir/var/tmp/abc"\. If there are multiple input files or directories, name collision resolution will follow the same rules as \fB\-\-output\-dir\-flat\fR\. . -.TP -\fB\-\-train\-fastcover[=k#,d=#,f=#,steps=#,split=#,accel=#]\fR -Same as cover but with extra parameters \fIf\fR and \fIaccel\fR and different default value of split If \fIsplit\fR is not specified, then it tries \fIsplit\fR = 75\. If \fIf\fR is not specified, then it tries \fIf\fR = 20\. Requires that 0 < \fIf\fR < 32\. If \fIaccel\fR is not specified, then it tries \fIaccel\fR = 1\. Requires that 0 < \fIaccel\fR <= 10\. Requires that \fId\fR = 6 or \fId\fR = 8\. +.IP "\(bu" 4 +\fB\-\-format=FORMAT\fR: compress and decompress in other formats\. If compiled with support, zstd can compress to or decompress from other compression algorithm formats\. 
Possibly available options are \fBzstd\fR, \fBgzip\fR, \fBxz\fR, \fBlzma\fR, and \fBlz4\fR\. If no such format is provided, \fBzstd\fR is the default\. . -.IP -\fIf\fR is log of size of array that keeps track of frequency of subsegments of size \fId\fR\. The subsegment is hashed to an index in the range [0,2^\fIf\fR \- 1]\. It is possible that 2 different subsegments are hashed to the same index, and they are considered as the same subsegment when computing frequency\. Using a higher \fIf\fR reduces collision but takes longer\. +.IP "\(bu" 4 +\fB\-h\fR/\fB\-H\fR, \fB\-\-help\fR: display help/long help and exit . -.IP -Examples: +.IP "\(bu" 4 +\fB\-V\fR, \fB\-\-version\fR: display version number and immediately exit\. note that, since it exits, flags specified after \fB\-V\fR are effectively ignored\. Advanced: \fB\-vV\fR also displays supported formats\. \fB\-vvV\fR also displays POSIX support\. \fB\-qV\fR will only display the version number, suitable for machine reading\. . -.IP -\fBzstd \-\-train\-fastcover FILEs\fR +.IP "\(bu" 4 +\fB\-v\fR, \fB\-\-verbose\fR: verbose mode, display more information . -.IP -\fBzstd \-\-train\-fastcover=d=8,f=15,accel=2 FILEs\fR +.IP "\(bu" 4 +\fB\-q\fR, \fB\-\-quiet\fR: suppress warnings, interactivity, and notifications\. specify twice to suppress errors too\. . -.TP -\fB\-\-train\-legacy[=selectivity=#]\fR -Use legacy dictionary builder algorithm with the given dictionary \fIselectivity\fR (default: 9)\. The smaller the \fIselectivity\fR value, the denser the dictionary, improving its efficiency but reducing its possible maximum size\. \fB\-\-train\-legacy=s=#\fR is also accepted\. +.IP "\(bu" 4 +\fB\-\-no\-progress\fR: do not display the progress bar, but keep all other messages\. . -.IP -Examples: +.IP "\(bu" 4 +\fB\-\-show\-default\-cparams\fR: shows the default compression parameters that will be used for a particular input file, based on the provided compression level and the input size\. If the provided file is not a regular file (e\.g\. a pipe), this flag will output the parameters used for inputs of unknown size\. . -.IP -\fBzstd \-\-train\-legacy FILEs\fR +.IP "\(bu" 4 +\fB\-\-exclude\-compressed\fR: only compress files that are not already compressed\. . -.IP -\fBzstd \-\-train\-legacy=selectivity=8 FILEs\fR +.IP "\(bu" 4 +\fB\-\-\fR: All arguments after \fB\-\-\fR are treated as files . -.SH "BENCHMARK" +.IP "" 0 . -.TP -\fB\-b#\fR -benchmark file(s) using compression level # +.SS "gzip Operation Modifiers" +When invoked via a \fBgzip\fR symlink, \fBzstd\fR will support further options that intend to mimic the \fBgzip\fR behavior: . .TP -\fB\-e#\fR -benchmark file(s) using multiple compression levels, from \fB\-b#\fR to \fB\-e#\fR (inclusive) +\fB\-n\fR, \fB\-\-no\-name\fR +do not store the original filename and timestamps when compressing a file\. This is the default behavior and hence a no\-op\. . .TP -\fB\-i#\fR -minimum evaluation time, in seconds (default: 3s), benchmark mode only +\fB\-\-best\fR +alias to the option \fB\-9\fR\. . -.TP -\fB\-B#\fR, \fB\-\-block\-size=#\fR -cut file(s) into independent blocks of size # (default: no block) +.SS "Environment Variables" +Employing environment variables to set parameters has security implications\. Therefore, this avenue is intentionally limited\. Only \fBZSTD_CLEVEL\fR and \fBZSTD_NBTHREADS\fR are currently supported\. They set the default compression level and number of threads to use during compression, respectively\. . 
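The lookup-and-override behavior summarized here (and detailed in the next paragraphs) can be sketched in a few lines; this is illustrative pseudologic under stated assumptions, not the actual CLI source:

```c
#include <stdio.h>
#include <stdlib.h>

/* Sketch: how a ZSTD_CLEVEL-style default interacts with an explicit
 * command-line level. In this illustration, cmdLineLevel < 0 means
 * "no -# flag was given". */
static int effective_level(int cmdLineLevel)
{
    if (cmdLineLevel >= 0) return cmdLineLevel;   /* `-#` always wins */
    const char* env = getenv("ZSTD_CLEVEL");
    if (env != NULL) {
        char* end;
        long v = strtol(env, &end, 10);
        if (*end == '\0' && v >= 1 && v <= 19) return (int)v;
        fprintf(stderr, "ignoring invalid ZSTD_CLEVEL=%s\n", env);
    }
    return 3;   /* documented default level */
}
```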
-.TP -\fB\-\-priority=rt\fR -set process priority to real\-time +.P +\fBZSTD_CLEVEL\fR can be used to set the level between 1 and 19 (the "normal" range)\. If the value of \fBZSTD_CLEVEL\fR is not a valid integer, it will be ignored with a warning message\. \fBZSTD_CLEVEL\fR just replaces the default compression level (\fB3\fR)\. . .P -\fBOutput Format:\fR CompressionLevel#Filename : IntputSize \-> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed +\fBZSTD_NBTHREADS\fR can be used to set the number of threads \fBzstd\fR will attempt to use during compression\. If the value of \fBZSTD_NBTHREADS\fR is not a valid unsigned integer, it will be ignored with a warning message\. \fBZSTD_NBTHREADS\fR has a default value of \fBmax(1, min(4, nbCores/4))\fR, and is capped at \fBZSTDMT_NBWORKERS_MAX\fR, which is either 64 in 32\-bit mode, or 256 for 64\-bit environments\. \fBzstd\fR must be compiled with multithread support for this variable to have any effect\. . .P -\fBMethodology:\fR For both compression and decompression speed, the entire input is compressed/decompressed in\-memory to measure speed\. A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy\. +They can both be overridden by corresponding command line arguments: \fB\-#\fR for compression level and \fB\-T#\fR for number of compression threads\. . .SH "ADVANCED COMPRESSION OPTIONS" +\fBzstd\fR provides 22 predefined regular compression levels plus the fast levels\. A compression level is translated internally into multiple advanced parameters that control the behavior of the compressor (one can observe the result of this translation with \fB\-\-show\-default\-cparams\fR)\. These advanced parameters can be overridden using advanced compression options\. . .SS "\-\-zstd[=options]:" -\fBzstd\fR provides 22 predefined compression levels\. The selected or default predefined compression level can be changed with advanced compression options\. The \fIoptions\fR are provided as a comma\-separated list\. You may specify only the options you want to change and the rest will be taken from the selected or default compression level\. The list of available \fIoptions\fR: +The \fIoptions\fR are provided as a comma\-separated list\. You may specify only the options you want to change and the rest will be taken from the selected or default compression level\. The list of available \fIoptions\fR: . .TP \fBstrategy\fR=\fIstrat\fR, \fBstrat\fR=\fIstrat\fR Specify a strategy used by a match finder\. . .IP -There are 9 strategies numbered from 1 to 9, from faster to stronger: 1=ZSTD_fast, 2=ZSTD_dfast, 3=ZSTD_greedy, 4=ZSTD_lazy, 5=ZSTD_lazy2, 6=ZSTD_btlazy2, 7=ZSTD_btopt, 8=ZSTD_btultra, 9=ZSTD_btultra2\. +There are 9 strategies numbered from 1 to 9, from fastest to strongest: 1=\fBZSTD_fast\fR, 2=\fBZSTD_dfast\fR, 3=\fBZSTD_greedy\fR, 4=\fBZSTD_lazy\fR, 5=\fBZSTD_lazy2\fR, 6=\fBZSTD_btlazy2\fR, 7=\fBZSTD_btopt\fR, 8=\fBZSTD_btultra\fR, 9=\fBZSTD_btultra2\fR\. . .TP \fBwindowLog\fR=\fIwlog\fR, \fBwlog\fR=\fIwlog\fR @@ -333,20 +301,20 @@ Note: If \fBwindowLog\fR is set to larger than 27, \fB\-\-long=windowLog\fR or \ Specify the maximum number of bits for a hash table\. . .IP -Bigger hash tables cause less collisions which usually makes compression faster, but requires more memory during compression\. +Bigger hash tables cause fewer collisions which usually makes compression faster, but requires more memory during compression\. . .IP -The minimum \fIhlog\fR is 6 (64 B) and the maximum is 26 (128 MiB)\.
+The minimum \fIhlog\fR is 6 (64 entries / 256 B) and the maximum is 30 (1B entries / 4 GiB)\. . .TP \fBchainLog\fR=\fIclog\fR, \fBclog\fR=\fIclog\fR -Specify the maximum number of bits for a hash chain or a binary tree\. +Specify the maximum number of bits for the secondary search structure, whose form depends on the selected \fBstrategy\fR\. . .IP -Higher numbers of bits increases the chance to find a match which usually improves compression ratio\. It also slows down compression speed and increases memory requirements for compression\. This option is ignored for the ZSTD_fast strategy\. +Higher numbers of bits increases the chance to find a match which usually improves compression ratio\. It also slows down compression speed and increases memory requirements for compression\. This option is ignored for the \fBZSTD_fast\fR \fBstrategy\fR, which only has the primary hash table\. . .IP -The minimum \fIclog\fR is 6 (64 B) and the maximum is 28 (256 MiB)\. +The minimum \fIclog\fR is 6 (64 entries / 256 B) and the maximum is 29 (512M entries / 2 GiB) on 32\-bit platforms and 30 (1B entries / 4 GiB) on 64\-bit platforms\. . .TP \fBsearchLog\fR=\fIslog\fR, \fBslog\fR=\fIslog\fR @@ -356,7 +324,7 @@ Specify the maximum number of searches in a hash chain or a binary tree using lo More searches increases the chance to find a match which usually increases compression ratio but decreases compression speed\. . .IP -The minimum \fIslog\fR is 1 and the maximum is 26\. +The minimum \fIslog\fR is 1 and the maximum is \'windowLog\' \- 1\. . .TP \fBminMatch\fR=\fImml\fR, \fBmml\fR=\fImml\fR @@ -369,27 +337,40 @@ Larger search lengths usually decrease compression ratio but improve decompressi The minimum \fImml\fR is 3 and the maximum is 7\. . .TP -\fBtargetLen\fR=\fItlen\fR, \fBtlen\fR=\fItlen\fR +\fBtargetLength\fR=\fItlen\fR, \fBtlen\fR=\fItlen\fR The impact of this field varies depending on selected strategy\. . .IP -For ZSTD_btopt, ZSTD_btultra and ZSTD_btultra2, it specifies the minimum match length that causes match finder to stop searching\. A larger \fBtargetLen\fR usually improves compression ratio but decreases compression speed\. +For \fBZSTD_btopt\fR, \fBZSTD_btultra\fR and \fBZSTD_btultra2\fR, it specifies the minimum match length that causes match finder to stop searching\. A larger \fBtargetLength\fR usually improves compression ratio but decreases compression speed\. . .IP -For ZSTD_fast, it triggers ultra\-fast mode when > 0\. The value represents the amount of data skipped between match sampling\. Impact is reversed : a larger \fBtargetLen\fR increases compression speed but decreases compression ratio\. +For \fBZSTD_fast\fR, it triggers ultra\-fast mode when > 0\. The value represents the amount of data skipped between match sampling\. Impact is reversed: a larger \fBtargetLength\fR increases compression speed but decreases compression ratio\. . .IP For all other strategies, this field has no impact\. . .IP -The minimum \fItlen\fR is 0 and the maximum is 999\. +The minimum \fItlen\fR is 0 and the maximum is 128 KiB\. . .TP \fBoverlapLog\fR=\fIovlog\fR, \fBovlog\fR=\fIovlog\fR Determine \fBoverlapSize\fR, amount of data reloaded from previous job\. This parameter is only available when multithreading is enabled\. Reloading more data improves compression ratio, but decreases speed\. . .IP -The minimum \fIovlog\fR is 0, and the maximum is 9\. 1 means "no overlap", hence completely independent jobs\. 9 means "full overlap", meaning up to \fBwindowSize\fR is reloaded from previous job\.
Reducing \fIovlog\fR by 1 reduces the reloaded amount by a factor 2\. For example, 8 means "windowSize/2", and 6 means "windowSize/8"\. Value 0 is special and means "default" : \fIovlog\fR is automatically determined by \fBzstd\fR\. In which case, \fIovlog\fR will range from 6 to 9, depending on selected \fIstrat\fR\. +The minimum \fIovlog\fR is 0, and the maximum is 9\. 1 means "no overlap", hence completely independent jobs\. 9 means "full overlap", meaning up to \fBwindowSize\fR is reloaded from previous job\. Reducing \fIovlog\fR by 1 reduces the reloaded amount by a factor 2\. For example, 8 means "windowSize/2", and 6 means "windowSize/8"\. Value 0 is special and means "default": \fIovlog\fR is automatically determined by \fBzstd\fR\. In which case, \fIovlog\fR will range from 6 to 9, depending on selected \fIstrat\fR\. +. +.TP +\fBldmHashRateLog\fR=\fIlhrlog\fR, \fBlhrlog\fR=\fIlhrlog\fR +Specify the frequency of inserting entries into the long distance matching hash table\. +. +.IP +This option is ignored unless long distance matching is enabled\. +. +.IP +Larger values will improve compression speed\. Deviating far from the default value will likely result in a decrease in compression ratio\. +. +.IP +The default value varies between 4 and 7, depending on \fBstrategy\fR\. . .TP \fBldmHashLog\fR=\fIlhlog\fR, \fBlhlog\fR=\fIlhlog\fR @@ -402,7 +383,7 @@ This option is ignored unless long distance matching is enabled\. Bigger hash tables usually improve compression ratio at the expense of more memory during compression and a decrease in compression speed\. . .IP -The minimum \fIlhlog\fR is 6 and the maximum is 26 (default: 20)\. +The minimum \fIlhlog\fR is 6 and the maximum is 30 (default: \fBwindowLog \- ldmHashRateLog\fR)\. . .TP \fBldmMinMatch\fR=\fIlmml\fR, \fBlmml\fR=\fIlmml\fR @@ -415,7 +396,7 @@ This option is ignored unless long distance matching is enabled\. Larger/very small values usually decrease compression ratio\. . .IP -The minimum \fIlmml\fR is 4 and the maximum is 4096 (default: 64)\. +The minimum \fIlmml\fR is 4 and the maximum is 4096 (default: 32 to 64, depending on \fBstrategy\fR)\. . .TP \fBldmBucketSizeLog\fR=\fIlblog\fR, \fBlblog\fR=\fIlblog\fR @@ -428,29 +409,169 @@ This option is ignored unless long distance matching is enabled\. Larger bucket sizes improve collision resolution but decrease compression speed\. . .IP -The minimum \fIlblog\fR is 0 and the maximum is 8 (default: 3)\. +The minimum \fIlblog\fR is 1 and the maximum is 8 (default: 4 to 8, depending on \fBstrategy\fR)\. +. +.SS "Example" +The following parameters set advanced compression options to something similar to predefined level 19 for files bigger than 256 KB: +. +.P +\fB\-\-zstd\fR=wlog=23,clog=23,hlog=22,slog=6,mml=3,tlen=48,strat=6 +. +.SS "\-B#:" +Specify the size of each compression job\. This parameter is only available when multi\-threading is enabled\. Each compression job is run in parallel, so this value indirectly impacts the nb of active threads\. Default job size varies depending on compression level (generally \fB4 * windowSize\fR)\. \fB\-B#\fR makes it possible to manually select a custom size\. Note that job size must respect a minimum value which is enforced transparently\. This minimum is either 512 KB, or \fBoverlapSize\fR, whichever is largest\. Different job sizes will lead to non\-identical compressed frames\. +. +.SH "DICTIONARY BUILDER" +\fBzstd\fR offers \fIdictionary\fR compression, which greatly improves efficiency on small files and messages\.
It\'s possible to train \fBzstd\fR with a set of samples, the result of which is saved into a file called a \fBdictionary\fR\. Then, during compression and decompression, reference the same dictionary, using command \fB\-D dictionaryFileName\fR\. Compression of small files similar to the sample set will be greatly improved\. . .TP -\fBldmHashRateLog\fR=\fIlhrlog\fR, \fBlhrlog\fR=\fIlhrlog\fR -Specify the frequency of inserting entries into the long distance matching hash table\. +\fB\-\-train FILEs\fR +Use FILEs as training set to create a dictionary\. The training set should ideally contain a lot of samples (> 100), and weigh typically 100x the target dictionary size (for example, ~10 MB for a 100 KB dictionary)\. \fB\-\-train\fR can be combined with \fB\-r\fR to indicate a directory rather than listing all the files, which can be useful to circumvent shell expansion limits\. . .IP -This option is ignored unless long distance matching is enabled\. +Since dictionary compression is mostly effective for small files, the expectation is that the training set will only contain small files\. In the case where some samples happen to be large, only the first 128 KiB of these samples will be used for training\. . .IP -Larger values will improve compression speed\. Deviating far from the default value will likely result in a decrease in compression ratio\. +\fB\-\-train\fR supports multithreading if \fBzstd\fR is compiled with threading support (default)\. Additional advanced parameters can be specified with \fB\-\-train\-fastcover\fR\. The legacy dictionary builder can be accessed with \fB\-\-train\-legacy\fR\. The slower cover dictionary builder can be accessed with \fB\-\-train\-cover\fR\. Default \fB\-\-train\fR is equivalent to \fB\-\-train\-fastcover=d=8,steps=4\fR\. +. +.TP +\fB\-o FILE\fR +Dictionary saved into \fBFILE\fR (default name: dictionary)\. +. +.TP +\fB\-\-maxdict=#\fR +Limit dictionary to specified size (default: 112640 bytes)\. As usual, quantities are expressed in bytes by default, and it\'s possible to employ suffixes (like \fBKB\fR or \fBMB\fR) to specify larger values\. +. +.TP +\fB\-#\fR +Use \fB#\fR compression level during training (optional)\. Will generate statistics more tuned for selected compression level, resulting in a \fIsmall\fR compression ratio improvement for this level\. +. +.TP +\fB\-B#\fR +Split input files into blocks of size # (default: no split) +. +.TP +\fB\-M#\fR, \fB\-\-memory=#\fR +Limit the amount of sample data loaded for training (default: 2 GB)\. Note that the default (2 GB) is also the maximum\. This parameter can be useful in situations where the training set size is not well controlled and could be potentially very large\. Since speed of the training process is directly correlated to the size of the training sample set, a smaller sample set leads to faster training\. . .IP -The default value is \fBwlog \- lhlog\fR\. +In situations where the training set is larger than maximum memory, the CLI will randomly select samples among the available ones, up to the maximum allowed memory budget\. This is meant to improve dictionary relevance by mitigating the potential impact of clustering, such as selecting only files from the beginning of a list sorted by modification date, or sorted by alphabetical order\. The randomization process is deterministic, so training of the same list of files with the same parameters will lead to the creation of the same dictionary\. .
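Programmatically, `--train` corresponds to the public dictionary-builder entry point in `zdict.h`; a minimal sketch (stable API, error handling reduced to a single check):

```c
#include <zdict.h>

/* Sketch: train a dictionary from nbSamples samples stored back to back
 * in `samples`, with per-sample sizes in `sampleSizes`. Returns the
 * dictionary size written into dictBuf, or 0 on error. */
size_t train_dictionary(void* dictBuf, size_t dictCapacity,
                        const void* samples, const size_t* sampleSizes,
                        unsigned nbSamples)
{
    size_t r = ZDICT_trainFromBuffer(dictBuf, dictCapacity,
                                     samples, sampleSizes, nbSamples);
    return ZDICT_isError(r) ? 0 : r;
}
```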
-.SS "Example" -The following parameters sets advanced compression options to something similar to predefined level 19 for files bigger than 256 KB: +.TP +\fB\-\-dictID=#\fR +A dictionary ID is a locally unique ID\. The decoder will use this value to verify it is using the right dictionary\. By default, zstd will create a 4\-bytes random number ID\. It\'s possible to provide an explicit number ID instead\. It\'s up to the dictionary manager to not assign twice the same ID to 2 different dictionaries\. Note that short numbers have an advantage: an ID < 256 will only need 1 byte in the compressed frame header, and an ID < 65536 will only need 2 bytes\. This compares favorably to 4 bytes default\. +. +.IP +Note that RFC8878 reserves IDs less than 32768 and greater than or equal to 2^31, so they should not be used in public\. +. +.TP +\fB\-\-train\-cover[=k#,d=#,steps=#,split=#,shrink[=#]]\fR +Select parameters for the default dictionary builder algorithm named cover\. If \fId\fR is not specified, then it tries \fId\fR = 6 and \fId\fR = 8\. If \fIk\fR is not specified, then it tries \fIsteps\fR values in the range [50, 2000]\. If \fIsteps\fR is not specified, then the default value of 40 is used\. If \fIsplit\fR is not specified or split <= 0, then the default value of 100 is used\. Requires that \fId\fR <= \fIk\fR\. If \fIshrink\fR flag is not used, then the default value for \fIshrinkDict\fR of 0 is used\. If \fIshrink\fR is not specified, then the default value for \fIshrinkDictMaxRegression\fR of 1 is used\. +. +.IP +Selects segments of size \fIk\fR with highest score to put in the dictionary\. The score of a segment is computed by the sum of the frequencies of all the subsegments of size \fId\fR\. Generally \fId\fR should be in the range [6, 8], occasionally up to 16, but the algorithm will run faster with d <= \fI8\fR\. Good values for \fIk\fR vary widely based on the input data, but a safe range is [2 * \fId\fR, 2000]\. If \fIsplit\fR is 100, all input samples are used for both training and testing to find optimal \fId\fR and \fIk\fR to build dictionary\. Supports multithreading if \fBzstd\fR is compiled with threading support\. Having \fIshrink\fR enabled takes a truncated dictionary of minimum size and doubles in size until compression ratio of the truncated dictionary is at most \fIshrinkDictMaxRegression%\fR worse than the compression ratio of the largest dictionary\. +. +.IP +Examples: +. +.IP +\fBzstd \-\-train\-cover FILEs\fR +. +.IP +\fBzstd \-\-train\-cover=k=50,d=8 FILEs\fR +. +.IP +\fBzstd \-\-train\-cover=d=8,steps=500 FILEs\fR +. +.IP +\fBzstd \-\-train\-cover=k=50 FILEs\fR +. +.IP +\fBzstd \-\-train\-cover=k=50,split=60 FILEs\fR +. +.IP +\fBzstd \-\-train\-cover=shrink FILEs\fR +. +.IP +\fBzstd \-\-train\-cover=shrink=2 FILEs\fR +. +.TP +\fB\-\-train\-fastcover[=k#,d=#,f=#,steps=#,split=#,accel=#]\fR +Same as cover but with extra parameters \fIf\fR and \fIaccel\fR and different default value of split If \fIsplit\fR is not specified, then it tries \fIsplit\fR = 75\. If \fIf\fR is not specified, then it tries \fIf\fR = 20\. Requires that 0 < \fIf\fR < 32\. If \fIaccel\fR is not specified, then it tries \fIaccel\fR = 1\. Requires that 0 < \fIaccel\fR <= 10\. Requires that \fId\fR = 6 or \fId\fR = 8\. +. +.IP +\fIf\fR is log of size of array that keeps track of frequency of subsegments of size \fId\fR\. The subsegment is hashed to an index in the range [0,2^\fIf\fR \- 1]\. 
It is possible that 2 different subsegments are hashed to the same index, and they are considered as the same subsegment when computing frequency\. Using a higher \fIf\fR reduces collision but takes longer\. +. +.IP +Examples: +. +.IP +\fBzstd \-\-train\-fastcover FILEs\fR +. +.IP +\fBzstd \-\-train\-fastcover=d=8,f=15,accel=2 FILEs\fR +. +.TP +\fB\-\-train\-legacy[=selectivity=#]\fR +Use legacy dictionary builder algorithm with the given dictionary \fIselectivity\fR (default: 9)\. The smaller the \fIselectivity\fR value, the denser the dictionary, improving its efficiency but reducing its achievable maximum size\. \fB\-\-train\-legacy=s=#\fR is also accepted\. +. +.IP +Examples: +. +.IP +\fBzstd \-\-train\-legacy FILEs\fR +. +.IP +\fBzstd \-\-train\-legacy=selectivity=8 FILEs\fR +. +.SH "BENCHMARK" +The \fBzstd\fR CLI provides a benchmarking mode that can be used to easily find suitable compression parameters, or alternatively to benchmark a computer\'s performance\. \fBzstd \-b [FILE(s)]\fR will benchmark \fBzstd\fR for both compression and decompression using default compression level\. Note that results are very dependent on the content being compressed\. . .P -\fB\-\-zstd\fR=wlog=23,clog=23,hlog=22,slog=6,mml=3,tlen=48,strat=6 +It\'s possible to pass multiple files to the benchmark, and even a directory with \fB\-r DIRECTORY\fR\. When no \fBFILE\fR is provided, the benchmark will use a procedurally generated \fBlorem ipsum\fR text\. . -.SS "\-B#:" -Select the size of each compression job\. This parameter is available only when multi\-threading is enabled\. Default value is \fB4 * windowSize\fR, which means it varies depending on compression level\. \fB\-B#\fR makes it possible to select a custom value\. Note that job size must respect a minimum value which is enforced transparently\. This minimum is either 1 MB, or \fBoverlapSize\fR, whichever is largest\. +.P +Benchmarking will employ \fBmax(1, min(4, nbCores/4))\fR worker threads by default in order to match the behavior of the normal CLI I/O\. +. +.IP "\(bu" 4 +\fB\-b#\fR: benchmark file(s) using compression level # +. +.IP "\(bu" 4 +\fB\-e#\fR: benchmark file(s) using multiple compression levels, from \fB\-b#\fR to \fB\-e#\fR (inclusive) +. +.IP "\(bu" 4 +\fB\-d\fR: benchmark decompression speed only (requires providing a zstd\-compressed content) +. +.IP "\(bu" 4 +\fB\-i#\fR: minimum evaluation time, in seconds (default: 3s), benchmark mode only +. +.IP "\(bu" 4 +\fB\-B#\fR, \fB\-\-block\-size=#\fR: cut file(s) into independent chunks of size # (default: no chunking) +. +.IP "\(bu" 4 +\fB\-S\fR: output one benchmark result per input file (default: consolidated result) +. +.IP "\(bu" 4 +\fB\-D dictionary\fR benchmark using dictionary +. +.IP "\(bu" 4 +\fB\-\-priority=rt\fR: set process priority to real\-time (Windows) +. +.IP "" 0 +. +.P +Beyond compression levels, benchmarking is also compatible with other parameters, such as number of threads (\fB\-T#\fR), advanced compression parameters (\fB\-\-zstd=###\fR), dictionary compression (\fB\-D dictionary\fR), or even disabling checksum verification for example\. +. +.P +\fBOutput Format:\fR CompressionLevel#Filename: InputSize \-> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed +. +.P +\fBMethodology:\fR For speed measurement, the entire input is compressed/decompressed in\-memory to measure speed\. A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy\. +. 
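The methodology paragraph above translates into a simple in-memory timing loop; a hedged sketch of the idea only (the real benchmark lives in `programs/benchzstd.c`):

```c
#include <time.h>
#include <zstd.h>

/* Sketch: compress the whole input in memory, repeating for at least
 * one second, then report throughput in MB/s. */
double bench_compress_mbps(const void* src, size_t srcSize,
                           void* dst, size_t dstCapacity, int level)
{
    clock_t t0 = clock();
    size_t iters = 0;
    do {
        ZSTD_compress(dst, dstCapacity, src, srcSize, level);
        iters++;
    } while (clock() - t0 < CLOCKS_PER_SEC);      /* run >= 1 second */
    double secs = (double)(clock() - t0) / CLOCKS_PER_SEC;
    return ((double)srcSize * (double)iters) / secs / (1000.0 * 1000.0);
}
```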
+.SH "SEE ALSO" +\fBzstdgrep\fR(1), \fBzstdless\fR(1), \fBgzip\fR(1), \fBxz\fR(1) +. +.P +The \fIzstandard\fR format is specified in Y\. Collet, "Zstandard Compression and the \'application/zstd\' Media Type", https://www\.ietf\.org/rfc/rfc8878\.txt, Internet RFC 8878 (February 2021)\. . .SH "BUGS" Report bugs at: https://github\.com/facebook/zstd/issues diff --git a/programs/zstd.1.md b/programs/zstd.1.md index 93c6fa40010..bb9258d571b 100644 --- a/programs/zstd.1.md +++ b/programs/zstd.1.md @@ -4,7 +4,7 @@ zstd(1) -- zstd, zstdmt, unzstd, zstdcat - Compress or decompress .zst files SYNOPSIS -------- -`zstd` [*OPTIONS*] [-|_INPUT-FILE_] [-o _OUTPUT-FILE_] +`zstd` [] [-|] [-o ] `zstdmt` is equivalent to `zstd -T0` @@ -16,32 +16,33 @@ SYNOPSIS DESCRIPTION ----------- `zstd` is a fast lossless compression algorithm and data compression tool, -with command line syntax similar to `gzip (1)` and `xz (1)`. +with command line syntax similar to `gzip`(1) and `xz`(1). It is based on the **LZ77** family, with further FSE & huff0 entropy stages. `zstd` offers highly configurable compression speed, -with fast modes at > 200 MB/s per core, -and strong modes nearing lzma compression ratios. -It also features a very fast decoder, with speeds > 500 MB/s per core. +from fast modes at > 200 MB/s per core, +to strong modes with excellent compression ratios. +It also features a very fast decoder, with speeds > 500 MB/s per core, +which remains roughly stable at all compression settings. `zstd` command line syntax is generally similar to gzip, -but features the following differences : +but features the following few differences: - Source files are preserved by default. It's possible to remove them automatically by using the `--rm` command. - When compressing a single file, `zstd` displays progress notifications and result summary by default. Use `-q` to turn them off. - - `zstd` does not accept input from console, - but it properly accepts `stdin` when it's not the console. - `zstd` displays a short help page when command line is an error. Use `-q` to turn it off. + - `zstd` does not accept input from console, + though it does accept `stdin` when it's not the console. + - `zstd` does not store the input's filename or attributes, only its contents. -`zstd` compresses or decompresses each _file_ according to the selected -operation mode. +`zstd` processes each _file_ according to the selected operation mode. If no _files_ are given or _file_ is `-`, `zstd` reads from standard input and writes the processed data to standard output. `zstd` will refuse to write compressed data to standard output -if it is a terminal : it will display an error message and skip the _file_. +if it is a terminal: it will display an error message and skip the file. Similarly, `zstd` will refuse to read compressed data from standard input if it is a terminal. @@ -53,14 +54,15 @@ whose name is derived from the source _file_ name: * When decompressing, the `.zst` suffix is removed from the source filename to get the target filename -### Concatenation with .zst files -It is possible to concatenate `.zst` files as is. -`zstd` will decompress such files as if they were a single `.zst` file. +### Concatenation with .zst Files +It is possible to concatenate multiple `.zst` files. `zstd` will decompress +such agglomerated file as if it was a single `.zst` file. 
OPTIONS ------- -### Integer suffixes and special values +### Integer Suffixes and Special Values + In most places where an integer argument is expected, an optional suffix is supported to easily indicate large integers. There must be no space between the integer and the suffix. @@ -72,7 +74,8 @@ There must be no space between the integer and the suffix. Multiply the integer by 1,048,576 (2\^20). `Mi`, `M`, and `MB` are accepted as synonyms for `MiB`. -### Operation mode +### Operation Mode + If multiple operation mode options are given, the last one takes effect. @@ -85,23 +88,36 @@ the last one takes effect. Decompress. * `-t`, `--test`: Test the integrity of compressed _files_. - This option is equivalent to `--decompress --stdout` except that the - decompressed data is discarded instead of being written to standard output. + This option is equivalent to `--decompress --stdout > /dev/null`, + decompressed data is discarded and checksummed for errors. No files are created or removed. * `-b#`: - Benchmark file(s) using compression level # -* `--train FILEs`: - Use FILEs as a training set to create a dictionary. + Benchmark file(s) using compression level _#_. + See _BENCHMARK_ below for a description of this operation. +* `--train FILES`: + Use _FILES_ as a training set to create a dictionary. The training set should contain a lot of small files (> 100). + See _DICTIONARY BUILDER_ below for a description of this operation. * `-l`, `--list`: Display information related to a zstd compressed file, such as size, ratio, and checksum. Some of these fields may not be available. - This command can be augmented with the `-v` modifier. + This command's output can be augmented with the `-v` modifier. -### Operation modifiers +### Operation Modifiers * `-#`: - `#` compression level \[1-19] (default: 3) + selects `#` compression level \[1-19\] (default: 3). + Higher compression levels *generally* produce higher compression ratio at the expense of speed and memory. + A rough rule of thumb is that compression speed is expected to be divided by 2 every 2 levels. + Technically, each level is mapped to a set of advanced parameters (that can also be modified individually, see below). + Because the compressor's behavior highly depends on the content to compress, there's no guarantee of a smooth progression from one level to another. +* `--ultra`: + unlocks high compression levels 20+ (maximum 22), using a lot more memory. + Decompression will also need more memory when using these levels. +* `--max`: + set advanced parameters to reach maximum compression. + warning: this setting is very slow and uses a lot of resources. + It's inappropriate for 32-bit mode and therefore disabled in this mode. * `--fast[=#]`: switch to ultra-fast compression levels. If `=#` is not present, it defaults to `1`. @@ -109,61 +125,128 @@ the last one takes effect. at the cost of some compression ratio. This setting overwrites compression level if one was set previously. Similarly, if a compression level is set after `--fast`, it overrides it. -* `--ultra`: - unlocks high compression levels 20+ (maximum 22), using a lot more memory. - Note that decompression will also require more memory when using these levels. -* `--long[=#]`: - enables long distance matching with `#` `windowLog`, if not `#` is not - present it defaults to `27`. - This increases the window size (`windowLog`) and memory usage for both the - compressor and decompressor. 
- This setting is designed to improve the compression ratio for files with - long matches at a large distance. - - Note: If `windowLog` is set to larger than 27, `--long=windowLog` or - `--memory=windowSize` needs to be passed to the decompressor. * `-T#`, `--threads=#`: - Compress using `#` working threads (default: 1). + Compress using `#` working threads (default: between 1 and 4 depending on physical CPU cores; see `ZSTD_NBTHREADS` below). If `#` is 0, attempt to detect and use the number of physical CPU cores. - In all cases, the nb of threads is capped to ZSTDMT_NBTHREADS_MAX==200. + In all cases, the nb of threads is capped to `ZSTDMT_NBWORKERS_MAX`, + which is either 64 in 32-bit mode, or 256 for 64-bit environments. This modifier does nothing if `zstd` is compiled without multithread support. * `--single-thread`: - Does not spawn a thread for compression, use a single thread for both I/O and compression. - In this mode, compression is serialized with I/O, which is slightly slower. - (This is different from `-T1`, which spawns 1 compression thread in parallel of I/O). - This mode is the only one available when multithread support is disabled. - Single-thread mode features lower memory usage. - Final compressed result is slightly different from `-T1`. -* `--adapt[=min=#,max=#]` : + Use a single thread for both I/O and compression. + As compression is serialized with I/O, this can be slightly slower. + Single-thread mode features significantly lower memory usage, + which can be useful for systems with limited amount of memory, such as 32-bit systems. + + Note 1: this mode is the only available one when multithread support is disabled. + + Note 2: this mode is different from `-T1`, which spawns 1 compression thread in parallel with I/O. + Final compressed result is also slightly different from `-T1`. +* `--auto-threads={physical,logical} (default: physical)`: + When using a default amount of threads via `-T0`, choose the default based on the number + of detected physical or logical cores. +* `--adapt[=min=#,max=#]`: `zstd` will dynamically adapt compression level to perceived I/O conditions. Compression level adaptation can be observed live by using command `-v`. Adaptation can be constrained between supplied `min` and `max` levels. The feature works when combined with multi-threading and `--long` mode. It does not work with `--single-thread`. - It sets window size to 8 MB by default (can be changed manually, see `wlog`). + It sets window size to 8 MiB by default (can be changed manually, see `wlog`). Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible. - _note_ : at the time of this writing, `--adapt` can remain stuck at low speed + + _Note_: at the time of this writing, `--adapt` can remain stuck at low speed when combined with multiple worker threads (>=2). -* `--rsyncable` : +* `--long[=#]`: + enables long distance matching with `#` `windowLog`, if `#` is not + present it defaults to `27`. + This increases the window size (`windowLog`) and memory usage for both the + compressor and decompressor. + This setting is designed to improve the compression ratio for files with + long matches at a large distance. + + Note: If `windowLog` is set to larger than 27, `--long=windowLog` or + `--memory=windowSize` needs to be passed to the decompressor. +* `-D DICT`: + use `DICT` as Dictionary to compress or decompress FILE(s) +* `--patch-from FILE`: + Specify the file to be used as a reference point for zstd's diff engine. 
+ This is effectively dictionary compression with some convenient parameter + selection, namely that _windowSize_ > _srcSize_. + + Note: cannot use both this and `-D` together. + + Note: `--long` mode will be automatically activated if _chainLog_ < _fileLog_ + (_fileLog_ being the _windowLog_ required to cover the whole file). You + can also manually force it. + + Note: up to level 15, you can use `--patch-from` in `--single-thread` mode + to improve compression ratio marginally at the cost of speed. Using + '--single-thread' above level 15 will lead to lower compression + ratios. + + Note: for level 19, you can get increased compression ratio at the cost + of speed by specifying `--zstd=targetLength=` to be something large + (i.e. 4096), and by setting a large `--zstd=chainLog=`. +* `--rsyncable`: `zstd` will periodically synchronize the compression state to make the - compressed file more rsync-friendly. There is a negligible impact to - compression ratio, and the faster compression levels will see a small - compression speed hit. + compressed file more rsync-friendly. + There is a negligible impact to compression ratio, + and a potential impact to compression speed, perceptible at higher speeds, + for example when combining `--rsyncable` with many parallel worker threads. This feature does not work with `--single-thread`. You probably don't want to use it with long range mode, since it will decrease the effectiveness of - the synchronization points, but your milage may vary. -* `-D file`: - use `file` as Dictionary to compress or decompress FILE(s) + the synchronization points, but your mileage may vary. +* `-C`, `--[no-]check`: + add integrity check computed from uncompressed data (default: enabled) +* `--[no-]content-size`: + enable / disable whether or not the original size of the file is placed in + the header of the compressed file. The default option is + `--content-size` (meaning that the original size will be placed in the header). * `--no-dictID`: do not store dictionary ID within frame header (dictionary compression). The decoder will have to rely on implicit knowledge about which dictionary to use, it won't be able to check if it's correct. -* `-o file`: - save result into `file` (only possible with a single _INPUT-FILE_) +* `-M#`, `--memory=#`: + Set a memory usage limit. By default, `zstd` uses 128 MiB for decompression + as the maximum amount of memory the decompressor is allowed to use, but you can + override this manually if need be in either direction (i.e. you can increase or + decrease it). + + This is also used during compression when using with `--patch-from=`. In this case, + this parameter overrides that maximum size allowed for a dictionary. (128 MiB). + + Additionally, this can be used to limit memory for dictionary training. This parameter + overrides the default limit of 2 GiB. zstd will load training samples up to the memory limit + and ignore the rest. +* `--stream-size=#`: + Sets the pledged source size of input coming from a stream. This value must be exact, as it + will be included in the produced frame header. Incorrect stream sizes will cause an error. + This information will be used to better optimize compression parameters, resulting in + better and potentially faster compression, especially for smaller source sizes. +* `--size-hint=#`: + When handling input from a stream, `zstd` must guess how large the source size + will be when optimizing compression parameters. 
If the stream size is relatively + small, this guess may be a poor one, resulting in a higher compression ratio than + expected. This feature allows for controlling the guess when needed. + Exact guesses result in better compression ratios. Overestimates result in slightly + degraded compression ratios, while underestimates may result in significant degradation. +* `--target-compressed-block-size=#`: + Attempt to produce compressed blocks of approximately this size. + This will split larger blocks in order to approach this target. + This feature is notably useful for improved latency, when the receiver can leverage receiving early incomplete data. + This parameter defines a loose target: compressed blocks will target this size "on average", but individual blocks can still be larger or smaller. + Enabling this feature can decrease compression speed by up to ~10% at level 1. + Higher levels will see smaller relative speed regression, becoming invisible at higher settings. * `-f`, `--force`: - overwrite output without prompting, and (de)compress symbolic links + disable input and output checks. Allows overwriting existing files, input + from console, output to stdout, operating on links, block devices, etc. + During decompression and when the output destination is stdout, pass-through + unrecognized formats as-is. * `-c`, `--stdout`: - force write to standard output, even if it is the console + write to standard output (even if it is the console); keep original files (disable `--rm`). +* `-o FILE`: + save result into `FILE`. + Note that this operation is in conflict with `-c`. + If both operations are present on the command line, the last expressed one wins. * `--[no-]sparse`: enable / disable sparse FS support, to make files with many zeroes smaller on disk. @@ -172,13 +255,46 @@ the last one takes effect. default: enabled when output is into a file, and disabled when output is stdout. This setting overrides default and can force sparse mode over stdout. +* `--[no-]pass-through` + enable / disable passing through uncompressed files as-is. During + decompression when pass-through is enabled, unrecognized formats will be + copied as-is from the input to the output. By default, pass-through will + occur when the output destination is stdout and the force (`-f`) option is + set. * `--rm`: - remove source file(s) after successful compression or decompression + remove source file(s) after successful compression or decompression. + This command is silently ignored if output is `stdout`. + If used in combination with `-o`, + triggers a confirmation prompt (which can be silenced with `-f`), as this is a destructive operation. * `-k`, `--keep`: keep source file(s) after successful compression or decompression. This is the default behavior. * `-r`: - operate recursively on directories + operate recursively on directories. + It selects all files in the named directory and all its subdirectories. + This can be useful both to reduce command line typing, + and to circumvent shell expansion limitations, + when there are a lot of files and naming breaks the maximum size of a command line. +* `--filelist FILE` + read a list of files to process as content from `FILE`. + Format is compatible with `ls` output, with one file per line. +* `--output-dir-flat DIR`: + resulting files are stored into target `DIR` directory, + instead of same directory as origin file. + Be aware that this command can introduce name collision issues, + if multiple files, from different directories, end up having the same name. 
+ Collision resolution ensures the first file with a given name will be present in `DIR`, + while in combination with `-f`, the last file will be present instead. +* `--output-dir-mirror DIR`: + similar to `--output-dir-flat`, + the output files are stored underneath the target `DIR` directory, + but this option will replicate the input directory hierarchy into output `DIR`. + + If the input directory contains "..", the files in this directory will be ignored. + If the input directory is an absolute path (e.g. "/var/tmp/abc"), + it will be stored into the "output-dir/var/tmp/abc". + If there are multiple input files or directories, + name collision resolution will follow the same rules as `--output-dir-flat`. * `--format=FORMAT`: compress and decompress in other formats. If compiled with support, zstd can compress to or decompress from other compression algorithm @@ -187,152 +303,65 @@ the last one takes effect. * `-h`/`-H`, `--help`: display help/long help and exit * `-V`, `--version`: - display version number and exit. - Advanced : `-vV` also displays supported formats. + display version number and immediately exit. + Note that, since it exits, flags specified after `-V` are effectively ignored. + Advanced: `-vV` also displays supported formats. `-vvV` also displays POSIX support. -* `-v`: - verbose mode + `-qV` will only display the version number, suitable for machine reading. +* `-v`, `--verbose`: + verbose mode, display more information * `-q`, `--quiet`: suppress warnings, interactivity, and notifications. specify twice to suppress errors too. * `--no-progress`: do not display the progress bar, but keep all other messages. -* `-C`, `--[no-]check`: - add integrity check computed from uncompressed data (default: enabled) +* `--show-default-cparams`: + shows the default compression parameters that will be used for a particular input file, based on the provided compression level and the input size. + If the provided file is not a regular file (e.g. a pipe), this flag will output the parameters used for inputs of unknown size. +* `--exclude-compressed`: + only compress files that are not already compressed. * `--`: All arguments after `--` are treated as files -DICTIONARY BUILDER ------------------- -`zstd` offers _dictionary_ compression, -which greatly improves efficiency on small files and messages. -It's possible to train `zstd` with a set of samples, -the result of which is saved into a file called a `dictionary`. -Then during compression and decompression, reference the same dictionary, -using command `-D dictionaryFileName`. -Compression of small files similar to the sample set will be greatly improved. +### gzip Operation Modifiers +When invoked via a `gzip` symlink, `zstd` will support further +options that intend to mimic the `gzip` behavior: -* `--train FILEs`: - Use FILEs as training set to create a dictionary. - The training set should contain a lot of small files (> 100), - and weight typically 100x the target dictionary size - (for example, 10 MB for a 100 KB dictionary). +* `-n`, `--no-name`: + do not store the original filename and timestamps when compressing + a file. This is the default behavior and hence a no-op. +* `--best`: + alias to the option `-9`. - Supports multithreading if `zstd` is compiled with threading support. - Additional parameters can be specified with `--train-fastcover`. - The legacy dictionary builder can be accessed with `--train-legacy`. - The cover dictionary builder can be accessed with `--train-cover`. - Equivalent to `--train-fastcover=d=8,steps=4`.
-* `-o file`: - Dictionary saved into `file` (default name: dictionary). -* `--maxdict=#`: - Limit dictionary to specified size (default: 112640). -* `-#`: - Use `#` compression level during training (optional). - Will generate statistics more tuned for selected compression level, - resulting in a _small_ compression ratio improvement for this level. -* `-B#`: - Split input files in blocks of size # (default: no split) -* `--dictID=#`: - A dictionary ID is a locally unique ID that a decoder can use to verify it is - using the right dictionary. - By default, zstd will create a 4-bytes random number ID. - It's possible to give a precise number instead. - Short numbers have an advantage : an ID < 256 will only need 1 byte in the - compressed frame header, and an ID < 65536 will only need 2 bytes. - This compares favorably to 4 bytes default. - However, it's up to the dictionary manager to not assign twice the same ID to - 2 different dictionaries. -* `--train-cover[=k#,d=#,steps=#,split=#]`: - Select parameters for the default dictionary builder algorithm named cover. - If _d_ is not specified, then it tries _d_ = 6 and _d_ = 8. - If _k_ is not specified, then it tries _steps_ values in the range [50, 2000]. - If _steps_ is not specified, then the default value of 40 is used. - If _split_ is not specified or split <= 0, then the default value of 100 is used. - Requires that _d_ <= _k_. - - Selects segments of size _k_ with highest score to put in the dictionary. - The score of a segment is computed by the sum of the frequencies of all the - subsegments of size _d_. - Generally _d_ should be in the range [6, 8], occasionally up to 16, but the - algorithm will run faster with d <= _8_. - Good values for _k_ vary widely based on the input data, but a safe range is - [2 * _d_, 2000]. - If _split_ is 100, all input samples are used for both training and testing - to find optimal _d_ and _k_ to build dictionary. - Supports multithreading if `zstd` is compiled with threading support. - - Examples: - - `zstd --train-cover FILEs` - - `zstd --train-cover=k=50,d=8 FILEs` - - `zstd --train-cover=d=8,steps=500 FILEs` - - `zstd --train-cover=k=50 FILEs` - - `zstd --train-cover=k=50,split=60 FILEs` - -* `--train-fastcover[=k#,d=#,f=#,steps=#,split=#,accel=#]`: - Same as cover but with extra parameters _f_ and _accel_ and different default value of split - If _split_ is not specified, then it tries _split_ = 75. - If _f_ is not specified, then it tries _f_ = 20. - Requires that 0 < _f_ < 32. - If _accel_ is not specified, then it tries _accel_ = 1. - Requires that 0 < _accel_ <= 10. - Requires that _d_ = 6 or _d_ = 8. - - _f_ is log of size of array that keeps track of frequency of subsegments of size _d_. - The subsegment is hashed to an index in the range [0,2^_f_ - 1]. - It is possible that 2 different subsegments are hashed to the same index, and they are considered as the same subsegment when computing frequency. - Using a higher _f_ reduces collision but takes longer. - - Examples: - - `zstd --train-fastcover FILEs` - - `zstd --train-fastcover=d=8,f=15,accel=2 FILEs` - -* `--train-legacy[=selectivity=#]`: - Use legacy dictionary builder algorithm with the given dictionary - _selectivity_ (default: 9). - The smaller the _selectivity_ value, the denser the dictionary, - improving its efficiency but reducing its possible maximum size. - `--train-legacy=s=#` is also accepted. 
- - Examples: - - `zstd --train-legacy FILEs` - - `zstd --train-legacy=selectivity=8 FILEs` +### Environment Variables +Employing environment variables to set parameters has security implications. +Therefore, this avenue is intentionally limited. +Only `ZSTD_CLEVEL` and `ZSTD_NBTHREADS` are currently supported. +They set the default compression level and number of threads to use during compression, respectively. -BENCHMARK ---------- +`ZSTD_CLEVEL` can be used to set the level between 1 and 19 (the "normal" range). +If the value of `ZSTD_CLEVEL` is not a valid integer, it will be ignored with a warning message. +`ZSTD_CLEVEL` just replaces the default compression level (`3`). -* `-b#`: - benchmark file(s) using compression level # -* `-e#`: - benchmark file(s) using multiple compression levels, from `-b#` to `-e#` (inclusive) -* `-i#`: - minimum evaluation time, in seconds (default: 3s), benchmark mode only -* `-B#`, `--block-size=#`: - cut file(s) into independent blocks of size # (default: no block) -* `--priority=rt`: - set process priority to real-time +`ZSTD_NBTHREADS` can be used to set the number of threads `zstd` will attempt to use during compression. +If the value of `ZSTD_NBTHREADS` is not a valid unsigned integer, it will be ignored with a warning message. +`ZSTD_NBTHREADS` has a default value of `max(1, min(4, nbCores/4))`, and is capped at ZSTDMT_NBWORKERS_MAX==200. +`zstd` must be compiled with multithread support for this variable to have any effect. -**Output Format:** CompressionLevel#Filename : IntputSize -> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed +They can both be overridden by corresponding command line arguments: +`-#` for compression level and `-T#` for number of compression threads. -**Methodology:** For both compression and decompression speed, the entire input is compressed/decompressed in-memory to measure speed. A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy. ADVANCED COMPRESSION OPTIONS ---------------------------- +`zstd` provides 22 predefined regular compression levels plus the fast levels. +A compression level is translated internally into multiple advanced parameters that control the behavior of the compressor +(one can observe the result of this translation with `--show-default-cparams`). +These advanced parameters can be overridden using advanced compression options. + ### --zstd[=options]: -`zstd` provides 22 predefined compression levels. -The selected or default predefined compression level can be changed with -advanced compression options. The _options_ are provided as a comma-separated list. You may specify only the options you want to change and the rest will be taken from the selected or default compression level. @@ -341,10 +370,10 @@ The list of available _options_: - `strategy`=_strat_, `strat`=_strat_: Specify a strategy used by a match finder. - There are 9 strategies numbered from 1 to 9, from faster to stronger: - 1=ZSTD\_fast, 2=ZSTD\_dfast, 3=ZSTD\_greedy, - 4=ZSTD\_lazy, 5=ZSTD\_lazy2, 6=ZSTD\_btlazy2, - 7=ZSTD\_btopt, 8=ZSTD\_btultra, 9=ZSTD\_btultra2. + There are 9 strategies numbered from 1 to 9, from fastest to strongest: + 1=`ZSTD_fast`, 2=`ZSTD_dfast`, 3=`ZSTD_greedy`, + 4=`ZSTD_lazy`, 5=`ZSTD_lazy2`, 6=`ZSTD_btlazy2`, + 7=`ZSTD_btopt`, 8=`ZSTD_btultra`, 9=`ZSTD_btultra2`. - `windowLog`=_wlog_, `wlog`=_wlog_: Specify the maximum number of bits for a match distance. 
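+
+For example, a minimal sketch of overriding individual fields (the level and values are arbitrary):
+
+`zstd -19 --zstd=strat=5,wlog=25 FILE`
+
+starts from the level 19 parameter set, then overrides only the strategy (5 = `ZSTD_lazy2`) and the window size (2\^25 bytes = 32 MiB).
+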
@@ -361,21 +390,23 @@ The list of available _options_: - `hashLog`=_hlog_, `hlog`=_hlog_: Specify the maximum number of bits for a hash table. - Bigger hash tables cause less collisions which usually makes compression + Bigger hash tables cause fewer collisions which usually makes compression faster, but requires more memory during compression. - The minimum _hlog_ is 6 (64 B) and the maximum is 26 (128 MiB). + The minimum _hlog_ is 6 (64 entries / 256 B) and the maximum is 30 (1B entries / 4 GiB). - `chainLog`=_clog_, `clog`=_clog_: - Specify the maximum number of bits for a hash chain or a binary tree. + Specify the maximum number of bits for the secondary search structure, + whose form depends on the selected `strategy`. Higher numbers of bits increases the chance to find a match which usually improves compression ratio. It also slows down compression speed and increases memory requirements for compression. - This option is ignored for the ZSTD_fast strategy. + This option is ignored for the `ZSTD_fast` `strategy`, which only has the primary hash table. - The minimum _clog_ is 6 (64 B) and the maximum is 28 (256 MiB). + The minimum _clog_ is 6 (64 entries / 256 B) and the maximum is 29 (512M entries / 2 GiB) on 32-bit platforms + and 30 (1B entries / 4 GiB) on 64-bit platforms. - `searchLog`=_slog_, `slog`=_slog_: Specify the maximum number of searches in a hash chain or a binary tree @@ -384,7 +415,7 @@ The list of available _options_: More searches increases the chance to find a match which usually increases compression ratio but decreases compression speed. - The minimum _slog_ is 1 and the maximum is 26. + The minimum _slog_ is 1 and the maximum is 'windowLog' - 1. - `minMatch`=_mml_, `mml`=_mml_: Specify the minimum searched length of a match in a hash table. @@ -394,22 +425,22 @@ The list of available _options_: The minimum _mml_ is 3 and the maximum is 7. -- `targetLen`=_tlen_, `tlen`=_tlen_: +- `targetLength`=_tlen_, `tlen`=_tlen_: The impact of this field vary depending on selected strategy. - For ZSTD\_btopt, ZSTD\_btultra and ZSTD\_btultra2, it specifies + For `ZSTD_btopt`, `ZSTD_btultra` and `ZSTD_btultra2`, it specifies the minimum match length that causes match finder to stop searching. - A larger `targetLen` usually improves compression ratio + A larger `targetLength` usually improves compression ratio but decreases compression speed. - For ZSTD\_fast, it triggers ultra-fast mode when > 0. + For `ZSTD_fast`, it triggers ultra-fast mode when > 0. The value represents the amount of data skipped between match sampling. - Impact is reversed : a larger `targetLen` increases compression speed + Impact is reversed: a larger `targetLength` increases compression speed but decreases compression ratio. For all other strategies, this field has no impact. - The minimum _tlen_ is 0 and the maximum is 999. + The minimum _tlen_ is 0 and the maximum is 128 KiB. - `overlapLog`=_ovlog_, `ovlog`=_ovlog_: Determine `overlapSize`, amount of data reloaded from previous job. @@ -421,9 +452,20 @@ The list of available _options_: 9 means "full overlap", meaning up to `windowSize` is reloaded from previous job. Reducing _ovlog_ by 1 reduces the reloaded amount by a factor 2. For example, 8 means "windowSize/2", and 6 means "windowSize/8". - Value 0 is special and means "default" : _ovlog_ is automatically determined by `zstd`. + Value 0 is special and means "default": _ovlog_ is automatically determined by `zstd`. In which case, _ovlog_ will range from 6 to 9, depending on selected _strat_. 
+- `ldmHashRateLog`=_lhrlog_, `lhrlog`=_lhrlog_: + Specify the frequency of inserting entries into the long distance matching + hash table. + + This option is ignored unless long distance matching is enabled. + + Larger values will improve compression speed. Deviating far from the + default value will likely result in a decrease in compression ratio. + + The default value varies between 4 and 7, depending on `strategy`. + - `ldmHashLog`=_lhlog_, `lhlog`=_lhlog_: Specify the maximum size for a hash table used for long distance matching. @@ -432,7 +474,7 @@ The list of available _options_: Bigger hash tables usually improve compression ratio at the expense of more memory during compression and a decrease in compression speed. - The minimum _lhlog_ is 6 and the maximum is 26 (default: 20). + The minimum _lhlog_ is 6 and the maximum is 30 (default: `windowLog - ldmHashRateLog`). - `ldmMinMatch`=_lmml_, `lmml`=_lmml_: Specify the minimum searched length of a match for long distance matching. @@ -441,7 +483,7 @@ The list of available _options_: Larger/very small values usually decrease compression ratio. - The minimum _lmml_ is 4 and the maximum is 4096 (default: 64). + The minimum _lmml_ is 4 and the maximum is 4096 (default: 32 to 64, depending on `strategy`). - `ldmBucketSizeLog`=_lblog_, `lblog`=_lblog_: Specify the size of each bucket for the hash table used for long distance @@ -452,18 +494,8 @@ The list of available _options_: Larger bucket sizes improve collision resolution but decrease compression speed. - The minimum _lblog_ is 0 and the maximum is 8 (default: 3). + The minimum _lblog_ is 1 and the maximum is 8 (default: 4 to 8, depending on `strategy`). -- `ldmHashRateLog`=_lhrlog_, `lhrlog`=_lhrlog_: - Specify the frequency of inserting entries into the long distance matching - hash table. - - This option is ignored unless long distance matching is enabled. - - Larger values will improve compression speed. Deviating far from the - default value will likely result in a decrease in compression ratio. - - The default value is `wlog - lhlog`. ### Example The following parameters sets advanced compression options to something @@ -471,13 +503,207 @@ similar to predefined level 19 for files bigger than 256 KB: `--zstd`=wlog=23,clog=23,hlog=22,slog=6,mml=3,tlen=48,strat=6 -### -B#: -Select the size of each compression job. -This parameter is available only when multi-threading is enabled. -Default value is `4 * windowSize`, which means it varies depending on compression level. -`-B#` makes it possible to select a custom value. +### --jobsize=#: +Specify the size of each compression job. +This parameter is only meaningful when multi-threading is enabled. +Each compression job is run in parallel, so this value can indirectly impact the nb of active threads. +Default job size varies depending on compression level (generally `4 * windowSize`). +`--jobsize=#` makes it possible to manually select a custom size. Note that job size must respect a minimum value which is enforced transparently. -This minimum is either 1 MB, or `overlapSize`, whichever is largest. +This minimum is either 512 KB, or `overlapSize`, whichever is largest. +Different job sizes will lead to non-identical compressed frames. + + +DICTIONARY BUILDER +------------------ +`zstd` offers _dictionary_ compression, +which greatly improves efficiency on small files and messages. +It's possible to train `zstd` with a set of samples, +the result of which is saved into a file called a `dictionary`. 
+Then, during compression and decompression, reference the same dictionary, +using command `-D dictionaryFileName`. +Compression of small files similar to the sample set will be greatly improved. + +* `--train FILEs`: + Use FILEs as a training set to create a dictionary. + The training set should ideally contain a lot of samples (> 100), + and typically weigh 100x the target dictionary size + (for example, ~10 MB for a 100 KB dictionary). + `--train` can be combined with `-r` to indicate a directory rather than listing all the files, + which can be useful to circumvent shell expansion limits. + + Since dictionary compression is mostly effective for small files, + the expectation is that the training set will only contain small files. + In the case where some samples happen to be large, + only the first 128 KiB of these samples will be used for training. + + `--train` supports multithreading if `zstd` is compiled with threading support (default). + Additional advanced parameters can be specified with `--train-fastcover`. + The legacy dictionary builder can be accessed with `--train-legacy`. + The slower cover dictionary builder can be accessed with `--train-cover`. + Default `--train` is equivalent to `--train-fastcover=d=8,steps=4`. + +* `-o FILE`: + Dictionary saved into `FILE` (default name: dictionary). +* `--maxdict=#`: + Limit dictionary to specified size (default: 112640 bytes). + As usual, quantities are expressed in bytes by default, + and it's possible to employ suffixes (like `KB` or `MB`) + to specify larger values. +* `-#`: + Use `#` compression level during training (optional). + Will generate statistics more tuned for selected compression level, + resulting in a _small_ compression ratio improvement for this level. +* `--split=#`: + Split input files into independent chunks of size # (default: no split) +* `-M#`, `--memory=#`: + Limit the amount of sample data loaded for training (default: 2 GB). + Note that the default (2 GB) is also the maximum. + This parameter can be useful in situations where the training set size + is not well controlled and could be potentially very large. + Since the speed of the training process is directly correlated to + the size of the training sample set, + a smaller sample set leads to faster training. + + In situations where the training set is larger than the maximum memory, + the CLI will randomly select samples among the available ones, + up to the maximum allowed memory budget. + This is meant to improve dictionary relevance + by mitigating the potential impact of clustering, + such as selecting only files from the beginning of a list + sorted by modification date, or sorted by alphabetical order. + The randomization process is deterministic, so + training of the same list of files with the same parameters + will lead to the creation of the same dictionary. + +* `--dictID=#`: + A dictionary ID is a locally unique ID. + The decoder will use this value to verify it is using the right dictionary. + By default, zstd will create a 4-byte random number ID. + It's possible to provide an explicit number ID instead. + It's up to the dictionary manager not to assign the same ID to + 2 different dictionaries. + Note that short numbers have an advantage: + an ID < 256 will only need 1 byte in the compressed frame header, + and an ID < 65536 will only need 2 bytes. + This compares favorably to the default of 4 bytes. + + Note that RFC8878 reserves IDs less than 32768 and greater than or equal to 2\^31, so they should not be used in public.
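+
+  Example (an illustrative sketch; the dictionary name and ID value are hypothetical):
+
+  `zstd --train samples/* -o mydict --dictID=250`
+
+  `zstd -D mydict FILE`
+
+  Since 250 < 256, the frame header spends only 1 byte on the dictionary ID.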
+ +* `--train-cover[=k=#,d=#,steps=#,split=#,shrink[=#]]`: + Select parameters for the default dictionary builder algorithm named cover. + If _d_ is not specified, then it tries _d_ = 6 and _d_ = 8. + If _k_ is not specified, then it tries _steps_ values in the range [50, 2000]. + If _steps_ is not specified, then the default value of 40 is used. + If _split_ is not specified or split <= 0, then the default value of 100 is used. + Requires that _d_ <= _k_. + If the _shrink_ flag is not used, then the default value for _shrinkDict_ of 0 is used. + If _shrink_ is not specified, then the default value for _shrinkDictMaxRegression_ of 1 is used. + + Selects segments of size _k_ with the highest score to put in the dictionary. + The score of a segment is computed by the sum of the frequencies of all the + subsegments of size _d_. + Generally _d_ should be in the range [6, 8], occasionally up to 16, but the + algorithm will run faster with d <= _8_. + Good values for _k_ vary widely based on the input data, but a safe range is + [2 * _d_, 2000]. + If _split_ is 100, all input samples are used for both training and testing + to find the optimal _d_ and _k_ to build the dictionary. + Supports multithreading if `zstd` is compiled with threading support. + Having _shrink_ enabled takes a truncated dictionary of minimum size and doubles + it in size until the compression ratio of the truncated dictionary is at most + _shrinkDictMaxRegression%_ worse than that of the largest dictionary. + + Examples: + + `zstd --train-cover FILEs` + + `zstd --train-cover=k=50,d=8 FILEs` + + `zstd --train-cover=d=8,steps=500 FILEs` + + `zstd --train-cover=k=50 FILEs` + + `zstd --train-cover=k=50,split=60 FILEs` + + `zstd --train-cover=shrink FILEs` + + `zstd --train-cover=shrink=2 FILEs` + +* `--train-fastcover[=k=#,d=#,f=#,steps=#,split=#,accel=#]`: + Same as cover but with extra parameters _f_ and _accel_ and a different default value of _split_. + If _split_ is not specified, then it tries _split_ = 75. + If _f_ is not specified, then it tries _f_ = 20. + Requires that 0 < _f_ < 32. + If _accel_ is not specified, then it tries _accel_ = 1. + Requires that 0 < _accel_ <= 10. + Requires that _d_ = 6 or _d_ = 8. + + _f_ is the log of the size of the array that keeps track of the frequency of subsegments of size _d_. + The subsegment is hashed to an index in the range [0,2^_f_ - 1]. + It is possible that 2 different subsegments are hashed to the same index, and they are considered as the same subsegment when computing frequency. + Using a higher _f_ reduces collision but takes longer. + + Examples: + + `zstd --train-fastcover FILEs` + + `zstd --train-fastcover=d=8,f=15,accel=2 FILEs` + +* `--train-legacy[=selectivity=#]`: + Use legacy dictionary builder algorithm with the given dictionary + _selectivity_ (default: 9). + The smaller the _selectivity_ value, the denser the dictionary, + improving its efficiency but reducing its achievable maximum size. + `--train-legacy=s=#` is also accepted. + + Examples: + + `zstd --train-legacy FILEs` + + `zstd --train-legacy=selectivity=8 FILEs` + + +BENCHMARK +--------- +The `zstd` CLI provides a benchmarking mode that can be used to easily find suitable compression parameters, or alternatively to benchmark a computer's performance. +`zstd -b [FILE(s)]` will benchmark `zstd` for both compression and decompression using default compression level. +Note that results are very dependent on the content being compressed.
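+
+For example, a minimal sketch (the file name is hypothetical):
+
+`zstd -b1 -e5 file.dat`
+
+benchmarks `file.dat` at levels 1 through 5, reporting compressed size, ratio, and both compression and decompression speed for each level.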
+ +It's possible to pass multiple files to the benchmark, and even a directory with `-r DIRECTORY`. +When no `FILE` is provided, the benchmark will use a procedurally generated `lorem ipsum` text. + +Benchmarking will employ `max(1, min(4, nbCores/4))` worker threads by default in order to match the behavior of the normal CLI I/O. + +* `-b#`: + benchmark file(s) using compression level # +* `-e#`: + benchmark file(s) using multiple compression levels, from `-b#` to `-e#` (inclusive) +* `-d`: + benchmark decompression speed only (requires providing a zstd-compressed content) +* `-i#`: + minimum evaluation time, in seconds (default: 3s), benchmark mode only +* `--split=#`: + split input file(s) into independent chunks of size # (default: no chunking) +* `-S`: + output one benchmark result per input file (default: consolidated result) +* `-D dictionary` + benchmark using dictionary +* `--priority=rt`: + set process priority to real-time (Windows) + +Beyond compression levels, benchmarking is also compatible with other parameters, such as number of threads (`-T#`), advanced compression parameters (`--zstd=###`), dictionary compression (`-D dictionary`), or even disabling checksum verification for example. + +**Output Format:** CompressionLevel#Filename: InputSize -> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed + +**Methodology:** For speed measurement, the entire input is compressed/decompressed in-memory to measure speed. A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy. + + +SEE ALSO +-------- +`zstdgrep`(1), `zstdless`(1), `gzip`(1), `xz`(1) + +The format is specified in Y. Collet, "Zstandard Compression and the 'application/zstd' Media Type", https://www.ietf.org/rfc/rfc8878.txt, Internet RFC 8878 (February 2021). BUGS ---- diff --git a/programs/zstdcli.c b/programs/zstdcli.c index c0dd925cefa..01760ff8c45 100644 --- a/programs/zstdcli.c +++ b/programs/zstdcli.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -8,6 +8,29 @@ * You may select, at your option, one of the above-listed licenses. 
*/ +/*-************************************ +* Dependencies +**************************************/ +#include "platform.h" /* PLATFORM_POSIX_VERSION */ +#include "util.h" /* UTIL_HAS_CREATEFILELIST, UTIL_createFileList, UTIL_isConsole */ +#include <stdlib.h> /* getenv */ +#include <string.h> /* strcmp, strlen */ +#include <stdio.h> /* fprintf(), stdin, stdout, stderr */ +#include <assert.h> /* assert */ + +#include "fileio.h" /* stdinmark, stdoutmark, ZSTD_EXTENSION */ +#ifndef ZSTD_NOBENCH +# include "benchzstd.h" /* BMK_benchFilesAdvanced */ +#endif +#ifndef ZSTD_NODICT +# include "dibio.h" /* ZDICT_cover_params_t, DiB_trainFromFiles() */ +#endif +#ifndef ZSTD_NOTRACE +# include "zstdcli_trace.h" +#endif +#include "../lib/zstd.h" /* ZSTD_VERSION_STRING, ZSTD_minCLevel, ZSTD_maxCLevel */ +#include "fileio_asyncio.h" +#include "fileio_common.h" /*-************************************ * Tuning parameters @@ -20,37 +43,21 @@ # define ZSTDCLI_CLEVEL_MAX 19 /* without using --ultra */ #endif - - -/*-************************************ -* Dependencies -**************************************/ -#include "platform.h" /* IS_CONSOLE, PLATFORM_POSIX_VERSION */ -#include "util.h" /* UTIL_HAS_CREATEFILELIST, UTIL_createFileList */ -#include <stdio.h> /* fprintf(), stdin, stdout, stderr */ -#include <stdlib.h> /* getenv */ -#include <string.h> /* strcmp, strlen */ -#include <errno.h> /* errno */ -#include "fileio.h" /* stdinmark, stdoutmark, ZSTD_EXTENSION */ -#ifndef ZSTD_NOBENCH -# include "benchzstd.h" /* BMK_benchFiles */ +#ifndef ZSTDCLI_NBTHREADS_DEFAULT +#define ZSTDCLI_NBTHREADS_DEFAULT (unsigned)(MAX(1, MIN(4, UTIL_countLogicalCores() / 4))) #endif -#ifndef ZSTD_NODICT -# include "dibio.h" /* ZDICT_cover_params_t, DiB_trainFromFiles() */ -#endif -#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_minCLevel */ -#include "zstd.h" /* ZSTD_VERSION_STRING, ZSTD_maxCLevel */ +static unsigned init_nbWorkers(unsigned defaultNbWorkers); /*-************************************ * Constants **************************************/ -#define COMPRESSOR_NAME "zstd command line interface" +#define COMPRESSOR_NAME "Zstandard CLI" #ifndef ZSTD_VERSION # define ZSTD_VERSION "v" ZSTD_VERSION_STRING #endif #define AUTHOR "Yann Collet" -#define WELCOME_MESSAGE "*** %s %i-bits %s, by %s ***\n", COMPRESSOR_NAME, (int)(sizeof(size_t)*8), ZSTD_VERSION, AUTHOR +#define WELCOME_MESSAGE "*** %s (%i-bit) %s, by %s ***\n", COMPRESSOR_NAME, (int)(sizeof(size_t)*8), ZSTD_VERSION, AUTHOR #define ZSTD_ZSTDMT "zstdmt" #define ZSTD_UNZSTD "unzstd" @@ -87,128 +94,240 @@ static U32 g_ldmBucketSizeLog = LDM_PARAM_DEFAULT; #define DEFAULT_ACCEL 1 +#define NBWORKERS_AUTOCPU 0 +#define NBWORKERS_UNSET UINT_MAX typedef enum { cover, fastCover, legacy } dictType; /*-************************************ * Display Macros **************************************/ -#define DISPLAY(...) fprintf(g_displayOut, __VA_ARGS__) +#undef DISPLAYLEVEL +#define DISPLAYLEVEL(l, ...)
{ if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); } } static int g_displayLevel = DISPLAY_LEVEL_DEFAULT; /* 0 : no display, 1: errors, 2 : + result + interaction + warnings, 3 : + progression, 4 : + information */ -static FILE* g_displayOut; +/*-************************************ +* Check Version (when CLI linked to dynamic library) +**************************************/ + +/* Due to usage of experimental symbols and capabilities by the CLI, + * the CLI must be linked against a dynamic library of same version */ +static void checkLibVersion(void) +{ + if (strcmp(ZSTD_VERSION_STRING, ZSTD_versionString())) { + DISPLAYLEVEL(1, "Error : incorrect library version (expecting : %s ; actual : %s ) \n", + ZSTD_VERSION_STRING, ZSTD_versionString()); + DISPLAYLEVEL(1, "Please update library to version %s, or use stand-alone zstd binary \n", + ZSTD_VERSION_STRING); + exit(1); + } +} + + +/*! exeNameMatch() : + @return : a non-zero value if exeName matches test, excluding the extension + */ +static int exeNameMatch(const char* exeName, const char* test) +{ + return !strncmp(exeName, test, strlen(test)) && + (exeName[strlen(test)] == '\0' || exeName[strlen(test)] == '.'); +} + /*-************************************ * Command Line **************************************/ -static int usage(const char* programName) +/* print help either in `stderr` or `stdout` depending on originating request + * error (badUsage) => stderr + * help (usageAdvanced) => stdout + */ +static void usage(FILE* f, const char* programName) { - DISPLAY( "Usage : \n"); - DISPLAY( " %s [args] [FILE(s)] [-o file] \n", programName); - DISPLAY( "\n"); - DISPLAY( "FILE : a filename \n"); - DISPLAY( " with no FILE, or when FILE is - , read standard input\n"); - DISPLAY( "Arguments : \n"); + DISPLAY_F(f, "Compress or decompress the INPUT file(s); reads from STDIN if INPUT is `-` or not provided.\n\n"); + DISPLAY_F(f, "Usage: %s [OPTIONS...] [INPUT... | -] [-o OUTPUT]\n\n", programName); + DISPLAY_F(f, "Options:\n"); + DISPLAY_F(f, " -o OUTPUT Write output to a single file, OUTPUT.\n"); + DISPLAY_F(f, " -k, --keep Preserve INPUT file(s). [Default] \n"); + DISPLAY_F(f, " --rm Remove INPUT file(s) after successful (de)compression to file.\n"); +#ifdef ZSTD_GZCOMPRESS + if (exeNameMatch(programName, ZSTD_GZ)) { /* behave like gzip */ + DISPLAY_F(f, " -n, --no-name Do not store original filename when compressing.\n\n"); + } +#endif + DISPLAY_F(f, "\n"); #ifndef ZSTD_NOCOMPRESS - DISPLAY( " -# : # compression level (1-%d, default: %d) \n", ZSTDCLI_CLEVEL_MAX, ZSTDCLI_CLEVEL_DEFAULT); + DISPLAY_F(f, " -# Desired compression level, where `#` is a number between 1 and %d;\n", ZSTDCLI_CLEVEL_MAX); + DISPLAY_F(f, " lower numbers provide faster compression, higher numbers yield\n"); + DISPLAY_F(f, " better compression ratios. 
[Default: %d]\n\n", ZSTDCLI_CLEVEL_DEFAULT); #endif #ifndef ZSTD_NODECOMPRESS - DISPLAY( " -d : decompression \n"); + DISPLAY_F(f, " -d, --decompress Perform decompression.\n"); #endif - DISPLAY( " -D file: use `file` as Dictionary \n"); - DISPLAY( " -o file: result stored into `file` (only if 1 input file) \n"); - DISPLAY( " -f : overwrite output without prompting and (de)compress links \n"); - DISPLAY( "--rm : remove source file(s) after successful de/compression \n"); - DISPLAY( " -k : preserve source file(s) (default) \n"); - DISPLAY( " -h/-H : display help/long help and exit \n"); - return 0; + DISPLAY_F(f, " -D DICT Use DICT as the dictionary for compression or decompression.\n\n"); + DISPLAY_F(f, " -f, --force Disable input and output checks. Allows overwriting existing files,\n"); + DISPLAY_F(f, " receiving input from the console, printing output to STDOUT, and\n"); + DISPLAY_F(f, " operating on links, block devices, etc. Unrecognized formats will be\n"); + DISPLAY_F(f, " passed-through through as-is.\n\n"); + + DISPLAY_F(f, " -h Display short usage and exit.\n"); + DISPLAY_F(f, " -H, --help Display full help and exit.\n"); + DISPLAY_F(f, " -V, --version Display the program version and exit.\n"); + DISPLAY_F(f, "\n"); } -static int usage_advanced(const char* programName) +static void usageAdvanced(const char* programName) { - DISPLAY(WELCOME_MESSAGE); - usage(programName); - DISPLAY( "\n"); - DISPLAY( "Advanced arguments : \n"); - DISPLAY( " -V : display Version number and exit \n"); - DISPLAY( " -v : verbose mode; specify multiple times to increase verbosity\n"); - DISPLAY( " -q : suppress warnings; specify twice to suppress errors too\n"); - DISPLAY( " -c : force write to standard output, even if it is the console\n"); - DISPLAY( " -l : print information about zstd compressed files \n"); + DISPLAYOUT(WELCOME_MESSAGE); + DISPLAYOUT("\n"); + usage(stdout, programName); + DISPLAYOUT("Advanced options:\n"); + DISPLAYOUT(" -c, --stdout Write to STDOUT (even if it is a console) and keep the INPUT file(s).\n\n"); + + DISPLAYOUT(" -v, --verbose Enable verbose output; pass multiple times to increase verbosity.\n"); + DISPLAYOUT(" -q, --quiet Suppress warnings; pass twice to suppress errors.\n"); +#ifndef ZSTD_NOTRACE + DISPLAYOUT(" --trace LOG Log tracing information to LOG.\n"); +#endif + DISPLAYOUT("\n"); + DISPLAYOUT(" --[no-]progress Forcibly show/hide the progress counter. NOTE: Any (de)compressed\n"); + DISPLAYOUT(" output to terminal will mix with progress counter text.\n\n"); + +#ifdef UTIL_HAS_CREATEFILELIST + DISPLAYOUT(" -r Operate recursively on directories.\n"); + DISPLAYOUT(" --filelist LIST Read a list of files to operate on from LIST.\n"); + DISPLAYOUT(" --output-dir-flat DIR Store processed files in DIR.\n"); +#endif + +#ifdef UTIL_HAS_MIRRORFILELIST + DISPLAYOUT(" --output-dir-mirror DIR Store processed files in DIR, respecting original directory structure.\n"); +#endif + if (AIO_supported()) + DISPLAYOUT(" --[no-]asyncio Use asynchronous IO. 
[Default: Enabled]\n"); + + DISPLAYOUT("\n"); #ifndef ZSTD_NOCOMPRESS - DISPLAY( "--ultra : enable levels beyond %i, up to %i (requires more memory)\n", ZSTDCLI_CLEVEL_MAX, ZSTD_maxCLevel()); - DISPLAY( "--long[=#]: enable long distance matching with given window log (default: %u)\n", g_defaultMaxWindowLog); - DISPLAY( "--fast[=#]: switch to ultra fast compression level (default: %u)\n", 1); - DISPLAY( "--adapt : dynamically adapt compression level to I/O conditions \n"); -#ifdef ZSTD_MULTITHREAD - DISPLAY( " -T# : spawns # compression threads (default: 1, 0==# cores) \n"); - DISPLAY( " -B# : select size of each job (default: 0==automatic) \n"); - DISPLAY( " --rsyncable : compress using a rsync-friendly method (-B sets block size) \n"); + DISPLAYOUT(" --[no-]check Add XXH64 integrity checksums during compression. [Default: Add, Validate]\n"); +#ifndef ZSTD_NODECOMPRESS + DISPLAYOUT(" If `-d` is present, ignore/validate checksums during decompression.\n"); #endif - DISPLAY( "--no-dictID : don't write dictID into header (dictionary compression)\n"); - DISPLAY( "--[no-]check : integrity check (default: enabled) \n"); - DISPLAY( "--[no-]compress-literals : force (un)compressed literals \n"); +#else +#ifdef ZSTD_NOCOMPRESS + DISPLAYOUT(" --[no-]check Ignore/validate checksums during decompression. [Default: Validate]"); #endif -#ifdef UTIL_HAS_CREATEFILELIST - DISPLAY( " -r : operate recursively on directories \n"); +#endif /* ZSTD_NOCOMPRESS */ + + DISPLAYOUT("\n"); + DISPLAYOUT(" -- Treat remaining arguments after `--` as files.\n"); + +#ifndef ZSTD_NOCOMPRESS + DISPLAYOUT("\n"); + DISPLAYOUT("Advanced compression options:\n"); + DISPLAYOUT(" --ultra Enable levels beyond %i, up to %i; requires more memory.\n", ZSTDCLI_CLEVEL_MAX, ZSTD_maxCLevel()); + DISPLAYOUT(" --fast[=#] Use to very fast compression levels. [Default: %u]\n", 1); +#ifdef ZSTD_GZCOMPRESS + if (exeNameMatch(programName, ZSTD_GZ)) { /* behave like gzip */ + DISPLAYOUT(" --best Compatibility alias for `-9`.\n"); + } #endif - DISPLAY( "--format=zstd : compress files to the .zst format (default) \n"); + DISPLAYOUT(" --adapt Dynamically adapt compression level to I/O conditions.\n"); + DISPLAYOUT(" --long[=#] Enable long distance matching with window log #. [Default: %u]\n", g_defaultMaxWindowLog); + DISPLAYOUT(" --patch-from=REF Use REF as the reference point for Zstandard's diff engine. \n"); + DISPLAYOUT(" --patch-apply Equivalent for `-d --patch-from` \n\n"); +# ifdef ZSTD_MULTITHREAD + DISPLAYOUT(" -T# Spawn # compression threads. [Default: %u; pass 0 for core count.]\n", init_nbWorkers(ZSTDCLI_NBTHREADS_DEFAULT)); + DISPLAYOUT(" --single-thread Share a single thread for I/O and compression (slightly different than `-T1`).\n"); + DISPLAYOUT(" --auto-threads={physical|logical}\n"); + DISPLAYOUT(" Use physical/logical cores when using `-T0`. [Default: Physical]\n\n"); + DISPLAYOUT(" --jobsize=# Set job size to #. [Default: 0 (automatic)]\n"); + DISPLAYOUT(" --rsyncable Compress using a rsync-friendly method (`--jobsize=#` sets unit size). 
\n"); + DISPLAYOUT("\n"); +# endif + DISPLAYOUT(" --exclude-compressed Only compress files that are not already compressed.\n\n"); + + DISPLAYOUT(" --stream-size=# Specify size of streaming input from STDIN.\n"); + DISPLAYOUT(" --size-hint=# Optimize compression parameters for streaming input of approximately size #.\n"); + DISPLAYOUT(" --target-compressed-block-size=#\n"); + DISPLAYOUT(" Generate compressed blocks of approximately # size.\n\n"); + DISPLAYOUT(" --no-dictID Don't write `dictID` into the header (dictionary compression only).\n"); + DISPLAYOUT(" --[no-]compress-literals Force (un)compressed literals.\n"); + DISPLAYOUT(" --[no-]row-match-finder Explicitly enable/disable the fast, row-based matchfinder for\n"); + DISPLAYOUT(" the 'greedy', 'lazy', and 'lazy2' strategies.\n"); + + DISPLAYOUT("\n"); + DISPLAYOUT(" --format=zstd Compress files to the `.zst` format. [Default]\n"); + DISPLAYOUT(" --[no-]mmap-dict Memory-map dictionary file rather than mallocing and loading all at once\n"); #ifdef ZSTD_GZCOMPRESS - DISPLAY( "--format=gzip : compress files to the .gz format \n"); + DISPLAYOUT(" --format=gzip Compress files to the `.gz` format.\n"); #endif #ifdef ZSTD_LZMACOMPRESS - DISPLAY( "--format=xz : compress files to the .xz format \n"); - DISPLAY( "--format=lzma : compress files to the .lzma format \n"); + DISPLAYOUT(" --format=xz Compress files to the `.xz` format.\n"); + DISPLAYOUT(" --format=lzma Compress files to the `.lzma` format.\n"); #endif #ifdef ZSTD_LZ4COMPRESS - DISPLAY( "--format=lz4 : compress files to the .lz4 format \n"); + DISPLAYOUT( " --format=lz4 Compress files to the `.lz4` format.\n"); #endif +#endif /* !ZSTD_NOCOMPRESS */ + #ifndef ZSTD_NODECOMPRESS - DISPLAY( "--test : test compressed file integrity \n"); -#if ZSTD_SPARSE_DEFAULT - DISPLAY( "--[no-]sparse : sparse mode (default: enabled on file, disabled on stdout)\n"); -#else - DISPLAY( "--[no-]sparse : sparse mode (default: disabled)\n"); -#endif -#endif - DISPLAY( " -M# : Set a memory usage limit for decompression \n"); - DISPLAY( "--no-progress : do not display the progress bar \n"); - DISPLAY( "-- : All arguments after \"--\" are treated as files \n"); + DISPLAYOUT("\n"); + DISPLAYOUT("Advanced decompression options:\n"); + DISPLAYOUT(" -l Print information about Zstandard-compressed files.\n"); + DISPLAYOUT(" --test Test compressed file integrity.\n"); + DISPLAYOUT(" -M# Set the memory usage limit to # megabytes.\n"); +# if ZSTD_SPARSE_DEFAULT + DISPLAYOUT(" --[no-]sparse Enable sparse mode. [Default: Enabled for files, disabled for STDOUT.]\n"); +# else + DISPLAYOUT(" --[no-]sparse Enable sparse mode. [Default: Disabled]\n"); +# endif + { + char const* passThroughDefault = "Disabled"; + if (exeNameMatch(programName, ZSTD_CAT) || + exeNameMatch(programName, ZSTD_ZCAT) || + exeNameMatch(programName, ZSTD_GZCAT)) { + passThroughDefault = "Enabled"; + } + DISPLAYOUT(" --[no-]pass-through Pass through uncompressed files as-is. 
[Default: %s]\n", passThroughDefault); + } +#endif /* ZSTD_NODECOMPRESS */ + #ifndef ZSTD_NODICT - DISPLAY( "\n"); - DISPLAY( "Dictionary builder : \n"); - DISPLAY( "--train ## : create a dictionary from a training set of files \n"); - DISPLAY( "--train-cover[=k=#,d=#,steps=#,split=#] : use the cover algorithm with optional args\n"); - DISPLAY( "--train-fastcover[=k=#,d=#,f=#,steps=#,split=#,accel=#] : use the fast cover algorithm with optional args\n"); - DISPLAY( "--train-legacy[=s=#] : use the legacy algorithm with selectivity (default: %u)\n", g_defaultSelectivityLevel); - DISPLAY( " -o file : `file` is dictionary name (default: %s) \n", g_defaultDictName); - DISPLAY( "--maxdict=# : limit dictionary to specified size (default: %u) \n", g_defaultMaxDictSize); - DISPLAY( "--dictID=# : force dictionary ID to specified value (default: random)\n"); + DISPLAYOUT("\n"); + DISPLAYOUT("Dictionary builder:\n"); + DISPLAYOUT(" --train Create a dictionary from a training set of files.\n\n"); + DISPLAYOUT(" --train-cover[=k=#,d=#,steps=#,split=#,shrink[=#]]\n"); + DISPLAYOUT(" Use the cover algorithm (with optional arguments).\n"); + DISPLAYOUT(" --train-fastcover[=k=#,d=#,f=#,steps=#,split=#,accel=#,shrink[=#]]\n"); + DISPLAYOUT(" Use the fast cover algorithm (with optional arguments).\n\n"); + DISPLAYOUT(" --train-legacy[=s=#] Use the legacy algorithm with selectivity #. [Default: %u]\n", g_defaultSelectivityLevel); + DISPLAYOUT(" -o NAME Use NAME as dictionary name. [Default: %s]\n", g_defaultDictName); + DISPLAYOUT(" --maxdict=# Limit dictionary to specified size #. [Default: %u]\n", g_defaultMaxDictSize); + DISPLAYOUT(" --dictID=# Force dictionary ID to #. [Default: Random]\n"); #endif + #ifndef ZSTD_NOBENCH - DISPLAY( "\n"); - DISPLAY( "Benchmark arguments : \n"); - DISPLAY( " -b# : benchmark file(s), using # compression level (default: %d) \n", ZSTDCLI_CLEVEL_DEFAULT); - DISPLAY( " -e# : test all compression levels from -bX to # (default: 1)\n"); - DISPLAY( " -i# : minimum evaluation time in seconds (default: 3s) \n"); - DISPLAY( " -B# : cut file into independent blocks of size # (default: no block)\n"); - DISPLAY( "--priority=rt : set process priority to real-time \n"); + DISPLAYOUT("\n"); + DISPLAYOUT("Benchmark options:\n"); + DISPLAYOUT(" -b# Perform benchmarking with compression level #. [Default: %d]\n", ZSTDCLI_CLEVEL_DEFAULT); + DISPLAYOUT(" -e# Test all compression levels up to #; starting level is `-b#`. [Default: 1]\n"); + DISPLAYOUT(" -i# Set the minimum evaluation time to # seconds. [Default: 3]\n"); + DISPLAYOUT(" --split=# Split input into independent chunks of size #. [Default: No chunking]\n"); + DISPLAYOUT(" -S Output one benchmark result per input file. [Default: Consolidated result]\n"); + DISPLAYOUT(" -D dictionary Benchmark using dictionary.\n"); + DISPLAYOUT(" --priority=rt Set process priority to real-time.\n"); #endif - return 0; + } -static int badusage(const char* programName) +static void badUsage(const char* programName, const char* parameter) { - DISPLAYLEVEL(1, "Incorrect parameters\n"); - if (g_displayLevel >= 2) usage(programName); - return 1; + DISPLAYLEVEL(1, "Incorrect parameter: %s \n", parameter); + if (g_displayLevel >= 2) usage(stderr, programName); } static void waitEnter(void) { int unused; - DISPLAY("Press enter to continue...\n"); + DISPLAY("Press enter to continue... \n"); unused = getchar(); (void)unused; } @@ -221,18 +340,9 @@ static const char* lastNameFromPath(const char* path) return name; } -/*!
exeNameMatch() : - @return : a non-zero value if exeName matches test, excluding the extension - */ -static int exeNameMatch(const char* exeName, const char* test) -{ - return !strncmp(exeName, test, strlen(test)) && - (exeName[strlen(test)] == '\0' || exeName[strlen(test)] == '.'); -} - static void errorOut(const char* msg) { - DISPLAY("%s \n", msg); exit(1); + DISPLAYLEVEL(1, "%s \n", msg); exit(1); } /*! readU32FromCharChecked() : @@ -244,10 +354,12 @@ static int readU32FromCharChecked(const char** stringPtr, unsigned* value) { unsigned result = 0; while ((**stringPtr >='0') && (**stringPtr <='9')) { - unsigned const max = (((unsigned)(-1)) / 10) - 1; + unsigned const max = ((unsigned)(-1)) / 10; + unsigned last = result; if (result > max) return 1; /* overflow error */ result *= 10; result += (unsigned)(**stringPtr - '0'); + if (result < last) return 1; /* overflow error */ (*stringPtr)++ ; } if ((**stringPtr=='K') || (**stringPtr=='M')) { @@ -272,18 +384,80 @@ static int readU32FromCharChecked(const char** stringPtr, unsigned* value) * Will also modify `*stringPtr`, advancing it to position where it stopped reading. * Note : function will exit() program if digit sequence overflows */ static unsigned readU32FromChar(const char** stringPtr) { - static const char errorMsg[] = "error: numeric value too large"; + static const char errorMsg[] = "error: numeric value overflows 32-bit unsigned int"; unsigned result; if (readU32FromCharChecked(stringPtr, &result)) { errorOut(errorMsg); } return result; } +/*! readIntFromChar() : + * @return : signed integer value read from input in `char` format. + * allows and interprets K, KB, KiB, M, MB and MiB suffix. + * Will also modify `*stringPtr`, advancing it to position where it stopped reading. + * Note : function will exit() program if digit sequence overflows */ +static int readIntFromChar(const char** stringPtr) { + static const char errorMsg[] = "error: numeric value overflows 32-bit int"; + int sign = 1; + unsigned result; + if (**stringPtr=='-') { + (*stringPtr)++; + sign = -1; + } + if (readU32FromCharChecked(stringPtr, &result)) { errorOut(errorMsg); } + return (int) result * sign; +} + +/*! readSizeTFromCharChecked() : + * @return 0 if success, and store the result in *value. + * allows and interprets K, KB, KiB, M, MB and MiB suffix. + * Will also modify `*stringPtr`, advancing it to position where it stopped reading. + * @return 1 if an overflow error occurs */ +static int readSizeTFromCharChecked(const char** stringPtr, size_t* value) +{ + size_t result = 0; + while ((**stringPtr >='0') && (**stringPtr <='9')) { + size_t const max = ((size_t)(-1)) / 10; + size_t last = result; + if (result > max) return 1; /* overflow error */ + result *= 10; + result += (size_t)(**stringPtr - '0'); + if (result < last) return 1; /* overflow error */ + (*stringPtr)++ ; + } + if ((**stringPtr=='K') || (**stringPtr=='M')) { + size_t const maxK = ((size_t)(-1)) >> 10; + if (result > maxK) return 1; /* overflow error */ + result <<= 10; + if (**stringPtr=='M') { + if (result > maxK) return 1; /* overflow error */ + result <<= 10; + } + (*stringPtr)++; /* skip `K` or `M` */ + if (**stringPtr=='i') (*stringPtr)++; + if (**stringPtr=='B') (*stringPtr)++; + } + *value = result; + return 0; +} + +/*! readSizeTFromChar() : + * @return : size_t value read from input in `char` format. + * allows and interprets K, KB, KiB, M, MB and MiB suffix. + * Will also modify `*stringPtr`, advancing it to position where it stopped reading. 
+ * Note : function will exit() program if digit sequence overflows */ +static size_t readSizeTFromChar(const char** stringPtr) { + static const char errorMsg[] = "error: numeric value overflows size_t"; + size_t result; + if (readSizeTFromCharChecked(stringPtr, &result)) { errorOut(errorMsg); } + return result; +} + /** longCommandWArg() : * check if *stringPtr is the same as longCommand. * If yes, @return 1 and advances *stringPtr to the position which immediately follows longCommand. * @return 0 and doesn't modify *stringPtr otherwise. */ -static unsigned longCommandWArg(const char** stringPtr, const char* longCommand) +static int longCommandWArg(const char** stringPtr, const char* longCommand) { size_t const comSize = strlen(longCommand); int const result = !strncmp(*stringPtr, longCommand, comSize); @@ -293,6 +467,8 @@ static unsigned longCommandWArg(const char** stringPtr, const char* longCommand) #ifndef ZSTD_NODICT + +static const unsigned kDefaultRegression = 1; /** * parseCoverParameters() : * reads cover parameters from *stringPtr (e.g. "--train-cover=k=48,d=8,steps=32") into *params @@ -311,10 +487,23 @@ static unsigned parseCoverParameters(const char* stringPtr, ZDICT_cover_params_t params->splitPoint = (double)splitPercentage / 100.0; if (stringPtr[0]==',') { stringPtr++; continue; } else break; } + if (longCommandWArg(&stringPtr, "shrink")) { + params->shrinkDictMaxRegression = kDefaultRegression; + params->shrinkDict = 1; + if (stringPtr[0]=='=') { + stringPtr++; + params->shrinkDictMaxRegression = readU32FromChar(&stringPtr); + } + if (stringPtr[0]==',') { + stringPtr++; + continue; + } + else break; + } return 0; } if (stringPtr[0] != 0) return 0; - DISPLAYLEVEL(4, "cover: k=%u\nd=%u\nsteps=%u\nsplit=%u\n", params->k, params->d, params->steps, (unsigned)(params->splitPoint * 100)); + DISPLAYLEVEL(4, "cover: k=%u\nd=%u\nsteps=%u\nsplit=%u\nshrink=%u\n", params->k, params->d, params->steps, (unsigned)(params->splitPoint * 100), params->shrinkDictMaxRegression); return 1; } @@ -338,10 +527,23 @@ static unsigned parseFastCoverParameters(const char* stringPtr, ZDICT_fastCover_ params->splitPoint = (double)splitPercentage / 100.0; if (stringPtr[0]==',') { stringPtr++; continue; } else break; } + if (longCommandWArg(&stringPtr, "shrink")) { + params->shrinkDictMaxRegression = kDefaultRegression; + params->shrinkDict = 1; + if (stringPtr[0]=='=') { + stringPtr++; + params->shrinkDictMaxRegression = readU32FromChar(&stringPtr); + } + if (stringPtr[0]==',') { + stringPtr++; + continue; + } + else break; + } return 0; } if (stringPtr[0] != 0) return 0; - DISPLAYLEVEL(4, "cover: k=%u\nd=%u\nf=%u\nsteps=%u\nsplit=%u\naccel=%u\n", params->k, params->d, params->f, params->steps, (unsigned)(params->splitPoint * 100), params->accel); + DISPLAYLEVEL(4, "fastcover: k=%u\nd=%u\nf=%u\nsteps=%u\nsplit=%u\naccel=%u\nshrink=%u\n", params->k, params->d, params->f, params->steps, (unsigned)(params->splitPoint * 100), params->accel, params->shrinkDictMaxRegression); return 1; } @@ -367,6 +569,8 @@ static ZDICT_cover_params_t defaultCoverParams(void) params.d = 8; params.steps = 4; params.splitPoint = 1.0; + params.shrinkDict = 0; + params.shrinkDictMaxRegression = kDefaultRegression; return params; } @@ -379,13 +583,15 @@ static ZDICT_fastCover_params_t defaultFastCoverParams(void) params.steps = 4; params.splitPoint = 0.75; /* different from default splitPoint of cover */ params.accel = DEFAULT_ACCEL; + params.shrinkDict = 0; + params.shrinkDictMaxRegression = kDefaultRegression; return params; }
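The two dictionary trainers above share a small option grammar: a comma-separated list of `key=value` fields, in which `shrink` may appear bare (selecting the default max regression of 1) or as `shrink=#`. The standalone sketch below uses hypothetical names and a reduced field set rather than the patch's actual helpers; it mirrors how longCommandWArg() and readU32FromChar() cooperate to consume that grammar, except that the real readU32FromChar() aborts on 32-bit overflow instead of wrapping.

    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-ins for longCommandWArg()/readU32FromChar();
     * overflow checking is omitted here for brevity. */
    static int matchField(const char** p, const char* name)
    {
        size_t const n = strlen(name);
        if (strncmp(*p, name, n)) return 0;
        *p += n;
        return 1;
    }
    static unsigned readNum(const char** p)
    {
        unsigned v = 0;
        while (**p >= '0' && **p <= '9') { v = v*10 + (unsigned)(**p - '0'); (*p)++; }
        return v;
    }

    /* Accepts "k=#,d=#,shrink[=#]" in any order; returns 1 on success. */
    static int parseCoverLike(const char* p, unsigned* k, unsigned* d, unsigned* shrink)
    {
        for (;;) {
            if      (matchField(&p, "k=")) { *k = readNum(&p); }
            else if (matchField(&p, "d=")) { *d = readNum(&p); }
            else if (matchField(&p, "shrink")) {
                *shrink = 1;   /* bare "shrink" selects the default regression */
                if (*p == '=') { p++; *shrink = readNum(&p); }
            }
            else return 0;     /* unknown field */
            if (*p == ',') { p++; continue; }
            break;
        }
        return *p == '\0';     /* the whole string must be consumed */
    }

    int main(void)
    {
        unsigned k = 0, d = 0, shrink = 0;
        int const ok = parseCoverLike("k=48,d=8,shrink=2", &k, &d, &shrink);
        printf("ok=%d k=%u d=%u shrink=%u\n", ok, k, d, shrink); /* ok=1 k=48 d=8 shrink=2 */
        return 0;
    }

The same break/continue shape explains a quirk visible in the diff: a field not followed by a comma ends the loop, and the trailing `if (stringPtr[0] != 0) return 0;` is what rejects junk after the last field.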
#endif /** parseAdaptParameters() : - * reads adapt parameters from *stringPtr (e.g. "--zstd=min=1,max=19) and store them into adaptMinPtr and adaptMaxPtr. + * reads adapt parameters from *stringPtr (e.g. "--adapt=min=1,max=19") and stores them into adaptMinPtr and adaptMaxPtr. * Both adaptMinPtr and adaptMaxPtr must be already allocated and correctly initialized. * There is no guarantee that any of these values will be updated. * @return 1 means that parsing was successful, @@ -394,8 +600,8 @@ static ZDICT_fastCover_params_t defaultFastCoverParams(void) static unsigned parseAdaptParameters(const char* stringPtr, int* adaptMinPtr, int* adaptMaxPtr) { for ( ; ;) { - if (longCommandWArg(&stringPtr, "min=")) { *adaptMinPtr = readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; } - if (longCommandWArg(&stringPtr, "max=")) { *adaptMaxPtr = readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; } + if (longCommandWArg(&stringPtr, "min=")) { *adaptMinPtr = readIntFromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; } + if (longCommandWArg(&stringPtr, "max=")) { *adaptMaxPtr = readIntFromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; } DISPLAYLEVEL(4, "invalid compression parameter \n"); return 0; } @@ -432,50 +638,129 @@ static unsigned parseCompressionParameters(const char* stringPtr, ZSTD_compressi return 0; } - DISPLAYLEVEL(4, "windowLog=%d, chainLog=%d, hashLog=%d, searchLog=%d \n", params->windowLog, params->chainLog, params->hashLog, params->searchLog); - DISPLAYLEVEL(4, "minMatch=%d, targetLength=%d, strategy=%d \n", params->minMatch, params->targetLength, params->strategy); if (stringPtr[0] != 0) return 0; /* check the end of string */ return 1; } +static void setMaxCompression(ZSTD_compressionParameters* params) +{ + params->windowLog = ZSTD_WINDOWLOG_MAX; + params->chainLog = ZSTD_CHAINLOG_MAX; + params->hashLog = ZSTD_HASHLOG_MAX; + params->searchLog = ZSTD_SEARCHLOG_MAX; + params->minMatch = ZSTD_MINMATCH_MIN; + params->targetLength = ZSTD_TARGETLENGTH_MAX; + params->strategy = ZSTD_STRATEGY_MAX; + g_overlapLog = ZSTD_OVERLAPLOG_MAX; + g_ldmHashLog = ZSTD_LDM_HASHLOG_MAX; + g_ldmHashRateLog = 0; /* automatically derived */ + g_ldmMinMatch = 16; /* heuristic */ + g_ldmBucketSizeLog = ZSTD_LDM_BUCKETSIZELOG_MAX; +} + static void printVersion(void) { - DISPLAY(WELCOME_MESSAGE); + if (g_displayLevel < DISPLAY_LEVEL_DEFAULT) { + DISPLAYOUT("%s\n", ZSTD_VERSION_STRING); + return; + } + + DISPLAYOUT(WELCOME_MESSAGE); + if (g_displayLevel >= 3) { /* format support */ - DISPLAYLEVEL(3, "*** supports: zstd"); -#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>0) && (ZSTD_LEGACY_SUPPORT<8) - DISPLAYLEVEL(3, ", zstd legacy v0.%d+", ZSTD_LEGACY_SUPPORT); -#endif -#ifdef ZSTD_GZCOMPRESS - DISPLAYLEVEL(3, ", gzip"); -#endif -#ifdef ZSTD_LZ4COMPRESS - DISPLAYLEVEL(3, ", lz4"); -#endif -#ifdef ZSTD_LZMACOMPRESS - DISPLAYLEVEL(3, ", lzma, xz "); -#endif - DISPLAYLEVEL(3, "\n"); - /* posix support */ -#ifdef _POSIX_C_SOURCE - DISPLAYLEVEL(4, "_POSIX_C_SOURCE defined: %ldL\n", (long) _POSIX_C_SOURCE); -#endif -#ifdef _POSIX_VERSION - DISPLAYLEVEL(4, "_POSIX_VERSION defined: %ldL \n", (long) _POSIX_VERSION); -#endif -#ifdef PLATFORM_POSIX_VERSION - DISPLAYLEVEL(4, "PLATFORM_POSIX_VERSION defined: %ldL\n", (long) PLATFORM_POSIX_VERSION); -#endif + DISPLAYOUT("*** supports: zstd"); + #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>0) &&
(ZSTD_LEGACY_SUPPORT<8) + DISPLAYOUT(", zstd legacy v0.%d+", ZSTD_LEGACY_SUPPORT); + #endif + #ifdef ZSTD_GZCOMPRESS + DISPLAYOUT(", gzip"); + #endif + #ifdef ZSTD_LZ4COMPRESS + DISPLAYOUT(", lz4"); + #endif + #ifdef ZSTD_LZMACOMPRESS + DISPLAYOUT(", lzma, xz "); + #endif + DISPLAYOUT("\n"); + if (g_displayLevel >= 4) { + /* library versions */ + DISPLAYOUT("zlib version %s\n", FIO_zlibVersion()); + DISPLAYOUT("lz4 version %s\n", FIO_lz4Version()); + DISPLAYOUT("lzma version %s\n", FIO_lzmaVersion()); + + #ifdef ZSTD_MULTITHREAD + DISPLAYOUT("supports Multithreading \n"); + #else + DISPLAYOUT("single-thread operations only \n"); + #endif + + /* posix support */ + #ifdef _POSIX_C_SOURCE + DISPLAYOUT("_POSIX_C_SOURCE defined: %ldL\n", (long) _POSIX_C_SOURCE); + #endif + #ifdef _POSIX_VERSION + DISPLAYOUT("_POSIX_VERSION defined: %ldL \n", (long) _POSIX_VERSION); + #endif + #ifdef PLATFORM_POSIX_VERSION + DISPLAYOUT("PLATFORM_POSIX_VERSION defined: %ldL\n", (long) PLATFORM_POSIX_VERSION); + #endif + + if (!ZSTD_isDeterministicBuild()) { + DISPLAYOUT("non-deterministic build\n"); + } } } } +#define ZSTD_NB_STRATEGIES 9 +static const char* ZSTD_strategyMap[ZSTD_NB_STRATEGIES + 1] = { "", "ZSTD_fast", + "ZSTD_dfast", "ZSTD_greedy", "ZSTD_lazy", "ZSTD_lazy2", "ZSTD_btlazy2", + "ZSTD_btopt", "ZSTD_btultra", "ZSTD_btultra2"}; + +#ifndef ZSTD_NOCOMPRESS + +static void printDefaultCParams(const char* filename, const char* dictFileName, int cLevel) { + unsigned long long fileSize = UTIL_getFileSize(filename); + const size_t dictSize = dictFileName != NULL ? (size_t)UTIL_getFileSize(dictFileName) : 0; + const ZSTD_compressionParameters cParams = ZSTD_getCParams(cLevel, fileSize, dictSize); + if (fileSize != UTIL_FILESIZE_UNKNOWN) DISPLAY("%s (%llu bytes)\n", filename, fileSize); + else DISPLAY("%s (src size unknown)\n", filename); + DISPLAY(" - windowLog : %u\n", cParams.windowLog); + DISPLAY(" - chainLog : %u\n", cParams.chainLog); + DISPLAY(" - hashLog : %u\n", cParams.hashLog); + DISPLAY(" - searchLog : %u\n", cParams.searchLog); + DISPLAY(" - minMatch : %u\n", cParams.minMatch); + DISPLAY(" - targetLength : %u\n", cParams.targetLength); + assert(cParams.strategy < ZSTD_NB_STRATEGIES + 1); + DISPLAY(" - strategy : %s (%u)\n", ZSTD_strategyMap[(int)cParams.strategy], (unsigned)cParams.strategy); +} + +static void printActualCParams(const char* filename, const char* dictFileName, int cLevel, const ZSTD_compressionParameters* cParams) { + unsigned long long fileSize = UTIL_getFileSize(filename); + const size_t dictSize = dictFileName != NULL ? (size_t)UTIL_getFileSize(dictFileName) : 0; + ZSTD_compressionParameters actualCParams = ZSTD_getCParams(cLevel, fileSize, dictSize); + assert(g_displayLevel >= 4); + actualCParams.windowLog = cParams->windowLog == 0 ? actualCParams.windowLog : cParams->windowLog; + actualCParams.chainLog = cParams->chainLog == 0 ? actualCParams.chainLog : cParams->chainLog; + actualCParams.hashLog = cParams->hashLog == 0 ? actualCParams.hashLog : cParams->hashLog; + actualCParams.searchLog = cParams->searchLog == 0 ? actualCParams.searchLog : cParams->searchLog; + actualCParams.minMatch = cParams->minMatch == 0 ? actualCParams.minMatch : cParams->minMatch; + actualCParams.targetLength = cParams->targetLength == 0 ? actualCParams.targetLength : cParams->targetLength; + actualCParams.strategy = cParams->strategy == 0 ? 
actualCParams.strategy : cParams->strategy; + DISPLAY("--zstd=wlog=%d,clog=%d,hlog=%d,slog=%d,mml=%d,tlen=%d,strat=%d\n", + actualCParams.windowLog, actualCParams.chainLog, actualCParams.hashLog, actualCParams.searchLog, + actualCParams.minMatch, actualCParams.targetLength, actualCParams.strategy); +} + +#endif + /* Environment variables for parameter setting */ #define ENV_CLEVEL "ZSTD_CLEVEL" +#define ENV_NBWORKERS "ZSTD_NBTHREADS" /* takes lower precedence than directly specifying -T# in the CLI */ -/* functions that pick up environment variables */ +/* pick up environment variable */ static int init_cLevel(void) { const char* const env = getenv(ENV_CLEVEL); - if (env) { - const char *ptr = env; + if (env != NULL) { + const char* ptr = env; int sign = 1; if (*ptr == '-') { sign = -1; @@ -487,26 +772,84 @@ static int init_cLevel(void) { if ((*ptr>='0') && (*ptr<='9')) { unsigned absLevel; if (readU32FromCharChecked(&ptr, &absLevel)) { - DISPLAYLEVEL(2, "Ignore environment variable setting %s=%s: numeric value too large\n", ENV_CLEVEL, env); + DISPLAYLEVEL(2, "Ignore environment variable setting %s=%s: numeric value too large \n", ENV_CLEVEL, env); return ZSTDCLI_CLEVEL_DEFAULT; } else if (*ptr == 0) { - return sign * absLevel; - } - } + return sign * (int)absLevel; + } } - DISPLAYLEVEL(2, "Ignore environment variable setting %s=%s: not a valid integer value\n", ENV_CLEVEL, env); + DISPLAYLEVEL(2, "Ignore environment variable setting %s=%s: not a valid integer value \n", ENV_CLEVEL, env); } return ZSTDCLI_CLEVEL_DEFAULT; } +static unsigned init_nbWorkers(unsigned defaultNbWorkers) { +#ifdef ZSTD_MULTITHREAD + const char* const env = getenv(ENV_NBWORKERS); + if (env != NULL) { + const char* ptr = env; + if ((*ptr>='0') && (*ptr<='9')) { + unsigned nbThreads; + if (readU32FromCharChecked(&ptr, &nbThreads)) { + DISPLAYLEVEL(2, "Ignore environment variable setting %s=%s: numeric value too large \n", ENV_NBWORKERS, env); + return defaultNbWorkers; + } else if (*ptr == 0) { + return nbThreads; + } + } + DISPLAYLEVEL(2, "Ignore environment variable setting %s=%s: not a valid unsigned value \n", ENV_NBWORKERS, env); + } + + return defaultNbWorkers; +#else + (void)defaultNbWorkers; + return 1; +#endif +} + +#define NEXT_FIELD(ptr) { \ + if (*argument == '=') { \ + ptr = ++argument; \ + argument += strlen(ptr); \ + } else { \ + argNb++; \ + if (argNb >= argCount) { \ + DISPLAYLEVEL(1, "error: missing command argument \n"); \ + CLEAN_RETURN(1); \ + } \ + ptr = argv[argNb]; \ + assert(ptr != NULL); \ + if (ptr[0]=='-') { \ + DISPLAYLEVEL(1, "error: command cannot be separated from its argument by another command \n"); \ + CLEAN_RETURN(1); \ +} } } + +#define NEXT_UINT32(_varu32) { \ + const char* __nb; \ + NEXT_FIELD(__nb); \ + _varu32 = readU32FromChar(&__nb); \ + if(*__nb != 0) { \ + errorOut("error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB are allowed"); \ + } \ +} + +#define NEXT_TSIZE(_varTsize) { \ + const char* __nb; \ + NEXT_FIELD(__nb); \ + _varTsize = readSizeTFromChar(&__nb); \ + if(*__nb != 0) { \ + errorOut("error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB are allowed"); \ + } \ +} + typedef enum { zom_compress, zom_decompress, zom_test, zom_bench, zom_train, zom_list } zstd_operation_mode; #define CLEAN_RETURN(i) { operationResult = (i); goto _end; } #ifdef ZSTD_NOCOMPRESS /* symbols from compression library are not defined and should not be invoked */ -# define MINCLEVEL -50 +# define MINCLEVEL -99 # define MAXCLEVEL 22 #else # define 
MINCLEVEL ZSTD_minCLevel() @@ -517,51 +860,61 @@ int main(int argCount, const char* argv[]) { int argNb, followLinks = 0, + allowBlockDevices = 0, + forceStdin = 0, forceStdout = 0, - lastCommand = 0, + hasStdout = 0, ldmFlag = 0, main_pause = 0, - nbWorkers = 0, adapt = 0, adaptMin = MINCLEVEL, adaptMax = MAXCLEVEL, rsyncable = 0, - nextArgumentIsOutFileName = 0, - nextArgumentIsMaxDict = 0, - nextArgumentIsDictID = 0, nextArgumentsAreFiles = 0, - nextEntryIsDictionary = 0, operationResult = 0, separateFiles = 0, setRealTimePrio = 0, singleThread = 0, - ultra=0; - double compressibility = 0.5; + defaultLogicalCores = 0, + showDefaultCParams = 0, + contentSize = 1, + removeSrcFile = 0, + cLevel = init_cLevel(), + ultra = 0, + cLevelLast = MINCLEVEL - 1, /* for benchmark range */ + setThreads_non1 = 0; + unsigned nbWorkers = init_nbWorkers(NBWORKERS_UNSET); + ZSTD_ParamSwitch_e mmapDict = ZSTD_ps_auto; + ZSTD_ParamSwitch_e useRowMatchFinder = ZSTD_ps_auto; + FIO_compressionType_t cType = FIO_zstdCompression; + double compressibility = -1.0; /* lorem ipsum generator */ unsigned bench_nbSeconds = 3; /* would be better if this value was synchronized from bench */ - size_t blockSize = 0; + size_t chunkSize = 0; FIO_prefs_t* const prefs = FIO_createPreferences(); + FIO_ctx_t* const fCtx = FIO_createContext(); + FIO_progressSetting_e progress = FIO_ps_auto; zstd_operation_mode operation = zom_compress; ZSTD_compressionParameters compressionParams; - int cLevel; - int cLevelLast = -1000000000; unsigned recursive = 0; unsigned memLimit = 0; - const char** filenameTable = (const char**)malloc(argCount * sizeof(const char*)); /* argCount >= 1 */ - unsigned filenameIdx = 0; + FileNamesTable* filenames = UTIL_allocateFileNamesTable((size_t)argCount); /* argCount >= 1 */ + FileNamesTable* file_of_names = UTIL_allocateFileNamesTable((size_t)argCount); /* argCount >= 1 */ const char* programName = argv[0]; const char* outFileName = NULL; + const char* outDirName = NULL; + const char* outMirroredDirName = NULL; const char* dictFileName = NULL; + const char* patchFromDictFileName = NULL; const char* suffix = ZSTD_EXTENSION; unsigned maxDictSize = g_defaultMaxDictSize; unsigned dictID = 0; + size_t streamSrcSize = 0; + size_t targetCBlockSize = 0; + size_t srcSizeHint = 0; + size_t nbInputFileNames = 0; int dictCLevel = g_defaultDictCLevel; unsigned dictSelect = g_defaultSelectivityLevel; -#ifdef UTIL_HAS_CREATEFILELIST - const char** extendedFileList = NULL; - char* fileNamesBuf = NULL; - unsigned fileNamesNb; -#endif #ifndef ZSTD_NODICT ZDICT_cover_params_t coverParams = defaultCoverParams(); ZDICT_fastCover_params_t fastCoverParams = defaultFastCoverParams(); @@ -570,35 +923,33 @@ int main(int argCount, const char* argv[]) #ifndef ZSTD_NOBENCH BMK_advancedParams_t benchParams = BMK_initAdvancedParams(); #endif - ZSTD_literalCompressionMode_e literalCompressionMode = ZSTD_lcm_auto; - + ZSTD_ParamSwitch_e literalCompressionMode = ZSTD_ps_auto; /* init */ + checkLibVersion(); (void)recursive; (void)cLevelLast; /* not used when ZSTD_NOBENCH set */ - (void)memLimit; /* not used when ZSTD_NODECOMPRESS set */ - if (filenameTable==NULL) { DISPLAY("zstd: %s \n", strerror(errno)); exit(1); } - filenameTable[0] = stdinmark; - g_displayOut = stderr; - cLevel = init_cLevel(); + (void)memLimit; + assert(argCount >= 1); + if ((filenames==NULL) || (file_of_names==NULL)) { DISPLAYLEVEL(1, "zstd: allocation error \n"); exit(1); } programName = lastNameFromPath(programName); -#ifdef ZSTD_MULTITHREAD - nbWorkers = 1; -#endif 
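One behavior worth spelling out before the preset block below: the CLI derives its personality from the executable's base name, so the same binary installed (or hard-linked) as gzip, zcat, xz, and so on picks up that tool's defaults. exeNameMatch(), whose old definition is deleted further up (the patch evidently relocates it earlier in the file, since usage() now calls it too), accepts each alias either bare or followed by an extension such as `.exe`. A minimal self-contained sketch of that dispatch, with hypothetical names:

    #include <stdio.h>
    #include <string.h>

    /* Same test as exeNameMatch() in this patch: prefix match, and the next
     * character must end the name or begin an extension (e.g. "gzip.exe"). */
    static int nameMatches(const char* exeName, const char* test)
    {
        size_t const n = strlen(test);
        return !strncmp(exeName, test, n)
            && (exeName[n] == '\0' || exeName[n] == '.');
    }

    int main(int argc, const char* argv[])
    {
        const char* prog = (argc > 0) ? argv[0] : "zstd";
        const char* base = strrchr(prog, '/');   /* like lastNameFromPath() */
        base = (base == NULL) ? prog : base + 1;

        if      (nameMatches(base, "gzip")) printf("acting as gzip: level 6, remove source file\n");
        else if (nameMatches(base, "zcat")) printf("acting as zcat: decompress to stdout\n");
        else                                printf("acting as zstd\n");
        return 0;
    }

The extension rule is what lets a binary named `gzip.exe` behave as gzip on Windows while still rejecting near-misses like `gzip2`.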
/* preset behaviors */ - if (exeNameMatch(programName, ZSTD_ZSTDMT)) nbWorkers=0, singleThread=0; + if (exeNameMatch(programName, ZSTD_ZSTDMT)) nbWorkers=NBWORKERS_AUTOCPU, singleThread=0; if (exeNameMatch(programName, ZSTD_UNZSTD)) operation=zom_decompress; - if (exeNameMatch(programName, ZSTD_CAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; outFileName=stdoutmark; g_displayLevel=1; } /* supports multiple formats */ - if (exeNameMatch(programName, ZSTD_ZCAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; outFileName=stdoutmark; g_displayLevel=1; } /* behave like zcat, also supports multiple formats */ - if (exeNameMatch(programName, ZSTD_GZ)) { suffix = GZ_EXTENSION; FIO_setCompressionType(prefs, FIO_gzipCompression); FIO_setRemoveSrcFile(prefs, 1); } /* behave like gzip */ - if (exeNameMatch(programName, ZSTD_GUNZIP)) { operation=zom_decompress; FIO_setRemoveSrcFile(prefs, 1); } /* behave like gunzip, also supports multiple formats */ - if (exeNameMatch(programName, ZSTD_GZCAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; outFileName=stdoutmark; g_displayLevel=1; } /* behave like gzcat, also supports multiple formats */ - if (exeNameMatch(programName, ZSTD_LZMA)) { suffix = LZMA_EXTENSION; FIO_setCompressionType(prefs, FIO_lzmaCompression); FIO_setRemoveSrcFile(prefs, 1); } /* behave like lzma */ - if (exeNameMatch(programName, ZSTD_UNLZMA)) { operation=zom_decompress; FIO_setCompressionType(prefs, FIO_lzmaCompression); FIO_setRemoveSrcFile(prefs, 1); } /* behave like unlzma, also supports multiple formats */ - if (exeNameMatch(programName, ZSTD_XZ)) { suffix = XZ_EXTENSION; FIO_setCompressionType(prefs, FIO_xzCompression); FIO_setRemoveSrcFile(prefs, 1); } /* behave like xz */ - if (exeNameMatch(programName, ZSTD_UNXZ)) { operation=zom_decompress; FIO_setCompressionType(prefs, FIO_xzCompression); FIO_setRemoveSrcFile(prefs, 1); } /* behave like unxz, also supports multiple formats */ - if (exeNameMatch(programName, ZSTD_LZ4)) { suffix = LZ4_EXTENSION; FIO_setCompressionType(prefs, FIO_lz4Compression); } /* behave like lz4 */ - if (exeNameMatch(programName, ZSTD_UNLZ4)) { operation=zom_decompress; FIO_setCompressionType(prefs, FIO_lz4Compression); } /* behave like unlz4, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_CAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; FIO_setPassThroughFlag(prefs, 1); outFileName=stdoutmark; g_displayLevel=1; } /* supports multiple formats */ + if (exeNameMatch(programName, ZSTD_ZCAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; FIO_setPassThroughFlag(prefs, 1); outFileName=stdoutmark; g_displayLevel=1; } /* behave like zcat, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_GZ)) { /* behave like gzip */ + suffix = GZ_EXTENSION; cType = FIO_gzipCompression; removeSrcFile=1; + dictCLevel = cLevel = 6; /* gzip default is -6 */ + } + if (exeNameMatch(programName, ZSTD_GUNZIP)) { operation=zom_decompress; removeSrcFile=1; } /* behave like gunzip, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_GZCAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; FIO_setPassThroughFlag(prefs, 1); outFileName=stdoutmark; g_displayLevel=1; } /* behave like gzcat, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_LZMA)) { suffix = LZMA_EXTENSION; cType = 
FIO_lzmaCompression; removeSrcFile=1; } /* behave like lzma */ + if (exeNameMatch(programName, ZSTD_UNLZMA)) { operation=zom_decompress; cType = FIO_lzmaCompression; removeSrcFile=1; } /* behave like unlzma, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_XZ)) { suffix = XZ_EXTENSION; cType = FIO_xzCompression; removeSrcFile=1; } /* behave like xz */ + if (exeNameMatch(programName, ZSTD_UNXZ)) { operation=zom_decompress; cType = FIO_xzCompression; removeSrcFile=1; } /* behave like unxz, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_LZ4)) { suffix = LZ4_EXTENSION; cType = FIO_lz4Compression; } /* behave like lz4 */ + if (exeNameMatch(programName, ZSTD_UNLZ4)) { operation=zom_decompress; cType = FIO_lz4Compression; } /* behave like unlz4, also supports multiple formats */ memset(&compressionParams, 0, sizeof(compressionParams)); /* init crash handler */ @@ -607,369 +958,442 @@ int main(int argCount, const char* argv[]) /* command switches */ for (argNb=1; argNb