diff --git a/.github/workflows/post-release.yml b/.github/workflows/post-release.yml
index a44a675d..6524124d 100644
--- a/.github/workflows/post-release.yml
+++ b/.github/workflows/post-release.yml
@@ -4,9 +4,13 @@ on:
branches: [master]
types: [released]
+defaults:
+ run:
+ shell: bash -euv -o pipefail {0}
+
jobs:
post-release:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
# trigger post-release in dependency repo, this indirection allows the
# dependency repo to be updated often without affecting this repo. At
@@ -21,6 +25,7 @@ jobs:
event_type: "post-release",
client_payload: {
repo: env.GITHUB_REPOSITORY,
- version: "${{github.event.release.tag_name}}"}}' \
- | tee /dev/stderr)"
+ version: "${{github.event.release.tag_name}}",
+ },
+ }' | tee /dev/stderr)"
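For readers piecing the hunk back together: the payload fragments above all belong to a single `jq`-built `repository_dispatch` call. A minimal standalone sketch, with `BOT_TOKEN` and `DEP_REPO` standing in for the workflow's actual secret and dependency-repo names:

```sh
# sketch only: BOT_TOKEN and DEP_REPO are placeholders, and VERSION
# carries the release tag the workflow takes from the event payload
curl -sS -X POST -H "authorization: token $BOT_TOKEN" \
    "$GITHUB_API_URL/repos/$DEP_REPO/dispatches" \
    -d "$(jq -n '{
        event_type: "post-release",
        client_payload: {
            repo: env.GITHUB_REPOSITORY,
            version: env.VERSION
        }
    }' | tee /dev/stderr)"
```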
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index c38b8de6..b2ead2e4 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -5,9 +5,13 @@ on:
branches: [master]
types: [completed]
+defaults:
+ run:
+ shell: bash -euv -o pipefail {0}
+
jobs:
release:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
# need to manually check for a couple things
# - tests passed?
@@ -31,8 +35,22 @@ jobs:
with:
workflow: ${{github.event.workflow_run.name}}
run_id: ${{github.event.workflow_run.id}}
- name: results
- path: results
+ name: sizes
+ path: sizes
+ - uses: dawidd6/action-download-artifact@v2
+ continue-on-error: true
+ with:
+ workflow: ${{github.event.workflow_run.name}}
+ run_id: ${{github.event.workflow_run.id}}
+ name: cov
+ path: cov
+ - uses: dawidd6/action-download-artifact@v2
+ continue-on-error: true
+ with:
+ workflow: ${{github.event.workflow_run.name}}
+ run_id: ${{github.event.workflow_run.id}}
+ name: bench
+ path: bench
- name: find-version
run: |
@@ -68,79 +86,119 @@ jobs:
echo "LFS_PREV_VERSION=$LFS_PREV_VERSION" >> $GITHUB_ENV
# try to find results from tests
- - name: collect-results
+ - name: create-table
run: |
# previous results to compare against?
[ -n "$LFS_PREV_VERSION" ] && curl -sS \
- "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/`
- `status/$LFS_PREV_VERSION?per_page=100" \
+ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/$LFS_PREV_VERSION`
+ `?per_page=100" \
| jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
- >> prev-results.json \
+ >> prev-status.json \
|| true
# build table for GitHub
- echo "
" >> results.txt
- echo "" >> results.txt
- echo "" >> results.txt
- echo "Configuration " >> results.txt
- for r in Code Stack Structs Coverage
- do
- echo "$r " >> results.txt
- done
- echo " " >> results.txt
- echo " " >> results.txt
+ declare -A table
- echo "" >> results.txt
+ # sizes table
+ i=0
+ j=0
for c in "" readonly threadsafe migrate error-asserts
do
- echo "" >> results.txt
+ # per-config results
c_or_default=${c:-default}
- echo "${c_or_default^} " >> results.txt
- for r in code stack structs
+ c_camel=${c_or_default^}
+ table[$i,$j]=$c_camel
+ ((j+=1))
+
+ for s in code stack structs
do
- # per-config results
- echo "" >> results.txt
- [ -e results/thumb${c:+-$c}.csv ] && ( \
+ f=sizes/thumb${c:+-$c}.$s.csv
+ [ -e $f ] && table[$i,$j]=$( \
export PREV="$(jq -re '
- select(.context == "'"results (thumb${c:+, $c}) / $r"'").description
- | capture("(?[0-9∞]+)").result' \
- prev-results.json || echo 0)"
- ./scripts/summary.py results/thumb${c:+-$c}.csv -f $r -Y | awk '
- NR==2 {printf "%s B",$2}
- NR==2 && ENVIRON["PREV"]+0 != 0 {
- printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
- NR==2 {printf "\n"}' \
- | sed -e 's/ /\&nbsp;/g' \
- >> results.txt)
- echo " " >> results.txt
+ select(.context == "'"sizes (thumb${c:+, $c}) / $s"'").description
+ | capture("(?[0-9∞]+)").prev' \
+ prev-status.json || echo 0)"
+ ./scripts/summary.py $f --max=stack_limit -Y \
+ | awk '
+ NR==2 {$1=0; printf "%s B",$NF}
+ NR==2 && ENVIRON["PREV"]+0 != 0 {
+ printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
+ }' \
+ | sed -e 's/ /\&nbsp;/g')
+ ((j+=1))
done
- # coverage results
- if [ -z $c ]
- then
- echo "" >> results.txt
- [ -e results/coverage.csv ] && ( \
- export PREV="$(jq -re '
- select(.context == "results / coverage").description
- | capture("(?[0-9\\.]+)").result' \
- prev-results.json || echo 0)"
- ./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
- NR==2 {printf "%.1f%% of %d lines",$4,$3}
+ ((j=0, i+=1))
+ done
+
+ # coverage table
+ i=0
+ j=4
+ for s in lines branches
+ do
+ table[$i,$j]=${s^}
+ ((j+=1))
+
+ f=cov/cov.csv
+ [ -e $f ] && table[$i,$j]=$( \
+ export PREV="$(jq -re '
+ select(.context == "'"cov / $s"'").description
+ | capture("(?[0-9]+)/(?[0-9]+)")
+ | 100*((.prev_a|tonumber) / (.prev_b|tonumber))' \
+ prev-status.json || echo 0)"
+ ./scripts/cov.py -u $f -f$s -Y \
+ | awk -F '[ /%]+' -v s=$s '
+ NR==2 {$1=0; printf "%d/%d %s",$2,$3,s}
NR==2 && ENVIRON["PREV"]+0 != 0 {
- printf " (%+.1f%%)",$4-ENVIRON["PREV"]}
- NR==2 {printf "\n"}' \
- | sed -e 's/ /\&nbsp;/g' \
- >> results.txt)
- echo " " >> results.txt
- fi
- echo " " >> results.txt
+ printf " (%+.1f%%)",$4-ENVIRON["PREV"]
+ }' \
+ | sed -e 's/ /\&nbsp;/g')
+ ((j=4, i+=1))
done
- echo " " >> results.txt
- echo "
" >> results.txt
- cat results.txt
+ # benchmark table
+ i=3
+ j=4
+ for s in readed proged erased
+ do
+ table[$i,$j]=${s^}
+ ((j+=1))
+
+ f=bench/bench.csv
+ [ -e $f ] && table[$i,$j]=$( \
+ export PREV="$(jq -re '
+ select(.context == "'"bench / $s"'").description
+ | capture("(?[0-9]+)").prev' \
+ prev-status.json || echo 0)"
+ ./scripts/summary.py $f -f$s=bench_$s -Y \
+ | awk '
+ NR==2 {$1=0; printf "%s B",$NF}
+ NR==2 && ENVIRON["PREV"]+0 != 0 {
+ printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
+ }' \
+ | sed -e 's/ /\&nbsp;/g')
+ ((j=4, i+=1))
+ done
+
+ # build the actual table
+ echo "| | Code | Stack | Structs | | Coverage |" >> table.txt
+ echo "|:--|-----:|------:|--------:|:--|---------:|" >> table.txt
+ for ((i=0; i<6; i++))
+ do
+ echo -n "|" >> table.txt
+ for ((j=0; j<6; j++))
+ do
+ echo -n " " >> table.txt
+ [[ i -eq 2 && j -eq 5 ]] && echo -n "**Benchmarks**" >> table.txt
+ echo -n "${table[$i,$j]:-}" >> table.txt
+ echo -n " |" >> table.txt
+ done
+ echo >> table.txt
+ done
+
+ cat table.txt
# find changes from history
- - name: collect-changes
+ - name: create-changes
run: |
[ -n "$LFS_PREV_VERSION" ] || exit 0
# use explicit link to github commit so that release notes can
@@ -164,7 +222,7 @@ jobs:
git config user.email ${{secrets.BOT_EMAIL}}
git fetch "https://github.com/$GITHUB_REPOSITORY.git" \
"v$LFS_VERSION_MAJOR-prefix" || true
- ./scripts/prefix.py "lfs$LFS_VERSION_MAJOR"
+ ./scripts/changeprefix.py --git "lfs" "lfs$LFS_VERSION_MAJOR"
git branch "v$LFS_VERSION_MAJOR-prefix" $( \
git commit-tree $(git write-tree) \
$(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
@@ -182,15 +240,18 @@ jobs:
run: |
# create release and patch version tag (vN.N.N)
# only draft if not a patch release
- [ -e results.txt ] && export RESULTS="$(cat results.txt)"
- [ -e changes.txt ] && export CHANGES="$(cat changes.txt)"
+ [ -e table.txt ] && cat table.txt >> release.txt
+ echo >> release.txt
+ [ -e changes.txt ] && cat changes.txt >> release.txt
+ cat release.txt
+
curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases" \
- -d "$(jq -n '{
+ -d "$(jq -n --rawfile release release.txt '{
tag_name: env.LFS_VERSION,
name: env.LFS_VERSION | rtrimstr(".0"),
target_commitish: "${{github.event.workflow_run.head_sha}}",
draft: env.LFS_VERSION | endswith(".0"),
- body: [env.RESULTS, env.CHANGES | select(.)] | join("\n\n")}' \
- | tee /dev/stderr)"
+ body: $release,
+ }' | tee /dev/stderr)"
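As a quick check of the name/draft logic above: `rtrimstr(".0")` and `endswith(".0")` are what distinguish a minor release (drafted so the notes can be reviewed) from a patch release (published as-is). For example:

```sh
export LFS_VERSION=v2.5.0
jq -n '{name: (env.LFS_VERSION | rtrimstr(".0")),
        draft: (env.LFS_VERSION | endswith(".0"))}'
# {"name": "v2.5", "draft": true}    <- minor release, starts as a draft

export LFS_VERSION=v2.5.1
jq -n '{name: (env.LFS_VERSION | rtrimstr(".0")),
        draft: (env.LFS_VERSION | endswith(".0"))}'
# {"name": "v2.5.1", "draft": false} <- patch release, published directly
```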
diff --git a/.github/workflows/status.yml b/.github/workflows/status.yml
index d28b17cc..8bd3990c 100644
--- a/.github/workflows/status.yml
+++ b/.github/workflows/status.yml
@@ -4,11 +4,15 @@ on:
workflows: [test]
types: [completed]
+defaults:
+ run:
+ shell: bash -euv -o pipefail {0}
+
jobs:
+ # forward custom statuses
status:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
- # custom statuses?
- uses: dawidd6/action-download-artifact@v2
continue-on-error: true
with:
@@ -50,6 +54,47 @@ jobs:
state: env.STATE,
context: env.CONTEXT,
description: env.DESCRIPTION,
- target_url: env.TARGET_URL}' \
- | tee /dev/stderr)"
+ target_url: env.TARGET_URL,
+ }' | tee /dev/stderr)"
+ done
+
+ # forward custom pr-comments
+ comment:
+ runs-on: ubuntu-22.04
+
+ # only run on success (we don't want garbage comments!)
+ if: ${{github.event.workflow_run.conclusion == 'success'}}
+
+ steps:
+ # generated comment?
+ - uses: dawidd6/action-download-artifact@v2
+ continue-on-error: true
+ with:
+ workflow: ${{github.event.workflow_run.name}}
+ run_id: ${{github.event.workflow_run.id}}
+ name: comment
+ path: comment
+ - name: update-comment
+ continue-on-error: true
+ run: |
+ ls comment
+ for s in $(shopt -s nullglob ; echo comment/*.json)
+ do
+ export NUMBER="$(jq -er '.number' $s)"
+ export BODY="$(jq -er '.body' $s)"
+
+ # check that the comment was from the most recent commit on the
+ # pull request
+ [ "$(curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \
+ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/pulls/$NUMBER" \
+ | jq -er '.head.sha')" \
+ == ${{github.event.workflow_run.head_sha}} ] || continue
+
+ # update comment
+ curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
+ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/issues/`
+ `$NUMBER/comments" \
+ -d "$(jq -n '{
+ body: env.BODY,
+ }' | tee /dev/stderr)"
done
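Each `status/*.json` file consumed by the loop above is a small artifact emitted by test.yml; `state`, `context`, and `description` feed the commit-status API directly, while `target_job`/`target_step` get resolved to a `target_url`. A hypothetical example (all values invented for illustration):

```sh
cat status/thumb.code.json
# {
#   "state": "success",
#   "context": "sizes (thumb) / code",
#   "description": "16916 B (+0.2%)",
#   "target_job": "test (thumb)",
#   "target_step": "sizes"
# }
```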
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 81f00c1e..2cee3528 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,14 +1,20 @@
name: test
on: [push, pull_request]
+defaults:
+ run:
+ shell: bash -euv -o pipefail {0}
+
env:
CFLAGS: -Werror
MAKEFLAGS: -j
+ TESTFLAGS: -k
+ BENCHFLAGS:
jobs:
# run tests
test:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
strategy:
fail-fast: false
matrix:
@@ -18,315 +24,506 @@ jobs:
- uses: actions/checkout@v2
- name: install
run: |
- # need a few additional tools
- #
- # note this includes gcc-10, which is required for -fcallgraph-info=su
+ # need a few things
sudo apt-get update -qq
- sudo apt-get install -qq gcc-10 python3 python3-pip lcov
- sudo pip3 install toml
- echo "CC=gcc-10" >> $GITHUB_ENV
- gcc-10 --version
- lcov --version
+ sudo apt-get install -qq gcc python3 python3-pip
+ pip3 install toml
+ gcc --version
python3 --version
- # need newer lcov version for gcc-10
- #sudo apt-get remove lcov
- #wget https://launchpad.net/ubuntu/+archive/primary/+files/lcov_1.15-1_all.deb
- #sudo apt install ./lcov_1.15-1_all.deb
- #lcov --version
- #which lcov
- #ls -lha /usr/bin/lcov
- wget https://github.com/linux-test-project/lcov/releases/download/v1.15/lcov-1.15.tar.gz
- tar xf lcov-1.15.tar.gz
- sudo make -C lcov-1.15 install
-
- # setup a ram-backed disk to speed up reentrant tests
- mkdir disks
- sudo mount -t tmpfs -o size=100m tmpfs disks
- TESTFLAGS="$TESTFLAGS --disk=disks/disk"
-
- # collect coverage
- mkdir -p coverage
- TESTFLAGS="$TESTFLAGS --coverage=`
- `coverage/${{github.job}}-${{matrix.arch}}.info"
-
- echo "TESTFLAGS=$TESTFLAGS" >> $GITHUB_ENV
-
# cross-compile with ARM Thumb (32-bit, little-endian)
- name: install-thumb
if: ${{matrix.arch == 'thumb'}}
run: |
sudo apt-get install -qq \
- gcc-10-arm-linux-gnueabi \
+ gcc-arm-linux-gnueabi \
libc6-dev-armel-cross \
qemu-user
- echo "CC=arm-linux-gnueabi-gcc-10 -mthumb --static" >> $GITHUB_ENV
+ echo "CC=arm-linux-gnueabi-gcc -mthumb --static" >> $GITHUB_ENV
echo "EXEC=qemu-arm" >> $GITHUB_ENV
- arm-linux-gnueabi-gcc-10 --version
+ arm-linux-gnueabi-gcc --version
qemu-arm -version
# cross-compile with MIPS (32-bit, big-endian)
- name: install-mips
if: ${{matrix.arch == 'mips'}}
run: |
sudo apt-get install -qq \
- gcc-10-mips-linux-gnu \
+ gcc-mips-linux-gnu \
libc6-dev-mips-cross \
qemu-user
- echo "CC=mips-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
+ echo "CC=mips-linux-gnu-gcc --static" >> $GITHUB_ENV
echo "EXEC=qemu-mips" >> $GITHUB_ENV
- mips-linux-gnu-gcc-10 --version
+ mips-linux-gnu-gcc --version
qemu-mips -version
# cross-compile with PowerPC (32-bit, big-endian)
- name: install-powerpc
if: ${{matrix.arch == 'powerpc'}}
run: |
sudo apt-get install -qq \
- gcc-10-powerpc-linux-gnu \
+ gcc-powerpc-linux-gnu \
libc6-dev-powerpc-cross \
qemu-user
- echo "CC=powerpc-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
+ echo "CC=powerpc-linux-gnu-gcc --static" >> $GITHUB_ENV
echo "EXEC=qemu-ppc" >> $GITHUB_ENV
- powerpc-linux-gnu-gcc-10 --version
+ powerpc-linux-gnu-gcc --version
qemu-ppc -version
+ # does littlefs compile?
+ - name: test-build
+ run: |
+ make clean
+ make build
+
# make sure example can at least compile
- name: test-example
run: |
+ make clean
sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c
- make all CFLAGS+=" \
+ CFLAGS="$CFLAGS \
-Duser_provided_block_device_read=NULL \
-Duser_provided_block_device_prog=NULL \
-Duser_provided_block_device_erase=NULL \
-Duser_provided_block_device_sync=NULL \
- -include stdio.h"
+ -include stdio.h" \
+ make all
rm test.c
- # test configurations
- # normal+reentrant tests
- - name: test-default
- run: |
- make clean
- make test TESTFLAGS+="-nrk"
- # NOR flash: read/prog = 1 block = 4KiB
- - name: test-nor
- run: |
- make clean
- make test TESTFLAGS+="-nrk \
- -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
- # SD/eMMC: read/prog = 512 block = 512
- - name: test-emmc
+ # run the tests!
+ - name: test
run: |
make clean
- make test TESTFLAGS+="-nrk \
- -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
- # NAND flash: read/prog = 4KiB block = 32KiB
- - name: test-nand
+ make test
+
+ # collect coverage info
+ #
+ # Note the goal is to maximize coverage in the small, easy-to-run
+ # tests, so we intentionally exclude more aggressive powerloss testing
+ # from coverage results
+ - name: cov
+ if: ${{matrix.arch == 'x86_64'}}
run: |
- make clean
- make test TESTFLAGS+="-nrk \
- -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
- # other extreme geometries that are useful for various corner cases
- - name: test-no-intrinsics
+ make lfs.cov.csv
+ ./scripts/cov.py -u lfs.cov.csv
+ mkdir -p cov
+ cp lfs.cov.csv cov/cov.csv
+
+ # find compile-time measurements
+ - name: sizes
run: |
make clean
- make test TESTFLAGS+="-nrk \
- -DLFS_NO_INTRINSICS"
- - name: test-byte-writes
- # it just takes too long to test byte-level writes when in qemu,
- # should be plenty covered by the other configurations
- if: ${{matrix.arch == 'x86_64'}}
+ CFLAGS="$CFLAGS \
+ -DLFS_NO_ASSERT \
+ -DLFS_NO_DEBUG \
+ -DLFS_NO_WARN \
+ -DLFS_NO_ERROR" \
+ make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
+ ./scripts/structs.py -u lfs.structs.csv
+ ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
+ -bfunction \
+ -fcode=code_size \
+ -fdata=data_size \
+ -fstack=stack_limit --max=stack_limit
+ mkdir -p sizes
+ cp lfs.code.csv sizes/${{matrix.arch}}.code.csv
+ cp lfs.data.csv sizes/${{matrix.arch}}.data.csv
+ cp lfs.stack.csv sizes/${{matrix.arch}}.stack.csv
+ cp lfs.structs.csv sizes/${{matrix.arch}}.structs.csv
+ - name: sizes-readonly
run: |
make clean
- make test TESTFLAGS+="-nrk \
- -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
- - name: test-block-cycles
+ CFLAGS="$CFLAGS \
+ -DLFS_NO_ASSERT \
+ -DLFS_NO_DEBUG \
+ -DLFS_NO_WARN \
+ -DLFS_NO_ERROR \
+ -DLFS_READONLY" \
+ make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
+ ./scripts/structs.py -u lfs.structs.csv
+ ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
+ -bfunction \
+ -fcode=code_size \
+ -fdata=data_size \
+ -fstack=stack_limit --max=stack_limit
+ mkdir -p sizes
+ cp lfs.code.csv sizes/${{matrix.arch}}-readonly.code.csv
+ cp lfs.data.csv sizes/${{matrix.arch}}-readonly.data.csv
+ cp lfs.stack.csv sizes/${{matrix.arch}}-readonly.stack.csv
+ cp lfs.structs.csv sizes/${{matrix.arch}}-readonly.structs.csv
+ - name: sizes-threadsafe
run: |
make clean
- make test TESTFLAGS+="-nrk \
- -DLFS_BLOCK_CYCLES=1"
- - name: test-odd-block-count
+ CFLAGS="$CFLAGS \
+ -DLFS_NO_ASSERT \
+ -DLFS_NO_DEBUG \
+ -DLFS_NO_WARN \
+ -DLFS_NO_ERROR \
+ -DLFS_THREADSAFE" \
+ make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
+ ./scripts/structs.py -u lfs.structs.csv
+ ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
+ -bfunction \
+ -fcode=code_size \
+ -fdata=data_size \
+ -fstack=stack_limit --max=stack_limit
+ mkdir -p sizes
+ cp lfs.code.csv sizes/${{matrix.arch}}-threadsafe.code.csv
+ cp lfs.data.csv sizes/${{matrix.arch}}-threadsafe.data.csv
+ cp lfs.stack.csv sizes/${{matrix.arch}}-threadsafe.stack.csv
+ cp lfs.structs.csv sizes/${{matrix.arch}}-threadsafe.structs.csv
+ - name: sizes-migrate
run: |
make clean
- make test TESTFLAGS+="-nrk \
- -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
- - name: test-odd-block-size
+ CFLAGS="$CFLAGS \
+ -DLFS_NO_ASSERT \
+ -DLFS_NO_DEBUG \
+ -DLFS_NO_WARN \
+ -DLFS_NO_ERROR \
+ -DLFS_MIGRATE" \
+ make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
+ ./scripts/structs.py -u lfs.structs.csv
+ ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
+ -bfunction \
+ -fcode=code_size \
+ -fdata=data_size \
+ -fstack=stack_limit --max=stack_limit
+ mkdir -p sizes
+ cp lfs.code.csv sizes/${{matrix.arch}}-migrate.code.csv
+ cp lfs.data.csv sizes/${{matrix.arch}}-migrate.data.csv
+ cp lfs.stack.csv sizes/${{matrix.arch}}-migrate.stack.csv
+ cp lfs.structs.csv sizes/${{matrix.arch}}-migrate.structs.csv
+ - name: sizes-error-asserts
run: |
make clean
- make test TESTFLAGS+="-nrk \
- -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
+ CFLAGS="$CFLAGS \
+ -DLFS_NO_DEBUG \
+ -DLFS_NO_WARN \
+ -DLFS_NO_ERROR \
+ -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'" \
+ make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
+ ./scripts/structs.py -u lfs.structs.csv
+ ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
+ -bfunction \
+ -fcode=code_size \
+ -fdata=data_size \
+ -fstack=stack_limit --max=stack_limit
+ mkdir -p sizes
+ cp lfs.code.csv sizes/${{matrix.arch}}-error-asserts.code.csv
+ cp lfs.data.csv sizes/${{matrix.arch}}-error-asserts.data.csv
+ cp lfs.stack.csv sizes/${{matrix.arch}}-error-asserts.stack.csv
+ cp lfs.structs.csv sizes/${{matrix.arch}}-error-asserts.structs.csv
- # upload coverage for later coverage
- - name: upload-coverage
+ # create size statuses
+ - name: upload-sizes
uses: actions/upload-artifact@v2
with:
- name: coverage
- path: coverage
- retention-days: 1
-
- # update results
- - name: results
+ name: sizes
+ path: sizes
+ - name: status-sizes
run: |
- mkdir -p results
- make clean
- make lfs.csv \
- CFLAGS+=" \
- -DLFS_NO_ASSERT \
- -DLFS_NO_DEBUG \
- -DLFS_NO_WARN \
- -DLFS_NO_ERROR"
- cp lfs.csv results/${{matrix.arch}}.csv
- ./scripts/summary.py results/${{matrix.arch}}.csv
- - name: results-readonly
- run: |
- mkdir -p results
- make clean
- make lfs.csv \
- CFLAGS+=" \
- -DLFS_NO_ASSERT \
- -DLFS_NO_DEBUG \
- -DLFS_NO_WARN \
- -DLFS_NO_ERROR \
- -DLFS_READONLY"
- cp lfs.csv results/${{matrix.arch}}-readonly.csv
- ./scripts/summary.py results/${{matrix.arch}}-readonly.csv
- - name: results-threadsafe
- run: |
- mkdir -p results
- make clean
- make lfs.csv \
- CFLAGS+=" \
- -DLFS_NO_ASSERT \
- -DLFS_NO_DEBUG \
- -DLFS_NO_WARN \
- -DLFS_NO_ERROR \
- -DLFS_THREADSAFE"
- cp lfs.csv results/${{matrix.arch}}-threadsafe.csv
- ./scripts/summary.py results/${{matrix.arch}}-threadsafe.csv
- - name: results-migrate
- run: |
- mkdir -p results
- make clean
- make lfs.csv \
- CFLAGS+=" \
- -DLFS_NO_ASSERT \
- -DLFS_NO_DEBUG \
- -DLFS_NO_WARN \
- -DLFS_NO_ERROR \
- -DLFS_MIGRATE"
- cp lfs.csv results/${{matrix.arch}}-migrate.csv
- ./scripts/summary.py results/${{matrix.arch}}-migrate.csv
- - name: results-error-asserts
- run: |
- mkdir -p results
- make clean
- make lfs.csv \
- CFLAGS+=" \
- -DLFS_NO_DEBUG \
- -DLFS_NO_WARN \
- -DLFS_NO_ERROR \
- -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'"
- cp lfs.csv results/${{matrix.arch}}-error-asserts.csv
- ./scripts/summary.py results/${{matrix.arch}}-error-asserts.csv
- - name: upload-results
+ mkdir -p status
+ for f in $(shopt -s nullglob ; echo sizes/*.csv)
+ do
+ # skip .data.csv as it should always be zero
+ [[ $f == *.data.csv ]] && continue
+ export STEP="sizes$(echo $f \
+ | sed -n 's/[^-.]*-\([^.]*\)\..*csv/-\1/p')"
+ export CONTEXT="sizes (${{matrix.arch}}$(echo $f \
+ | sed -n 's/[^-.]*-\([^.]*\)\..*csv/, \1/p')) / $(echo $f \
+ | sed -n 's/[^.]*\.\(.*\)\.csv/\1/p')"
+ export PREV="$(curl -sS \
+ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
+ `?per_page=100" \
+ | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
+ | select(.context == env.CONTEXT).description
+ | capture("(?[0-9∞]+)").prev' \
+ || echo 0)"
+ export DESCRIPTION="$(./scripts/summary.py $f --max=stack_limit -Y \
+ | awk '
+ NR==2 {$1=0; printf "%s B",$NF}
+ NR==2 && ENVIRON["PREV"]+0 != 0 {
+ printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
+ }')"
+ jq -n '{
+ state: "success",
+ context: env.CONTEXT,
+ description: env.DESCRIPTION,
+ target_job: "${{github.job}} (${{matrix.arch}})",
+ target_step: env.STEP,
+ }' | tee status/$(basename $f .csv).json
+ done
+ - name: upload-status-sizes
uses: actions/upload-artifact@v2
with:
- name: results
- path: results
+ name: status
+ path: status
+ retention-days: 1
- # create statuses with results
- - name: collect-status
+ # create cov statuses
+ - name: upload-cov
+ if: ${{matrix.arch == 'x86_64'}}
+ uses: actions/upload-artifact@v2
+ with:
+ name: cov
+ path: cov
+ - name: status-cov
+ if: ${{matrix.arch == 'x86_64'}}
run: |
mkdir -p status
- for f in $(shopt -s nullglob ; echo results/*.csv)
+ f=cov/cov.csv
+ for s in lines branches
do
- export STEP="results$(
- echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p')"
- for r in code stack structs
- do
- export CONTEXT="results (${{matrix.arch}}$(
- echo $f | sed -n 's/[^-]*-\(.*\).csv/, \1/p')) / $r"
- export PREV="$(curl -sS \
- "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
- | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
- | select(.context == env.CONTEXT).description
- | capture("(?[0-9∞]+)").result' \
- || echo 0)"
- export DESCRIPTION="$(./scripts/summary.py $f -f $r -Y | awk '
- NR==2 {printf "%s B",$2}
+ export STEP="cov"
+ export CONTEXT="cov / $s"
+ export PREV="$(curl -sS \
+ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
+ `?per_page=100" \
+ | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
+ | select(.context == env.CONTEXT).description
+ | capture("(?[0-9]+)/(?[0-9]+)")
+ | 100*((.prev_a|tonumber) / (.prev_b|tonumber))' \
+ || echo 0)"
+ export DESCRIPTION="$(./scripts/cov.py -u $f -f$s -Y \
+ | awk -F '[ /%]+' -v s=$s '
+ NR==2 {$1=0; printf "%d/%d %s",$2,$3,s}
NR==2 && ENVIRON["PREV"]+0 != 0 {
- printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')"
- jq -n '{
- state: "success",
- context: env.CONTEXT,
- description: env.DESCRIPTION,
- target_job: "${{github.job}} (${{matrix.arch}})",
- target_step: env.STEP}' \
- | tee status/$r-${{matrix.arch}}$(
- echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p').json
- done
+ printf " (%+.1f%%)",$4-ENVIRON["PREV"]
+ }')"
+ jq -n '{
+ state: "success",
+ context: env.CONTEXT,
+ description: env.DESCRIPTION,
+ target_job: "${{github.job}} (${{matrix.arch}})",
+ target_step: env.STEP,
+ }' | tee status/$(basename $f .csv)-$s.json
done
- - name: upload-status
+ - name: upload-status-cov
+ if: ${{matrix.arch == 'x86_64'}}
uses: actions/upload-artifact@v2
with:
name: status
path: status
retention-days: 1
- # run under Valgrind to check for memory errors
- valgrind:
- runs-on: ubuntu-20.04
+ # run as many exhaustive tests as fit in GitHub's time limits
+ #
+ # this grows exponentially, so it doesn't turn out to be that many
+ test-pls:
+ runs-on: ubuntu-22.04
+ strategy:
+ fail-fast: false
+ matrix:
+ pls: [1, 2]
+
steps:
- uses: actions/checkout@v2
- name: install
run: |
- # need toml, also pip3 isn't installed by default?
+ # need a few things
sudo apt-get update -qq
- sudo apt-get install -qq python3 python3-pip
- sudo pip3 install toml
- - name: install-valgrind
+ sudo apt-get install -qq gcc python3 python3-pip
+ pip3 install toml
+ gcc --version
+ python3 --version
+ - name: test-pls
+ if: ${{matrix.pls <= 1}}
+ run: |
+ TESTFLAGS="$TESTFLAGS -P${{matrix.pls}}" make test
+ # >=2pls takes multiple days to run fully, so we can only
+ # run a subset of tests; these are the most important
+ - name: test-limited-pls
+ if: ${{matrix.pls > 1}}
+ run: |
+ TESTFLAGS="$TESTFLAGS -P${{matrix.pls}} test_dirs test_relocations" \
+ make test
+
+ # run with LFS_NO_INTRINSICS to make sure that works
+ test-no-intrinsics:
+ runs-on: ubuntu-22.04
+ steps:
+ - uses: actions/checkout@v2
+ - name: install
+ run: |
+ # need a few things
+ sudo apt-get update -qq
+ sudo apt-get install -qq gcc python3 python3-pip
+ pip3 install toml
+ gcc --version
+ python3 --version
+ - name: test-no-intrinsics
+ run: |
+ CFLAGS="$CFLAGS -DLFS_NO_INTRINSICS" make test
+
+ # run under Valgrind to check for memory errors
+ test-valgrind:
+ runs-on: ubuntu-22.04
+ steps:
+ - uses: actions/checkout@v2
+ - name: install
run: |
+ # need a few things
sudo apt-get update -qq
- sudo apt-get install -qq valgrind
+ sudo apt-get install -qq gcc python3 python3-pip valgrind
+ pip3 install toml
+ gcc --version
+ python3 --version
valgrind --version
- # normal tests, we don't need to test all geometries
+ # Valgrind takes a while with diminishing value, so only test
+ # on one geometry
- name: test-valgrind
- run: make test TESTFLAGS+="-k --valgrind"
+ run: |
+ TESTFLAGS="$TESTFLAGS --valgrind -Gdefault -Pnone" make test
- # test that compilation is warning free under clang
- clang:
- runs-on: ubuntu-20.04
+ # run with Clang, mostly to check for Clang-specific warnings
+ test-clang:
+ runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v2
- name: install
run: |
- # need toml, also pip3 isn't installed by default?
+ # need a few things
+ sudo apt-get install -qq clang python3 python3-pip
+ pip3 install toml
+ clang --version
+ python3 --version
+ - name: test-clang
+ run: |
+ # override CFLAGS since Clang does not support -fcallgraph-info
+ # and -ftrack-macro-expansion
+ make \
+ CC=clang \
+ CFLAGS="$CFLAGS -MMD -g3 -I. -std=c99 -Wall -Wextra -pedantic" \
+ test
+
+ # run benchmarks
+ #
+ # note there's no real benefit to running these on multiple archs
+ bench:
+ runs-on: ubuntu-22.04
+ steps:
+ - uses: actions/checkout@v2
+ - name: install
+ run: |
+ # need a few things
sudo apt-get update -qq
- sudo apt-get install -qq python3 python3-pip
- sudo pip3 install toml
- - name: install-clang
+ sudo apt-get install -qq gcc python3 python3-pip valgrind
+ pip3 install toml
+ gcc --version
+ python3 --version
+ valgrind --version
+ - name: bench
+ run: |
+ make bench
+
+ # find bench results
+ make lfs.bench.csv
+ ./scripts/summary.py lfs.bench.csv \
+ -bsuite \
+ -freaded=bench_readed \
+ -fproged=bench_proged \
+ -ferased=bench_erased
+ mkdir -p bench
+ cp lfs.bench.csv bench/bench.csv
+
+ # find perfbd results
+ make lfs.perfbd.csv
+ ./scripts/perfbd.py -u lfs.perfbd.csv
+ mkdir -p bench
+ cp lfs.perfbd.csv bench/perfbd.csv
+
+ # create bench statuses
+ - name: upload-bench
+ uses: actions/upload-artifact@v2
+ with:
+ name: bench
+ path: bench
+ - name: status-bench
+ run: |
+ mkdir -p status
+ f=bench/bench.csv
+ for s in readed proged erased
+ do
+ export STEP="bench"
+ export CONTEXT="bench / $s"
+ export PREV="$(curl -sS \
+ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
+ `?per_page=100" \
+ | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
+ | select(.context == env.CONTEXT).description
+ | capture("(?[0-9]+)").prev' \
+ || echo 0)"
+ export DESCRIPTION="$(./scripts/summary.py $f -f$s=bench_$s -Y \
+ | awk '
+ NR==2 {$1=0; printf "%s B",$NF}
+ NR==2 && ENVIRON["PREV"]+0 != 0 {
+ printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
+ }')"
+ jq -n '{
+ state: "success",
+ context: env.CONTEXT,
+ description: env.DESCRIPTION,
+ target_job: "${{github.job}}",
+ target_step: env.STEP,
+ }' | tee status/$(basename $f .csv)-$s.json
+ done
+ - name: upload-status-bench
+ uses: actions/upload-artifact@v2
+ with:
+ name: status
+ path: status
+ retention-days: 1
+
+ # run compatibility tests using the current master as the previous version
+ test-compat:
+ runs-on: ubuntu-22.04
+ steps:
+ - uses: actions/checkout@v2
+ if: ${{github.event_name == 'pull_request'}}
+ # checkout the current pr target into lfsp
+ - uses: actions/checkout@v2
+ if: ${{github.event_name == 'pull_request'}}
+ with:
+ ref: ${{github.event.pull_request.base.ref}}
+ path: lfsp
+ - name: install
+ if: ${{github.event_name == 'pull_request'}}
run: |
+ # need a few things
sudo apt-get update -qq
- sudo apt-get install -qq clang
- echo "CC=clang" >> $GITHUB_ENV
- clang --version
- # no reason to not test again
- - name: test-clang
- run: make test TESTFLAGS+="-k"
+ sudo apt-get install -qq gcc python3 python3-pip
+ pip3 install toml
+ gcc --version
+ python3 --version
+ # adjust prefix of lfsp
+ - name: changeprefix
+ if: ${{github.event_name == 'pull_request'}}
+ run: |
+ ./scripts/changeprefix.py lfs lfsp lfsp/*.h lfsp/*.c
+ - name: test-compat
+ if: ${{github.event_name == 'pull_request'}}
+ run: |
+ TESTS=tests/test_compat.toml \
+ SRC="$(find . lfsp -name '*.c' -maxdepth 1 \
+ -and -not -name '*.t.*' \
+ -and -not -name '*.b.*')" \
+ CFLAGS="-DLFSP=lfsp/lfsp.h" \
+ make test
# self-host with littlefs-fuse for a fuzz-like test
fuse:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
if: ${{!endsWith(github.ref, '-prefix')}}
steps:
- uses: actions/checkout@v2
- name: install
run: |
- # need toml, also pip3 isn't installed by default?
+ # need a few things
sudo apt-get update -qq
- sudo apt-get install -qq python3 python3-pip libfuse-dev
+ sudo apt-get install -qq gcc python3 python3-pip libfuse-dev
sudo pip3 install toml
- fusermount -V
gcc --version
+ python3 --version
+ fusermount -V
- uses: actions/checkout@v2
with:
repository: littlefs-project/littlefs-fuse
@@ -359,22 +556,24 @@ jobs:
cd mount/littlefs
stat .
ls -flh
+ make -B test-runner
make -B test
# test migration using littlefs-fuse
migrate:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
if: ${{!endsWith(github.ref, '-prefix')}}
steps:
- uses: actions/checkout@v2
- name: install
run: |
- # need toml, also pip3 isn't installed by default?
+ # need a few things
sudo apt-get update -qq
- sudo apt-get install -qq python3 python3-pip libfuse-dev
+ sudo apt-get install -qq gcc python3 python3-pip libfuse-dev
sudo pip3 install toml
- fusermount -V
gcc --version
+ python3 --version
+ fusermount -V
- uses: actions/checkout@v2
with:
repository: littlefs-project/littlefs-fuse
@@ -414,6 +613,7 @@ jobs:
cd mount/littlefs
stat .
ls -flh
+ make -B test-runner
make -B test
# attempt to migrate
@@ -428,66 +628,185 @@ jobs:
cd mount/littlefs
stat .
ls -flh
+ make -B test-runner
make -B test
- # collect coverage info
- coverage:
- runs-on: ubuntu-20.04
- needs: [test]
+ # status-related tasks that run after tests
+ status:
+ runs-on: ubuntu-22.04
+ needs: [test, bench]
steps:
- uses: actions/checkout@v2
+ if: ${{github.event_name == 'pull_request'}}
- name: install
+ if: ${{github.event_name == 'pull_request'}}
run: |
- sudo apt-get update -qq
- sudo apt-get install -qq python3 python3-pip lcov
- sudo pip3 install toml
- # yes we continue-on-error nearly every step, continue-on-error
- # at job level apparently still marks a job as failed, which isn't
- # what we want
+ # need a few things
+ sudo apt-get install -qq gcc python3 python3-pip
+ pip3 install toml
+ gcc --version
+ python3 --version
- uses: actions/download-artifact@v2
+ if: ${{github.event_name == 'pull_request'}}
continue-on-error: true
with:
- name: coverage
- path: coverage
- - name: results-coverage
+ name: sizes
+ path: sizes
+ - uses: actions/download-artifact@v2
+ if: ${{github.event_name == 'pull_request'}}
continue-on-error: true
- run: |
- mkdir -p results
- lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \
- -o results/coverage.info
- ./scripts/coverage.py results/coverage.info -o results/coverage.csv
- - name: upload-results
- uses: actions/upload-artifact@v2
with:
- name: results
- path: results
- - name: collect-status
- run: |
- mkdir -p status
- [ -e results/coverage.csv ] || exit 0
- export STEP="results-coverage"
- export CONTEXT="results / coverage"
- export PREV="$(curl -sS \
- "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
- | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
- | select(.context == env.CONTEXT).description
- | capture("(?[0-9\\.]+)").result' \
- || echo 0)"
- export DESCRIPTION="$(
- ./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
- NR==2 {printf "%.1f%% of %d lines",$4,$3}
- NR==2 && ENVIRON["PREV"]+0 != 0 {
- printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')"
- jq -n '{
- state: "success",
- context: env.CONTEXT,
- description: env.DESCRIPTION,
- target_job: "${{github.job}}",
- target_step: env.STEP}' \
- | tee status/coverage.json
- - name: upload-status
+ name: cov
+ path: cov
+ - uses: actions/download-artifact@v2
+ if: ${{github.event_name == 'pull_request'}}
+ continue-on-error: true
+ with:
+ name: bench
+ path: bench
+
+ # try to find results from tests
+ - name: create-table
+ if: ${{github.event_name == 'pull_request'}}
+ run: |
+ # compare against pull-request target
+ curl -sS \
+ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/`
+ `${{github.event.pull_request.base.ref}}`
+ `?per_page=100" \
+ | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
+ >> prev-status.json \
+ || true
+
+ # build table for GitHub
+ declare -A table
+
+ # sizes table
+ i=0
+ j=0
+ for c in "" readonly threadsafe migrate error-asserts
+ do
+ # per-config results
+ c_or_default=${c:-default}
+ c_camel=${c_or_default^}
+ table[$i,$j]=$c_camel
+ ((j+=1))
+
+ for s in code stack structs
+ do
+ f=sizes/thumb${c:+-$c}.$s.csv
+ [ -e $f ] && table[$i,$j]=$( \
+ export PREV="$(jq -re '
+ select(.context == "'"sizes (thumb${c:+, $c}) / $s"'").description
+ | capture("(?[0-9∞]+)").prev' \
+ prev-status.json || echo 0)"
+ ./scripts/summary.py $f --max=stack_limit -Y \
+ | awk '
+ NR==2 {$1=0; printf "%s B",$NF}
+ NR==2 && ENVIRON["PREV"]+0 != 0 {
+ printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
+ }' \
+ | sed -e 's/ /\&nbsp;/g')
+ ((j+=1))
+ done
+ ((j=0, i+=1))
+ done
+
+ # coverage table
+ i=0
+ j=4
+ for s in lines branches
+ do
+ table[$i,$j]=${s^}
+ ((j+=1))
+
+ f=cov/cov.csv
+ [ -e $f ] && table[$i,$j]=$( \
+ export PREV="$(jq -re '
+ select(.context == "'"cov / $s"'").description
+ | capture("(?[0-9]+)/(?[0-9]+)")
+ | 100*((.prev_a|tonumber) / (.prev_b|tonumber))' \
+ prev-status.json || echo 0)"
+ ./scripts/cov.py -u $f -f$s -Y \
+ | awk -F '[ /%]+' -v s=$s '
+ NR==2 {$1=0; printf "%d/%d %s",$2,$3,s}
+ NR==2 && ENVIRON["PREV"]+0 != 0 {
+ printf " (%+.1f%%)",$4-ENVIRON["PREV"]
+ }' \
+ | sed -e 's/ /\&nbsp;/g')
+ ((j=4, i+=1))
+ done
+
+ # benchmark table
+ i=3
+ j=4
+ for s in readed proged erased
+ do
+ table[$i,$j]=${s^}
+ ((j+=1))
+
+ f=bench/bench.csv
+ [ -e $f ] && table[$i,$j]=$( \
+ export PREV="$(jq -re '
+ select(.context == "'"bench / $s"'").description
+ | capture("(?[0-9]+)").prev' \
+ prev-status.json || echo 0)"
+ ./scripts/summary.py $f -f$s=bench_$s -Y \
+ | awk '
+ NR==2 {$1=0; printf "%s B",$NF}
+ NR==2 && ENVIRON["PREV"]+0 != 0 {
+ printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
+ }' \
+ | sed -e 's/ /\&nbsp;/g')
+ ((j=4, i+=1))
+ done
+
+ # build the actual table
+ echo "| | Code | Stack | Structs | | Coverage |" >> table.txt
+ echo "|:--|-----:|------:|--------:|:--|---------:|" >> table.txt
+ for ((i=0; i<6; i++))
+ do
+ echo -n "|" >> table.txt
+ for ((j=0; j<6; j++))
+ do
+ echo -n " " >> table.txt
+ [[ i -eq 2 && j -eq 5 ]] && echo -n "**Benchmarks**" >> table.txt
+ echo -n "${table[$i,$j]:-}" >> table.txt
+ echo -n " |" >> table.txt
+ done
+ echo >> table.txt
+ done
+
+ cat table.txt
+
+ # create a bot comment for successful runs on pull requests
+ - name: create-comment
+ if: ${{github.event_name == 'pull_request'}}
+ run: |
+ touch comment.txt
+ echo "" >> comment.txt
+ echo "" >> comment.txt
+ echo "Tests passed ✓, `
+ `Code: $(awk 'NR==3 {print $4}' table.txt || true), `
+ `Stack: $(awk 'NR==3 {print $6}' table.txt || true), `
+ `Structs: $(awk 'NR==3 {print $8}' table.txt || true)" \
+ >> comment.txt
+ echo " " >> comment.txt
+ echo >> comment.txt
+ [ -e table.txt ] && cat table.txt >> comment.txt
+ echo >> comment.txt
+ echo " " >> comment.txt
+ cat comment.txt
+
+ mkdir -p comment
+ jq -n --rawfile comment comment.txt '{
+ number: ${{github.event.number}},
+ body: $comment,
+ }' | tee comment/comment.json
+ - name: upload-comment
uses: actions/upload-artifact@v2
with:
- name: status
- path: status
+ name: comment
+ path: comment
retention-days: 1
+
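To make the `table[$i,$j]` layout concrete: with all artifacts present, the generated `table.txt` reads roughly as below (numbers invented for illustration; the real cells use `&nbsp;` instead of spaces so values don't wrap):

```
| | Code | Stack | Structs | | Coverage |
|:--|-----:|------:|--------:|:--|---------:|
| Default | 17144 B | 1432 B | 812 B | Lines | 2390/2482 lines |
| Readonly | 6266 B | 448 B | 812 B | Branches | 1604/1800 branches |
| Threadsafe | 18178 B | 1440 B | 820 B | | **Benchmarks** |
| Migrate | 18770 B | 1744 B | 816 B | Readed | 29369693462 B |
| Error-asserts | 17792 B | 1432 B | 812 B | Proged | 1482874226 B |
| | | | | Erased | 1568888832 B |
```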
diff --git a/.gitignore b/.gitignore
index 3f7b860e..09707c6b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,11 +4,31 @@
*.a
*.ci
*.csv
+*.t.*
+*.b.*
+*.gcno
+*.gcda
+*.perf
+lfs
+liblfs.a
# Testing things
-blocks/
-lfs
-test.c
-tests/*.toml.*
-scripts/__pycache__
+runners/test_runner
+runners/bench_runner
+lfs.code.csv
+lfs.data.csv
+lfs.stack.csv
+lfs.structs.csv
+lfs.cov.csv
+lfs.perf.csv
+lfs.perfbd.csv
+lfs.test.csv
+lfs.bench.csv
+
+# Misc
+tags
.gdb_history
+scripts/__pycache__
+
+# Historical, probably should remove at some point
+tests/*.toml.*
diff --git a/Makefile b/Makefile
index 13879336..24865e5e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,172 +1,582 @@
-ifdef BUILDDIR
-# make sure BUILDDIR ends with a slash
-override BUILDDIR := $(BUILDDIR)/
-# bit of a hack, but we want to make sure BUILDDIR directory structure
-# is correct before any commands
-$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \
- $(BUILDDIR) \
- $(BUILDDIR)bd \
- $(BUILDDIR)tests))
-endif
-
+# overridable build dir, default is in-place
+BUILDDIR ?= .
# overridable target/src/tools/flags/etc
ifneq ($(wildcard test.c main.c),)
-TARGET ?= $(BUILDDIR)lfs
+TARGET ?= $(BUILDDIR)/lfs
else
-TARGET ?= $(BUILDDIR)lfs.a
+TARGET ?= $(BUILDDIR)/liblfs.a
endif
-CC ?= gcc
-AR ?= ar
-SIZE ?= size
-CTAGS ?= ctags
-NM ?= nm
-OBJDUMP ?= objdump
-LCOV ?= lcov
+CC ?= gcc
+AR ?= ar
+SIZE ?= size
+CTAGS ?= ctags
+NM ?= nm
+OBJDUMP ?= objdump
+VALGRIND ?= valgrind
+GDB ?= gdb
+PERF ?= perf
-SRC ?= $(wildcard *.c)
-OBJ := $(SRC:%.c=$(BUILDDIR)%.o)
-DEP := $(SRC:%.c=$(BUILDDIR)%.d)
-ASM := $(SRC:%.c=$(BUILDDIR)%.s)
-CGI := $(SRC:%.c=$(BUILDDIR)%.ci)
+SRC ?= $(filter-out $(wildcard *.t.* *.b.*),$(wildcard *.c))
+OBJ := $(SRC:%.c=$(BUILDDIR)/%.o)
+DEP := $(SRC:%.c=$(BUILDDIR)/%.d)
+ASM := $(SRC:%.c=$(BUILDDIR)/%.s)
+CI := $(SRC:%.c=$(BUILDDIR)/%.ci)
+GCDA := $(SRC:%.c=$(BUILDDIR)/%.t.gcda)
+TESTS ?= $(wildcard tests/*.toml)
+TEST_SRC ?= $(SRC) \
+ $(filter-out $(wildcard bd/*.t.* bd/*.b.*),$(wildcard bd/*.c)) \
+ runners/test_runner.c
+TEST_RUNNER ?= $(BUILDDIR)/runners/test_runner
+TEST_A := $(TESTS:%.toml=$(BUILDDIR)/%.t.a.c) \
+ $(TEST_SRC:%.c=$(BUILDDIR)/%.t.a.c)
+TEST_C := $(TEST_A:%.t.a.c=%.t.c)
+TEST_OBJ := $(TEST_C:%.t.c=%.t.o)
+TEST_DEP := $(TEST_C:%.t.c=%.t.d)
+TEST_CI := $(TEST_C:%.t.c=%.t.ci)
+TEST_GCNO := $(TEST_C:%.t.c=%.t.gcno)
+TEST_GCDA := $(TEST_C:%.t.c=%.t.gcda)
+TEST_PERF := $(TEST_RUNNER:%=%.perf)
+TEST_TRACE := $(TEST_RUNNER:%=%.trace)
+TEST_CSV := $(TEST_RUNNER:%=%.csv)
+
+BENCHES ?= $(wildcard benches/*.toml)
+BENCH_SRC ?= $(SRC) \
+ $(filter-out $(wildcard bd/*.t.* bd/*.b.*),$(wildcard bd/*.c)) \
+ runners/bench_runner.c
+BENCH_RUNNER ?= $(BUILDDIR)/runners/bench_runner
+BENCH_A := $(BENCHES:%.toml=$(BUILDDIR)/%.b.a.c) \
+ $(BENCH_SRC:%.c=$(BUILDDIR)/%.b.a.c)
+BENCH_C := $(BENCH_A:%.b.a.c=%.b.c)
+BENCH_OBJ := $(BENCH_C:%.b.c=%.b.o)
+BENCH_DEP := $(BENCH_C:%.b.c=%.b.d)
+BENCH_CI := $(BENCH_C:%.b.c=%.b.ci)
+BENCH_GCNO := $(BENCH_C:%.b.c=%.b.gcno)
+BENCH_GCDA := $(BENCH_C:%.b.c=%.b.gcda)
+BENCH_PERF := $(BENCH_RUNNER:%=%.perf)
+BENCH_TRACE := $(BENCH_RUNNER:%=%.trace)
+BENCH_CSV := $(BENCH_RUNNER:%=%.csv)
+
+CFLAGS += -fcallgraph-info=su
+CFLAGS += -g3
+CFLAGS += -I.
+CFLAGS += -std=c99 -Wall -Wextra -pedantic
+CFLAGS += -ftrack-macro-expansion=0
ifdef DEBUG
-override CFLAGS += -O0
+CFLAGS += -O0
else
-override CFLAGS += -Os
+CFLAGS += -Os
endif
ifdef TRACE
-override CFLAGS += -DLFS_YES_TRACE
+CFLAGS += -DLFS_YES_TRACE
+endif
+ifdef YES_COV
+CFLAGS += --coverage
+endif
+ifdef YES_PERF
+CFLAGS += -fno-omit-frame-pointer
+endif
+ifdef YES_PERFBD
+CFLAGS += -fno-omit-frame-pointer
endif
-override CFLAGS += -g3
-override CFLAGS += -I.
-override CFLAGS += -std=c99 -Wall -Wextra -pedantic
ifdef VERBOSE
-override TESTFLAGS += -v
-override CALLSFLAGS += -v
-override CODEFLAGS += -v
-override DATAFLAGS += -v
-override STACKFLAGS += -v
-override STRUCTSFLAGS += -v
-override COVERAGEFLAGS += -v
+CODEFLAGS += -v
+DATAFLAGS += -v
+STACKFLAGS += -v
+STRUCTSFLAGS += -v
+COVFLAGS += -v
+PERFFLAGS += -v
+PERFBDFLAGS += -v
+endif
+# forward -j flag
+PERFFLAGS += $(filter -j%,$(MAKEFLAGS))
+PERFBDFLAGS += $(filter -j%,$(MAKEFLAGS))
+ifneq ($(NM),nm)
+CODEFLAGS += --nm-path="$(NM)"
+DATAFLAGS += --nm-path="$(NM)"
+endif
+ifneq ($(OBJDUMP),objdump)
+CODEFLAGS += --objdump-path="$(OBJDUMP)"
+DATAFLAGS += --objdump-path="$(OBJDUMP)"
+STRUCTSFLAGS += --objdump-path="$(OBJDUMP)"
+PERFFLAGS += --objdump-path="$(OBJDUMP)"
+PERFBDFLAGS += --objdump-path="$(OBJDUMP)"
+endif
+ifneq ($(PERF),perf)
+PERFFLAGS += --perf-path="$(PERF)"
+endif
+
+TESTFLAGS += -b
+BENCHFLAGS += -b
+# forward -j flag
+TESTFLAGS += $(filter -j%,$(MAKEFLAGS))
+BENCHFLAGS += $(filter -j%,$(MAKEFLAGS))
+ifdef YES_PERF
+TESTFLAGS += -p $(TEST_PERF)
+BENCHFLAGS += -p $(BENCH_PERF)
+endif
+ifdef YES_PERFBD
+TESTFLAGS += -t $(TEST_TRACE) --trace-backtrace --trace-freq=100
+endif
+ifndef NO_PERFBD
+BENCHFLAGS += -t $(BENCH_TRACE) --trace-backtrace --trace-freq=100
+endif
+ifdef YES_TESTMARKS
+TESTFLAGS += -o $(TEST_CSV)
+endif
+ifndef NO_BENCHMARKS
+BENCHFLAGS += -o $(BENCH_CSV)
+endif
+ifdef VERBOSE
+TESTFLAGS += -v
+TESTCFLAGS += -v
+BENCHFLAGS += -v
+BENCHCFLAGS += -v
endif
ifdef EXEC
-override TESTFLAGS += --exec="$(EXEC)"
+TESTFLAGS += --exec="$(EXEC)"
+BENCHFLAGS += --exec="$(EXEC)"
endif
-ifdef COVERAGE
-override TESTFLAGS += --coverage
+ifneq ($(GDB),gdb)
+TESTFLAGS += --gdb-path="$(GDB)"
+BENCHFLAGS += --gdb-path="$(GDB)"
endif
-ifdef BUILDDIR
-override TESTFLAGS += --build-dir="$(BUILDDIR:/=)"
-override CALLSFLAGS += --build-dir="$(BUILDDIR:/=)"
-override CODEFLAGS += --build-dir="$(BUILDDIR:/=)"
-override DATAFLAGS += --build-dir="$(BUILDDIR:/=)"
-override STACKFLAGS += --build-dir="$(BUILDDIR:/=)"
-override STRUCTSFLAGS += --build-dir="$(BUILDDIR:/=)"
-override COVERAGEFLAGS += --build-dir="$(BUILDDIR:/=)"
+ifneq ($(VALGRIND),valgrind)
+TESTFLAGS += --valgrind-path="$(VALGRIND)"
+BENCHFLAGS += --valgrind-path="$(VALGRIND)"
endif
-ifneq ($(NM),nm)
-override CODEFLAGS += --nm-tool="$(NM)"
-override DATAFLAGS += --nm-tool="$(NM)"
+ifneq ($(PERF),perf)
+TESTFLAGS += --perf-path="$(PERF)"
+BENCHFLAGS += --perf-path="$(PERF)"
endif
-ifneq ($(OBJDUMP),objdump)
-override STRUCTSFLAGS += --objdump-tool="$(OBJDUMP)"
+
+# this is a bit of a hack, but we want to make sure the BUILDDIR
+# directory structure is correct before we run any commands
+ifneq ($(BUILDDIR),.)
+$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \
+ $(addprefix $(BUILDDIR)/,$(dir \
+ $(SRC) \
+ $(TESTS) \
+ $(TEST_SRC) \
+ $(BENCHES) \
+ $(BENCH_SRC)))))
endif
# commands
+
+## Build littlefs
.PHONY: all build
all build: $(TARGET)
+## Build assembly files
.PHONY: asm
asm: $(ASM)
+## Find the total size
.PHONY: size
size: $(OBJ)
$(SIZE) -t $^
+## Generate a ctags file
.PHONY: tags
tags:
$(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC)
-.PHONY: calls
-calls: $(CGI)
- ./scripts/calls.py $^ $(CALLSFLAGS)
-
-.PHONY: test
-test:
- ./scripts/test.py $(TESTFLAGS)
-.SECONDEXPANSION:
-test%: tests/test$$(firstword $$(subst \#, ,%)).toml
- ./scripts/test.py $@ $(TESTFLAGS)
+## Show this help text
+.PHONY: help
+help:
+ @$(strip awk '/^## / { \
+ sub(/^## /,""); \
+ getline rule; \
+ while (rule ~ /^(#|\.PHONY|ifdef|ifndef)/) getline rule; \
+ gsub(/:.*/, "", rule); \
+ printf " "" %-25s %s\n", rule, $$0 \
+ }' $(MAKEFILE_LIST))
+## Find the per-function code size
.PHONY: code
-code: $(OBJ)
- ./scripts/code.py $^ -S $(CODEFLAGS)
+code: CODEFLAGS+=-S
+code: $(OBJ) $(BUILDDIR)/lfs.code.csv
+ ./scripts/code.py $(OBJ) $(CODEFLAGS)
+
+## Compare per-function code size
+.PHONY: code-diff
+code-diff: $(OBJ)
+ ./scripts/code.py $^ $(CODEFLAGS) -d $(BUILDDIR)/lfs.code.csv
+## Find the per-function data size
.PHONY: data
-data: $(OBJ)
- ./scripts/data.py $^ -S $(DATAFLAGS)
+data: DATAFLAGS+=-S
+data: $(OBJ) $(BUILDDIR)/lfs.data.csv
+ ./scripts/data.py $(OBJ) $(DATAFLAGS)
+## Compare per-function data size
+.PHONY: data-diff
+data-diff: $(OBJ)
+ ./scripts/data.py $^ $(DATAFLAGS) -d $(BUILDDIR)/lfs.data.csv
+
+## Find the per-function stack usage
.PHONY: stack
-stack: $(CGI)
- ./scripts/stack.py $^ -S $(STACKFLAGS)
+stack: STACKFLAGS+=-S
+stack: $(CI) $(BUILDDIR)/lfs.stack.csv
+ ./scripts/stack.py $(CI) $(STACKFLAGS)
+
+## Compare per-function stack usage
+.PHONY: stack-diff
+stack-diff: $(CI)
+ ./scripts/stack.py $^ $(STACKFLAGS) -d $(BUILDDIR)/lfs.stack.csv
+## Find function sizes
+.PHONY: funcs
+funcs: SUMMARYFLAGS+=-S
+funcs: \
+ $(BUILDDIR)/lfs.code.csv \
+ $(BUILDDIR)/lfs.data.csv \
+ $(BUILDDIR)/lfs.stack.csv
+ $(strip ./scripts/summary.py $^ \
+ -bfunction \
+ -fcode=code_size \
+ -fdata=data_size \
+ -fstack=stack_limit --max=stack \
+ $(SUMMARYFLAGS))
+
+## Compare function sizes
+.PHONY: funcs-diff
+funcs-diff: SHELL=/bin/bash
+funcs-diff: $(OBJ) $(CI)
+ $(strip ./scripts/summary.py \
+ <(./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o-) \
+ <(./scripts/data.py $(OBJ) -q $(DATAFLAGS) -o-) \
+ <(./scripts/stack.py $(CI) -q $(STACKFLAGS) -o-) \
+ -bfunction \
+ -fcode=code_size \
+ -fdata=data_size \
+ -fstack=stack_limit --max=stack \
+ $(SUMMARYFLAGS) -d <(./scripts/summary.py \
+ $(BUILDDIR)/lfs.code.csv \
+ $(BUILDDIR)/lfs.data.csv \
+ $(BUILDDIR)/lfs.stack.csv \
+ -q $(SUMMARYFLAGS) -o-))
+
+## Find struct sizes
.PHONY: structs
-structs: $(OBJ)
- ./scripts/structs.py $^ -S $(STRUCTSFLAGS)
+structs: STRUCTSFLAGS+=-S
+structs: $(OBJ) $(BUILDDIR)/lfs.structs.csv
+ ./scripts/structs.py $(OBJ) $(STRUCTSFLAGS)
+
+## Compare struct sizes
+.PHONY: structs-diff
+structs-diff: $(OBJ)
+ ./scripts/structs.py $^ $(STRUCTSFLAGS) -d $(BUILDDIR)/lfs.structs.csv
+
+## Find the line/branch coverage after a test run
+.PHONY: cov
+cov: COVFLAGS+=-s
+cov: $(GCDA) $(BUILDDIR)/lfs.cov.csv
+ $(strip ./scripts/cov.py $(GCDA) \
+ $(patsubst %,-F%,$(SRC)) \
+ $(COVFLAGS))
+
+## Compare line/branch coverage
+.PHONY: cov-diff
+cov-diff: $(GCDA)
+ $(strip ./scripts/cov.py $^ \
+ $(patsubst %,-F%,$(SRC)) \
+ $(COVFLAGS) -d $(BUILDDIR)/lfs.cov.csv)
+
+## Find the perf results after bench run with YES_PERF
+.PHONY: perf
+perf: PERFFLAGS+=-S
+perf: $(BENCH_PERF) $(BUILDDIR)/lfs.perf.csv
+ $(strip ./scripts/perf.py $(BENCH_PERF) \
+ $(patsubst %,-F%,$(SRC)) \
+ $(PERFFLAGS))
+
+## Compare perf results
+.PHONY: perf-diff
+perf-diff: $(BENCH_PERF)
+ $(strip ./scripts/perf.py $^ \
+ $(patsubst %,-F%,$(SRC)) \
+ $(PERFFLAGS) -d $(BUILDDIR)/lfs.perf.csv)
+
+## Find the perfbd results after a bench run
+.PHONY: perfbd
+perfbd: PERFBDFLAGS+=-S
+perfbd: $(BENCH_TRACE) $(BUILDDIR)/lfs.perfbd.csv
+ $(strip ./scripts/perfbd.py $(BENCH_RUNNER) $(BENCH_TRACE) \
+ $(patsubst %,-F%,$(SRC)) \
+ $(PERFBDFLAGS))
+
+## Compare perfbd results
+.PHONY: perfbd-diff
+perfbd-diff: $(BENCH_TRACE)
+ $(strip ./scripts/perfbd.py $(BENCH_RUNNER) $^ \
+ $(patsubst %,-F%,$(SRC)) \
+ $(PERFBDFLAGS) -d $(BUILDDIR)/lfs.perfbd.csv)
+
+## Find a summary of compile-time sizes
+.PHONY: summary sizes
+summary sizes: \
+ $(BUILDDIR)/lfs.code.csv \
+ $(BUILDDIR)/lfs.data.csv \
+ $(BUILDDIR)/lfs.stack.csv \
+ $(BUILDDIR)/lfs.structs.csv
+ $(strip ./scripts/summary.py $^ \
+ -fcode=code_size \
+ -fdata=data_size \
+ -fstack=stack_limit --max=stack \
+ -fstructs=struct_size \
+ -Y $(SUMMARYFLAGS))
-.PHONY: coverage
-coverage:
- ./scripts/coverage.py $(BUILDDIR)tests/*.toml.info -s $(COVERAGEFLAGS)
+## Compare compile-time sizes
+.PHONY: summary-diff sizes-diff
+summary-diff sizes-diff: SHELL=/bin/bash
+summary-diff sizes-diff: $(OBJ) $(CI)
+ $(strip ./scripts/summary.py \
+ <(./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o-) \
+ <(./scripts/data.py $(OBJ) -q $(DATAFLAGS) -o-) \
+ <(./scripts/stack.py $(CI) -q $(STACKFLAGS) -o-) \
+ <(./scripts/structs.py $(OBJ) -q $(STRUCTSFLAGS) -o-) \
+ -fcode=code_size \
+ -fdata=data_size \
+ -fstack=stack_limit --max=stack \
+ -fstructs=struct_size \
+ -Y $(SUMMARYFLAGS) -d <(./scripts/summary.py \
+ $(BUILDDIR)/lfs.code.csv \
+ $(BUILDDIR)/lfs.data.csv \
+ $(BUILDDIR)/lfs.stack.csv \
+ $(BUILDDIR)/lfs.structs.csv \
+ -q $(SUMMARYFLAGS) -o-))
+
+## Build the test-runner
+.PHONY: test-runner build-test
+ifndef NO_COV
+test-runner build-test: CFLAGS+=--coverage
+endif
+ifdef YES_PERF
+test-runner build-test: CFLAGS+=-fno-omit-frame-pointer
+endif
+ifdef YES_PERFBD
+test-runner build-test: CFLAGS+=-fno-omit-frame-pointer
+endif
+# note we remove some binary-dependent files during compilation,
+# otherwise it's way too easy to end up with outdated results
+test-runner build-test: $(TEST_RUNNER)
+ifndef NO_COV
+ rm -f $(TEST_GCDA)
+endif
+ifdef YES_PERF
+ rm -f $(TEST_PERF)
+endif
+ifdef YES_PERFBD
+ rm -f $(TEST_TRACE)
+endif
+
+## Run the tests, -j enables parallel tests
+.PHONY: test
+test: test-runner
+ ./scripts/test.py $(TEST_RUNNER) $(TESTFLAGS)
+
+## List the tests
+.PHONY: test-list
+test-list: test-runner
+ ./scripts/test.py $(TEST_RUNNER) $(TESTFLAGS) -l
+
+## Summarize the testmarks
+.PHONY: testmarks
+testmarks: SUMMARYFLAGS+=-spassed
+testmarks: $(TEST_CSV) $(BUILDDIR)/lfs.test.csv
+ $(strip ./scripts/summary.py $(TEST_CSV) \
+ -bsuite \
+ -fpassed=test_passed \
+ $(SUMMARYFLAGS))
+
+## Compare testmarks against a previous run
+.PHONY: testmarks-diff
+testmarks-diff: $(TEST_CSV)
+ $(strip ./scripts/summary.py $^ \
+ -bsuite \
+ -fpassed=test_passed \
+ $(SUMMARYFLAGS) -d $(BUILDDIR)/lfs.test.csv)
+
+## Build the bench-runner
+.PHONY: bench-runner build-bench
+ifdef YES_COV
+bench-runner build-bench: CFLAGS+=--coverage
+endif
+ifdef YES_PERF
+bench-runner build-bench: CFLAGS+=-fno-omit-frame-pointer
+endif
+ifndef NO_PERFBD
+bench-runner build-bench: CFLAGS+=-fno-omit-frame-pointer
+endif
+# note we remove some binary-dependent files during compilation,
+# otherwise it's way too easy to end up with outdated results
+bench-runner build-bench: $(BENCH_RUNNER)
+ifdef YES_COV
+ rm -f $(BENCH_GCDA)
+endif
+ifdef YES_PERF
+ rm -f $(BENCH_PERF)
+endif
+ifndef NO_PERFBD
+ rm -f $(BENCH_TRACE)
+endif
+
+## Run the benchmarks, -j enables parallel benchmarks
+.PHONY: bench
+bench: bench-runner
+ ./scripts/bench.py $(BENCH_RUNNER) $(BENCHFLAGS)
+
+## List the benchmarks
+.PHONY: bench-list
+bench-list: bench-runner
+ ./scripts/bench.py $(BENCH_RUNNER) $(BENCHFLAGS) -l
+
+## Summarize the benchmarks
+.PHONY: benchmarks
+benchmarks: SUMMARYFLAGS+=-Serased -Sproged -Sreaded
+benchmarks: $(BENCH_CSV) $(BUILDDIR)/lfs.bench.csv
+ $(strip ./scripts/summary.py $(BENCH_CSV) \
+ -bsuite \
+ -freaded=bench_readed \
+ -fproged=bench_proged \
+ -ferased=bench_erased \
+ $(SUMMARYFLAGS))
+
+## Compare benchmarks against a previous run
+.PHONY: benchmarks-diff
+benchmarks-diff: $(BENCH_CSV)
+ $(strip ./scripts/summary.py $^ \
+ -bsuite \
+ -freaded=bench_readed \
+ -fproged=bench_proged \
+ -ferased=bench_erased \
+ $(SUMMARYFLAGS) -d $(BUILDDIR)/lfs.bench.csv)
-.PHONY: summary
-summary: $(BUILDDIR)lfs.csv
- ./scripts/summary.py -Y $^ $(SUMMARYFLAGS)
# rules
-include $(DEP)
+-include $(TEST_DEP)
.SUFFIXES:
+.SECONDARY:
-$(BUILDDIR)lfs: $(OBJ)
+$(BUILDDIR)/lfs: $(OBJ)
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
-$(BUILDDIR)lfs.a: $(OBJ)
+$(BUILDDIR)/liblfs.a: $(OBJ)
$(AR) rcs $@ $^
-$(BUILDDIR)lfs.csv: $(OBJ) $(CGI)
- ./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o $@
- ./scripts/data.py $(OBJ) -q -m $@ $(DATAFLAGS) -o $@
- ./scripts/stack.py $(CGI) -q -m $@ $(STACKFLAGS) -o $@
- ./scripts/structs.py $(OBJ) -q -m $@ $(STRUCTSFLAGS) -o $@
- $(if $(COVERAGE),\
- ./scripts/coverage.py $(BUILDDIR)tests/*.toml.info \
- -q -m $@ $(COVERAGEFLAGS) -o $@)
+$(BUILDDIR)/lfs.code.csv: $(OBJ)
+ ./scripts/code.py $^ -q $(CODEFLAGS) -o $@
+
+$(BUILDDIR)/lfs.data.csv: $(OBJ)
+ ./scripts/data.py $^ -q $(DATAFLAGS) -o $@
+
+$(BUILDDIR)/lfs.stack.csv: $(CI)
+ ./scripts/stack.py $^ -q $(STACKFLAGS) -o $@
+
+$(BUILDDIR)/lfs.structs.csv: $(OBJ)
+ ./scripts/structs.py $^ -q $(STRUCTSFLAGS) -o $@
+
+$(BUILDDIR)/lfs.cov.csv: $(GCDA)
+ $(strip ./scripts/cov.py $^ \
+ $(patsubst %,-F%,$(SRC)) \
+ -q $(COVFLAGS) -o $@)
+
+$(BUILDDIR)/lfs.perf.csv: $(BENCH_PERF)
+ $(strip ./scripts/perf.py $^ \
+ $(patsubst %,-F%,$(SRC)) \
+ -q $(PERFFLAGS) -o $@)
+
+$(BUILDDIR)/lfs.perfbd.csv: $(BENCH_TRACE)
+ $(strip ./scripts/perfbd.py $(BENCH_RUNNER) $^ \
+ $(patsubst %,-F%,$(SRC)) \
+ -q $(PERFBDFLAGS) -o $@)
+
+$(BUILDDIR)/lfs.test.csv: $(TEST_CSV)
+ cp $^ $@
+
+$(BUILDDIR)/lfs.bench.csv: $(BENCH_CSV)
+ cp $^ $@
-$(BUILDDIR)%.o: %.c
- $(CC) -c -MMD $(CFLAGS) $< -o $@
+$(BUILDDIR)/runners/test_runner: $(TEST_OBJ)
+ $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
+
+$(BUILDDIR)/runners/bench_runner: $(BENCH_OBJ)
+ $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
+
+# our main build rule generates .o, .d, and .ci files, the latter
+# used for stack analysis
+$(BUILDDIR)/%.o $(BUILDDIR)/%.ci: %.c
+ $(CC) -c -MMD $(CFLAGS) $< -o $(BUILDDIR)/$*.o
+
+$(BUILDDIR)/%.o $(BUILDDIR)/%.ci: $(BUILDDIR)/%.c
+ $(CC) -c -MMD $(CFLAGS) $< -o $(BUILDDIR)/$*.o
-$(BUILDDIR)%.s: %.c
+$(BUILDDIR)/%.s: %.c
$(CC) -S $(CFLAGS) $< -o $@
-# gcc depends on the output file for intermediate file names, so
-# we can't omit to .o output. We also need to serialize with the
-# normal .o rule because otherwise we can end up with multiprocess
-# problems with two instances of gcc modifying the same .o
-$(BUILDDIR)%.ci: %.c | $(BUILDDIR)%.o
- $(CC) -c -MMD -fcallgraph-info=su $(CFLAGS) $< -o $|
+$(BUILDDIR)/%.c: %.a.c
+ ./scripts/prettyasserts.py -p LFS_ASSERT $< -o $@
+
+$(BUILDDIR)/%.c: $(BUILDDIR)/%.a.c
+ ./scripts/prettyasserts.py -p LFS_ASSERT $< -o $@
+
+$(BUILDDIR)/%.t.a.c: %.toml
+ ./scripts/test.py -c $< $(TESTCFLAGS) -o $@
+
+$(BUILDDIR)/%.t.a.c: %.c $(TESTS)
+ ./scripts/test.py -c $(TESTS) -s $< $(TESTCFLAGS) -o $@
+
+$(BUILDDIR)/%.b.a.c: %.toml
+ ./scripts/bench.py -c $< $(BENCHCFLAGS) -o $@
+
+$(BUILDDIR)/%.b.a.c: %.c $(BENCHES)
+ ./scripts/bench.py -c $(BENCHES) -s $< $(BENCHCFLAGS) -o $@
-# clean everything
+## Clean everything
.PHONY: clean
clean:
- rm -f $(BUILDDIR)lfs
- rm -f $(BUILDDIR)lfs.a
- rm -f $(BUILDDIR)lfs.csv
+ rm -f $(BUILDDIR)/lfs
+ rm -f $(BUILDDIR)/liblfs.a
+ rm -f $(BUILDDIR)/lfs.code.csv
+ rm -f $(BUILDDIR)/lfs.data.csv
+ rm -f $(BUILDDIR)/lfs.stack.csv
+ rm -f $(BUILDDIR)/lfs.structs.csv
+ rm -f $(BUILDDIR)/lfs.cov.csv
+ rm -f $(BUILDDIR)/lfs.perf.csv
+ rm -f $(BUILDDIR)/lfs.perfbd.csv
+ rm -f $(BUILDDIR)/lfs.test.csv
+ rm -f $(BUILDDIR)/lfs.bench.csv
rm -f $(OBJ)
- rm -f $(CGI)
rm -f $(DEP)
rm -f $(ASM)
- rm -f $(BUILDDIR)tests/*.toml.*
+ rm -f $(CI)
+ rm -f $(TEST_RUNNER)
+ rm -f $(TEST_A)
+ rm -f $(TEST_C)
+ rm -f $(TEST_OBJ)
+ rm -f $(TEST_DEP)
+ rm -f $(TEST_CI)
+ rm -f $(TEST_GCNO)
+ rm -f $(TEST_GCDA)
+ rm -f $(TEST_PERF)
+ rm -f $(TEST_TRACE)
+ rm -f $(TEST_CSV)
+ rm -f $(BENCH_RUNNER)
+ rm -f $(BENCH_A)
+ rm -f $(BENCH_C)
+ rm -f $(BENCH_OBJ)
+ rm -f $(BENCH_DEP)
+ rm -f $(BENCH_CI)
+ rm -f $(BENCH_GCNO)
+ rm -f $(BENCH_GCDA)
+ rm -f $(BENCH_PERF)
+ rm -f $(BENCH_TRACE)
+ rm -f $(BENCH_CSV)
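A few example invocations of the reworked Makefile may help; the targets come from the rules above, while the flag values are illustrative:

```sh
make                        # build liblfs.a
make help                   # list targets scraped from the ## comments
make test -j4               # build the test-runner and run tests in parallel
TESTFLAGS=-Pnone make test  # run tests without simulated powerloss
make bench                  # build the bench-runner and run benchmarks
make summary                # code/data/stack/structs sizes in one table
make code-diff              # compare code size against a saved lfs.code.csv
make BUILDDIR=build test    # keep generated files out-of-tree
```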
diff --git a/README.md b/README.md
index 32b3793f..3afddfdd 100644
--- a/README.md
+++ b/README.md
@@ -226,6 +226,13 @@ License Identifiers that are here available: http://spdx.org/licenses/
to create images of the filesystem on your PC. Check if littlefs will fit
your needs, create images for a later download to the target memory or
inspect the content of a binary image of the target memory.
+
+- [littlefs2-rust] - A Rust wrapper for littlefs. This project allows you
+  to use littlefs through a Rust-friendly API, reaping the benefits of Rust's
+  memory safety and other guarantees.
+
+- [littlefs-disk-img-viewer] - A memory-efficient web application for viewing
+ littlefs disk images in your web browser.
- [mklfs] - A command line tool built by the [Lua RTOS] guys for making
littlefs images from a host PC. Supports Windows, Mac OS, and Linux.
@@ -243,8 +250,12 @@ License Identifiers that are here available: http://spdx.org/licenses/
MCUs. It offers static wear-leveling and power-resilience with only a fixed
_O(|address|)_ pointer structure stored on each block and in RAM.
+- [chamelon] - A pure-OCaml implementation of (most of) littlefs, designed for
+ use with the MirageOS library operating system project. It is interoperable
+ with the reference implementation, with some caveats.
[BSD-3-Clause]: https://spdx.org/licenses/BSD-3-Clause.html
+[littlefs-disk-img-viewer]: https://github.com/tniessen/littlefs-disk-img-viewer
[littlefs-fuse]: https://github.com/geky/littlefs-fuse
[FUSE]: https://github.com/libfuse/libfuse
[littlefs-js]: https://github.com/geky/littlefs-js
@@ -256,3 +267,5 @@ License Identifiers that are here available: http://spdx.org/licenses/
[SPIFFS]: https://github.com/pellepl/spiffs
[Dhara]: https://github.com/dlbeer/dhara
[littlefs-python]: https://pypi.org/project/littlefs-python/
+[littlefs2-rust]: https://crates.io/crates/littlefs2
+[chamelon]: https://github.com/yomimono/chamelon
diff --git a/SPEC.md b/SPEC.md
index 3663ea54..2370ea6d 100644
--- a/SPEC.md
+++ b/SPEC.md
@@ -1,10 +1,10 @@
## littlefs technical specification
-This is the technical specification of the little filesystem. This document
-covers the technical details of how the littlefs is stored on disk for
-introspection and tooling. This document assumes you are familiar with the
-design of the littlefs, for more info on how littlefs works check
-out [DESIGN.md](DESIGN.md).
+This is the technical specification of the little filesystem with on-disk
+version lfs2.1. This document covers the technical details of how the littlefs
+is stored on disk for introspection and tooling. This document assumes you are
+familiar with the design of the littlefs; for more info on how littlefs works,
+check out [DESIGN.md](DESIGN.md).
```
| | | .---._____
@@ -133,12 +133,6 @@ tags XORed together, starting with `0xffffffff`.
'-------------------' '-------------------'
```
-One last thing to note before we get into the details around tag encoding. Each
-tag contains a valid bit used to indicate if the tag and containing commit is
-valid. This valid bit is the first bit found in the tag and the commit and can
-be used to tell if we've attempted to write to the remaining space in the
-block.
-
Here's a more complete example of metadata block containing 4 entries:
```
@@ -191,6 +185,53 @@ Here's a more complete example of metadata block containing 4 entries:
'---- most recent D
```
+Two things to note before we get into the details around tag encoding:
+
+1. Each tag contains a valid bit used to indicate if the tag and containing
+   commit are valid. After XORing, this bit should always be zero.
+
+ At the end of each commit, the valid bit of the previous tag is XORed
+ with the lowest bit in the type field of the CRC tag. This allows
+ the CRC tag to force the next commit to fail the valid bit test if it
+ has not yet been written to.
+
+2. The valid bit alone is not enough info to know if the next commit has been
+   erased. We don't know the order in which bits are programmed in a program
+   block, so it's possible that the next commit had an attempted program that
+   left the valid bit unchanged.
+
+ To ensure we only ever program erased bytes, each commit can contain an
+ optional forward-CRC (FCRC). An FCRC contains a checksum of some amount of
+ bytes in the next commit at the time it was erased.
+
+ ```
+ .-------------------. \ \
+ | revision count | | |
+ |-------------------| | |
+ | metadata | | |
+ | | +---. +-- current commit
+ | | | | |
+ |-------------------| | | |
+ | FCRC ---|-. | |
+ |-------------------| / | | |
+ | CRC -----|-' /
+ |-------------------| |
+ | padding | | padding (doesn't need CRC)
+ | | |
+ |-------------------| \ | \
+ | erased? | +-' |
+ | | | | +-- next commit
+ | v | / |
+ | | /
+ | |
+ '-------------------'
+ ```
+
+ If the FCRC is missing or the checksum does not match, we must assume a
+ commit was attempted but failed due to power-loss.
+
+ Note that end-of-block commits do not need an FCRC.
+
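+A minimal sketch of the valid bit test (here `prev_tag` starts as
+`0xffffffff` and is the previously decoded tag thereafter; `lfs_frombe32`
+is littlefs's big-endian load helper):
+
+```
+// tags are stored big-endian and XORed with the preceding tag
+uint32_t tag = lfs_frombe32(raw_tag) ^ prev_tag;
+// after decoding, a valid tag has its valid bit (the MSB) clear
+bool valid = !(tag & 0x80000000);
+```
+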
## Metadata tags
So in littlefs, 32-bit tags describe every type of metadata. And this means
@@ -785,3 +826,41 @@ CRC fields:
are made about the contents.
---
+#### `0x5ff` LFS_TYPE_FCRC
+
+Added in lfs2.1, the optional FCRC tag contains a checksum of some amount of
+bytes in the next commit at the time it was erased. This allows us to ensure
+that we only ever program erased bytes, even if a previous commit failed due
+to power-loss.
+
+When programming a commit, the FCRC size must be at least as large as the
+program block size. However, the program block size is not saved on disk, and
+can change between mounts, so the FCRC size on disk may differ from the
+current program block size.
+
+If the FCRC is missing or the checksum does not match, we must assume a
+commit was attempted but failed due to power-loss.
+
+Layout of the FCRC tag:
+
+```
+ tag data
+[-- 32 --][-- 32 --|-- 32 --]
+[1|- 11 -| 10 | 10 ][-- 32 --|-- 32 --]
+ ^ ^ ^ ^ ^- fcrc size ^- fcrc
+ | | | '- size (8)
+ | | '------ id (0x3ff)
+ | '------------ type (0x5ff)
+ '----------------- valid bit
+```
+
+FCRC fields:
+
+1. **FCRC size (32-bits)** - Number of bytes after this commit's CRC tag's
+ padding to include in the FCRC.
+
+2. **FCRC (32-bits)** - CRC of the bytes after this commit's CRC tag's padding
+ when erased. Like the CRC tag, this uses a CRC-32 with a polynomial of
+ `0x04c11db7` initialized with `0xffffffff`.
+
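+A minimal sketch of this checksum as a bitwise loop (0xedb88320 is the
+reflected form of the polynomial 0x04c11db7):
+
+```
+#include <stddef.h>
+#include <stdint.h>
+
+uint32_t crc32(uint32_t crc, const void *buffer, size_t size) {
+    const uint8_t *data = buffer;
+    for (size_t i = 0; i < size; i++) {
+        crc ^= data[i];
+        for (int j = 0; j < 8; j++) {
+            // shift right, folding in the polynomial if the low bit is set
+            crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
+        }
+    }
+    return crc;
+}
+```
+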
+---
diff --git a/bd/lfs_emubd.c b/bd/lfs_emubd.c
new file mode 100644
index 00000000..29925538
--- /dev/null
+++ b/bd/lfs_emubd.c
@@ -0,0 +1,662 @@
+/*
+ * Emulating block device, wraps filebd and rambd while providing a bunch
+ * of hooks for testing littlefs in various conditions.
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 199309L
+#endif
+
+#include "bd/lfs_emubd.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <unistd.h>
+#include <time.h>
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+
+// access to lazily-allocated/copy-on-write blocks
+//
+// Note we can only modify a block if we have exclusive access to it (rc == 1)
+//
+
+static lfs_emubd_block_t *lfs_emubd_incblock(lfs_emubd_block_t *block) {
+ if (block) {
+ block->rc += 1;
+ }
+ return block;
+}
+
+static void lfs_emubd_decblock(lfs_emubd_block_t *block) {
+ if (block) {
+ block->rc -= 1;
+ if (block->rc == 0) {
+ free(block);
+ }
+ }
+}
+
+static lfs_emubd_block_t *lfs_emubd_mutblock(
+ const struct lfs_config *cfg,
+ lfs_emubd_block_t **block) {
+ lfs_emubd_block_t *block_ = *block;
+ if (block_ && block_->rc == 1) {
+ // rc == 1? can modify
+ return block_;
+
+ } else if (block_) {
+ // rc > 1? need to create a copy
+ lfs_emubd_block_t *nblock = malloc(
+ sizeof(lfs_emubd_block_t) + cfg->block_size);
+ if (!nblock) {
+ return NULL;
+ }
+
+ memcpy(nblock, block_,
+ sizeof(lfs_emubd_block_t) + cfg->block_size);
+ nblock->rc = 1;
+
+ lfs_emubd_decblock(block_);
+ *block = nblock;
+ return nblock;
+
+ } else {
+ // no block? need to allocate
+ lfs_emubd_block_t *nblock = malloc(
+ sizeof(lfs_emubd_block_t) + cfg->block_size);
+ if (!nblock) {
+ return NULL;
+ }
+
+ nblock->rc = 1;
+ nblock->wear = 0;
+
+        // zero for consistency, or the erase value if erases are simulated
+ lfs_emubd_t *bd = cfg->context;
+ memset(nblock->data,
+ (bd->cfg->erase_value != -1) ? bd->cfg->erase_value : 0,
+ cfg->block_size);
+
+ *block = nblock;
+ return nblock;
+ }
+}
+
+
+// emubd create/destroy
+
+int lfs_emubd_createcfg(const struct lfs_config *cfg, const char *path,
+ const struct lfs_emubd_config *bdcfg) {
+ LFS_EMUBD_TRACE("lfs_emubd_createcfg(%p {.context=%p, "
+ ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
+ ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
+ ".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
+ "\"%s\", "
+ "%p {.erase_value=%"PRId32", .erase_cycles=%"PRIu32", "
+ ".badblock_behavior=%"PRIu8", .power_cycles=%"PRIu32", "
+ ".powerloss_behavior=%"PRIu8", .powerloss_cb=%p, "
+ ".powerloss_data=%p, .track_branches=%d})",
+ (void*)cfg, cfg->context,
+ (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
+ (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
+ cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
+ path, (void*)bdcfg, bdcfg->erase_value, bdcfg->erase_cycles,
+ bdcfg->badblock_behavior, bdcfg->power_cycles,
+ bdcfg->powerloss_behavior, (void*)(uintptr_t)bdcfg->powerloss_cb,
+ bdcfg->powerloss_data, bdcfg->track_branches);
+ lfs_emubd_t *bd = cfg->context;
+ bd->cfg = bdcfg;
+
+ // allocate our block array, all blocks start as uninitialized
+ bd->blocks = malloc(cfg->block_count * sizeof(lfs_emubd_block_t*));
+ if (!bd->blocks) {
+ LFS_EMUBD_TRACE("lfs_emubd_createcfg -> %d", LFS_ERR_NOMEM);
+ return LFS_ERR_NOMEM;
+ }
+ memset(bd->blocks, 0, cfg->block_count * sizeof(lfs_emubd_block_t*));
+
+ // setup testing things
+ bd->readed = 0;
+ bd->proged = 0;
+ bd->erased = 0;
+ bd->power_cycles = bd->cfg->power_cycles;
+ bd->disk = NULL;
+
+ if (bd->cfg->disk_path) {
+ bd->disk = malloc(sizeof(lfs_emubd_disk_t));
+ if (!bd->disk) {
+ LFS_EMUBD_TRACE("lfs_emubd_createcfg -> %d", LFS_ERR_NOMEM);
+ return LFS_ERR_NOMEM;
+ }
+ bd->disk->rc = 1;
+ bd->disk->scratch = NULL;
+
+ #ifdef _WIN32
+ bd->disk->fd = open(bd->cfg->disk_path,
+ O_RDWR | O_CREAT | O_BINARY, 0666);
+ #else
+ bd->disk->fd = open(bd->cfg->disk_path,
+ O_RDWR | O_CREAT, 0666);
+ #endif
+ if (bd->disk->fd < 0) {
+ int err = -errno;
+ LFS_EMUBD_TRACE("lfs_emubd_create -> %d", err);
+ return err;
+ }
+
+        // if we're emulating erase values, we can keep a scratch block in
+        // memory holding just the erase state to speed up emulated erases
+ if (bd->cfg->erase_value != -1) {
+ bd->disk->scratch = malloc(cfg->block_size);
+ if (!bd->disk->scratch) {
+ LFS_EMUBD_TRACE("lfs_emubd_createcfg -> %d", LFS_ERR_NOMEM);
+ return LFS_ERR_NOMEM;
+ }
+ memset(bd->disk->scratch,
+ bd->cfg->erase_value,
+ cfg->block_size);
+
+            // go ahead and erase all of the disk; otherwise the file will
+            // not match our internal representation
+ for (size_t i = 0; i < cfg->block_count; i++) {
+ ssize_t res = write(bd->disk->fd,
+ bd->disk->scratch,
+ cfg->block_size);
+ if (res < 0) {
+ int err = -errno;
+ LFS_EMUBD_TRACE("lfs_emubd_create -> %d", err);
+ return err;
+ }
+ }
+ }
+ }
+
+ LFS_EMUBD_TRACE("lfs_emubd_createcfg -> %d", 0);
+ return 0;
+}
+
+int lfs_emubd_create(const struct lfs_config *cfg, const char *path) {
+ LFS_EMUBD_TRACE("lfs_emubd_create(%p {.context=%p, "
+ ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
+ ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
+ ".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
+ "\"%s\")",
+ (void*)cfg, cfg->context,
+ (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
+ (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
+ cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
+ path);
+ static const struct lfs_emubd_config defaults = {.erase_value=-1};
+ int err = lfs_emubd_createcfg(cfg, path, &defaults);
+ LFS_EMUBD_TRACE("lfs_emubd_create -> %d", err);
+ return err;
+}
+
+int lfs_emubd_destroy(const struct lfs_config *cfg) {
+ LFS_EMUBD_TRACE("lfs_emubd_destroy(%p)", (void*)cfg);
+ lfs_emubd_t *bd = cfg->context;
+
+ // decrement reference counts
+ for (lfs_block_t i = 0; i < cfg->block_count; i++) {
+ lfs_emubd_decblock(bd->blocks[i]);
+ }
+ free(bd->blocks);
+
+ // clean up other resources
+ if (bd->disk) {
+ bd->disk->rc -= 1;
+ if (bd->disk->rc == 0) {
+ close(bd->disk->fd);
+ free(bd->disk->scratch);
+ free(bd->disk);
+ }
+ }
+
+ LFS_EMUBD_TRACE("lfs_emubd_destroy -> %d", 0);
+ return 0;
+}
+
+
+
+// block device API
+
+int lfs_emubd_read(const struct lfs_config *cfg, lfs_block_t block,
+ lfs_off_t off, void *buffer, lfs_size_t size) {
+ LFS_EMUBD_TRACE("lfs_emubd_read(%p, "
+ "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
+ (void*)cfg, block, off, buffer, size);
+ lfs_emubd_t *bd = cfg->context;
+
+ // check if read is valid
+ LFS_ASSERT(block < cfg->block_count);
+ LFS_ASSERT(off % cfg->read_size == 0);
+ LFS_ASSERT(size % cfg->read_size == 0);
+ LFS_ASSERT(off+size <= cfg->block_size);
+
+ // get the block
+ const lfs_emubd_block_t *b = bd->blocks[block];
+ if (b) {
+ // block bad?
+ if (bd->cfg->erase_cycles && b->wear >= bd->cfg->erase_cycles &&
+ bd->cfg->badblock_behavior == LFS_EMUBD_BADBLOCK_READERROR) {
+ LFS_EMUBD_TRACE("lfs_emubd_read -> %d", LFS_ERR_CORRUPT);
+ return LFS_ERR_CORRUPT;
+ }
+
+ // read data
+ memcpy(buffer, &b->data[off], size);
+ } else {
+        // zero for consistency, or the erase value if erases are simulated
+ memset(buffer,
+ (bd->cfg->erase_value != -1) ? bd->cfg->erase_value : 0,
+ size);
+ }
+
+ // track reads
+ bd->readed += size;
+ if (bd->cfg->read_sleep) {
+ int err = nanosleep(&(struct timespec){
+ .tv_sec=bd->cfg->read_sleep/1000000000,
+ .tv_nsec=bd->cfg->read_sleep%1000000000},
+ NULL);
+ if (err) {
+ err = -errno;
+ LFS_EMUBD_TRACE("lfs_emubd_read -> %d", err);
+ return err;
+ }
+ }
+
+ LFS_EMUBD_TRACE("lfs_emubd_read -> %d", 0);
+ return 0;
+}
+
+int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block,
+ lfs_off_t off, const void *buffer, lfs_size_t size) {
+ LFS_EMUBD_TRACE("lfs_emubd_prog(%p, "
+ "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
+ (void*)cfg, block, off, buffer, size);
+ lfs_emubd_t *bd = cfg->context;
+
+ // check if write is valid
+ LFS_ASSERT(block < cfg->block_count);
+ LFS_ASSERT(off % cfg->prog_size == 0);
+ LFS_ASSERT(size % cfg->prog_size == 0);
+ LFS_ASSERT(off+size <= cfg->block_size);
+
+ // get the block
+ lfs_emubd_block_t *b = lfs_emubd_mutblock(cfg, &bd->blocks[block]);
+ if (!b) {
+ LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", LFS_ERR_NOMEM);
+ return LFS_ERR_NOMEM;
+ }
+
+ // block bad?
+ if (bd->cfg->erase_cycles && b->wear >= bd->cfg->erase_cycles) {
+ if (bd->cfg->badblock_behavior ==
+ LFS_EMUBD_BADBLOCK_PROGERROR) {
+ LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", LFS_ERR_CORRUPT);
+ return LFS_ERR_CORRUPT;
+ } else if (bd->cfg->badblock_behavior ==
+ LFS_EMUBD_BADBLOCK_PROGNOOP ||
+ bd->cfg->badblock_behavior ==
+ LFS_EMUBD_BADBLOCK_ERASENOOP) {
+ LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", 0);
+ return 0;
+ }
+ }
+
+ // were we erased properly?
+ if (bd->cfg->erase_value != -1) {
+ for (lfs_off_t i = 0; i < size; i++) {
+ LFS_ASSERT(b->data[off+i] == bd->cfg->erase_value);
+ }
+ }
+
+ // prog data
+ memcpy(&b->data[off], buffer, size);
+
+ // mirror to disk file?
+ if (bd->disk) {
+ off_t res1 = lseek(bd->disk->fd,
+ (off_t)block*cfg->block_size + (off_t)off,
+ SEEK_SET);
+ if (res1 < 0) {
+ int err = -errno;
+ LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", err);
+ return err;
+ }
+
+ ssize_t res2 = write(bd->disk->fd, buffer, size);
+ if (res2 < 0) {
+ int err = -errno;
+ LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", err);
+ return err;
+ }
+ }
+
+ // track progs
+ bd->proged += size;
+ if (bd->cfg->prog_sleep) {
+ int err = nanosleep(&(struct timespec){
+ .tv_sec=bd->cfg->prog_sleep/1000000000,
+ .tv_nsec=bd->cfg->prog_sleep%1000000000},
+ NULL);
+ if (err) {
+ err = -errno;
+ LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", err);
+ return err;
+ }
+ }
+
+ // lose power?
+ if (bd->power_cycles > 0) {
+ bd->power_cycles -= 1;
+ if (bd->power_cycles == 0) {
+ // simulate power loss
+ bd->cfg->powerloss_cb(bd->cfg->powerloss_data);
+ }
+ }
+
+ LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", 0);
+ return 0;
+}
+
+int lfs_emubd_erase(const struct lfs_config *cfg, lfs_block_t block) {
+ LFS_EMUBD_TRACE("lfs_emubd_erase(%p, 0x%"PRIx32" (%"PRIu32"))",
+ (void*)cfg, block, cfg->block_size);
+ lfs_emubd_t *bd = cfg->context;
+
+ // check if erase is valid
+ LFS_ASSERT(block < cfg->block_count);
+
+ // get the block
+ lfs_emubd_block_t *b = lfs_emubd_mutblock(cfg, &bd->blocks[block]);
+ if (!b) {
+ LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", LFS_ERR_NOMEM);
+ return LFS_ERR_NOMEM;
+ }
+
+ // block bad?
+ if (bd->cfg->erase_cycles) {
+ if (b->wear >= bd->cfg->erase_cycles) {
+ if (bd->cfg->badblock_behavior ==
+ LFS_EMUBD_BADBLOCK_ERASEERROR) {
+ LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", LFS_ERR_CORRUPT);
+ return LFS_ERR_CORRUPT;
+ } else if (bd->cfg->badblock_behavior ==
+ LFS_EMUBD_BADBLOCK_ERASENOOP) {
+ LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", 0);
+ return 0;
+ }
+ } else {
+ // mark wear
+ b->wear += 1;
+ }
+ }
+
+ // emulate an erase value?
+ if (bd->cfg->erase_value != -1) {
+ memset(b->data, bd->cfg->erase_value, cfg->block_size);
+
+ // mirror to disk file?
+ if (bd->disk) {
+ off_t res1 = lseek(bd->disk->fd,
+ (off_t)block*cfg->block_size,
+ SEEK_SET);
+ if (res1 < 0) {
+ int err = -errno;
+ LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", err);
+ return err;
+ }
+
+ ssize_t res2 = write(bd->disk->fd,
+ bd->disk->scratch,
+ cfg->block_size);
+ if (res2 < 0) {
+ int err = -errno;
+ LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", err);
+ return err;
+ }
+ }
+ }
+
+ // track erases
+ bd->erased += cfg->block_size;
+ if (bd->cfg->erase_sleep) {
+ int err = nanosleep(&(struct timespec){
+ .tv_sec=bd->cfg->erase_sleep/1000000000,
+ .tv_nsec=bd->cfg->erase_sleep%1000000000},
+ NULL);
+ if (err) {
+ err = -errno;
+ LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", err);
+ return err;
+ }
+ }
+
+ // lose power?
+ if (bd->power_cycles > 0) {
+ bd->power_cycles -= 1;
+ if (bd->power_cycles == 0) {
+ // simulate power loss
+ bd->cfg->powerloss_cb(bd->cfg->powerloss_data);
+ }
+ }
+
+ LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", 0);
+ return 0;
+}
+
+int lfs_emubd_sync(const struct lfs_config *cfg) {
+ LFS_EMUBD_TRACE("lfs_emubd_sync(%p)", (void*)cfg);
+
+ // do nothing
+ (void)cfg;
+
+ LFS_EMUBD_TRACE("lfs_emubd_sync -> %d", 0);
+ return 0;
+}
+
+/// Additional extended API for driving test features ///
+
+static int lfs_emubd_rawcrc(const struct lfs_config *cfg,
+ lfs_block_t block, uint32_t *crc) {
+ lfs_emubd_t *bd = cfg->context;
+
+ // check if crc is valid
+ LFS_ASSERT(block < cfg->block_count);
+
+ // crc the block
+ uint32_t crc_ = 0xffffffff;
+ const lfs_emubd_block_t *b = bd->blocks[block];
+ if (b) {
+ crc_ = lfs_crc(crc_, b->data, cfg->block_size);
+ } else {
+ uint8_t erase_value = (bd->cfg->erase_value != -1)
+ ? bd->cfg->erase_value
+ : 0;
+ for (lfs_size_t i = 0; i < cfg->block_size; i++) {
+ crc_ = lfs_crc(crc_, &erase_value, 1);
+ }
+ }
+ *crc = 0xffffffff ^ crc_;
+
+ return 0;
+}
+
+int lfs_emubd_crc(const struct lfs_config *cfg,
+ lfs_block_t block, uint32_t *crc) {
+ LFS_EMUBD_TRACE("lfs_emubd_crc(%p, %"PRIu32", %p)",
+ (void*)cfg, block, crc);
+ int err = lfs_emubd_rawcrc(cfg, block, crc);
+ LFS_EMUBD_TRACE("lfs_emubd_crc -> %d", err);
+ return err;
+}
+
+int lfs_emubd_bdcrc(const struct lfs_config *cfg, uint32_t *crc) {
+ LFS_EMUBD_TRACE("lfs_emubd_bdcrc(%p, %p)", (void*)cfg, crc);
+
+ uint32_t crc_ = 0xffffffff;
+ for (lfs_block_t i = 0; i < cfg->block_count; i++) {
+ uint32_t i_crc;
+ int err = lfs_emubd_rawcrc(cfg, i, &i_crc);
+ if (err) {
+ LFS_EMUBD_TRACE("lfs_emubd_bdcrc -> %d", err);
+ return err;
+ }
+
+ crc_ = lfs_crc(crc_, &i_crc, sizeof(uint32_t));
+ }
+ *crc = 0xffffffff ^ crc_;
+
+ LFS_EMUBD_TRACE("lfs_emubd_bdcrc -> %d", 0);
+ return 0;
+}
+
+lfs_emubd_sio_t lfs_emubd_readed(const struct lfs_config *cfg) {
+ LFS_EMUBD_TRACE("lfs_emubd_readed(%p)", (void*)cfg);
+ lfs_emubd_t *bd = cfg->context;
+ LFS_EMUBD_TRACE("lfs_emubd_readed -> %"PRIu64, bd->readed);
+ return bd->readed;
+}
+
+lfs_emubd_sio_t lfs_emubd_proged(const struct lfs_config *cfg) {
+ LFS_EMUBD_TRACE("lfs_emubd_proged(%p)", (void*)cfg);
+ lfs_emubd_t *bd = cfg->context;
+ LFS_EMUBD_TRACE("lfs_emubd_proged -> %"PRIu64, bd->proged);
+ return bd->proged;
+}
+
+lfs_emubd_sio_t lfs_emubd_erased(const struct lfs_config *cfg) {
+ LFS_EMUBD_TRACE("lfs_emubd_erased(%p)", (void*)cfg);
+ lfs_emubd_t *bd = cfg->context;
+ LFS_EMUBD_TRACE("lfs_emubd_erased -> %"PRIu64, bd->erased);
+ return bd->erased;
+}
+
+int lfs_emubd_setreaded(const struct lfs_config *cfg, lfs_emubd_io_t readed) {
+ LFS_EMUBD_TRACE("lfs_emubd_setreaded(%p, %"PRIu64")", (void*)cfg, readed);
+ lfs_emubd_t *bd = cfg->context;
+ bd->readed = readed;
+ LFS_EMUBD_TRACE("lfs_emubd_setreaded -> %d", 0);
+ return 0;
+}
+
+int lfs_emubd_setproged(const struct lfs_config *cfg, lfs_emubd_io_t proged) {
+ LFS_EMUBD_TRACE("lfs_emubd_setproged(%p, %"PRIu64")", (void*)cfg, proged);
+ lfs_emubd_t *bd = cfg->context;
+ bd->proged = proged;
+ LFS_EMUBD_TRACE("lfs_emubd_setproged -> %d", 0);
+ return 0;
+}
+
+int lfs_emubd_seterased(const struct lfs_config *cfg, lfs_emubd_io_t erased) {
+ LFS_EMUBD_TRACE("lfs_emubd_seterased(%p, %"PRIu64")", (void*)cfg, erased);
+ lfs_emubd_t *bd = cfg->context;
+ bd->erased = erased;
+ LFS_EMUBD_TRACE("lfs_emubd_seterased -> %d", 0);
+ return 0;
+}
+
+lfs_emubd_swear_t lfs_emubd_wear(const struct lfs_config *cfg,
+ lfs_block_t block) {
+ LFS_EMUBD_TRACE("lfs_emubd_wear(%p, %"PRIu32")", (void*)cfg, block);
+ lfs_emubd_t *bd = cfg->context;
+
+ // check if block is valid
+ LFS_ASSERT(block < cfg->block_count);
+
+ // get the wear
+ lfs_emubd_wear_t wear;
+ const lfs_emubd_block_t *b = bd->blocks[block];
+ if (b) {
+ wear = b->wear;
+ } else {
+ wear = 0;
+ }
+
+ LFS_EMUBD_TRACE("lfs_emubd_wear -> %"PRIi32, wear);
+ return wear;
+}
+
+int lfs_emubd_setwear(const struct lfs_config *cfg,
+ lfs_block_t block, lfs_emubd_wear_t wear) {
+ LFS_EMUBD_TRACE("lfs_emubd_setwear(%p, %"PRIu32", %"PRIi32")",
+ (void*)cfg, block, wear);
+ lfs_emubd_t *bd = cfg->context;
+
+ // check if block is valid
+ LFS_ASSERT(block < cfg->block_count);
+
+ // set the wear
+ lfs_emubd_block_t *b = lfs_emubd_mutblock(cfg, &bd->blocks[block]);
+ if (!b) {
+ LFS_EMUBD_TRACE("lfs_emubd_setwear -> %d", LFS_ERR_NOMEM);
+ return LFS_ERR_NOMEM;
+ }
+ b->wear = wear;
+
+ LFS_EMUBD_TRACE("lfs_emubd_setwear -> %d", 0);
+ return 0;
+}
+
+lfs_emubd_spowercycles_t lfs_emubd_powercycles(
+ const struct lfs_config *cfg) {
+ LFS_EMUBD_TRACE("lfs_emubd_powercycles(%p)", (void*)cfg);
+ lfs_emubd_t *bd = cfg->context;
+
+ LFS_EMUBD_TRACE("lfs_emubd_powercycles -> %"PRIi32, bd->power_cycles);
+ return bd->power_cycles;
+}
+
+int lfs_emubd_setpowercycles(const struct lfs_config *cfg,
+ lfs_emubd_powercycles_t power_cycles) {
+ LFS_EMUBD_TRACE("lfs_emubd_setpowercycles(%p, %"PRIi32")",
+ (void*)cfg, power_cycles);
+ lfs_emubd_t *bd = cfg->context;
+
+ bd->power_cycles = power_cycles;
+
+ LFS_EMUBD_TRACE("lfs_emubd_powercycles -> %d", 0);
+ return 0;
+}
+
+int lfs_emubd_copy(const struct lfs_config *cfg, lfs_emubd_t *copy) {
+ LFS_EMUBD_TRACE("lfs_emubd_copy(%p, %p)", (void*)cfg, (void*)copy);
+ lfs_emubd_t *bd = cfg->context;
+
+ // lazily copy over our block array
+ copy->blocks = malloc(cfg->block_count * sizeof(lfs_emubd_block_t*));
+ if (!copy->blocks) {
+ LFS_EMUBD_TRACE("lfs_emubd_copy -> %d", LFS_ERR_NOMEM);
+ return LFS_ERR_NOMEM;
+ }
+
+ for (size_t i = 0; i < cfg->block_count; i++) {
+ copy->blocks[i] = lfs_emubd_incblock(bd->blocks[i]);
+ }
+
+ // other state
+ copy->readed = bd->readed;
+ copy->proged = bd->proged;
+ copy->erased = bd->erased;
+ copy->power_cycles = bd->power_cycles;
+ copy->disk = bd->disk;
+ if (copy->disk) {
+ copy->disk->rc += 1;
+ }
+ copy->cfg = bd->cfg;
+
+ LFS_EMUBD_TRACE("lfs_emubd_copy -> %d", 0);
+ return 0;
+}
+
diff --git a/bd/lfs_emubd.h b/bd/lfs_emubd.h
new file mode 100644
index 00000000..35a411fe
--- /dev/null
+++ b/bd/lfs_emubd.h
@@ -0,0 +1,233 @@
+/*
+ * Emulating block device, wraps filebd and rambd while providing a bunch
+ * of hooks for testing littlefs in various conditions.
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef LFS_EMUBD_H
+#define LFS_EMUBD_H
+
+#include "lfs.h"
+#include "lfs_util.h"
+#include "bd/lfs_rambd.h"
+#include "bd/lfs_filebd.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+// Block device specific tracing
+#ifndef LFS_EMUBD_TRACE
+#ifdef LFS_EMUBD_YES_TRACE
+#define LFS_EMUBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
+#else
+#define LFS_EMUBD_TRACE(...)
+#endif
+#endif
+
+// Mode determining how "bad-blocks" behave during testing. This simulates
+// some real-world circumstances such as progs not sticking (prog-noop),
+// a readonly disk (erase-noop), and ECC failures (read-error).
+//
+// Note that read-noop is not allowed. Read _must_ return a consistent (but
+// otherwise arbitrary) value on every read.
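+//
+// A hypothetical sketch of forcing a bad block in a test, assuming a
+// nonzero erase_cycles in the emubd config:
+//
+//   // wear >= erase_cycles marks the block as bad
+//   lfs_emubd_setwear(cfg, 0, bdcfg->erase_cycles);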
+typedef enum lfs_emubd_badblock_behavior {
+ LFS_EMUBD_BADBLOCK_PROGERROR,
+ LFS_EMUBD_BADBLOCK_ERASEERROR,
+ LFS_EMUBD_BADBLOCK_READERROR,
+ LFS_EMUBD_BADBLOCK_PROGNOOP,
+ LFS_EMUBD_BADBLOCK_ERASENOOP,
+} lfs_emubd_badblock_behavior_t;
+
+// Mode determining how power-loss behaves during testing. For now this
+// only supports a noop behavior, leaving the data on-disk untouched.
+typedef enum lfs_emubd_powerloss_behavior {
+ LFS_EMUBD_POWERLOSS_NOOP,
+} lfs_emubd_powerloss_behavior_t;
+
+// Type for measuring read/program/erase operations
+typedef uint64_t lfs_emubd_io_t;
+typedef int64_t lfs_emubd_sio_t;
+
+// Type for measuring wear
+typedef uint32_t lfs_emubd_wear_t;
+typedef int32_t lfs_emubd_swear_t;
+
+// Type for tracking power-cycles
+typedef uint32_t lfs_emubd_powercycles_t;
+typedef int32_t lfs_emubd_spowercycles_t;
+
+// Type for delays in nanoseconds
+typedef uint64_t lfs_emubd_sleep_t;
+typedef int64_t lfs_emubd_ssleep_t;
+
+// emubd config, this is required for testing
+struct lfs_emubd_config {
+ // 8-bit erase value to use for simulating erases. -1 does not simulate
+ // erases, which can speed up testing by avoiding the extra block-device
+ // operations to store the erase value.
+ int32_t erase_value;
+
+ // Number of erase cycles before a block becomes "bad". The exact behavior
+ // of bad blocks is controlled by badblock_behavior.
+ uint32_t erase_cycles;
+
+ // The mode determining how bad-blocks fail
+ lfs_emubd_badblock_behavior_t badblock_behavior;
+
+ // Number of write operations (erase/prog) before triggering a power-loss.
+ // power_cycles=0 disables this. The exact behavior of power-loss is
+ // controlled by a combination of powerloss_behavior and powerloss_cb.
+ lfs_emubd_powercycles_t power_cycles;
+
+ // The mode determining how power-loss affects disk
+ lfs_emubd_powerloss_behavior_t powerloss_behavior;
+
+ // Function to call to emulate power-loss. The exact behavior of power-loss
+ // is up to the runner to provide.
+ void (*powerloss_cb)(void*);
+
+ // Data for power-loss callback
+ void *powerloss_data;
+
+    // True to track when power-loss could have occurred. Note this involves
+ // heavy memory usage!
+ bool track_branches;
+
+ // Path to file to use as a mirror of the disk. This provides a way to view
+ // the current state of the block device.
+ const char *disk_path;
+
+    // Artificial read delay in nanoseconds; this serves no purpose other
+    // than slowing down the simulation.
+ lfs_emubd_sleep_t read_sleep;
+
+    // Artificial prog delay in nanoseconds; this serves no purpose other
+    // than slowing down the simulation.
+ lfs_emubd_sleep_t prog_sleep;
+
+    // Artificial erase delay in nanoseconds; this serves no purpose other
+    // than slowing down the simulation.
+ lfs_emubd_sleep_t erase_sleep;
+};
+
+// A reference counted block
+typedef struct lfs_emubd_block {
+ uint32_t rc;
+ lfs_emubd_wear_t wear;
+
+ uint8_t data[];
+} lfs_emubd_block_t;
+
+// Disk mirror
+typedef struct lfs_emubd_disk {
+ uint32_t rc;
+ int fd;
+ uint8_t *scratch;
+} lfs_emubd_disk_t;
+
+// emubd state
+typedef struct lfs_emubd {
+ // array of copy-on-write blocks
+ lfs_emubd_block_t **blocks;
+
+ // some other test state
+ lfs_emubd_io_t readed;
+ lfs_emubd_io_t proged;
+ lfs_emubd_io_t erased;
+ lfs_emubd_powercycles_t power_cycles;
+ lfs_emubd_disk_t *disk;
+
+ const struct lfs_emubd_config *cfg;
+} lfs_emubd_t;
+
+
+/// Block device API ///
+
+// Create an emulating block device using the geometry in lfs_config
+//
+// Note that filebd is used if a path is provided; if path is NULL,
+// emubd will use rambd, which can be much faster.
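+//
+// A minimal usage sketch (hypothetical; the lfs_config geometry fields are
+// assumed to be filled in elsewhere):
+//
+//   lfs_emubd_t bd;
+//   static const struct lfs_emubd_config bdcfg = {.erase_value = -1};
+//   struct lfs_config cfg = {
+//       .context = &bd,
+//       .read  = lfs_emubd_read,
+//       .prog  = lfs_emubd_prog,
+//       .erase = lfs_emubd_erase,
+//       .sync  = lfs_emubd_sync,
+//       // read_size, prog_size, block_size, block_count elided
+//   };
+//   int err = lfs_emubd_createcfg(&cfg, NULL, &bdcfg);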
+int lfs_emubd_create(const struct lfs_config *cfg, const char *path);
+int lfs_emubd_createcfg(const struct lfs_config *cfg, const char *path,
+ const struct lfs_emubd_config *bdcfg);
+
+// Clean up memory associated with block device
+int lfs_emubd_destroy(const struct lfs_config *cfg);
+
+// Read a block
+int lfs_emubd_read(const struct lfs_config *cfg, lfs_block_t block,
+ lfs_off_t off, void *buffer, lfs_size_t size);
+
+// Program a block
+//
+// The block must have previously been erased.
+int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block,
+ lfs_off_t off, const void *buffer, lfs_size_t size);
+
+// Erase a block
+//
+// A block must be erased before being programmed. The
+// state of an erased block is undefined.
+int lfs_emubd_erase(const struct lfs_config *cfg, lfs_block_t block);
+
+// Sync the block device
+int lfs_emubd_sync(const struct lfs_config *cfg);
+
+
+/// Additional extended API for driving test features ///
+
+// A CRC of a block for debugging purposes
+int lfs_emubd_crc(const struct lfs_config *cfg,
+ lfs_block_t block, uint32_t *crc);
+
+// A CRC of the entire block device for debugging purposes
+int lfs_emubd_bdcrc(const struct lfs_config *cfg, uint32_t *crc);
+
+// Get total amount of bytes read
+lfs_emubd_sio_t lfs_emubd_readed(const struct lfs_config *cfg);
+
+// Get total amount of bytes programmed
+lfs_emubd_sio_t lfs_emubd_proged(const struct lfs_config *cfg);
+
+// Get total amount of bytes erased
+lfs_emubd_sio_t lfs_emubd_erased(const struct lfs_config *cfg);
+
+// Manually set amount of bytes read
+int lfs_emubd_setreaded(const struct lfs_config *cfg, lfs_emubd_io_t readed);
+
+// Manually set amount of bytes programmed
+int lfs_emubd_setproged(const struct lfs_config *cfg, lfs_emubd_io_t proged);
+
+// Manually set amount of bytes erased
+int lfs_emubd_seterased(const struct lfs_config *cfg, lfs_emubd_io_t erased);
+
+// Get simulated wear on a given block
+lfs_emubd_swear_t lfs_emubd_wear(const struct lfs_config *cfg,
+ lfs_block_t block);
+
+// Manually set simulated wear on a given block
+int lfs_emubd_setwear(const struct lfs_config *cfg,
+ lfs_block_t block, lfs_emubd_wear_t wear);
+
+// Get the remaining power-cycles
+lfs_emubd_spowercycles_t lfs_emubd_powercycles(
+ const struct lfs_config *cfg);
+
+// Manually set the remaining power-cycles
+int lfs_emubd_setpowercycles(const struct lfs_config *cfg,
+ lfs_emubd_powercycles_t power_cycles);
+
+// Create a copy-on-write copy of the state of this block device
+int lfs_emubd_copy(const struct lfs_config *cfg, lfs_emubd_t *copy);
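+//
+// A hypothetical sketch; copies are cheap since blocks are refcounted and
+// only duplicated when modified:
+//
+//   lfs_emubd_t snapshot;
+//   lfs_emubd_copy(&cfg, &snapshot);
+//   // ... run operations that may clobber the block device ...
+//   // to inspect the snapshot, point another lfs_config's context at it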
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
diff --git a/bd/lfs_filebd.c b/bd/lfs_filebd.c
index ee0c31e2..780c8f90 100644
--- a/bd/lfs_filebd.c
+++ b/bd/lfs_filebd.c
@@ -15,21 +15,18 @@
#include <windows.h>
#endif
-int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
- const struct lfs_filebd_config *bdcfg) {
- LFS_FILEBD_TRACE("lfs_filebd_createcfg(%p {.context=%p, "
+int lfs_filebd_create(const struct lfs_config *cfg, const char *path) {
+ LFS_FILEBD_TRACE("lfs_filebd_create(%p {.context=%p, "
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
- "\"%s\", "
- "%p {.erase_value=%"PRId32"})",
+ "\"%s\")",
(void*)cfg, cfg->context,
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
-        path, (void*)bdcfg, bdcfg->erase_value);
+        path);
lfs_filebd_t *bd = cfg->context;
- bd->cfg = bdcfg;
// open file
#ifdef _WIN32
@@ -40,31 +37,14 @@ int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
if (bd->fd < 0) {
int err = -errno;
- LFS_FILEBD_TRACE("lfs_filebd_createcfg -> %d", err);
+ LFS_FILEBD_TRACE("lfs_filebd_create -> %d", err);
return err;
}
- LFS_FILEBD_TRACE("lfs_filebd_createcfg -> %d", 0);
+ LFS_FILEBD_TRACE("lfs_filebd_create -> %d", 0);
return 0;
}
-int lfs_filebd_create(const struct lfs_config *cfg, const char *path) {
- LFS_FILEBD_TRACE("lfs_filebd_create(%p {.context=%p, "
- ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
- ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
- ".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
- "\"%s\")",
- (void*)cfg, cfg->context,
- (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
- (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
- cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
- path);
- static const struct lfs_filebd_config defaults = {.erase_value=-1};
- int err = lfs_filebd_createcfg(cfg, path, &defaults);
- LFS_FILEBD_TRACE("lfs_filebd_create -> %d", err);
- return err;
-}
-
int lfs_filebd_destroy(const struct lfs_config *cfg) {
LFS_FILEBD_TRACE("lfs_filebd_destroy(%p)", (void*)cfg);
lfs_filebd_t *bd = cfg->context;
@@ -86,14 +66,13 @@ int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
lfs_filebd_t *bd = cfg->context;
// check if read is valid
+ LFS_ASSERT(block < cfg->block_count);
LFS_ASSERT(off % cfg->read_size == 0);
LFS_ASSERT(size % cfg->read_size == 0);
- LFS_ASSERT(block < cfg->block_count);
+ LFS_ASSERT(off+size <= cfg->block_size);
// zero for reproducibility (in case file is truncated)
- if (bd->cfg->erase_value != -1) {
- memset(buffer, bd->cfg->erase_value, size);
- }
+ memset(buffer, 0, size);
// read
off_t res1 = lseek(bd->fd,
@@ -117,37 +96,16 @@ int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, const void *buffer, lfs_size_t size) {
- LFS_FILEBD_TRACE("lfs_filebd_prog(%p, 0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
+ LFS_FILEBD_TRACE("lfs_filebd_prog(%p, "
+ "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
(void*)cfg, block, off, buffer, size);
lfs_filebd_t *bd = cfg->context;
// check if write is valid
+ LFS_ASSERT(block < cfg->block_count);
LFS_ASSERT(off % cfg->prog_size == 0);
LFS_ASSERT(size % cfg->prog_size == 0);
- LFS_ASSERT(block < cfg->block_count);
-
- // check that data was erased? only needed for testing
- if (bd->cfg->erase_value != -1) {
- off_t res1 = lseek(bd->fd,
- (off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
- if (res1 < 0) {
- int err = -errno;
- LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
- return err;
- }
-
- for (lfs_off_t i = 0; i < size; i++) {
- uint8_t c;
- ssize_t res2 = read(bd->fd, &c, 1);
- if (res2 < 0) {
- int err = -errno;
- LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
- return err;
- }
-
- LFS_ASSERT(c == bd->cfg->erase_value);
- }
- }
+ LFS_ASSERT(off+size <= cfg->block_size);
// program data
off_t res1 = lseek(bd->fd,
@@ -170,30 +128,14 @@ int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block,
}
int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block) {
- LFS_FILEBD_TRACE("lfs_filebd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
- lfs_filebd_t *bd = cfg->context;
+ LFS_FILEBD_TRACE("lfs_filebd_erase(%p, 0x%"PRIx32" (%"PRIu32"))",
+ (void*)cfg, block, cfg->block_size);
// check if erase is valid
LFS_ASSERT(block < cfg->block_count);
- // erase, only needed for testing
- if (bd->cfg->erase_value != -1) {
- off_t res1 = lseek(bd->fd, (off_t)block*cfg->block_size, SEEK_SET);
- if (res1 < 0) {
- int err = -errno;
- LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", err);
- return err;
- }
-
- for (lfs_off_t i = 0; i < cfg->block_size; i++) {
- ssize_t res2 = write(bd->fd, &(uint8_t){bd->cfg->erase_value}, 1);
- if (res2 < 0) {
- int err = -errno;
- LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", err);
- return err;
- }
- }
- }
+ // erase is a noop
+ (void)block;
LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", 0);
return 0;
@@ -201,6 +143,7 @@ int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block) {
int lfs_filebd_sync(const struct lfs_config *cfg) {
LFS_FILEBD_TRACE("lfs_filebd_sync(%p)", (void*)cfg);
+
// file sync
lfs_filebd_t *bd = cfg->context;
#ifdef _WIN32
diff --git a/bd/lfs_filebd.h b/bd/lfs_filebd.h
index 1a9456c5..0f24996a 100644
--- a/bd/lfs_filebd.h
+++ b/bd/lfs_filebd.h
@@ -18,31 +18,22 @@ extern "C"
// Block device specific tracing
+#ifndef LFS_FILEBD_TRACE
#ifdef LFS_FILEBD_YES_TRACE
#define LFS_FILEBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
#else
#define LFS_FILEBD_TRACE(...)
#endif
-
-// filebd config (optional)
-struct lfs_filebd_config {
- // 8-bit erase value to use for simulating erases. -1 does not simulate
- // erases, which can speed up testing by avoiding all the extra block-device
- // operations to store the erase value.
- int32_t erase_value;
-};
+#endif
// filebd state
typedef struct lfs_filebd {
int fd;
- const struct lfs_filebd_config *cfg;
} lfs_filebd_t;
// Create a file block device using the geometry in lfs_config
int lfs_filebd_create(const struct lfs_config *cfg, const char *path);
-int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
- const struct lfs_filebd_config *bdcfg);
// Clean up memory associated with block device
int lfs_filebd_destroy(const struct lfs_config *cfg);
diff --git a/bd/lfs_rambd.c b/bd/lfs_rambd.c
index 39bb8150..ab180b93 100644
--- a/bd/lfs_rambd.c
+++ b/bd/lfs_rambd.c
@@ -13,12 +13,12 @@ int lfs_rambd_createcfg(const struct lfs_config *cfg,
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
- "%p {.erase_value=%"PRId32", .buffer=%p})",
+ "%p {.buffer=%p})",
(void*)cfg, cfg->context,
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
- (void*)bdcfg, bdcfg->erase_value, bdcfg->buffer);
+ (void*)bdcfg, bdcfg->buffer);
lfs_rambd_t *bd = cfg->context;
bd->cfg = bdcfg;
@@ -33,13 +33,8 @@ int lfs_rambd_createcfg(const struct lfs_config *cfg,
}
}
- // zero for reproducibility?
- if (bd->cfg->erase_value != -1) {
- memset(bd->buffer, bd->cfg->erase_value,
- cfg->block_size * cfg->block_count);
- } else {
- memset(bd->buffer, 0, cfg->block_size * cfg->block_count);
- }
+ // zero for reproducibility
+ memset(bd->buffer, 0, cfg->block_size * cfg->block_count);
LFS_RAMBD_TRACE("lfs_rambd_createcfg -> %d", 0);
return 0;
@@ -54,7 +49,7 @@ int lfs_rambd_create(const struct lfs_config *cfg) {
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count);
- static const struct lfs_rambd_config defaults = {.erase_value=-1};
+ static const struct lfs_rambd_config defaults = {0};
int err = lfs_rambd_createcfg(cfg, &defaults);
LFS_RAMBD_TRACE("lfs_rambd_create -> %d", err);
return err;
@@ -79,9 +74,10 @@ int lfs_rambd_read(const struct lfs_config *cfg, lfs_block_t block,
lfs_rambd_t *bd = cfg->context;
// check if read is valid
+ LFS_ASSERT(block < cfg->block_count);
LFS_ASSERT(off % cfg->read_size == 0);
LFS_ASSERT(size % cfg->read_size == 0);
- LFS_ASSERT(block < cfg->block_count);
+ LFS_ASSERT(off+size <= cfg->block_size);
// read data
memcpy(buffer, &bd->buffer[block*cfg->block_size + off], size);
@@ -98,17 +94,10 @@ int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block,
lfs_rambd_t *bd = cfg->context;
// check if write is valid
+ LFS_ASSERT(block < cfg->block_count);
LFS_ASSERT(off % cfg->prog_size == 0);
LFS_ASSERT(size % cfg->prog_size == 0);
- LFS_ASSERT(block < cfg->block_count);
-
- // check that data was erased? only needed for testing
- if (bd->cfg->erase_value != -1) {
- for (lfs_off_t i = 0; i < size; i++) {
- LFS_ASSERT(bd->buffer[block*cfg->block_size + off + i] ==
- bd->cfg->erase_value);
- }
- }
+ LFS_ASSERT(off+size <= cfg->block_size);
// program data
memcpy(&bd->buffer[block*cfg->block_size + off], buffer, size);
@@ -118,17 +107,14 @@ int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block,
}
int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block) {
- LFS_RAMBD_TRACE("lfs_rambd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
- lfs_rambd_t *bd = cfg->context;
+ LFS_RAMBD_TRACE("lfs_rambd_erase(%p, 0x%"PRIx32" (%"PRIu32"))",
+ (void*)cfg, block, cfg->block_size);
// check if erase is valid
LFS_ASSERT(block < cfg->block_count);
- // erase, only needed for testing
- if (bd->cfg->erase_value != -1) {
- memset(&bd->buffer[block*cfg->block_size],
- bd->cfg->erase_value, cfg->block_size);
- }
+ // erase is a noop
+ (void)block;
LFS_RAMBD_TRACE("lfs_rambd_erase -> %d", 0);
return 0;
@@ -136,8 +122,10 @@ int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block) {
int lfs_rambd_sync(const struct lfs_config *cfg) {
LFS_RAMBD_TRACE("lfs_rambd_sync(%p)", (void*)cfg);
- // sync does nothing because we aren't backed by anything real
+
+ // sync is a noop
(void)cfg;
+
LFS_RAMBD_TRACE("lfs_rambd_sync -> %d", 0);
return 0;
}
diff --git a/bd/lfs_rambd.h b/bd/lfs_rambd.h
index 3a70bc6e..34246802 100644
--- a/bd/lfs_rambd.h
+++ b/bd/lfs_rambd.h
@@ -18,18 +18,16 @@ extern "C"
// Block device specific tracing
+#ifndef LFS_RAMBD_TRACE
#ifdef LFS_RAMBD_YES_TRACE
#define LFS_RAMBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
#else
#define LFS_RAMBD_TRACE(...)
#endif
+#endif
// rambd config (optional)
struct lfs_rambd_config {
- // 8-bit erase value to simulate erasing with. -1 indicates no erase
- // occurs, which is still a valid block device
- int32_t erase_value;
-
// Optional statically allocated buffer for the block device.
void *buffer;
};
diff --git a/bd/lfs_testbd.c b/bd/lfs_testbd.c
deleted file mode 100644
index 1f0877d4..00000000
--- a/bd/lfs_testbd.c
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- * Testing block device, wraps filebd and rambd while providing a bunch
- * of hooks for testing littlefs in various conditions.
- *
- * Copyright (c) 2022, The littlefs authors.
- * Copyright (c) 2017, Arm Limited. All rights reserved.
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#include "bd/lfs_testbd.h"
-
-#include <stdlib.h>
-
-
-int lfs_testbd_createcfg(const struct lfs_config *cfg, const char *path,
- const struct lfs_testbd_config *bdcfg) {
- LFS_TESTBD_TRACE("lfs_testbd_createcfg(%p {.context=%p, "
- ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
- ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
- ".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
- "\"%s\", "
- "%p {.erase_value=%"PRId32", .erase_cycles=%"PRIu32", "
- ".badblock_behavior=%"PRIu8", .power_cycles=%"PRIu32", "
- ".buffer=%p, .wear_buffer=%p})",
- (void*)cfg, cfg->context,
- (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
- (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
- cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
- path, (void*)bdcfg, bdcfg->erase_value, bdcfg->erase_cycles,
- bdcfg->badblock_behavior, bdcfg->power_cycles,
- bdcfg->buffer, bdcfg->wear_buffer);
- lfs_testbd_t *bd = cfg->context;
- bd->cfg = bdcfg;
-
- // setup testing things
- bd->persist = path;
- bd->power_cycles = bd->cfg->power_cycles;
-
- if (bd->cfg->erase_cycles) {
- if (bd->cfg->wear_buffer) {
- bd->wear = bd->cfg->wear_buffer;
- } else {
- bd->wear = lfs_malloc(sizeof(lfs_testbd_wear_t)*cfg->block_count);
- if (!bd->wear) {
- LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", LFS_ERR_NOMEM);
- return LFS_ERR_NOMEM;
- }
- }
-
- memset(bd->wear, 0, sizeof(lfs_testbd_wear_t) * cfg->block_count);
- }
-
- // create underlying block device
- if (bd->persist) {
- bd->u.file.cfg = (struct lfs_filebd_config){
- .erase_value = bd->cfg->erase_value,
- };
- int err = lfs_filebd_createcfg(cfg, path, &bd->u.file.cfg);
- LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", err);
- return err;
- } else {
- bd->u.ram.cfg = (struct lfs_rambd_config){
- .erase_value = bd->cfg->erase_value,
- .buffer = bd->cfg->buffer,
- };
- int err = lfs_rambd_createcfg(cfg, &bd->u.ram.cfg);
- LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", err);
- return err;
- }
-}
-
-int lfs_testbd_create(const struct lfs_config *cfg, const char *path) {
- LFS_TESTBD_TRACE("lfs_testbd_create(%p {.context=%p, "
- ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
- ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
- ".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
- "\"%s\")",
- (void*)cfg, cfg->context,
- (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
- (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
- cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
- path);
- static const struct lfs_testbd_config defaults = {.erase_value=-1};
- int err = lfs_testbd_createcfg(cfg, path, &defaults);
- LFS_TESTBD_TRACE("lfs_testbd_create -> %d", err);
- return err;
-}
-
-int lfs_testbd_destroy(const struct lfs_config *cfg) {
- LFS_TESTBD_TRACE("lfs_testbd_destroy(%p)", (void*)cfg);
- lfs_testbd_t *bd = cfg->context;
- if (bd->cfg->erase_cycles && !bd->cfg->wear_buffer) {
- lfs_free(bd->wear);
- }
-
- if (bd->persist) {
- int err = lfs_filebd_destroy(cfg);
- LFS_TESTBD_TRACE("lfs_testbd_destroy -> %d", err);
- return err;
- } else {
- int err = lfs_rambd_destroy(cfg);
- LFS_TESTBD_TRACE("lfs_testbd_destroy -> %d", err);
- return err;
- }
-}
-
-/// Internal mapping to block devices ///
-static int lfs_testbd_rawread(const struct lfs_config *cfg, lfs_block_t block,
- lfs_off_t off, void *buffer, lfs_size_t size) {
- lfs_testbd_t *bd = cfg->context;
- if (bd->persist) {
- return lfs_filebd_read(cfg, block, off, buffer, size);
- } else {
- return lfs_rambd_read(cfg, block, off, buffer, size);
- }
-}
-
-static int lfs_testbd_rawprog(const struct lfs_config *cfg, lfs_block_t block,
- lfs_off_t off, const void *buffer, lfs_size_t size) {
- lfs_testbd_t *bd = cfg->context;
- if (bd->persist) {
- return lfs_filebd_prog(cfg, block, off, buffer, size);
- } else {
- return lfs_rambd_prog(cfg, block, off, buffer, size);
- }
-}
-
-static int lfs_testbd_rawerase(const struct lfs_config *cfg,
- lfs_block_t block) {
- lfs_testbd_t *bd = cfg->context;
- if (bd->persist) {
- return lfs_filebd_erase(cfg, block);
- } else {
- return lfs_rambd_erase(cfg, block);
- }
-}
-
-static int lfs_testbd_rawsync(const struct lfs_config *cfg) {
- lfs_testbd_t *bd = cfg->context;
- if (bd->persist) {
- return lfs_filebd_sync(cfg);
- } else {
- return lfs_rambd_sync(cfg);
- }
-}
-
-/// block device API ///
-int lfs_testbd_read(const struct lfs_config *cfg, lfs_block_t block,
- lfs_off_t off, void *buffer, lfs_size_t size) {
- LFS_TESTBD_TRACE("lfs_testbd_read(%p, "
- "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
- (void*)cfg, block, off, buffer, size);
- lfs_testbd_t *bd = cfg->context;
-
- // check if read is valid
- LFS_ASSERT(off % cfg->read_size == 0);
- LFS_ASSERT(size % cfg->read_size == 0);
- LFS_ASSERT(block < cfg->block_count);
-
- // block bad?
- if (bd->cfg->erase_cycles && bd->wear[block] >= bd->cfg->erase_cycles &&
- bd->cfg->badblock_behavior == LFS_TESTBD_BADBLOCK_READERROR) {
- LFS_TESTBD_TRACE("lfs_testbd_read -> %d", LFS_ERR_CORRUPT);
- return LFS_ERR_CORRUPT;
- }
-
- // read
- int err = lfs_testbd_rawread(cfg, block, off, buffer, size);
- LFS_TESTBD_TRACE("lfs_testbd_read -> %d", err);
- return err;
-}
-
-int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
- lfs_off_t off, const void *buffer, lfs_size_t size) {
- LFS_TESTBD_TRACE("lfs_testbd_prog(%p, "
- "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
- (void*)cfg, block, off, buffer, size);
- lfs_testbd_t *bd = cfg->context;
-
- // check if write is valid
- LFS_ASSERT(off % cfg->prog_size == 0);
- LFS_ASSERT(size % cfg->prog_size == 0);
- LFS_ASSERT(block < cfg->block_count);
-
- // block bad?
- if (bd->cfg->erase_cycles && bd->wear[block] >= bd->cfg->erase_cycles) {
- if (bd->cfg->badblock_behavior ==
- LFS_TESTBD_BADBLOCK_PROGERROR) {
- LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", LFS_ERR_CORRUPT);
- return LFS_ERR_CORRUPT;
- } else if (bd->cfg->badblock_behavior ==
- LFS_TESTBD_BADBLOCK_PROGNOOP ||
- bd->cfg->badblock_behavior ==
- LFS_TESTBD_BADBLOCK_ERASENOOP) {
- LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", 0);
- return 0;
- }
- }
-
- // prog
- int err = lfs_testbd_rawprog(cfg, block, off, buffer, size);
- if (err) {
- LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", err);
- return err;
- }
-
- // lose power?
- if (bd->power_cycles > 0) {
- bd->power_cycles -= 1;
- if (bd->power_cycles == 0) {
- // sync to make sure we persist the last changes
- LFS_ASSERT(lfs_testbd_rawsync(cfg) == 0);
- // simulate power loss
- exit(33);
- }
- }
-
- LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", 0);
- return 0;
-}
-
-int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) {
- LFS_TESTBD_TRACE("lfs_testbd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
- lfs_testbd_t *bd = cfg->context;
-
- // check if erase is valid
- LFS_ASSERT(block < cfg->block_count);
-
- // block bad?
- if (bd->cfg->erase_cycles) {
- if (bd->wear[block] >= bd->cfg->erase_cycles) {
- if (bd->cfg->badblock_behavior ==
- LFS_TESTBD_BADBLOCK_ERASEERROR) {
- LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", LFS_ERR_CORRUPT);
- return LFS_ERR_CORRUPT;
- } else if (bd->cfg->badblock_behavior ==
- LFS_TESTBD_BADBLOCK_ERASENOOP) {
- LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", 0);
- return 0;
- }
- } else {
- // mark wear
- bd->wear[block] += 1;
- }
- }
-
- // erase
- int err = lfs_testbd_rawerase(cfg, block);
- if (err) {
- LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", err);
- return err;
- }
-
- // lose power?
- if (bd->power_cycles > 0) {
- bd->power_cycles -= 1;
- if (bd->power_cycles == 0) {
- // sync to make sure we persist the last changes
- LFS_ASSERT(lfs_testbd_rawsync(cfg) == 0);
- // simulate power loss
- exit(33);
- }
- }
-
- LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", 0);
- return 0;
-}
-
-int lfs_testbd_sync(const struct lfs_config *cfg) {
- LFS_TESTBD_TRACE("lfs_testbd_sync(%p)", (void*)cfg);
- int err = lfs_testbd_rawsync(cfg);
- LFS_TESTBD_TRACE("lfs_testbd_sync -> %d", err);
- return err;
-}
-
-
-/// simulated wear operations ///
-lfs_testbd_swear_t lfs_testbd_getwear(const struct lfs_config *cfg,
- lfs_block_t block) {
- LFS_TESTBD_TRACE("lfs_testbd_getwear(%p, %"PRIu32")", (void*)cfg, block);
- lfs_testbd_t *bd = cfg->context;
-
- // check if block is valid
- LFS_ASSERT(bd->cfg->erase_cycles);
- LFS_ASSERT(block < cfg->block_count);
-
- LFS_TESTBD_TRACE("lfs_testbd_getwear -> %"PRIu32, bd->wear[block]);
- return bd->wear[block];
-}
-
-int lfs_testbd_setwear(const struct lfs_config *cfg,
- lfs_block_t block, lfs_testbd_wear_t wear) {
- LFS_TESTBD_TRACE("lfs_testbd_setwear(%p, %"PRIu32")", (void*)cfg, block);
- lfs_testbd_t *bd = cfg->context;
-
- // check if block is valid
- LFS_ASSERT(bd->cfg->erase_cycles);
- LFS_ASSERT(block < cfg->block_count);
-
- bd->wear[block] = wear;
-
- LFS_TESTBD_TRACE("lfs_testbd_setwear -> %d", 0);
- return 0;
-}
diff --git a/bd/lfs_testbd.h b/bd/lfs_testbd.h
deleted file mode 100644
index 61679e5e..00000000
--- a/bd/lfs_testbd.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Testing block device, wraps filebd and rambd while providing a bunch
- * of hooks for testing littlefs in various conditions.
- *
- * Copyright (c) 2022, The littlefs authors.
- * Copyright (c) 2017, Arm Limited. All rights reserved.
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#ifndef LFS_TESTBD_H
-#define LFS_TESTBD_H
-
-#include "lfs.h"
-#include "lfs_util.h"
-#include "bd/lfs_rambd.h"
-#include "bd/lfs_filebd.h"
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-
-// Block device specific tracing
-#ifdef LFS_TESTBD_YES_TRACE
-#define LFS_TESTBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
-#else
-#define LFS_TESTBD_TRACE(...)
-#endif
-
-// Mode determining how "bad blocks" behave during testing. This simulates
-// some real-world circumstances such as progs not sticking (prog-noop),
-// a readonly disk (erase-noop), and ECC failures (read-error).
-//
-// Not that read-noop is not allowed. Read _must_ return a consistent (but
-// may be arbitrary) value on every read.
-enum lfs_testbd_badblock_behavior {
- LFS_TESTBD_BADBLOCK_PROGERROR,
- LFS_TESTBD_BADBLOCK_ERASEERROR,
- LFS_TESTBD_BADBLOCK_READERROR,
- LFS_TESTBD_BADBLOCK_PROGNOOP,
- LFS_TESTBD_BADBLOCK_ERASENOOP,
-};
-
-// Type for measuring wear
-typedef uint32_t lfs_testbd_wear_t;
-typedef int32_t lfs_testbd_swear_t;
-
-// testbd config, this is required for testing
-struct lfs_testbd_config {
- // 8-bit erase value to use for simulating erases. -1 does not simulate
- // erases, which can speed up testing by avoiding all the extra block-device
- // operations to store the erase value.
- int32_t erase_value;
-
- // Number of erase cycles before a block becomes "bad". The exact behavior
- // of bad blocks is controlled by the badblock_mode.
- uint32_t erase_cycles;
-
- // The mode determining how bad blocks fail
- uint8_t badblock_behavior;
-
- // Number of write operations (erase/prog) before forcefully killing
- // the program with exit. Simulates power-loss. 0 disables.
- uint32_t power_cycles;
-
- // Optional buffer for RAM block device.
- void *buffer;
-
- // Optional buffer for wear
- void *wear_buffer;
-};
-
-// testbd state
-typedef struct lfs_testbd {
- union {
- struct {
- lfs_filebd_t bd;
- struct lfs_filebd_config cfg;
- } file;
- struct {
- lfs_rambd_t bd;
- struct lfs_rambd_config cfg;
- } ram;
- } u;
-
- bool persist;
- uint32_t power_cycles;
- lfs_testbd_wear_t *wear;
-
- const struct lfs_testbd_config *cfg;
-} lfs_testbd_t;
-
-
-/// Block device API ///
-
-// Create a test block device using the geometry in lfs_config
-//
-// Note that filebd is used if a path is provided, if path is NULL
-// testbd will use rambd which can be much faster.
-int lfs_testbd_create(const struct lfs_config *cfg, const char *path);
-int lfs_testbd_createcfg(const struct lfs_config *cfg, const char *path,
- const struct lfs_testbd_config *bdcfg);
-
-// Clean up memory associated with block device
-int lfs_testbd_destroy(const struct lfs_config *cfg);
-
-// Read a block
-int lfs_testbd_read(const struct lfs_config *cfg, lfs_block_t block,
- lfs_off_t off, void *buffer, lfs_size_t size);
-
-// Program a block
-//
-// The block must have previously been erased.
-int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
- lfs_off_t off, const void *buffer, lfs_size_t size);
-
-// Erase a block
-//
-// A block must be erased before being programmed. The
-// state of an erased block is undefined.
-int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block);
-
-// Sync the block device
-int lfs_testbd_sync(const struct lfs_config *cfg);
-
-
-/// Additional extended API for driving test features ///
-
-// Get simulated wear on a given block
-lfs_testbd_swear_t lfs_testbd_getwear(const struct lfs_config *cfg,
- lfs_block_t block);
-
-// Manually set simulated wear on a given block
-int lfs_testbd_setwear(const struct lfs_config *cfg,
- lfs_block_t block, lfs_testbd_wear_t wear);
-
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif
diff --git a/benches/bench_dir.toml b/benches/bench_dir.toml
new file mode 100644
index 00000000..5f8cb490
--- /dev/null
+++ b/benches/bench_dir.toml
@@ -0,0 +1,270 @@
+[cases.bench_dir_open]
+# 0 = in-order
+# 1 = reversed-order
+# 2 = random-order
+defines.ORDER = [0, 1, 2]
+defines.N = 1024
+defines.FILE_SIZE = 8
+defines.CHUNK_SIZE = 8
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+
+ // first create the files
+ char name[256];
+ uint8_t buffer[CHUNK_SIZE];
+ for (lfs_size_t i = 0; i < N; i++) {
+ sprintf(name, "file%08x", i);
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, name,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+
+ uint32_t file_prng = i;
+ for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
+ for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
+ buffer[k] = BENCH_PRNG(&file_prng);
+ }
+ lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+
+ // then read the files
+ BENCH_START();
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < N; i++) {
+ lfs_off_t i_
+ = (ORDER == 0) ? i
+ : (ORDER == 1) ? (N-1-i)
+ : BENCH_PRNG(&prng) % N;
+ sprintf(name, "file%08x", i_);
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, name, LFS_O_RDONLY) => 0;
+
+ uint32_t file_prng = i_;
+ for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
+ lfs_file_read(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+ for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
+ assert(buffer[k] == BENCH_PRNG(&file_prng));
+ }
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[cases.bench_dir_creat]
+# 0 = in-order
+# 1 = reversed-order
+# 2 = random-order
+defines.ORDER = [0, 1, 2]
+defines.N = 1024
+defines.FILE_SIZE = 8
+defines.CHUNK_SIZE = 8
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+
+ BENCH_START();
+ uint32_t prng = 42;
+ char name[256];
+ uint8_t buffer[CHUNK_SIZE];
+ for (lfs_size_t i = 0; i < N; i++) {
+ lfs_off_t i_
+ = (ORDER == 0) ? i
+ : (ORDER == 1) ? (N-1-i)
+ : BENCH_PRNG(&prng) % N;
+ sprintf(name, "file%08x", i_);
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, name,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+
+ uint32_t file_prng = i_;
+ for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
+ for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
+ buffer[k] = BENCH_PRNG(&file_prng);
+ }
+ lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[cases.bench_dir_remove]
+# 0 = in-order
+# 1 = reversed-order
+# 2 = random-order
+defines.ORDER = [0, 1, 2]
+defines.N = 1024
+defines.FILE_SIZE = 8
+defines.CHUNK_SIZE = 8
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+
+ // first create the files
+ char name[256];
+ uint8_t buffer[CHUNK_SIZE];
+ for (lfs_size_t i = 0; i < N; i++) {
+ sprintf(name, "file%08x", i);
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, name,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+
+ uint32_t file_prng = i;
+ for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
+ for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
+ buffer[k] = BENCH_PRNG(&file_prng);
+ }
+ lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+
+ // then remove the files
+ BENCH_START();
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < N; i++) {
+ lfs_off_t i_
+ = (ORDER == 0) ? i
+ : (ORDER == 1) ? (N-1-i)
+ : BENCH_PRNG(&prng) % N;
+ sprintf(name, "file%08x", i_);
+ int err = lfs_remove(&lfs, name);
+ assert(!err || err == LFS_ERR_NOENT);
+ }
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[cases.bench_dir_read]
+defines.N = 1024
+defines.FILE_SIZE = 8
+defines.CHUNK_SIZE = 8
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+
+ // first create the files
+ char name[256];
+ uint8_t buffer[CHUNK_SIZE];
+ for (lfs_size_t i = 0; i < N; i++) {
+ sprintf(name, "file%08x", i);
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, name,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+
+ uint32_t file_prng = i;
+ for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
+ for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
+ buffer[k] = BENCH_PRNG(&file_prng);
+ }
+ lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+
+ // then read the directory
+ BENCH_START();
+ lfs_dir_t dir;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ for (int i = 0; i < N; i++) {
+ sprintf(name, "file%08x", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_REG);
+ assert(strcmp(info.name, name) == 0);
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[cases.bench_dir_mkdir]
+# 0 = in-order
+# 1 = reversed-order
+# 2 = random-order
+defines.ORDER = [0, 1, 2]
+defines.N = 8
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+
+ BENCH_START();
+ uint32_t prng = 42;
+ char name[256];
+ for (lfs_size_t i = 0; i < N; i++) {
+ lfs_off_t i_
+ = (ORDER == 0) ? i
+ : (ORDER == 1) ? (N-1-i)
+ : BENCH_PRNG(&prng) % N;
+ printf("hm %d\n", i);
+ sprintf(name, "dir%08x", i_);
+ int err = lfs_mkdir(&lfs, name);
+ assert(!err || err == LFS_ERR_EXIST);
+ }
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[cases.bench_dir_rmdir]
+# 0 = in-order
+# 1 = reversed-order
+# 2 = random-order
+defines.ORDER = [0, 1, 2]
+defines.N = 8
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+
+ // first create the dirs
+ char name[256];
+ for (lfs_size_t i = 0; i < N; i++) {
+ sprintf(name, "dir%08x", i);
+ lfs_mkdir(&lfs, name) => 0;
+ }
+
+ // then remove the dirs
+ BENCH_START();
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < N; i++) {
+ lfs_off_t i_
+ = (ORDER == 0) ? i
+ : (ORDER == 1) ? (N-1-i)
+ : BENCH_PRNG(&prng) % N;
+ sprintf(name, "dir%08x", i_);
+ int err = lfs_remove(&lfs, name);
+ assert(!err || err == LFS_ERR_NOENT);
+ }
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+
diff --git a/benches/bench_file.toml b/benches/bench_file.toml
new file mode 100644
index 00000000..168eaad8
--- /dev/null
+++ b/benches/bench_file.toml
@@ -0,0 +1,95 @@
+[cases.bench_file_read]
+# 0 = in-order
+# 1 = reversed-order
+# 2 = random-order
+defines.ORDER = [0, 1, 2]
+defines.SIZE = '128*1024'
+defines.CHUNK_SIZE = 64
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_size_t chunks = (SIZE+CHUNK_SIZE-1)/CHUNK_SIZE;
+
+ // first write the file
+ lfs_file_t file;
+ uint8_t buffer[CHUNK_SIZE];
+ lfs_file_open(&lfs, &file, "file",
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+ for (lfs_size_t i = 0; i < chunks; i++) {
+ uint32_t chunk_prng = i;
+ for (lfs_size_t j = 0; j < CHUNK_SIZE; j++) {
+ buffer[j] = BENCH_PRNG(&chunk_prng);
+ }
+
+ lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ // then read the file
+ BENCH_START();
+ lfs_file_open(&lfs, &file, "file", LFS_O_RDONLY) => 0;
+
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < chunks; i++) {
+ lfs_off_t i_
+ = (ORDER == 0) ? i
+ : (ORDER == 1) ? (chunks-1-i)
+ : BENCH_PRNG(&prng) % chunks;
+ lfs_file_seek(&lfs, &file, i_*CHUNK_SIZE, LFS_SEEK_SET)
+ => i_*CHUNK_SIZE;
+ lfs_file_read(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+
+ uint32_t chunk_prng = i_;
+ for (lfs_size_t j = 0; j < CHUNK_SIZE; j++) {
+ assert(buffer[j] == BENCH_PRNG(&chunk_prng));
+ }
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[cases.bench_file_write]
+# 0 = in-order
+# 1 = reversed-order
+# 2 = random-order
+defines.ORDER = [0, 1, 2]
+defines.SIZE = '128*1024'
+defines.CHUNK_SIZE = 64
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_size_t chunks = (SIZE+CHUNK_SIZE-1)/CHUNK_SIZE;
+
+ BENCH_START();
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, "file",
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+
+ uint8_t buffer[CHUNK_SIZE];
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < chunks; i++) {
+ lfs_off_t i_
+ = (ORDER == 0) ? i
+ : (ORDER == 1) ? (chunks-1-i)
+ : BENCH_PRNG(&prng) % chunks;
+ uint32_t chunk_prng = i_;
+ for (lfs_size_t j = 0; j < CHUNK_SIZE; j++) {
+ buffer[j] = BENCH_PRNG(&chunk_prng);
+ }
+
+ lfs_file_seek(&lfs, &file, i_*CHUNK_SIZE, LFS_SEEK_SET)
+ => i_*CHUNK_SIZE;
+ lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
diff --git a/benches/bench_superblock.toml b/benches/bench_superblock.toml
new file mode 100644
index 00000000..37659d47
--- /dev/null
+++ b/benches/bench_superblock.toml
@@ -0,0 +1,56 @@
+[cases.bench_superblocks_found]
+# support benchmarking with files
+defines.N = [0, 1024]
+defines.FILE_SIZE = 8
+defines.CHUNK_SIZE = 8
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ // create files?
+ lfs_mount(&lfs, cfg) => 0;
+ char name[256];
+ uint8_t buffer[CHUNK_SIZE];
+ for (lfs_size_t i = 0; i < N; i++) {
+ sprintf(name, "file%08x", i);
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, name,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+
+ for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
+ for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
+ buffer[k] = i+j+k;
+ }
+ lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ BENCH_START();
+ lfs_mount(&lfs, cfg) => 0;
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[cases.bench_superblocks_missing]
+code = '''
+ lfs_t lfs;
+
+ BENCH_START();
+ int err = lfs_mount(&lfs, cfg);
+ assert(err != 0);
+ BENCH_STOP();
+'''
+
+[cases.bench_superblocks_format]
+code = '''
+ lfs_t lfs;
+
+ BENCH_START();
+ lfs_format(&lfs, cfg) => 0;
+ BENCH_STOP();
+'''
+
diff --git a/lfs.c b/lfs.c
index 26280fa8..d2f3b5e9 100644
--- a/lfs.c
+++ b/lfs.c
@@ -135,14 +135,14 @@ static int lfs_bd_cmp(lfs_t *lfs,
uint8_t dat[8];
diff = lfs_min(size-i, sizeof(dat));
- int res = lfs_bd_read(lfs,
+ int err = lfs_bd_read(lfs,
pcache, rcache, hint-i,
block, off+i, &dat, diff);
- if (res) {
- return res;
+ if (err) {
+ return err;
}
- res = memcmp(dat, data + i, diff);
+ int res = memcmp(dat, data + i, diff);
if (res) {
return res < 0 ? LFS_CMP_LT : LFS_CMP_GT;
}
@@ -151,6 +151,27 @@ static int lfs_bd_cmp(lfs_t *lfs,
return LFS_CMP_EQ;
}
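+// checksum a region of a block, reading through the caches in small
+// bounded chunks (mirroring lfs_bd_cmp above) and accumulating into *crc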
+static int lfs_bd_crc(lfs_t *lfs,
+ const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
+ lfs_block_t block, lfs_off_t off, lfs_size_t size, uint32_t *crc) {
+ lfs_size_t diff = 0;
+
+ for (lfs_off_t i = 0; i < size; i += diff) {
+ uint8_t dat[8];
+ diff = lfs_min(size-i, sizeof(dat));
+ int err = lfs_bd_read(lfs,
+ pcache, rcache, hint-i,
+ block, off+i, &dat, diff);
+ if (err) {
+ return err;
+ }
+
+ *crc = lfs_crc(*crc, &dat, diff);
+ }
+
+ return 0;
+}
+
#ifndef LFS_READONLY
static int lfs_bd_flush(lfs_t *lfs,
lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
@@ -279,14 +300,12 @@ static inline int lfs_pair_cmp(
paira[0] == pairb[1] || paira[1] == pairb[0]);
}
-#ifndef LFS_READONLY
-static inline bool lfs_pair_sync(
+static inline bool lfs_pair_issync(
const lfs_block_t paira[2],
const lfs_block_t pairb[2]) {
return (paira[0] == pairb[0] && paira[1] == pairb[1]) ||
(paira[0] == pairb[1] && paira[1] == pairb[0]);
}
-#endif
static inline void lfs_pair_fromle32(lfs_block_t pair[2]) {
pair[0] = lfs_fromle32(pair[0]);
@@ -325,6 +344,10 @@ static inline uint16_t lfs_tag_type1(lfs_tag_t tag) {
return (tag & 0x70000000) >> 20;
}
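+// (type2 keeps one extra type bit over type1: enough to tell the
+// commit crc, LFS_TYPE_CCRC 0x500, apart from LFS_TYPE_FCRC 0x5ff,
+// whose type2 value is 0x580)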
+static inline uint16_t lfs_tag_type2(lfs_tag_t tag) {
+ return (tag & 0x78000000) >> 20;
+}
+
static inline uint16_t lfs_tag_type3(lfs_tag_t tag) {
return (tag & 0x7ff00000) >> 20;
}
@@ -386,12 +409,16 @@ static inline bool lfs_gstate_hasorphans(const lfs_gstate_t *a) {
}
static inline uint8_t lfs_gstate_getorphans(const lfs_gstate_t *a) {
- return lfs_tag_size(a->tag);
+ return lfs_tag_size(a->tag) & 0x1ff;
}
static inline bool lfs_gstate_hasmove(const lfs_gstate_t *a) {
return lfs_tag_type1(a->tag);
}
+
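+// the 10-bit gstate size field is split: bits 0-8 hold the orphan
+// count (hence the 0x1ff mask above), and bit 9 flags a pending
+// superblock rewrite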
+static inline bool lfs_gstate_needssuperblock(const lfs_gstate_t *a) {
+ return lfs_tag_size(a->tag) >> 9;
+}
#endif
static inline bool lfs_gstate_hasmovehere(const lfs_gstate_t *a,
@@ -413,6 +440,24 @@ static inline void lfs_gstate_tole32(lfs_gstate_t *a) {
}
#endif
+// operations on forward-CRCs used to track erased state
+struct lfs_fcrc {
+ lfs_size_t size;
+ uint32_t crc;
+};
+
+static void lfs_fcrc_fromle32(struct lfs_fcrc *fcrc) {
+ fcrc->size = lfs_fromle32(fcrc->size);
+ fcrc->crc = lfs_fromle32(fcrc->crc);
+}
+
+#ifndef LFS_READONLY
+static void lfs_fcrc_tole32(struct lfs_fcrc *fcrc) {
+ fcrc->size = lfs_tole32(fcrc->size);
+ fcrc->crc = lfs_tole32(fcrc->crc);
+}
+#endif
+
// other endianness operations
static void lfs_ctz_fromle32(struct lfs_ctz *ctz) {
ctz->head = lfs_fromle32(ctz->head);
@@ -490,6 +535,7 @@ static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file);
static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file);
static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss);
+static void lfs_fs_prepsuperblock(lfs_t *lfs, bool needssuperblock);
static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans);
static void lfs_fs_prepmove(lfs_t *lfs,
uint16_t id, const lfs_block_t pair[2]);
@@ -1035,6 +1081,11 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
bool tempsplit = false;
lfs_stag_t tempbesttag = besttag;
+ // assume not erased until proven otherwise
+ bool maybeerased = false;
+ bool hasfcrc = false;
+ struct lfs_fcrc fcrc;
+
dir->rev = lfs_tole32(dir->rev);
uint32_t crc = lfs_crc(0xffffffff, &dir->rev, sizeof(dir->rev));
dir->rev = lfs_fromle32(dir->rev);
@@ -1049,7 +1100,6 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
if (err) {
if (err == LFS_ERR_CORRUPT) {
// can't continue?
- dir->erased = false;
break;
}
return err;
@@ -1058,19 +1108,18 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
crc = lfs_crc(crc, &tag, sizeof(tag));
tag = lfs_frombe32(tag) ^ ptag;
- // next commit not yet programmed or we're not in valid range
+ // next commit not yet programmed?
if (!lfs_tag_isvalid(tag)) {
- dir->erased = (lfs_tag_type1(ptag) == LFS_TYPE_CRC &&
- dir->off % lfs->cfg->prog_size == 0);
+ maybeerased = true;
break;
+ // out of range?
} else if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) {
- dir->erased = false;
break;
}
ptag = tag;
- if (lfs_tag_type1(tag) == LFS_TYPE_CRC) {
+ if (lfs_tag_type2(tag) == LFS_TYPE_CCRC) {
// check the crc attr
uint32_t dcrc;
err = lfs_bd_read(lfs,
@@ -1078,7 +1127,6 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
dir->pair[0], off+sizeof(tag), &dcrc, sizeof(dcrc));
if (err) {
if (err == LFS_ERR_CORRUPT) {
- dir->erased = false;
break;
}
return err;
@@ -1086,7 +1134,6 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
dcrc = lfs_fromle32(dcrc);
if (crc != dcrc) {
- dir->erased = false;
break;
}
@@ -1113,21 +1160,19 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
continue;
}
+ // fcrc is only valid when last tag was a crc
+ hasfcrc = false;
+
// crc the entry first, hopefully leaving it in the cache
- for (lfs_off_t j = sizeof(tag); j < lfs_tag_dsize(tag); j++) {
- uint8_t dat;
- err = lfs_bd_read(lfs,
- NULL, &lfs->rcache, lfs->cfg->block_size,
- dir->pair[0], off+j, &dat, 1);
- if (err) {
- if (err == LFS_ERR_CORRUPT) {
- dir->erased = false;
- break;
- }
- return err;
+ err = lfs_bd_crc(lfs,
+ NULL, &lfs->rcache, lfs->cfg->block_size,
+ dir->pair[0], off+sizeof(tag),
+ lfs_tag_dsize(tag)-sizeof(tag), &crc);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ break;
}
-
- crc = lfs_crc(crc, &dat, 1);
+ return err;
}
// directory modification tags?
@@ -1154,12 +1199,24 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
dir->pair[0], off+sizeof(tag), &temptail, 8);
if (err) {
if (err == LFS_ERR_CORRUPT) {
- dir->erased = false;
break;
}
return err;
}
lfs_pair_fromle32(temptail);
+ } else if (lfs_tag_type3(tag) == LFS_TYPE_FCRC) {
+ err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, lfs->cfg->block_size,
+ dir->pair[0], off+sizeof(tag),
+ &fcrc, sizeof(fcrc));
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ break;
+ }
+ return err;
+ }
+
+ lfs_fcrc_fromle32(&fcrc);
+ hasfcrc = true;
}
// found a match for our fetcher?
@@ -1168,7 +1225,6 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
dir->pair[0], off+sizeof(tag)});
if (res < 0) {
if (res == LFS_ERR_CORRUPT) {
- dir->erased = false;
break;
}
return res;
@@ -1190,35 +1246,54 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
}
}
- // consider what we have good enough
- if (dir->off > 0) {
- // synthetic move
- if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair)) {
- if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(besttag)) {
- besttag |= 0x80000000;
- } else if (besttag != -1 &&
- lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(besttag)) {
- besttag -= LFS_MKTAG(0, 1, 0);
- }
- }
+ // found no valid commits?
+ if (dir->off == 0) {
+ // try the other block?
+ lfs_pair_swap(dir->pair);
+ dir->rev = revs[(r+1)%2];
+ continue;
+ }
- // found tag? or found best id?
- if (id) {
- *id = lfs_min(lfs_tag_id(besttag), dir->count);
+ // did we end on a valid commit? we may have an erased block
+ dir->erased = false;
+ if (maybeerased && hasfcrc && dir->off % lfs->cfg->prog_size == 0) {
+ // check for an fcrc matching the next prog's erased state; if
+ // this fails, most likely a previous prog was interrupted and we
+ // need a new erase
+ uint32_t fcrc_ = 0xffffffff;
+ int err = lfs_bd_crc(lfs,
+ NULL, &lfs->rcache, lfs->cfg->block_size,
+ dir->pair[0], dir->off, fcrc.size, &fcrc_);
+ if (err && err != LFS_ERR_CORRUPT) {
+ return err;
}
- if (lfs_tag_isvalid(besttag)) {
- return besttag;
- } else if (lfs_tag_id(besttag) < dir->count) {
- return LFS_ERR_NOENT;
- } else {
- return 0;
+ // found beginning of erased part?
+ dir->erased = (fcrc_ == fcrc.crc);
+ }
+
+ // synthetic move
+ if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair)) {
+ if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(besttag)) {
+ besttag |= 0x80000000;
+ } else if (besttag != -1 &&
+ lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(besttag)) {
+ besttag -= LFS_MKTAG(0, 1, 0);
}
}
- // failed, try the other block?
- lfs_pair_swap(dir->pair);
- dir->rev = revs[(r+1)%2];
+ // found tag? or found best id?
+ if (id) {
+ *id = lfs_min(lfs_tag_id(besttag), dir->count);
+ }
+
+ if (lfs_tag_isvalid(besttag)) {
+ return besttag;
+ } else if (lfs_tag_id(besttag) < dir->count) {
+ return LFS_ERR_NOENT;
+ } else {
+ return 0;
+ }
}
LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
@@ -1492,9 +1567,15 @@ static int lfs_dir_commitattr(lfs_t *lfs, struct lfs_commit *commit,
#endif
#ifndef LFS_READONLY
+
static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
// align to program units
- const lfs_off_t end = lfs_alignup(commit->off + 2*sizeof(uint32_t),
+ //
+ // this gets a bit complex as we have two types of crcs:
+ // - 5-word crc with fcrc to check following prog (middle of block)
+ // - 2-word crc with no following prog (end of block)
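+ // (5 words = fcrc tag + 2-word fcrc struct + ccrc tag + crc;
+ // 2 words = ccrc tag + crc)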
+ const lfs_off_t end = lfs_alignup(
+ lfs_min(commit->off + 5*sizeof(uint32_t), lfs->cfg->block_size),
lfs->cfg->prog_size);
lfs_off_t off1 = 0;
@@ -1504,89 +1585,116 @@ static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
// padding is not crced, which lets fetches skip padding but
// makes committing a bit more complicated
while (commit->off < end) {
- lfs_off_t off = commit->off + sizeof(lfs_tag_t);
- lfs_off_t noff = lfs_min(end - off, 0x3fe) + off;
+ lfs_off_t noff = (
+ lfs_min(end - (commit->off+sizeof(lfs_tag_t)), 0x3fe)
+ + (commit->off+sizeof(lfs_tag_t)));
+ // too large for crc tag? need padding commits
if (noff < end) {
- noff = lfs_min(noff, end - 2*sizeof(uint32_t));
+ noff = lfs_min(noff, end - 5*sizeof(uint32_t));
}
- // read erased state from next program unit
- lfs_tag_t tag = 0xffffffff;
- int err = lfs_bd_read(lfs,
- NULL, &lfs->rcache, sizeof(tag),
- commit->block, noff, &tag, sizeof(tag));
- if (err && err != LFS_ERR_CORRUPT) {
- return err;
- }
+ // space for fcrc?
+ uint8_t eperturb = -1;
+ if (noff >= end && noff <= lfs->cfg->block_size - lfs->cfg->prog_size) {
+ // first read the leading byte, this always contains a bit
+ // we can perturb to avoid writes that don't change the fcrc
+ int err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, lfs->cfg->prog_size,
+ commit->block, noff, &eperturb, 1);
+ if (err && err != LFS_ERR_CORRUPT) {
+ return err;
+ }
- // build crc tag
- bool reset = ~lfs_frombe32(tag) >> 31;
- tag = LFS_MKTAG(LFS_TYPE_CRC + reset, 0x3ff, noff - off);
+ // find the expected fcrc; don't bother avoiding a reread of the
+ // eperturb byte, it should still be in our cache
+ struct lfs_fcrc fcrc = {.size=lfs->cfg->prog_size, .crc=0xffffffff};
+ err = lfs_bd_crc(lfs,
+ NULL, &lfs->rcache, lfs->cfg->prog_size,
+ commit->block, noff, fcrc.size, &fcrc.crc);
+ if (err && err != LFS_ERR_CORRUPT) {
+ return err;
+ }
- // write out crc
- uint32_t footer[2];
- footer[0] = lfs_tobe32(tag ^ commit->ptag);
- commit->crc = lfs_crc(commit->crc, &footer[0], sizeof(footer[0]));
- footer[1] = lfs_tole32(commit->crc);
- err = lfs_bd_prog(lfs,
+ lfs_fcrc_tole32(&fcrc);
+ err = lfs_dir_commitattr(lfs, commit,
+ LFS_MKTAG(LFS_TYPE_FCRC, 0x3ff, sizeof(struct lfs_fcrc)),
+ &fcrc);
+ if (err) {
+ return err;
+ }
+ }
+
+ // build commit crc
+ struct {
+ lfs_tag_t tag;
+ uint32_t crc;
+ } ccrc;
+ lfs_tag_t ntag = LFS_MKTAG(
+ LFS_TYPE_CCRC + (((uint8_t)~eperturb) >> 7), 0x3ff,
+ noff - (commit->off+sizeof(lfs_tag_t)));
+ ccrc.tag = lfs_tobe32(ntag ^ commit->ptag);
+ commit->crc = lfs_crc(commit->crc, &ccrc.tag, sizeof(lfs_tag_t));
+ ccrc.crc = lfs_tole32(commit->crc);
+
+ int err = lfs_bd_prog(lfs,
&lfs->pcache, &lfs->rcache, false,
- commit->block, commit->off, &footer, sizeof(footer));
+ commit->block, commit->off, &ccrc, sizeof(ccrc));
if (err) {
return err;
}
// keep track of non-padding checksum to verify
if (off1 == 0) {
- off1 = commit->off + sizeof(uint32_t);
+ off1 = commit->off + sizeof(lfs_tag_t);
crc1 = commit->crc;
}
- commit->off += sizeof(tag)+lfs_tag_size(tag);
- commit->ptag = tag ^ ((lfs_tag_t)reset << 31);
- commit->crc = 0xffffffff; // reset crc for next "commit"
- }
+ commit->off = noff;
+ // perturb valid bit?
+ commit->ptag = ntag ^ ((lfs_tag_t)(0x80 & ~eperturb) << 24);
+ // reset crc for next commit
+ commit->crc = 0xffffffff;
- // flush buffers
- int err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false);
- if (err) {
- return err;
+ // manually flush here since we don't prog the padding, which
+ // confuses the caching layer
+ if (noff >= end || noff >= lfs->pcache.off + lfs->cfg->cache_size) {
+ // flush buffers
+ int err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false);
+ if (err) {
+ return err;
+ }
+ }
}
// successful commit, check checksums to make sure
+ //
+ // note that we don't need to check padding commits; worst
+ // case, if they are corrupted we would have had to compact anyway
lfs_off_t off = commit->begin;
- lfs_off_t noff = off1;
- while (off < end) {
- uint32_t crc = 0xffffffff;
- for (lfs_off_t i = off; i < noff+sizeof(uint32_t); i++) {
- // check against written crc, may catch blocks that
- // become readonly and match our commit size exactly
- if (i == off1 && crc != crc1) {
- return LFS_ERR_CORRUPT;
- }
-
- // leave it up to caching to make this efficient
- uint8_t dat;
- err = lfs_bd_read(lfs,
- NULL, &lfs->rcache, noff+sizeof(uint32_t)-i,
- commit->block, i, &dat, 1);
- if (err) {
- return err;
- }
+ uint32_t crc = 0xffffffff;
+ int err = lfs_bd_crc(lfs,
+ NULL, &lfs->rcache, off1+sizeof(uint32_t),
+ commit->block, off, off1-off, &crc);
+ if (err) {
+ return err;
+ }
- crc = lfs_crc(crc, &dat, 1);
- }
+ // check non-padding commits against known crc
+ if (crc != crc1) {
+ return LFS_ERR_CORRUPT;
+ }
- // detected write error?
- if (crc != 0) {
- return LFS_ERR_CORRUPT;
- }
+ // make sure to check crc in case we happen to pick
+ // up an unrelated crc (frozen block?)
+ err = lfs_bd_crc(lfs,
+ NULL, &lfs->rcache, sizeof(uint32_t),
+ commit->block, off1, sizeof(uint32_t), &crc);
+ if (err) {
+ return err;
+ }
- // skip padding
- off = lfs_min(end - noff, 0x3fe) + noff;
- if (off < end) {
- off = lfs_min(off, end - 2*sizeof(uint32_t));
- }
- noff = off + sizeof(uint32_t);
+ if (crc != 0) {
+ return LFS_ERR_CORRUPT;
}
return 0;
@@ -1927,11 +2035,20 @@ static int lfs_dir_splittingcompact(lfs_t *lfs, lfs_mdir_t *dir,
return err;
}
- // space is complicated, we need room for tail, crc, gstate,
- // cleanup delete, and we cap at half a block to give room
- // for metadata updates.
+ // space is complicated, we need room for:
+ //
+ // - tail: 4+2*4 = 12 bytes
+ // - gstate: 4+3*4 = 16 bytes
+ // - move delete: 4 = 4 bytes
+ // - crc: 4+4 = 8 bytes
+ // total = 40 bytes
+ //
+ // And we cap at half a block to avoid degenerate cases with
+ // nearly-full metadata blocks.
+ //
if (end - split < 0xff
- && size <= lfs_min(lfs->cfg->block_size - 36,
+ && size <= lfs_min(
+ lfs->cfg->block_size - 40,
lfs_alignup(
(lfs->cfg->metadata_max
? lfs->cfg->metadata_max
@@ -2595,11 +2712,6 @@ static int lfs_dir_rawseek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
dir->id = (off > 0 && lfs_pair_cmp(dir->head, lfs->root) == 0);
while (off > 0) {
- int diff = lfs_min(dir->m.count - dir->id, off);
- dir->id += diff;
- dir->pos += diff;
- off -= diff;
-
if (dir->id == dir->m.count) {
if (!dir->m.split) {
return LFS_ERR_INVAL;
@@ -2612,6 +2724,11 @@ static int lfs_dir_rawseek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
dir->id = 0;
}
+
+ int diff = lfs_min(dir->m.count - dir->id, off);
+ dir->id += diff;
+ dir->pos += diff;
+ off -= diff;
}
return 0;
@@ -3348,7 +3465,7 @@ static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
// find out which block we're extending from
int err = lfs_ctz_find(lfs, NULL, &file->cache,
file->ctz.head, file->ctz.size,
- file->pos-1, &file->block, &file->off);
+ file->pos-1, &file->block, &(lfs_off_t){0});
if (err) {
file->flags |= LFS_F_ERRED;
return err;
@@ -3526,26 +3643,55 @@ static int lfs_file_rawtruncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
lfs_off_t pos = file->pos;
lfs_off_t oldsize = lfs_file_rawsize(lfs, file);
if (size < oldsize) {
- // need to flush since directly changing metadata
- int err = lfs_file_flush(lfs, file);
- if (err) {
- return err;
- }
+ // revert to inline file?
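+ // (this threshold mirrors the inline-file limit: inline data must
+ // fit in a tag's size field (0x3fe), in the cache, and in an
+ // eighth of a metadata block)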
+ if (size <= lfs_min(0x3fe, lfs_min(
+ lfs->cfg->cache_size,
+ (lfs->cfg->metadata_max ?
+ lfs->cfg->metadata_max : lfs->cfg->block_size) / 8))) {
+ // flush+seek to head
+ lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_SET);
+ if (res < 0) {
+ return (int)res;
+ }
- // lookup new head in ctz skip list
- err = lfs_ctz_find(lfs, NULL, &file->cache,
- file->ctz.head, file->ctz.size,
- size, &file->block, &file->off);
- if (err) {
- return err;
- }
+ // read our data into rcache temporarily
+ lfs_cache_drop(lfs, &lfs->rcache);
+ res = lfs_file_flushedread(lfs, file,
+ lfs->rcache.buffer, size);
+ if (res < 0) {
+ return (int)res;
+ }
- // need to set pos/block/off consistently so seeking back to
- // the old position does not get confused
- file->pos = size;
- file->ctz.head = file->block;
- file->ctz.size = size;
- file->flags |= LFS_F_DIRTY | LFS_F_READING;
+ file->ctz.head = LFS_BLOCK_INLINE;
+ file->ctz.size = size;
+ file->flags |= LFS_F_DIRTY | LFS_F_READING | LFS_F_INLINE;
+ file->cache.block = file->ctz.head;
+ file->cache.off = 0;
+ file->cache.size = lfs->cfg->cache_size;
+ memcpy(file->cache.buffer, lfs->rcache.buffer, size);
+
+ } else {
+ // need to flush since directly changing metadata
+ int err = lfs_file_flush(lfs, file);
+ if (err) {
+ return err;
+ }
+
+ // lookup new head in ctz skip list
+ err = lfs_ctz_find(lfs, NULL, &file->cache,
+ file->ctz.head, file->ctz.size,
+ size-1, &file->block, &(lfs_off_t){0});
+ if (err) {
+ return err;
+ }
+
+ // need to set pos/block/off consistently so seeking back to
+ // the old position does not get confused
+ file->pos = size;
+ file->ctz.head = file->block;
+ file->ctz.size = size;
+ file->flags |= LFS_F_DIRTY | LFS_F_READING;
+ }
} else if (size > oldsize) {
// flush+seek if not already at end
lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_END);
@@ -3905,6 +4051,12 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
lfs->cfg = cfg;
int err = 0;
+ // check that bool is a truthy-preserving type
+ //
+ // note the most common reason for this failure is a pre-C99 compiler,
+ // which littlefs currently does not support
+ LFS_ASSERT((bool)0x80000000);
+
// validate that the lfs-cfg sizes were initiated properly before
// performing any arithmetic logics with them
LFS_ASSERT(lfs->cfg->read_size != 0);
@@ -3917,7 +4069,10 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->prog_size == 0);
LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0);
- // check that the block size is large enough to fit ctz pointers
+ // check that the block size is large enough to fit all ctz pointers
+ LFS_ASSERT(lfs->cfg->block_size >= 128);
+ // this is the exact calculation for all ctz pointers, if this fails
+ // and the simpler assert above does not, math must be broken
LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
<= lfs->cfg->block_size);
@@ -4101,14 +4256,23 @@ static int lfs_rawmount(lfs_t *lfs, const struct lfs_config *cfg) {
// scan directory blocks for superblock and any global updates
lfs_mdir_t dir = {.tail = {0, 1}};
- lfs_block_t cycle = 0;
+ lfs_block_t tortoise[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
+ lfs_size_t tortoise_i = 1;
+ lfs_size_t tortoise_period = 1;
while (!lfs_pair_isnull(dir.tail)) {
- if (cycle >= lfs->cfg->block_count/2) {
- // loop detected
+ // detect cycles with Brent's algorithm
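+ // (the hare, dir.tail, walks the tail list while the tortoise
+ // stays put; whenever the power-of-two period expires the
+ // tortoise teleports to the hare and the period doubles, so any
+ // cycle is detected in O(cycle length) fetches without knowing
+ // block_count)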
+ if (lfs_pair_issync(dir.tail, tortoise)) {
+ LFS_WARN("Cycle detected in tail list");
err = LFS_ERR_CORRUPT;
goto cleanup;
}
- cycle += 1;
+ if (tortoise_i == tortoise_period) {
+ tortoise[0] = dir.tail[0];
+ tortoise[1] = dir.tail[1];
+ tortoise_i = 0;
+ tortoise_period *= 2;
+ }
+ tortoise_i += 1;
// fetch next block in tail list
lfs_stag_t tag = lfs_dir_fetchmatch(lfs, &dir, dir.tail,
@@ -4144,12 +4308,29 @@ static int lfs_rawmount(lfs_t *lfs, const struct lfs_config *cfg) {
uint16_t minor_version = (0xffff & (superblock.version >> 0));
if ((major_version != LFS_DISK_VERSION_MAJOR ||
minor_version > LFS_DISK_VERSION_MINOR)) {
- LFS_ERROR("Invalid version v%"PRIu16".%"PRIu16,
- major_version, minor_version);
+ LFS_ERROR("Invalid version "
+ "v%"PRIu16".%"PRIu16" != v%"PRIu16".%"PRIu16,
+ major_version, minor_version,
+ LFS_DISK_VERSION_MAJOR, LFS_DISK_VERSION_MINOR);
err = LFS_ERR_INVAL;
goto cleanup;
}
+ // found older minor version? set an in-device only bit in the
+ // gstate so we know we need to rewrite the superblock before
+ // the first write
+ if (minor_version < LFS_DISK_VERSION_MINOR) {
+ LFS_DEBUG("Found older minor version "
+ "v%"PRIu16".%"PRIu16" < v%"PRIu16".%"PRIu16,
+ major_version, minor_version,
+ LFS_DISK_VERSION_MAJOR, LFS_DISK_VERSION_MINOR);
+ #ifndef LFS_READONLY
+ // note this bit is reserved on disk, so fetching more gstate
+ // will not interfere here
+ lfs_fs_prepsuperblock(lfs, true);
+ #endif
+ }
+
// check superblock configuration
if (superblock.name_max) {
if (superblock.name_max > lfs->name_max) {
@@ -4259,13 +4440,22 @@ int lfs_fs_rawtraverse(lfs_t *lfs,
}
#endif
- lfs_block_t cycle = 0;
+ lfs_block_t tortoise[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
+ lfs_size_t tortoise_i = 1;
+ lfs_size_t tortoise_period = 1;
while (!lfs_pair_isnull(dir.tail)) {
- if (cycle >= lfs->cfg->block_count/2) {
- // loop detected
+ // detect cycles with Brent's algorithm
+ if (lfs_pair_issync(dir.tail, tortoise)) {
+ LFS_WARN("Cycle detected in tail list");
return LFS_ERR_CORRUPT;
}
- cycle += 1;
+ if (tortoise_i == tortoise_period) {
+ tortoise[0] = dir.tail[0];
+ tortoise[1] = dir.tail[1];
+ tortoise_i = 0;
+ tortoise_period *= 2;
+ }
+ tortoise_i += 1;
for (int i = 0; i < 2; i++) {
int err = cb(data, dir.tail[i]);
@@ -4344,13 +4534,22 @@ static int lfs_fs_pred(lfs_t *lfs,
// iterate over all directory entries
pdir->tail[0] = 0;
pdir->tail[1] = 1;
- lfs_block_t cycle = 0;
+ lfs_block_t tortoise[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
+ lfs_size_t tortoise_i = 1;
+ lfs_size_t tortoise_period = 1;
while (!lfs_pair_isnull(pdir->tail)) {
- if (cycle >= lfs->cfg->block_count/2) {
- // loop detected
+ // detect cycles with Brent's algorithm
+ if (lfs_pair_issync(pdir->tail, tortoise)) {
+ LFS_WARN("Cycle detected in tail list");
return LFS_ERR_CORRUPT;
}
- cycle += 1;
+ if (tortoise_i == tortoise_period) {
+ tortoise[0] = pdir->tail[0];
+ tortoise[1] = pdir->tail[1];
+ tortoise_i = 0;
+ tortoise_period *= 2;
+ }
+ tortoise_i += 1;
if (lfs_pair_cmp(pdir->tail, pair) == 0) {
return 0;
@@ -4400,13 +4599,22 @@ static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
// use fetchmatch with callback to find pairs
parent->tail[0] = 0;
parent->tail[1] = 1;
- lfs_block_t cycle = 0;
+ lfs_block_t tortoise[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
+ lfs_size_t tortoise_i = 1;
+ lfs_size_t tortoise_period = 1;
while (!lfs_pair_isnull(parent->tail)) {
- if (cycle >= lfs->cfg->block_count/2) {
- // loop detected
+ // detect cycles with Brent's algorithm
+ if (lfs_pair_issync(parent->tail, tortoise)) {
+ LFS_WARN("Cycle detected in tail list");
return LFS_ERR_CORRUPT;
}
- cycle += 1;
+ if (tortoise_i == tortoise_period) {
+ tortoise[0] = parent->tail[0];
+ tortoise[1] = parent->tail[1];
+ tortoise_i = 0;
+ tortoise_period *= 2;
+ }
+ tortoise_i += 1;
lfs_stag_t tag = lfs_dir_fetchmatch(lfs, parent, parent->tail,
LFS_MKTAG(0x7ff, 0, 0x3ff),
@@ -4423,9 +4631,17 @@ static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
}
#endif
+#ifndef LFS_READONLY
+static void lfs_fs_prepsuperblock(lfs_t *lfs, bool needssuperblock) {
+ lfs->gstate.tag = (lfs->gstate.tag & ~LFS_MKTAG(0, 0, 0x200))
+ | (uint32_t)needssuperblock << 9;
+}
+#endif
+
#ifndef LFS_READONLY
static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
- LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0 || orphans >= 0);
+ LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0x000 || orphans >= 0);
+ LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) < 0x1ff || orphans <= 0);
lfs->gstate.tag += orphans;
lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x800, 0, 0)) |
((uint32_t)lfs_gstate_hasorphans(&lfs->gstate) << 31));
@@ -4444,6 +4660,45 @@ static void lfs_fs_prepmove(lfs_t *lfs,
}
#endif
+#ifndef LFS_READONLY
+static int lfs_fs_desuperblock(lfs_t *lfs) {
+ if (!lfs_gstate_needssuperblock(&lfs->gstate)) {
+ return 0;
+ }
+
+ LFS_DEBUG("Rewriting superblock {0x%"PRIx32", 0x%"PRIx32"}",
+ lfs->root[0],
+ lfs->root[1]);
+
+ lfs_mdir_t root;
+ int err = lfs_dir_fetch(lfs, &root, lfs->root);
+ if (err) {
+ return err;
+ }
+
+ // write a new superblock
+ lfs_superblock_t superblock = {
+ .version = LFS_DISK_VERSION,
+ .block_size = lfs->cfg->block_size,
+ .block_count = lfs->cfg->block_count,
+ .name_max = lfs->name_max,
+ .file_max = lfs->file_max,
+ .attr_max = lfs->attr_max,
+ };
+
+ lfs_superblock_tole32(&superblock);
+ err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+ &superblock}));
+ if (err) {
+ return err;
+ }
+
+ lfs_fs_prepsuperblock(lfs, false);
+ return 0;
+}
+#endif
+
#ifndef LFS_READONLY
static int lfs_fs_demove(lfs_t *lfs) {
if (!lfs_gstate_hasmove(&lfs->gdisk)) {
@@ -4456,6 +4711,10 @@ static int lfs_fs_demove(lfs_t *lfs) {
lfs->gdisk.pair[1],
lfs_tag_id(lfs->gdisk.tag));
+ // no other gstate is supported at this time; if we find anything else,
+ // something most likely went wrong in gstate calculation
+ LFS_ASSERT(lfs_tag_type3(lfs->gdisk.tag) == LFS_TYPE_DELETE);
+
// fetch and delete the moved entry
lfs_mdir_t movedir;
int err = lfs_dir_fetch(lfs, &movedir, lfs->gdisk.pair);
@@ -4482,12 +4741,20 @@ static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss) {
return 0;
}
- int8_t found = 0;
-restart:
- {
+ // Check for orphans in two separate passes:
+ // - pass 0 for half-orphans (relocations)
+ // - pass 1 for full-orphans (removes/renames)
+ //
+ // Two separate passes are needed as half-orphans can contain outdated
+ // references to full-orphans, effectively hiding them from the deorphan
+ // search.
+ //
+ int pass = 0;
+ while (pass < 2) {
// Fix any orphans
lfs_mdir_t pdir = {.split = true, .tail = {0, 1}};
lfs_mdir_t dir;
+ bool moreorphans = false;
// iterate over all directory entries
while (!lfs_pair_isnull(pdir.tail)) {
@@ -4505,42 +4772,7 @@ static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss) {
return tag;
}
- // note we only check for full orphans if we may have had a
- // power-loss, otherwise orphans are created intentionally
- // during operations such as lfs_mkdir
- if (tag == LFS_ERR_NOENT && powerloss) {
- // we are an orphan
- LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}",
- pdir.tail[0], pdir.tail[1]);
-
- // steal state
- err = lfs_dir_getgstate(lfs, &dir, &lfs->gdelta);
- if (err) {
- return err;
- }
-
- // steal tail
- lfs_pair_tole32(dir.tail);
- int state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
- {LFS_MKTAG(LFS_TYPE_TAIL + dir.split, 0x3ff, 8),
- dir.tail}));
- lfs_pair_fromle32(dir.tail);
- if (state < 0) {
- return state;
- }
-
- found += 1;
-
- // did our commit create more orphans?
- if (state == LFS_OK_ORPHANED) {
- goto restart;
- }
-
- // refetch tail
- continue;
- }
-
- if (tag != LFS_ERR_NOENT) {
+ if (pass == 0 && tag != LFS_ERR_NOENT) {
lfs_block_t pair[2];
lfs_stag_t state = lfs_dir_get(lfs, &parent,
LFS_MKTAG(0x7ff, 0x3ff, 0), tag, pair);
@@ -4549,7 +4781,7 @@ static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss) {
}
lfs_pair_fromle32(pair);
- if (!lfs_pair_sync(pair, pdir.tail)) {
+ if (!lfs_pair_issync(pair, pdir.tail)) {
// we have desynced
LFS_DEBUG("Fixing half-orphan "
"{0x%"PRIx32", 0x%"PRIx32"} "
@@ -4579,33 +4811,69 @@ static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss) {
return state;
}
- found += 1;
-
// did our commit create more orphans?
if (state == LFS_OK_ORPHANED) {
- goto restart;
+ moreorphans = true;
}
// refetch tail
continue;
}
}
+
+ // note we only check for full orphans if we may have had a
+ // power-loss, otherwise orphans are created intentionally
+ // during operations such as lfs_mkdir
+ if (pass == 1 && tag == LFS_ERR_NOENT && powerloss) {
+ // we are an orphan
+ LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}",
+ pdir.tail[0], pdir.tail[1]);
+
+ // steal state
+ err = lfs_dir_getgstate(lfs, &dir, &lfs->gdelta);
+ if (err) {
+ return err;
+ }
+
+ // steal tail
+ lfs_pair_tole32(dir.tail);
+ int state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_TAIL + dir.split, 0x3ff, 8),
+ dir.tail}));
+ lfs_pair_fromle32(dir.tail);
+ if (state < 0) {
+ return state;
+ }
+
+ // did our commit create more orphans?
+ if (state == LFS_OK_ORPHANED) {
+ moreorphans = true;
+ }
+
+ // refetch tail
+ continue;
+ }
}
pdir = dir;
}
+
+ pass = moreorphans ? 0 : pass+1;
}
// mark orphans as fixed
- return lfs_fs_preporphans(lfs, -lfs_min(
- lfs_gstate_getorphans(&lfs->gstate),
- found));
+ return lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate));
}
#endif
#ifndef LFS_READONLY
static int lfs_fs_forceconsistency(lfs_t *lfs) {
- int err = lfs_fs_demove(lfs);
+ int err = lfs_fs_desuperblock(lfs);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_fs_demove(lfs);
if (err) {
return err;
}
@@ -4619,6 +4887,36 @@ static int lfs_fs_forceconsistency(lfs_t *lfs) {
}
#endif
+#ifndef LFS_READONLY
+int lfs_fs_rawmkconsistent(lfs_t *lfs) {
+ // lfs_fs_forceconsistency does most of the work here
+ int err = lfs_fs_forceconsistency(lfs);
+ if (err) {
+ return err;
+ }
+
+ // do we have any pending gstate?
+ lfs_gstate_t delta = {0};
+ lfs_gstate_xor(&delta, &lfs->gdisk);
+ lfs_gstate_xor(&delta, &lfs->gstate);
+ if (!lfs_gstate_iszero(&delta)) {
+ // lfs_dir_commit will implicitly write out any pending gstate
+ lfs_mdir_t root;
+ err = lfs_dir_fetch(lfs, &root, lfs->root);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_dir_commit(lfs, &root, NULL, 0);
+ if (err) {
+ return err;
+ }
+ }
+
+ return 0;
+}
+#endif
+
static int lfs_fs_size_count(void *p, lfs_block_t block) {
(void)block;
lfs_size_t *size = p;
@@ -5784,6 +6082,22 @@ int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) {
return err;
}
+#ifndef LFS_READONLY
+int lfs_fs_mkconsistent(lfs_t *lfs) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_fs_mkconsistent(%p)", (void*)lfs);
+
+ err = lfs_fs_rawmkconsistent(lfs);
+
+ LFS_TRACE("lfs_fs_mkconsistent -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
#ifdef LFS_MIGRATE
int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg) {
int err = LFS_LOCK(cfg);
diff --git a/lfs.h b/lfs.h
index 2bce17f5..eb5c355d 100644
--- a/lfs.h
+++ b/lfs.h
@@ -8,8 +8,6 @@
#ifndef LFS_H
#define LFS_H
-#include <stdint.h>
-#include <stdbool.h>
#include "lfs_util.h"
#ifdef __cplusplus
@@ -23,14 +21,14 @@ extern "C"
// Software library version
// Major (top-nibble), incremented on backwards incompatible changes
// Minor (bottom-nibble), incremented on feature additions
-#define LFS_VERSION 0x00020005
+#define LFS_VERSION 0x00020006
#define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16))
#define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >> 0))
// Version of On-disk data structures
// Major (top-nibble), incremented on backwards incompatible changes
// Minor (bottom-nibble), incremented on feature additions
-#define LFS_DISK_VERSION 0x00020000
+#define LFS_DISK_VERSION 0x00020001
#define LFS_DISK_VERSION_MAJOR (0xffff & (LFS_DISK_VERSION >> 16))
#define LFS_DISK_VERSION_MINOR (0xffff & (LFS_DISK_VERSION >> 0))
@@ -114,6 +112,8 @@ enum lfs_type {
LFS_TYPE_SOFTTAIL = 0x600,
LFS_TYPE_HARDTAIL = 0x601,
LFS_TYPE_MOVESTATE = 0x7ff,
+ LFS_TYPE_CCRC = 0x500,
+ LFS_TYPE_FCRC = 0x5ff,
// internal chip sources
LFS_FROM_NOOP = 0x000,
@@ -676,6 +676,18 @@ lfs_ssize_t lfs_fs_size(lfs_t *lfs);
// Returns a negative error code on failure.
int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data);
+#ifndef LFS_READONLY
+// Attempt to make the filesystem consistent and ready for writing
+//
+// Calling this function is not required; consistency will be implicitly
+// enforced on the first operation that writes to the filesystem. This
+// function simply allows the work to be performed earlier and without
+// other filesystem changes.
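+//
+// For example, calling this immediately after lfs_mount moves any
+// pending power-loss cleanup work to mount time instead of delaying
+// it to the first write.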
+//
+// Returns a negative error code on failure.
+int lfs_fs_mkconsistent(lfs_t *lfs);
+#endif
+
#ifndef LFS_READONLY
#ifdef LFS_MIGRATE
// Attempts to migrate a previous version of littlefs
diff --git a/lfs_util.h b/lfs_util.h
index 13e93961..7f79defd 100644
--- a/lfs_util.h
+++ b/lfs_util.h
@@ -23,6 +23,7 @@
// System includes
#include <stdint.h>
#include <stdbool.h>
+#include <stddef.h>
#include <string.h>
#include <inttypes.h>
diff --git a/runners/bench_runner.c b/runners/bench_runner.c
new file mode 100644
index 00000000..ba791b25
--- /dev/null
+++ b/runners/bench_runner.c
@@ -0,0 +1,2051 @@
+/*
+ * Runner for littlefs benchmarks
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 199309L
+#endif
+
+#include "runners/bench_runner.h"
+#include "bd/lfs_emubd.h"
+
+#include <ctype.h>
+#include <errno.h>
+#include <execinfo.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <time.h>
+#include <unistd.h>
+
+
+// some helpers
+
+// append to an array with amortized doubling
+void *mappend(void **p,
+ size_t size,
+ size_t *count,
+ size_t *capacity) {
+ uint8_t *p_ = *p;
+ size_t count_ = *count;
+ size_t capacity_ = *capacity;
+
+ count_ += 1;
+ if (count_ > capacity_) {
+ capacity_ = (2*capacity_ < 4) ? 4 : 2*capacity_;
+
+ p_ = realloc(p_, capacity_*size);
+ if (!p_) {
+ return NULL;
+ }
+ }
+
+ *p = p_;
+ *count = count_;
+ *capacity = capacity_;
+ return &p_[(count_-1)*size];
+}
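+//
+// usage sketch (hypothetical, not used verbatim below):
+//
+//   int *xs = NULL;
+//   size_t count = 0, capacity = 0;
+//   int *slot = mappend((void**)&xs, sizeof(int), &count, &capacity);
+//   *slot = 42;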
+
+// a quick self-terminating text-safe varint scheme
+static void leb16_print(uintmax_t x) {
+ // allow 'w' to indicate negative numbers
+ if ((intmax_t)x < 0) {
+ printf("w");
+ x = -x;
+ }
+
+ while (true) {
+ char nibble = (x & 0xf) | (x > 0xf ? 0x10 : 0);
+ printf("%c", (nibble < 10) ? '0'+nibble : 'a'+nibble-10);
+ if (x <= 0xf) {
+ break;
+ }
+ x >>= 4;
+ }
+}
+
+static uintmax_t leb16_parse(const char *s, char **tail) {
+ bool neg = false;
+ uintmax_t x = 0;
+ if (tail) {
+ *tail = (char*)s;
+ }
+
+ if (s[0] == 'w') {
+ neg = true;
+ s = s+1;
+ }
+
+ size_t i = 0;
+ while (true) {
+ uintmax_t nibble = s[i];
+ if (nibble >= '0' && nibble <= '9') {
+ nibble = nibble - '0';
+ } else if (nibble >= 'a' && nibble <= 'v') {
+ nibble = nibble - 'a' + 10;
+ } else {
+ // invalid?
+ return 0;
+ }
+
+ x |= (nibble & 0xf) << (4*i);
+ i += 1;
+ if (!(nibble & 0x10)) {
+ s = s + i;
+ break;
+ }
+ }
+
+ if (tail) {
+ *tail = (char*)s;
+ }
+ return neg ? -x : x;
+}
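+//
+// for example, leb16_print(0x123) emits "ji1" -- low nibble first,
+// bit 0x10 marking continuation, nibbles mapped onto '0'-'9','a'-'v' --
+// and leb16_parse("ji1", NULL) returns 0x123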
+
+
+
+// bench_runner types
+
+typedef struct bench_geometry {
+ const char *name;
+ bench_define_t defines[BENCH_GEOMETRY_DEFINE_COUNT];
+} bench_geometry_t;
+
+typedef struct bench_id {
+ const char *name;
+ const bench_define_t *defines;
+ size_t define_count;
+} bench_id_t;
+
+
+// bench suites are linked into a custom ld section
+extern struct bench_suite __start__bench_suites;
+extern struct bench_suite __stop__bench_suites;
+
+const struct bench_suite *bench_suites = &__start__bench_suites;
+#define BENCH_SUITE_COUNT \
+ ((size_t)(&__stop__bench_suites - &__start__bench_suites))
+
+
+// bench define management
+typedef struct bench_define_map {
+ const bench_define_t *defines;
+ size_t count;
+} bench_define_map_t;
+
+typedef struct bench_define_names {
+ const char *const *names;
+ size_t count;
+} bench_define_names_t;
+
+intmax_t bench_define_lit(void *data) {
+ return (intptr_t)data;
+}
+
+#define BENCH_CONST(x) {bench_define_lit, (void*)(uintptr_t)(x)}
+#define BENCH_LIT(x) ((bench_define_t)BENCH_CONST(x))
+
+
+#define BENCH_DEF(k, v) \
+ intmax_t bench_define_##k(void *data) { \
+ (void)data; \
+ return v; \
+ }
+
+ BENCH_IMPLICIT_DEFINES
+#undef BENCH_DEF
+
+#define BENCH_DEFINE_MAP_OVERRIDE 0
+#define BENCH_DEFINE_MAP_EXPLICIT 1
+#define BENCH_DEFINE_MAP_PERMUTATION 2
+#define BENCH_DEFINE_MAP_GEOMETRY 3
+#define BENCH_DEFINE_MAP_IMPLICIT 4
+#define BENCH_DEFINE_MAP_COUNT 5
+
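+// when resolving a define, the maps are searched in the order given
+// above, so overrides take priority over explicit and per-permutation
+// defines, which in turn take priority over geometry and implicit
+// defaults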
+bench_define_map_t bench_define_maps[BENCH_DEFINE_MAP_COUNT] = {
+ [BENCH_DEFINE_MAP_IMPLICIT] = {
+ (const bench_define_t[BENCH_IMPLICIT_DEFINE_COUNT]) {
+ #define BENCH_DEF(k, v) \
+ [k##_i] = {bench_define_##k, NULL},
+
+ BENCH_IMPLICIT_DEFINES
+ #undef BENCH_DEF
+ },
+ BENCH_IMPLICIT_DEFINE_COUNT,
+ },
+};
+
+#define BENCH_DEFINE_NAMES_SUITE 0
+#define BENCH_DEFINE_NAMES_IMPLICIT 1
+#define BENCH_DEFINE_NAMES_COUNT 2
+
+bench_define_names_t bench_define_names[BENCH_DEFINE_NAMES_COUNT] = {
+ [BENCH_DEFINE_NAMES_IMPLICIT] = {
+ (const char *const[BENCH_IMPLICIT_DEFINE_COUNT]){
+ #define BENCH_DEF(k, v) \
+ [k##_i] = #k,
+
+ BENCH_IMPLICIT_DEFINES
+ #undef BENCH_DEF
+ },
+ BENCH_IMPLICIT_DEFINE_COUNT,
+ },
+};
+
+intmax_t *bench_define_cache;
+size_t bench_define_cache_count;
+unsigned *bench_define_cache_mask;
+
+const char *bench_define_name(size_t define) {
+ // lookup in our bench names
+ for (size_t i = 0; i < BENCH_DEFINE_NAMES_COUNT; i++) {
+ if (define < bench_define_names[i].count
+ && bench_define_names[i].names
+ && bench_define_names[i].names[define]) {
+ return bench_define_names[i].names[define];
+ }
+ }
+
+ return NULL;
+}
+
+bool bench_define_ispermutation(size_t define) {
+ // is this define specific to the permutation?
+ for (size_t i = 0; i < BENCH_DEFINE_MAP_IMPLICIT; i++) {
+ if (define < bench_define_maps[i].count
+ && bench_define_maps[i].defines[define].cb) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+intmax_t bench_define(size_t define) {
+ // is the define in our cache?
+ if (define < bench_define_cache_count
+ && (bench_define_cache_mask[define/(8*sizeof(unsigned))]
+ & (1 << (define%(8*sizeof(unsigned)))))) {
+ return bench_define_cache[define];
+ }
+
+ // lookup in our bench defines
+ for (size_t i = 0; i < BENCH_DEFINE_MAP_COUNT; i++) {
+ if (define < bench_define_maps[i].count
+ && bench_define_maps[i].defines[define].cb) {
+ intmax_t v = bench_define_maps[i].defines[define].cb(
+ bench_define_maps[i].defines[define].data);
+
+ // insert into cache!
+ bench_define_cache[define] = v;
+ bench_define_cache_mask[define / (8*sizeof(unsigned))]
+ |= 1 << (define%(8*sizeof(unsigned)));
+
+ return v;
+ }
+ }
+
+
+ // not found?
+ const char *name = bench_define_name(define);
+ fprintf(stderr, "error: undefined define %s (%zd)\n",
+ name ? name : "(unknown)",
+ define);
+ assert(false);
+ exit(-1);
+}
+
+void bench_define_flush(void) {
+ // clear cache between permutations
+ memset(bench_define_cache_mask, 0,
+ sizeof(unsigned)*(
+ (bench_define_cache_count+(8*sizeof(unsigned))-1)
+ / (8*sizeof(unsigned))));
+}
+
+// geometry updates
+const bench_geometry_t *bench_geometry = NULL;
+
+void bench_define_geometry(const bench_geometry_t *geometry) {
+ bench_define_maps[BENCH_DEFINE_MAP_GEOMETRY] = (bench_define_map_t){
+ geometry->defines, BENCH_GEOMETRY_DEFINE_COUNT};
+}
+
+// override updates
+typedef struct bench_override {
+ const char *name;
+ const intmax_t *defines;
+ size_t permutations;
+} bench_override_t;
+
+const bench_override_t *bench_overrides = NULL;
+size_t bench_override_count = 0;
+
+bench_define_t *bench_override_defines = NULL;
+size_t bench_override_define_count = 0;
+size_t bench_override_define_permutations = 1;
+size_t bench_override_define_capacity = 0;
+
+// suite/perm updates
+void bench_define_suite(const struct bench_suite *suite) {
+ bench_define_names[BENCH_DEFINE_NAMES_SUITE] = (bench_define_names_t){
+ suite->define_names, suite->define_count};
+
+ // make sure our cache is large enough
+ if (lfs_max(suite->define_count, BENCH_IMPLICIT_DEFINE_COUNT)
+ > bench_define_cache_count) {
+ // align to power of two to avoid any superlinear growth
+ size_t ncount = 1 << lfs_npw2(
+ lfs_max(suite->define_count, BENCH_IMPLICIT_DEFINE_COUNT));
+ bench_define_cache = realloc(bench_define_cache, ncount*sizeof(intmax_t));
+ bench_define_cache_mask = realloc(bench_define_cache_mask,
+ sizeof(unsigned)*(
+ (ncount+(8*sizeof(unsigned))-1)
+ / (8*sizeof(unsigned))));
+ bench_define_cache_count = ncount;
+ }
+
+ // map any overrides
+ if (bench_override_count > 0) {
+ // first figure out the total size of override permutations
+ size_t count = 0;
+ size_t permutations = 1;
+ for (size_t i = 0; i < bench_override_count; i++) {
+ for (size_t d = 0;
+ d < lfs_max(
+ suite->define_count,
+ BENCH_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ // define name match?
+ const char *name = bench_define_name(d);
+ if (name && strcmp(name, bench_overrides[i].name) == 0) {
+ count = lfs_max(count, d+1);
+ permutations *= bench_overrides[i].permutations;
+ break;
+ }
+ }
+ }
+ bench_override_define_count = count;
+ bench_override_define_permutations = permutations;
+
+ // make sure our override arrays are big enough
+ if (count * permutations > bench_override_define_capacity) {
+ // align to power of two to avoid any superlinear growth
+ size_t ncapacity = 1 << lfs_npw2(count * permutations);
+ bench_override_defines = realloc(
+ bench_override_defines,
+ sizeof(bench_define_t)*ncapacity);
+ bench_override_define_capacity = ncapacity;
+ }
+
+ // zero unoverridden defines
+ memset(bench_override_defines, 0,
+ sizeof(bench_define_t) * count * permutations);
+
+ // compute permutations
+ size_t p = 1;
+ for (size_t i = 0; i < bench_override_count; i++) {
+ for (size_t d = 0;
+ d < lfs_max(
+ suite->define_count,
+ BENCH_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ // define name match?
+ const char *name = bench_define_name(d);
+ if (name && strcmp(name, bench_overrides[i].name) == 0) {
+ // scatter the define permutations based on already
+ // seen permutations
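+ // (a mixed-radix counter: with two overridden defines of
+ // 2 and 3 values, permutation j selects values (j/1)%2 and
+ // (j/2)%3, covering all 6 combinations exactly once)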
+ for (size_t j = 0; j < permutations; j++) {
+ bench_override_defines[j*count + d] = BENCH_LIT(
+ bench_overrides[i].defines[(j/p)
+ % bench_overrides[i].permutations]);
+ }
+
+ // keep track of how many permutations we've seen so far
+ p *= bench_overrides[i].permutations;
+ break;
+ }
+ }
+ }
+ }
+}
+
+void bench_define_perm(
+ const struct bench_suite *suite,
+ const struct bench_case *case_,
+ size_t perm) {
+ if (case_->defines) {
+ bench_define_maps[BENCH_DEFINE_MAP_PERMUTATION] = (bench_define_map_t){
+ case_->defines + perm*suite->define_count,
+ suite->define_count};
+ } else {
+ bench_define_maps[BENCH_DEFINE_MAP_PERMUTATION] = (bench_define_map_t){
+ NULL, 0};
+ }
+}
+
+void bench_define_override(size_t perm) {
+ bench_define_maps[BENCH_DEFINE_MAP_OVERRIDE] = (bench_define_map_t){
+ bench_override_defines + perm*bench_override_define_count,
+ bench_override_define_count};
+}
+
+void bench_define_explicit(
+ const bench_define_t *defines,
+ size_t define_count) {
+ bench_define_maps[BENCH_DEFINE_MAP_EXPLICIT] = (bench_define_map_t){
+ defines, define_count};
+}
+
+void bench_define_cleanup(void) {
+ // bench define management can allocate a few things
+ free(bench_define_cache);
+ free(bench_define_cache_mask);
+ free(bench_override_defines);
+}
+
+
+
+// bench state
+extern const bench_geometry_t *bench_geometries;
+extern size_t bench_geometry_count;
+
+const bench_id_t *bench_ids = (const bench_id_t[]) {
+ {NULL, NULL, 0},
+};
+size_t bench_id_count = 1;
+
+size_t bench_step_start = 0;
+size_t bench_step_stop = -1;
+size_t bench_step_step = 1;
+
+const char *bench_disk_path = NULL;
+const char *bench_trace_path = NULL;
+bool bench_trace_backtrace = false;
+uint32_t bench_trace_period = 0;
+uint32_t bench_trace_freq = 0;
+FILE *bench_trace_file = NULL;
+uint32_t bench_trace_cycles = 0;
+uint64_t bench_trace_time = 0;
+uint64_t bench_trace_open_time = 0;
+lfs_emubd_sleep_t bench_read_sleep = 0.0;
+lfs_emubd_sleep_t bench_prog_sleep = 0.0;
+lfs_emubd_sleep_t bench_erase_sleep = 0.0;
+
+// this determines both the backtrace buffer and the trace printf buffer;
+// if traces end up interleaved or truncated this may need to be increased
+#ifndef BENCH_TRACE_BACKTRACE_BUFFER_SIZE
+#define BENCH_TRACE_BACKTRACE_BUFFER_SIZE 8192
+#endif
+void *bench_trace_backtrace_buffer[
+ BENCH_TRACE_BACKTRACE_BUFFER_SIZE / sizeof(void*)];
+
+// trace printing
+void bench_trace(const char *fmt, ...) {
+ if (bench_trace_path) {
+ // sample at a specific period?
+ if (bench_trace_period) {
+ if (bench_trace_cycles % bench_trace_period != 0) {
+ bench_trace_cycles += 1;
+ return;
+ }
+ bench_trace_cycles += 1;
+ }
+
+ // sample at a specific frequency?
+ if (bench_trace_freq) {
+ struct timespec t;
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ uint64_t now = (uint64_t)t.tv_sec*1000*1000*1000
+ + (uint64_t)t.tv_nsec;
+ if (now - bench_trace_time < (1000*1000*1000) / bench_trace_freq) {
+ return;
+ }
+ bench_trace_time = now;
+ }
+
+ if (!bench_trace_file) {
+ // Tracing output is heavy, and retrying the open on every trace
+ // call is slow, so we only try to open the trace file every
+ // so often. Note this doesn't affect successfully opened files.
+ struct timespec t;
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ uint64_t now = (uint64_t)t.tv_sec*1000*1000*1000
+ + (uint64_t)t.tv_nsec;
+ if (now - bench_trace_open_time < 100*1000*1000) {
+ return;
+ }
+ bench_trace_open_time = now;
+
+ // try to open the trace file
+ int fd;
+ if (strcmp(bench_trace_path, "-") == 0) {
+ fd = dup(1);
+ if (fd < 0) {
+ return;
+ }
+ } else {
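+ // note we open nonblocking, presumably so a fifo with no
+ // reader can't hang us, and drop O_NONBLOCK again below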
+ fd = open(
+ bench_trace_path,
+ O_WRONLY | O_CREAT | O_APPEND | O_NONBLOCK,
+ 0666);
+ if (fd < 0) {
+ return;
+ }
+ int err = fcntl(fd, F_SETFL, O_WRONLY | O_CREAT | O_APPEND);
+ assert(!err);
+ }
+
+ FILE *f = fdopen(fd, "a");
+ assert(f);
+ int err = setvbuf(f, NULL, _IOFBF,
+ BENCH_TRACE_BACKTRACE_BUFFER_SIZE);
+ assert(!err);
+ bench_trace_file = f;
+ }
+
+ // print trace
+ va_list va;
+ va_start(va, fmt);
+ int res = vfprintf(bench_trace_file, fmt, va);
+ va_end(va);
+ if (res < 0) {
+ fclose(bench_trace_file);
+ bench_trace_file = NULL;
+ return;
+ }
+
+ if (bench_trace_backtrace) {
+ // print backtrace
+ size_t count = backtrace(
+ bench_trace_backtrace_buffer,
+ BENCH_TRACE_BACKTRACE_BUFFER_SIZE);
+ // note we skip our own stack frame
+ for (size_t i = 1; i < count; i++) {
+ res = fprintf(bench_trace_file, "\tat %p\n",
+ bench_trace_backtrace_buffer[i]);
+ if (res < 0) {
+ fclose(bench_trace_file);
+ bench_trace_file = NULL;
+ return;
+ }
+ }
+ }
+
+ // flush immediately
+ fflush(bench_trace_file);
+ }
+}
+
+
+// bench prng
+uint32_t bench_prng(uint32_t *state) {
+ // A simple xorshift32 generator, easily reproducible. Keep in mind
+ // determinism is much more important than actual randomness here.
+ uint32_t x = *state;
+ x ^= x << 13;
+ x ^= x >> 17;
+ x ^= x << 5;
+ *state = x;
+ return x;
+}
+
+
+// bench recording state
+static struct lfs_config *bench_cfg = NULL;
+static lfs_emubd_io_t bench_last_readed = 0;
+static lfs_emubd_io_t bench_last_proged = 0;
+static lfs_emubd_io_t bench_last_erased = 0;
+lfs_emubd_io_t bench_readed = 0;
+lfs_emubd_io_t bench_proged = 0;
+lfs_emubd_io_t bench_erased = 0;
+
+void bench_reset(void) {
+ bench_readed = 0;
+ bench_proged = 0;
+ bench_erased = 0;
+ bench_last_readed = 0;
+ bench_last_proged = 0;
+ bench_last_erased = 0;
+}
+
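+// BENCH_START/BENCH_STOP bracket the measured region: start snapshots
+// emubd's cumulative read/prog/erase counters, and stop accumulates the
+// deltas into bench_readed/proged/erased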
+void bench_start(void) {
+ assert(bench_cfg);
+ lfs_emubd_sio_t readed = lfs_emubd_readed(bench_cfg);
+ assert(readed >= 0);
+ lfs_emubd_sio_t proged = lfs_emubd_proged(bench_cfg);
+ assert(proged >= 0);
+ lfs_emubd_sio_t erased = lfs_emubd_erased(bench_cfg);
+ assert(erased >= 0);
+
+ bench_last_readed = readed;
+ bench_last_proged = proged;
+ bench_last_erased = erased;
+}
+
+void bench_stop(void) {
+ assert(bench_cfg);
+ lfs_emubd_sio_t readed = lfs_emubd_readed(bench_cfg);
+ assert(readed >= 0);
+ lfs_emubd_sio_t proged = lfs_emubd_proged(bench_cfg);
+ assert(proged >= 0);
+ lfs_emubd_sio_t erased = lfs_emubd_erased(bench_cfg);
+ assert(erased >= 0);
+
+ bench_readed += readed - bench_last_readed;
+ bench_proged += proged - bench_last_proged;
+ bench_erased += erased - bench_last_erased;
+}
+
+
+// encode our permutation into a reusable id
+static void perm_printid(
+ const struct bench_suite *suite,
+ const struct bench_case *case_) {
+ (void)suite;
+ // case[:permutation]
+ printf("%s:", case_->name);
+ for (size_t d = 0;
+ d < lfs_max(
+ suite->define_count,
+ BENCH_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ if (bench_define_ispermutation(d)) {
+ leb16_print(d);
+ leb16_print(BENCH_DEFINE(d));
+ }
+ }
+}
+
+// a quick trie for keeping track of permutations we've seen
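+// (each trie level corresponds to one define, so a root-to-leaf path
+// encodes one full permutation and insertion reports whether the path
+// already existed)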
+typedef struct bench_seen {
+ struct bench_seen_branch *branches;
+ size_t branch_count;
+ size_t branch_capacity;
+} bench_seen_t;
+
+struct bench_seen_branch {
+ intmax_t define;
+ struct bench_seen branch;
+};
+
+bool bench_seen_insert(
+ bench_seen_t *seen,
+ const struct bench_suite *suite,
+ const struct bench_case *case_) {
+ (void)case_;
+ bool was_seen = true;
+
+ // use the currently set defines
+ for (size_t d = 0;
+ d < lfs_max(
+ suite->define_count,
+ BENCH_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ // treat unpermuted defines the same as 0
+ intmax_t define = bench_define_ispermutation(d) ? BENCH_DEFINE(d) : 0;
+
+ // already seen?
+ struct bench_seen_branch *branch = NULL;
+ for (size_t i = 0; i < seen->branch_count; i++) {
+ if (seen->branches[i].define == define) {
+ branch = &seen->branches[i];
+ break;
+ }
+ }
+
+ // need to create a new node
+ if (!branch) {
+ was_seen = false;
+ branch = mappend(
+ (void**)&seen->branches,
+ sizeof(struct bench_seen_branch),
+ &seen->branch_count,
+ &seen->branch_capacity);
+ branch->define = define;
+ branch->branch = (bench_seen_t){NULL, 0, 0};
+ }
+
+ seen = &branch->branch;
+ }
+
+ return was_seen;
+}
+
+void bench_seen_cleanup(bench_seen_t *seen) {
+ for (size_t i = 0; i < seen->branch_count; i++) {
+ bench_seen_cleanup(&seen->branches[i].branch);
+ }
+ free(seen->branches);
+}
+
+// iterate through permutations in a bench case
+static void case_forperm(
+ const struct bench_suite *suite,
+ const struct bench_case *case_,
+ const bench_define_t *defines,
+ size_t define_count,
+ void (*cb)(
+ void *data,
+ const struct bench_suite *suite,
+ const struct bench_case *case_),
+ void *data) {
+ // explicit permutation?
+ if (defines) {
+ bench_define_explicit(defines, define_count);
+
+ for (size_t v = 0; v < bench_override_define_permutations; v++) {
+ // define override permutation
+ bench_define_override(v);
+ bench_define_flush();
+
+ cb(data, suite, case_);
+ }
+
+ return;
+ }
+
+ bench_seen_t seen = {NULL, 0, 0};
+
+ for (size_t k = 0; k < case_->permutations; k++) {
+ // define permutation
+ bench_define_perm(suite, case_, k);
+
+ for (size_t v = 0; v < bench_override_define_permutations; v++) {
+ // define override permutation
+ bench_define_override(v);
+
+ for (size_t g = 0; g < bench_geometry_count; g++) {
+ // define geometry
+ bench_define_geometry(&bench_geometries[g]);
+ bench_define_flush();
+
+ // have we seen this permutation before?
+ bool was_seen = bench_seen_insert(&seen, suite, case_);
+ if (!(k == 0 && v == 0 && g == 0) && was_seen) {
+ continue;
+ }
+
+ cb(data, suite, case_);
+ }
+ }
+ }
+
+ bench_seen_cleanup(&seen);
+}
+
+
+// how many permutations are there actually in a bench case
+struct perm_count_state {
+ size_t total;
+ size_t filtered;
+};
+
+void perm_count(
+ void *data,
+ const struct bench_suite *suite,
+ const struct bench_case *case_) {
+ struct perm_count_state *state = data;
+ (void)suite;
+ (void)case_;
+
+ state->total += 1;
+
+ if (case_->filter && !case_->filter()) {
+ return;
+ }
+
+ state->filtered += 1;
+}
+
+
+// operations we can do
+static void summary(void) {
+ printf("%-23s %7s %7s %7s %11s\n",
+ "", "flags", "suites", "cases", "perms");
+ size_t suites = 0;
+ size_t cases = 0;
+ bench_flags_t flags = 0;
+ struct perm_count_state perms = {0, 0};
+
+ for (size_t t = 0; t < bench_id_count; t++) {
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ bench_define_suite(&bench_suites[i]);
+
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (bench_ids[t].name && !(
+ strcmp(bench_ids[t].name,
+ bench_suites[i].name) == 0
+ || strcmp(bench_ids[t].name,
+ bench_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ cases += 1;
+ case_forperm(
+ &bench_suites[i],
+ &bench_suites[i].cases[j],
+ bench_ids[t].defines,
+ bench_ids[t].define_count,
+ perm_count,
+ &perms);
+ }
+
+ suites += 1;
+ flags |= bench_suites[i].flags;
+ }
+ }
+
+ char perm_buf[64];
+ sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total);
+ char flag_buf[64];
+ sprintf(flag_buf, "%s%s",
+ (flags & BENCH_REENTRANT) ? "r" : "",
+ (!flags) ? "-" : "");
+ printf("%-23s %7s %7zu %7zu %11s\n",
+ "TOTAL",
+ flag_buf,
+ suites,
+ cases,
+ perm_buf);
+}
+
+static void list_suites(void) {
+ // at least size so that names fit
+ unsigned name_width = 23;
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ size_t len = strlen(bench_suites[i].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
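+    // round name_width+1 up to a multiple of 4 so columns stay aligned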
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %7s %7s %11s\n",
+ name_width, "suite", "flags", "cases", "perms");
+ for (size_t t = 0; t < bench_id_count; t++) {
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ bench_define_suite(&bench_suites[i]);
+
+ size_t cases = 0;
+ struct perm_count_state perms = {0, 0};
+
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (bench_ids[t].name && !(
+ strcmp(bench_ids[t].name,
+ bench_suites[i].name) == 0
+ || strcmp(bench_ids[t].name,
+ bench_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ cases += 1;
+ case_forperm(
+ &bench_suites[i],
+ &bench_suites[i].cases[j],
+ bench_ids[t].defines,
+ bench_ids[t].define_count,
+ perm_count,
+ &perms);
+ }
+
+ // no benches found?
+ if (!cases) {
+ continue;
+ }
+
+ char perm_buf[64];
+ sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total);
+ char flag_buf[64];
+ sprintf(flag_buf, "%s%s",
+ (bench_suites[i].flags & BENCH_REENTRANT) ? "r" : "",
+ (!bench_suites[i].flags) ? "-" : "");
+ printf("%-*s %7s %7zu %11s\n",
+ name_width,
+ bench_suites[i].name,
+ flag_buf,
+ cases,
+ perm_buf);
+ }
+ }
+}
+
+static void list_cases(void) {
+ // at least size so that names fit
+ unsigned name_width = 23;
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ size_t len = strlen(bench_suites[i].cases[j].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %7s %11s\n", name_width, "case", "flags", "perms");
+ for (size_t t = 0; t < bench_id_count; t++) {
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ bench_define_suite(&bench_suites[i]);
+
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (bench_ids[t].name && !(
+ strcmp(bench_ids[t].name,
+ bench_suites[i].name) == 0
+ || strcmp(bench_ids[t].name,
+ bench_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ struct perm_count_state perms = {0, 0};
+ case_forperm(
+ &bench_suites[i],
+ &bench_suites[i].cases[j],
+ bench_ids[t].defines,
+ bench_ids[t].define_count,
+ perm_count,
+ &perms);
+
+ char perm_buf[64];
+ sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total);
+ char flag_buf[64];
+ sprintf(flag_buf, "%s%s",
+ (bench_suites[i].cases[j].flags & BENCH_REENTRANT)
+ ? "r" : "",
+ (!bench_suites[i].cases[j].flags)
+ ? "-" : "");
+ printf("%-*s %7s %11s\n",
+ name_width,
+ bench_suites[i].cases[j].name,
+ flag_buf,
+ perm_buf);
+ }
+ }
+ }
+}
+
+static void list_suite_paths(void) {
+ // at least size so that names fit
+ unsigned name_width = 23;
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ size_t len = strlen(bench_suites[i].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %s\n", name_width, "suite", "path");
+ for (size_t t = 0; t < bench_id_count; t++) {
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ size_t cases = 0;
+
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (bench_ids[t].name && !(
+ strcmp(bench_ids[t].name,
+ bench_suites[i].name) == 0
+ || strcmp(bench_ids[t].name,
+ bench_suites[i].cases[j].name) == 0)) {
+                    continue;
+                }
+
+                cases += 1;
+            }
+
+ // no benches found?
+ if (!cases) {
+ continue;
+ }
+
+ printf("%-*s %s\n",
+ name_width,
+ bench_suites[i].name,
+ bench_suites[i].path);
+ }
+ }
+}
+
+static void list_case_paths(void) {
+ // at least size so that names fit
+ unsigned name_width = 23;
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ size_t len = strlen(bench_suites[i].cases[j].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %s\n", name_width, "case", "path");
+ for (size_t t = 0; t < bench_id_count; t++) {
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (bench_ids[t].name && !(
+ strcmp(bench_ids[t].name,
+ bench_suites[i].name) == 0
+ || strcmp(bench_ids[t].name,
+ bench_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ printf("%-*s %s\n",
+ name_width,
+ bench_suites[i].cases[j].name,
+ bench_suites[i].cases[j].path);
+ }
+ }
+ }
+}
+
+struct list_defines_define {
+ const char *name;
+ intmax_t *values;
+ size_t value_count;
+ size_t value_capacity;
+};
+
+struct list_defines_defines {
+ struct list_defines_define *defines;
+ size_t define_count;
+ size_t define_capacity;
+};
+
+static void list_defines_add(
+ struct list_defines_defines *defines,
+ size_t d) {
+ const char *name = bench_define_name(d);
+ intmax_t value = BENCH_DEFINE(d);
+
+ // define already in defines?
+ for (size_t i = 0; i < defines->define_count; i++) {
+ if (strcmp(defines->defines[i].name, name) == 0) {
+ // value already in values?
+ for (size_t j = 0; j < defines->defines[i].value_count; j++) {
+ if (defines->defines[i].values[j] == value) {
+ return;
+ }
+ }
+
+ *(intmax_t*)mappend(
+ (void**)&defines->defines[i].values,
+ sizeof(intmax_t),
+ &defines->defines[i].value_count,
+ &defines->defines[i].value_capacity) = value;
+
+ return;
+ }
+ }
+
+ // new define?
+ struct list_defines_define *define = mappend(
+ (void**)&defines->defines,
+ sizeof(struct list_defines_define),
+ &defines->define_count,
+ &defines->define_capacity);
+ define->name = name;
+ define->values = malloc(sizeof(intmax_t));
+ define->values[0] = value;
+ define->value_count = 1;
+ define->value_capacity = 1;
+}
+
+void perm_list_defines(
+ void *data,
+ const struct bench_suite *suite,
+ const struct bench_case *case_) {
+ struct list_defines_defines *defines = data;
+ (void)suite;
+ (void)case_;
+
+ // collect defines
+ for (size_t d = 0;
+ d < lfs_max(suite->define_count,
+ BENCH_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ if (d < BENCH_IMPLICIT_DEFINE_COUNT
+ || bench_define_ispermutation(d)) {
+ list_defines_add(defines, d);
+ }
+ }
+}
+
+void perm_list_permutation_defines(
+ void *data,
+ const struct bench_suite *suite,
+ const struct bench_case *case_) {
+ struct list_defines_defines *defines = data;
+ (void)suite;
+ (void)case_;
+
+ // collect permutation_defines
+ for (size_t d = 0;
+ d < lfs_max(suite->define_count,
+ BENCH_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ if (bench_define_ispermutation(d)) {
+ list_defines_add(defines, d);
+ }
+ }
+}
+
+extern const bench_geometry_t builtin_geometries[];
+
+static void list_defines(void) {
+ struct list_defines_defines defines = {NULL, 0, 0};
+
+ // add defines
+ for (size_t t = 0; t < bench_id_count; t++) {
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ bench_define_suite(&bench_suites[i]);
+
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (bench_ids[t].name && !(
+ strcmp(bench_ids[t].name,
+ bench_suites[i].name) == 0
+ || strcmp(bench_ids[t].name,
+ bench_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ case_forperm(
+ &bench_suites[i],
+ &bench_suites[i].cases[j],
+ bench_ids[t].defines,
+ bench_ids[t].define_count,
+ perm_list_defines,
+ &defines);
+ }
+ }
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ printf("%s=", defines.defines[i].name);
+ for (size_t j = 0; j < defines.defines[i].value_count; j++) {
+ printf("%jd", defines.defines[i].values[j]);
+ if (j != defines.defines[i].value_count-1) {
+ printf(",");
+ }
+ }
+ printf("\n");
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ free(defines.defines[i].values);
+ }
+ free(defines.defines);
+}
+
+static void list_permutation_defines(void) {
+ struct list_defines_defines defines = {NULL, 0, 0};
+
+ // add permutation defines
+ for (size_t t = 0; t < bench_id_count; t++) {
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ bench_define_suite(&bench_suites[i]);
+
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (bench_ids[t].name && !(
+ strcmp(bench_ids[t].name,
+ bench_suites[i].name) == 0
+ || strcmp(bench_ids[t].name,
+ bench_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ case_forperm(
+ &bench_suites[i],
+ &bench_suites[i].cases[j],
+ bench_ids[t].defines,
+ bench_ids[t].define_count,
+ perm_list_permutation_defines,
+ &defines);
+ }
+ }
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ printf("%s=", defines.defines[i].name);
+ for (size_t j = 0; j < defines.defines[i].value_count; j++) {
+ printf("%jd", defines.defines[i].values[j]);
+ if (j != defines.defines[i].value_count-1) {
+ printf(",");
+ }
+ }
+ printf("\n");
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ free(defines.defines[i].values);
+ }
+ free(defines.defines);
+}
+
+static void list_implicit_defines(void) {
+ struct list_defines_defines defines = {NULL, 0, 0};
+
+    // yes we do need to define a suite, this does a bit of bookkeeping
+ // such as setting up the define cache
+ bench_define_suite(&(const struct bench_suite){0});
+
+ // make sure to include builtin geometries here
+ extern const bench_geometry_t builtin_geometries[];
+ for (size_t g = 0; builtin_geometries[g].name; g++) {
+ bench_define_geometry(&builtin_geometries[g]);
+ bench_define_flush();
+
+ // add implicit defines
+ for (size_t d = 0; d < BENCH_IMPLICIT_DEFINE_COUNT; d++) {
+ list_defines_add(&defines, d);
+ }
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ printf("%s=", defines.defines[i].name);
+ for (size_t j = 0; j < defines.defines[i].value_count; j++) {
+ printf("%jd", defines.defines[i].values[j]);
+ if (j != defines.defines[i].value_count-1) {
+ printf(",");
+ }
+ }
+ printf("\n");
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ free(defines.defines[i].values);
+ }
+ free(defines.defines);
+}
+
+
+
+// geometries to bench
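+//
+// unset fields ({0}) fall back to the implicit defaults, so "eeprom", for
+// example, only pins PROG_SIZE=1 and BLOCK_SIZE=512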
+
+const bench_geometry_t builtin_geometries[] = {
+ {"default", {{0}, BENCH_CONST(16), BENCH_CONST(512), {0}}},
+ {"eeprom", {{0}, BENCH_CONST(1), BENCH_CONST(512), {0}}},
+ {"emmc", {{0}, {0}, BENCH_CONST(512), {0}}},
+ {"nor", {{0}, BENCH_CONST(1), BENCH_CONST(4096), {0}}},
+ {"nand", {{0}, BENCH_CONST(4096), BENCH_CONST(32768), {0}}},
+ {NULL, {{0}, {0}, {0}, {0}}},
+};
+
+const bench_geometry_t *bench_geometries = builtin_geometries;
+size_t bench_geometry_count = 5;
+
+static void list_geometries(void) {
+ // at least size so that names fit
+ unsigned name_width = 23;
+ for (size_t g = 0; builtin_geometries[g].name; g++) {
+ size_t len = strlen(builtin_geometries[g].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+    // yes we do need to define a suite, this does a bit of bookkeeping
+ // such as setting up the define cache
+ bench_define_suite(&(const struct bench_suite){0});
+
+ printf("%-*s %7s %7s %7s %7s %11s\n",
+ name_width, "geometry", "read", "prog", "erase", "count", "size");
+ for (size_t g = 0; builtin_geometries[g].name; g++) {
+ bench_define_geometry(&builtin_geometries[g]);
+ bench_define_flush();
+ printf("%-*s %7ju %7ju %7ju %7ju %11ju\n",
+ name_width,
+ builtin_geometries[g].name,
+ READ_SIZE,
+ PROG_SIZE,
+ BLOCK_SIZE,
+ BLOCK_COUNT,
+ BLOCK_SIZE*BLOCK_COUNT);
+ }
+}
+
+
+
+// global bench step count
+size_t bench_step = 0;
+
+void perm_run(
+ void *data,
+ const struct bench_suite *suite,
+ const struct bench_case *case_) {
+ (void)data;
+
+ // skip this step?
+ if (!(bench_step >= bench_step_start
+ && bench_step < bench_step_stop
+ && (bench_step-bench_step_start) % bench_step_step == 0)) {
+ bench_step += 1;
+ return;
+ }
+ bench_step += 1;
+
+ // filter?
+ if (case_->filter && !case_->filter()) {
+ printf("skipped ");
+ perm_printid(suite, case_);
+ printf("\n");
+ return;
+ }
+
+ // create block device and configuration
+ lfs_emubd_t bd;
+
+ struct lfs_config cfg = {
+ .context = &bd,
+ .read = lfs_emubd_read,
+ .prog = lfs_emubd_prog,
+ .erase = lfs_emubd_erase,
+ .sync = lfs_emubd_sync,
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .block_size = BLOCK_SIZE,
+ .block_count = BLOCK_COUNT,
+ .block_cycles = BLOCK_CYCLES,
+ .cache_size = CACHE_SIZE,
+ .lookahead_size = LOOKAHEAD_SIZE,
+ };
+
+ struct lfs_emubd_config bdcfg = {
+ .erase_value = ERASE_VALUE,
+ .erase_cycles = ERASE_CYCLES,
+ .badblock_behavior = BADBLOCK_BEHAVIOR,
+ .disk_path = bench_disk_path,
+ .read_sleep = bench_read_sleep,
+ .prog_sleep = bench_prog_sleep,
+ .erase_sleep = bench_erase_sleep,
+ };
+
+ int err = lfs_emubd_createcfg(&cfg, bench_disk_path, &bdcfg);
+ if (err) {
+ fprintf(stderr, "error: could not create block device: %d\n", err);
+ exit(-1);
+ }
+
+ // run the bench
+ bench_cfg = &cfg;
+ bench_reset();
+ printf("running ");
+ perm_printid(suite, case_);
+ printf("\n");
+
+ case_->run(&cfg);
+
+ printf("finished ");
+ perm_printid(suite, case_);
+ printf(" %"PRIu64" %"PRIu64" %"PRIu64,
+ bench_readed,
+ bench_proged,
+ bench_erased);
+ printf("\n");
+
+ // cleanup
+ err = lfs_emubd_destroy(&cfg);
+ if (err) {
+ fprintf(stderr, "error: could not destroy block device: %d\n", err);
+ exit(-1);
+ }
+}
+
+static void run(void) {
+ // ignore disconnected pipes
+ signal(SIGPIPE, SIG_IGN);
+
+ for (size_t t = 0; t < bench_id_count; t++) {
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ bench_define_suite(&bench_suites[i]);
+
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (bench_ids[t].name && !(
+ strcmp(bench_ids[t].name,
+ bench_suites[i].name) == 0
+ || strcmp(bench_ids[t].name,
+ bench_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ case_forperm(
+ &bench_suites[i],
+ &bench_suites[i].cases[j],
+ bench_ids[t].defines,
+ bench_ids[t].define_count,
+ perm_run,
+ NULL);
+ }
+ }
+ }
+}
+
+
+
+// option handling
+enum opt_flags {
+ OPT_HELP = 'h',
+ OPT_SUMMARY = 'Y',
+ OPT_LIST_SUITES = 'l',
+ OPT_LIST_CASES = 'L',
+ OPT_LIST_SUITE_PATHS = 1,
+ OPT_LIST_CASE_PATHS = 2,
+ OPT_LIST_DEFINES = 3,
+ OPT_LIST_PERMUTATION_DEFINES = 4,
+ OPT_LIST_IMPLICIT_DEFINES = 5,
+ OPT_LIST_GEOMETRIES = 6,
+ OPT_DEFINE = 'D',
+ OPT_GEOMETRY = 'G',
+ OPT_STEP = 's',
+ OPT_DISK = 'd',
+ OPT_TRACE = 't',
+ OPT_TRACE_BACKTRACE = 7,
+ OPT_TRACE_PERIOD = 8,
+ OPT_TRACE_FREQ = 9,
+ OPT_READ_SLEEP = 10,
+ OPT_PROG_SLEEP = 11,
+ OPT_ERASE_SLEEP = 12,
+};
+
+const char *short_opts = "hYlLD:G:s:d:t:";
+
+const struct option long_opts[] = {
+ {"help", no_argument, NULL, OPT_HELP},
+ {"summary", no_argument, NULL, OPT_SUMMARY},
+ {"list-suites", no_argument, NULL, OPT_LIST_SUITES},
+ {"list-cases", no_argument, NULL, OPT_LIST_CASES},
+ {"list-suite-paths", no_argument, NULL, OPT_LIST_SUITE_PATHS},
+ {"list-case-paths", no_argument, NULL, OPT_LIST_CASE_PATHS},
+ {"list-defines", no_argument, NULL, OPT_LIST_DEFINES},
+ {"list-permutation-defines",
+ no_argument, NULL, OPT_LIST_PERMUTATION_DEFINES},
+ {"list-implicit-defines",
+ no_argument, NULL, OPT_LIST_IMPLICIT_DEFINES},
+ {"list-geometries", no_argument, NULL, OPT_LIST_GEOMETRIES},
+ {"define", required_argument, NULL, OPT_DEFINE},
+ {"geometry", required_argument, NULL, OPT_GEOMETRY},
+ {"step", required_argument, NULL, OPT_STEP},
+ {"disk", required_argument, NULL, OPT_DISK},
+ {"trace", required_argument, NULL, OPT_TRACE},
+ {"trace-backtrace", no_argument, NULL, OPT_TRACE_BACKTRACE},
+ {"trace-period", required_argument, NULL, OPT_TRACE_PERIOD},
+ {"trace-freq", required_argument, NULL, OPT_TRACE_FREQ},
+ {"read-sleep", required_argument, NULL, OPT_READ_SLEEP},
+ {"prog-sleep", required_argument, NULL, OPT_PROG_SLEEP},
+ {"erase-sleep", required_argument, NULL, OPT_ERASE_SLEEP},
+ {NULL, 0, NULL, 0},
+};
+
+const char *const help_text[] = {
+ "Show this help message.",
+ "Show quick summary.",
+ "List bench suites.",
+ "List bench cases.",
+ "List the path for each bench suite.",
+ "List the path and line number for each bench case.",
+ "List all defines in this bench-runner.",
+ "List explicit defines in this bench-runner.",
+ "List implicit defines in this bench-runner.",
+ "List the available disk geometries.",
+ "Override a bench define.",
+ "Comma-separated list of disk geometries to bench.",
+ "Comma-separated range of bench permutations to run (start,stop,step).",
+ "Direct block device operations to this file.",
+ "Direct trace output to this file.",
+ "Include a backtrace with every trace statement.",
+ "Sample trace output at this period in cycles.",
+ "Sample trace output at this frequency in hz.",
+ "Artificial read delay in seconds.",
+ "Artificial prog delay in seconds.",
+ "Artificial erase delay in seconds.",
+};
+
+int main(int argc, char **argv) {
+ void (*op)(void) = run;
+
+ size_t bench_override_capacity = 0;
+ size_t bench_geometry_capacity = 0;
+ size_t bench_id_capacity = 0;
+
+ // parse options
+ while (true) {
+ int c = getopt_long(argc, argv, short_opts, long_opts, NULL);
+ switch (c) {
+ // generate help message
+ case OPT_HELP: {
+ printf("usage: %s [options] [bench_id]\n", argv[0]);
+ printf("\n");
+
+ printf("options:\n");
+ size_t i = 0;
+ while (long_opts[i].name) {
+ size_t indent;
+ if (long_opts[i].has_arg == no_argument) {
+ if (long_opts[i].val >= '0' && long_opts[i].val < 'z') {
+ indent = printf(" -%c, --%s ",
+ long_opts[i].val,
+ long_opts[i].name);
+ } else {
+ indent = printf(" --%s ",
+ long_opts[i].name);
+ }
+ } else {
+ if (long_opts[i].val >= '0' && long_opts[i].val < 'z') {
+ indent = printf(" -%c %s, --%s %s ",
+ long_opts[i].val,
+ long_opts[i].name,
+ long_opts[i].name,
+ long_opts[i].name);
+ } else {
+ indent = printf(" --%s %s ",
+ long_opts[i].name,
+ long_opts[i].name);
+ }
+ }
+
+ // a quick, hacky, byte-level method for text wrapping
+ size_t len = strlen(help_text[i]);
+ size_t j = 0;
+ if (indent < 24) {
+ printf("%*s %.80s\n",
+ (int)(24-1-indent),
+ "",
+ &help_text[i][j]);
+ j += 80;
+ } else {
+ printf("\n");
+ }
+
+ while (j < len) {
+ printf("%24s%.80s\n", "", &help_text[i][j]);
+ j += 80;
+ }
+
+ i += 1;
+ }
+
+ printf("\n");
+ exit(0);
+ }
+ // summary/list flags
+ case OPT_SUMMARY:
+ op = summary;
+ break;
+ case OPT_LIST_SUITES:
+ op = list_suites;
+ break;
+ case OPT_LIST_CASES:
+ op = list_cases;
+ break;
+ case OPT_LIST_SUITE_PATHS:
+ op = list_suite_paths;
+ break;
+ case OPT_LIST_CASE_PATHS:
+ op = list_case_paths;
+ break;
+ case OPT_LIST_DEFINES:
+ op = list_defines;
+ break;
+ case OPT_LIST_PERMUTATION_DEFINES:
+ op = list_permutation_defines;
+ break;
+ case OPT_LIST_IMPLICIT_DEFINES:
+ op = list_implicit_defines;
+ break;
+ case OPT_LIST_GEOMETRIES:
+ op = list_geometries;
+ break;
+ // configuration
+ case OPT_DEFINE: {
+ // allocate space
+ bench_override_t *override = mappend(
+ (void**)&bench_overrides,
+ sizeof(bench_override_t),
+ &bench_override_count,
+ &bench_override_capacity);
+
+ // parse into string key/intmax_t value, cannibalizing the
+ // arg in the process
+ char *sep = strchr(optarg, '=');
+ char *parsed = NULL;
+ if (!sep) {
+ goto invalid_define;
+ }
+ *sep = '\0';
+ override->name = optarg;
+ optarg = sep+1;
+
+ // parse comma-separated permutations
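+                // e.g. -DBLOCK_SIZE=512,4096 or
+                // -DBLOCK_CYCLES=range(0,1000,100)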
+ {
+ override->defines = NULL;
+ override->permutations = 0;
+ size_t override_capacity = 0;
+ while (true) {
+ optarg += strspn(optarg, " ");
+
+ if (strncmp(optarg, "range", strlen("range")) == 0) {
+ // range of values
+ optarg += strlen("range");
+ optarg += strspn(optarg, " ");
+ if (*optarg != '(') {
+ goto invalid_define;
+ }
+ optarg += 1;
+
+ intmax_t start = strtoumax(optarg, &parsed, 0);
+ intmax_t stop = -1;
+ intmax_t step = 1;
+ // allow empty string for start=0
+ if (parsed == optarg) {
+ start = 0;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ',' && *optarg != ')') {
+ goto invalid_define;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ stop = strtoumax(optarg, &parsed, 0);
+ // allow empty string for stop=end
+ if (parsed == optarg) {
+ stop = -1;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ',' && *optarg != ')') {
+ goto invalid_define;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ step = strtoumax(optarg, &parsed, 0);
+                            // allow empty string for step=1
+ if (parsed == optarg) {
+ step = 1;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ')') {
+ goto invalid_define;
+ }
+ }
+ } else {
+ // single value = stop only
+ stop = start;
+ start = 0;
+ }
+
+ if (*optarg != ')') {
+ goto invalid_define;
+ }
+ optarg += 1;
+
+ // calculate the range of values
+ assert(step != 0);
+ for (intmax_t i = start;
+ (step < 0)
+ ? i > stop
+ : (uintmax_t)i < (uintmax_t)stop;
+ i += step) {
+ *(intmax_t*)mappend(
+ (void**)&override->defines,
+ sizeof(intmax_t),
+ &override->permutations,
+ &override_capacity) = i;
+ }
+ } else if (*optarg != '\0') {
+ // single value
+ intmax_t define = strtoimax(optarg, &parsed, 0);
+ if (parsed == optarg) {
+ goto invalid_define;
+ }
+ optarg = parsed + strspn(parsed, " ");
+ *(intmax_t*)mappend(
+ (void**)&override->defines,
+ sizeof(intmax_t),
+ &override->permutations,
+ &override_capacity) = define;
+ } else {
+ break;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ }
+ }
+ }
+ assert(override->permutations > 0);
+ break;
+
+invalid_define:
+ fprintf(stderr, "error: invalid define: %s\n", optarg);
+ exit(-1);
+ }
+ case OPT_GEOMETRY: {
+ // reset our geometry scenarios
+ if (bench_geometry_capacity > 0) {
+ free((bench_geometry_t*)bench_geometries);
+ }
+ bench_geometries = NULL;
+ bench_geometry_count = 0;
+ bench_geometry_capacity = 0;
+
+ // parse the comma separated list of disk geometries
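+                // a geometry can be a builtin name ("nor"), explicit
+                // comma-separated sizes ("{1,1,4096}"), or leb16-encoded
+                // sizes (":11ggg1", also {1,1,4096})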
+ while (*optarg) {
+ // allocate space
+ bench_geometry_t *geometry = mappend(
+ (void**)&bench_geometries,
+ sizeof(bench_geometry_t),
+ &bench_geometry_count,
+ &bench_geometry_capacity);
+
+ // parse the disk geometry
+ optarg += strspn(optarg, " ");
+
+ // named disk geometry
+ size_t len = strcspn(optarg, " ,");
+ for (size_t i = 0; builtin_geometries[i].name; i++) {
+ if (len == strlen(builtin_geometries[i].name)
+ && memcmp(optarg,
+ builtin_geometries[i].name,
+ len) == 0) {
+ *geometry = builtin_geometries[i];
+ optarg += len;
+ goto geometry_next;
+ }
+ }
+
+ // comma-separated read/prog/erase/count
+ if (*optarg == '{') {
+ lfs_size_t sizes[4];
+ size_t count = 0;
+
+ char *s = optarg + 1;
+ while (count < 4) {
+ char *parsed = NULL;
+ sizes[count] = strtoumax(s, &parsed, 0);
+ count += 1;
+
+ s = parsed + strspn(parsed, " ");
+ if (*s == ',') {
+ s += 1;
+ continue;
+ } else if (*s == '}') {
+ s += 1;
+ break;
+ } else {
+ goto geometry_unknown;
+ }
+ }
+
+ // allow implicit r=p and p=e for common geometries
+ memset(geometry, 0, sizeof(bench_geometry_t));
+ if (count >= 3) {
+ geometry->defines[READ_SIZE_i]
+ = BENCH_LIT(sizes[0]);
+ geometry->defines[PROG_SIZE_i]
+ = BENCH_LIT(sizes[1]);
+ geometry->defines[BLOCK_SIZE_i]
+ = BENCH_LIT(sizes[2]);
+ } else if (count >= 2) {
+ geometry->defines[PROG_SIZE_i]
+ = BENCH_LIT(sizes[0]);
+ geometry->defines[BLOCK_SIZE_i]
+ = BENCH_LIT(sizes[1]);
+ } else {
+ geometry->defines[BLOCK_SIZE_i]
+ = BENCH_LIT(sizes[0]);
+ }
+ if (count >= 4) {
+ geometry->defines[BLOCK_COUNT_i]
+ = BENCH_LIT(sizes[3]);
+ }
+ optarg = s;
+ goto geometry_next;
+ }
+
+ // leb16-encoded read/prog/erase/count
+ if (*optarg == ':') {
+ lfs_size_t sizes[4];
+ size_t count = 0;
+
+ char *s = optarg + 1;
+ while (true) {
+ char *parsed = NULL;
+ uintmax_t x = leb16_parse(s, &parsed);
+ if (parsed == s || count >= 4) {
+ break;
+ }
+
+ sizes[count] = x;
+ count += 1;
+ s = parsed;
+ }
+
+ // allow implicit r=p and p=e for common geometries
+ memset(geometry, 0, sizeof(bench_geometry_t));
+ if (count >= 3) {
+ geometry->defines[READ_SIZE_i]
+ = BENCH_LIT(sizes[0]);
+ geometry->defines[PROG_SIZE_i]
+ = BENCH_LIT(sizes[1]);
+ geometry->defines[BLOCK_SIZE_i]
+ = BENCH_LIT(sizes[2]);
+ } else if (count >= 2) {
+ geometry->defines[PROG_SIZE_i]
+ = BENCH_LIT(sizes[0]);
+ geometry->defines[BLOCK_SIZE_i]
+ = BENCH_LIT(sizes[1]);
+ } else {
+ geometry->defines[BLOCK_SIZE_i]
+ = BENCH_LIT(sizes[0]);
+ }
+ if (count >= 4) {
+ geometry->defines[BLOCK_COUNT_i]
+ = BENCH_LIT(sizes[3]);
+ }
+ optarg = s;
+ goto geometry_next;
+ }
+
+geometry_unknown:
+            // unknown geometry?
+ fprintf(stderr, "error: unknown disk geometry: %s\n",
+ optarg);
+ exit(-1);
+
+geometry_next:
+ optarg += strspn(optarg, " ");
+ if (*optarg == ',') {
+ optarg += 1;
+ } else if (*optarg == '\0') {
+ break;
+ } else {
+ goto geometry_unknown;
+ }
+ }
+ break;
+ }
+ case OPT_STEP: {
+ char *parsed = NULL;
+ bench_step_start = strtoumax(optarg, &parsed, 0);
+ bench_step_stop = -1;
+ bench_step_step = 1;
+ // allow empty string for start=0
+ if (parsed == optarg) {
+ bench_step_start = 0;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ',' && *optarg != '\0') {
+ goto step_unknown;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ bench_step_stop = strtoumax(optarg, &parsed, 0);
+ // allow empty string for stop=end
+ if (parsed == optarg) {
+ bench_step_stop = -1;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ',' && *optarg != '\0') {
+ goto step_unknown;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ bench_step_step = strtoumax(optarg, &parsed, 0);
+                    // allow empty string for step=1
+ if (parsed == optarg) {
+ bench_step_step = 1;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != '\0') {
+ goto step_unknown;
+ }
+ }
+ } else {
+ // single value = stop only
+ bench_step_stop = bench_step_start;
+ bench_step_start = 0;
+ }
+
+ break;
+step_unknown:
+ fprintf(stderr, "error: invalid step: %s\n", optarg);
+ exit(-1);
+ }
+ case OPT_DISK:
+ bench_disk_path = optarg;
+ break;
+ case OPT_TRACE:
+ bench_trace_path = optarg;
+ break;
+ case OPT_TRACE_BACKTRACE:
+ bench_trace_backtrace = true;
+ break;
+ case OPT_TRACE_PERIOD: {
+ char *parsed = NULL;
+ bench_trace_period = strtoumax(optarg, &parsed, 0);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid trace-period: %s\n", optarg);
+ exit(-1);
+ }
+ break;
+ }
+ case OPT_TRACE_FREQ: {
+ char *parsed = NULL;
+ bench_trace_freq = strtoumax(optarg, &parsed, 0);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid trace-freq: %s\n", optarg);
+ exit(-1);
+ }
+ break;
+ }
+ case OPT_READ_SLEEP: {
+ char *parsed = NULL;
+ double read_sleep = strtod(optarg, &parsed);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid read-sleep: %s\n", optarg);
+ exit(-1);
+ }
+ bench_read_sleep = read_sleep*1.0e9;
+ break;
+ }
+ case OPT_PROG_SLEEP: {
+ char *parsed = NULL;
+ double prog_sleep = strtod(optarg, &parsed);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid prog-sleep: %s\n", optarg);
+ exit(-1);
+ }
+ bench_prog_sleep = prog_sleep*1.0e9;
+ break;
+ }
+ case OPT_ERASE_SLEEP: {
+ char *parsed = NULL;
+ double erase_sleep = strtod(optarg, &parsed);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid erase-sleep: %s\n", optarg);
+ exit(-1);
+ }
+ bench_erase_sleep = erase_sleep*1.0e9;
+ break;
+ }
+ // done parsing
+ case -1:
+ goto getopt_done;
+ // unknown arg, getopt prints a message for us
+ default:
+ exit(-1);
+ }
+ }
+getopt_done: ;
+
+ if (argc > optind) {
+ // reset our bench identifier list
+ bench_ids = NULL;
+ bench_id_count = 0;
+ bench_id_capacity = 0;
+ }
+
+ // parse bench identifier, if any, cannibalizing the arg in the process
+ for (; argc > optind; optind++) {
+ bench_define_t *defines = NULL;
+ size_t define_count = 0;
+
+ // parse name, can be suite or case
+ char *name = argv[optind];
+ char *defines_ = strchr(name, ':');
+ if (defines_) {
+ *defines_ = '\0';
+ defines_ += 1;
+ }
+
+ // remove optional path and .toml suffix
+ char *slash = strrchr(name, '/');
+ if (slash) {
+ name = slash+1;
+ }
+
+ size_t name_len = strlen(name);
+ if (name_len > 5 && strcmp(&name[name_len-5], ".toml") == 0) {
+ name[name_len-5] = '\0';
+ }
+
+ if (defines_) {
+ // parse defines
+ while (true) {
+ char *parsed;
+ size_t d = leb16_parse(defines_, &parsed);
+ intmax_t v = leb16_parse(parsed, &parsed);
+ if (parsed == defines_) {
+ break;
+ }
+ defines_ = parsed;
+
+ if (d >= define_count) {
+ // align to power of two to avoid any superlinear growth
+ size_t ncount = 1 << lfs_npw2(d+1);
+ defines = realloc(defines,
+ ncount*sizeof(bench_define_t));
+ memset(defines+define_count, 0,
+ (ncount-define_count)*sizeof(bench_define_t));
+ define_count = ncount;
+ }
+ defines[d] = BENCH_LIT(v);
+ }
+ }
+
+ // append to identifier list
+ *(bench_id_t*)mappend(
+ (void**)&bench_ids,
+ sizeof(bench_id_t),
+ &bench_id_count,
+ &bench_id_capacity) = (bench_id_t){
+ .name = name,
+ .defines = defines,
+ .define_count = define_count,
+ };
+ }
+
+ // do the thing
+ op();
+
+ // cleanup (need to be done for valgrind benching)
+ bench_define_cleanup();
+ if (bench_overrides) {
+ for (size_t i = 0; i < bench_override_count; i++) {
+ free((void*)bench_overrides[i].defines);
+ }
+ free((void*)bench_overrides);
+ }
+ if (bench_geometry_capacity) {
+ free((void*)bench_geometries);
+ }
+ if (bench_id_capacity) {
+ for (size_t i = 0; i < bench_id_count; i++) {
+ free((void*)bench_ids[i].defines);
+ }
+ free((void*)bench_ids);
+ }
+}
diff --git a/runners/bench_runner.h b/runners/bench_runner.h
new file mode 100644
index 00000000..6296c091
--- /dev/null
+++ b/runners/bench_runner.h
@@ -0,0 +1,131 @@
+/*
+ * Runner for littlefs benchmarks
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef BENCH_RUNNER_H
+#define BENCH_RUNNER_H
+
+
+// override LFS_TRACE
+void bench_trace(const char *fmt, ...);
+
+#define LFS_TRACE_(fmt, ...) \
+ bench_trace("%s:%d:trace: " fmt "%s\n", \
+ __FILE__, \
+ __LINE__, \
+ __VA_ARGS__)
+#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
+#define LFS_EMUBD_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
+
+// provide BENCH_START/BENCH_STOP macros
+void bench_start(void);
+void bench_stop(void);
+
+#define BENCH_START() bench_start()
+#define BENCH_STOP() bench_stop()
+
+
+// note these are indirectly included in any generated files
+#include "bd/lfs_emubd.h"
+#include <stdio.h>
+
+// give source a chance to define feature macros
+#undef _FEATURES_H
+#undef _STDIO_H
+
+
+// generated bench configurations
+struct lfs_config;
+
+enum bench_flags {
+ BENCH_REENTRANT = 0x1,
+};
+typedef uint8_t bench_flags_t;
+
+typedef struct bench_define {
+ intmax_t (*cb)(void *data);
+ void *data;
+} bench_define_t;
+
+struct bench_case {
+ const char *name;
+ const char *path;
+ bench_flags_t flags;
+ size_t permutations;
+
+ const bench_define_t *defines;
+
+ bool (*filter)(void);
+ void (*run)(struct lfs_config *cfg);
+};
+
+struct bench_suite {
+ const char *name;
+ const char *path;
+ bench_flags_t flags;
+
+ const char *const *define_names;
+ size_t define_count;
+
+ const struct bench_case *cases;
+ size_t case_count;
+};
+
+
+// deterministic prng for pseudo-randomness in benches
+uint32_t bench_prng(uint32_t *state);
+
+#define BENCH_PRNG(state) bench_prng(state)
+
+
+// access generated bench defines
+intmax_t bench_define(size_t define);
+
+#define BENCH_DEFINE(i) bench_define(i)
+
+// a few preconfigured defines that control how benches run
+
+#define READ_SIZE_i 0
+#define PROG_SIZE_i 1
+#define BLOCK_SIZE_i 2
+#define BLOCK_COUNT_i 3
+#define CACHE_SIZE_i 4
+#define LOOKAHEAD_SIZE_i 5
+#define BLOCK_CYCLES_i 6
+#define ERASE_VALUE_i 7
+#define ERASE_CYCLES_i 8
+#define BADBLOCK_BEHAVIOR_i 9
+#define POWERLOSS_BEHAVIOR_i 10
+
+#define READ_SIZE bench_define(READ_SIZE_i)
+#define PROG_SIZE bench_define(PROG_SIZE_i)
+#define BLOCK_SIZE bench_define(BLOCK_SIZE_i)
+#define BLOCK_COUNT bench_define(BLOCK_COUNT_i)
+#define CACHE_SIZE bench_define(CACHE_SIZE_i)
+#define LOOKAHEAD_SIZE bench_define(LOOKAHEAD_SIZE_i)
+#define BLOCK_CYCLES bench_define(BLOCK_CYCLES_i)
+#define ERASE_VALUE bench_define(ERASE_VALUE_i)
+#define ERASE_CYCLES bench_define(ERASE_CYCLES_i)
+#define BADBLOCK_BEHAVIOR bench_define(BADBLOCK_BEHAVIOR_i)
+#define POWERLOSS_BEHAVIOR bench_define(POWERLOSS_BEHAVIOR_i)
+
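+// implicit defaults cascade: READ_SIZE defaults to PROG_SIZE, PROG_SIZE to
+// BLOCK_SIZE, and BLOCK_COUNT to a 1 MiB disk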
+#define BENCH_IMPLICIT_DEFINES \
+ BENCH_DEF(READ_SIZE, PROG_SIZE) \
+ BENCH_DEF(PROG_SIZE, BLOCK_SIZE) \
+ BENCH_DEF(BLOCK_SIZE, 0) \
+ BENCH_DEF(BLOCK_COUNT, (1024*1024)/BLOCK_SIZE) \
+ BENCH_DEF(CACHE_SIZE, lfs_max(64,lfs_max(READ_SIZE,PROG_SIZE))) \
+ BENCH_DEF(LOOKAHEAD_SIZE, 16) \
+ BENCH_DEF(BLOCK_CYCLES, -1) \
+ BENCH_DEF(ERASE_VALUE, 0xff) \
+ BENCH_DEF(ERASE_CYCLES, 0) \
+ BENCH_DEF(BADBLOCK_BEHAVIOR, LFS_EMUBD_BADBLOCK_PROGERROR) \
+ BENCH_DEF(POWERLOSS_BEHAVIOR, LFS_EMUBD_POWERLOSS_NOOP)
+
+#define BENCH_GEOMETRY_DEFINE_COUNT 4
+#define BENCH_IMPLICIT_DEFINE_COUNT 11
+
+
+#endif
diff --git a/runners/test_runner.c b/runners/test_runner.c
new file mode 100644
index 00000000..abc867c2
--- /dev/null
+++ b/runners/test_runner.c
@@ -0,0 +1,2763 @@
+/*
+ * Runner for littlefs tests
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 199309L
+#endif
+
+#include "runners/test_runner.h"
+#include "bd/lfs_emubd.h"
+
+#include <execinfo.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+
+// some helpers
+
+// append to an array with amortized doubling
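+//
+// e.g. *(int*)mappend((void**)&array, sizeof(int), &count, &capacity) = 42;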
+void *mappend(void **p,
+ size_t size,
+ size_t *count,
+ size_t *capacity) {
+ uint8_t *p_ = *p;
+ size_t count_ = *count;
+ size_t capacity_ = *capacity;
+
+ count_ += 1;
+ if (count_ > capacity_) {
+ capacity_ = (2*capacity_ < 4) ? 4 : 2*capacity_;
+
+ p_ = realloc(p_, capacity_*size);
+ if (!p_) {
+ return NULL;
+ }
+ }
+
+ *p = p_;
+ *count = count_;
+ *capacity = capacity_;
+ return &p_[(count_-1)*size];
+}
+
+// a quick self-terminating text-safe varint scheme
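+//
+// each char encodes 4 bits of the value, little-endian, '0'-'9'/'a'-'f' for
+// a final nibble and 'g'-'v' for nibbles with the continuation bit set,
+// e.g. 18 (0x12) encodes as "i1"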
+static void leb16_print(uintmax_t x) {
+ // allow 'w' to indicate negative numbers
+ if ((intmax_t)x < 0) {
+ printf("w");
+ x = -x;
+ }
+
+ while (true) {
+ char nibble = (x & 0xf) | (x > 0xf ? 0x10 : 0);
+ printf("%c", (nibble < 10) ? '0'+nibble : 'a'+nibble-10);
+ if (x <= 0xf) {
+ break;
+ }
+ x >>= 4;
+ }
+}
+
+static uintmax_t leb16_parse(const char *s, char **tail) {
+ bool neg = false;
+ uintmax_t x = 0;
+ if (tail) {
+ *tail = (char*)s;
+ }
+
+ if (s[0] == 'w') {
+ neg = true;
+ s = s+1;
+ }
+
+ size_t i = 0;
+ while (true) {
+ uintmax_t nibble = s[i];
+ if (nibble >= '0' && nibble <= '9') {
+ nibble = nibble - '0';
+ } else if (nibble >= 'a' && nibble <= 'v') {
+ nibble = nibble - 'a' + 10;
+ } else {
+ // invalid?
+ return 0;
+ }
+
+ x |= (nibble & 0xf) << (4*i);
+ i += 1;
+ if (!(nibble & 0x10)) {
+ s = s + i;
+ break;
+ }
+ }
+
+ if (tail) {
+ *tail = (char*)s;
+ }
+ return neg ? -x : x;
+}
+
+
+
+// test_runner types
+
+typedef struct test_geometry {
+ const char *name;
+ test_define_t defines[TEST_GEOMETRY_DEFINE_COUNT];
+} test_geometry_t;
+
+typedef struct test_powerloss {
+ const char *name;
+ void (*run)(
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ const struct test_suite *suite,
+ const struct test_case *case_);
+ const lfs_emubd_powercycles_t *cycles;
+ size_t cycle_count;
+} test_powerloss_t;
+
+typedef struct test_id {
+ const char *name;
+ const test_define_t *defines;
+ size_t define_count;
+ const lfs_emubd_powercycles_t *cycles;
+ size_t cycle_count;
+} test_id_t;
+
+
+// test suites are linked into a custom ld section
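+// (the linker implicitly provides the __start_/__stop_ symbols for us)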
+extern struct test_suite __start__test_suites;
+extern struct test_suite __stop__test_suites;
+
+const struct test_suite *test_suites = &__start__test_suites;
+#define TEST_SUITE_COUNT \
+ ((size_t)(&__stop__test_suites - &__start__test_suites))
+
+
+// test define management
+typedef struct test_define_map {
+ const test_define_t *defines;
+ size_t count;
+} test_define_map_t;
+
+typedef struct test_define_names {
+ const char *const *names;
+ size_t count;
+} test_define_names_t;
+
+intmax_t test_define_lit(void *data) {
+ return (intptr_t)data;
+}
+
+#define TEST_CONST(x) {test_define_lit, (void*)(uintptr_t)(x)}
+#define TEST_LIT(x) ((test_define_t)TEST_CONST(x))
+
+
+#define TEST_DEF(k, v) \
+ intmax_t test_define_##k(void *data) { \
+ (void)data; \
+ return v; \
+ }
+
+ TEST_IMPLICIT_DEFINES
+#undef TEST_DEF
+
+#define TEST_DEFINE_MAP_OVERRIDE 0
+#define TEST_DEFINE_MAP_EXPLICIT 1
+#define TEST_DEFINE_MAP_PERMUTATION 2
+#define TEST_DEFINE_MAP_GEOMETRY 3
+#define TEST_DEFINE_MAP_IMPLICIT 4
+#define TEST_DEFINE_MAP_COUNT 5
+
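+// note test_define searches the maps in order, so overrides take precedence
+// over explicit ids, permutations, geometries, and the implicit defaults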
+test_define_map_t test_define_maps[TEST_DEFINE_MAP_COUNT] = {
+ [TEST_DEFINE_MAP_IMPLICIT] = {
+ (const test_define_t[TEST_IMPLICIT_DEFINE_COUNT]) {
+ #define TEST_DEF(k, v) \
+ [k##_i] = {test_define_##k, NULL},
+
+ TEST_IMPLICIT_DEFINES
+ #undef TEST_DEF
+ },
+ TEST_IMPLICIT_DEFINE_COUNT,
+ },
+};
+
+#define TEST_DEFINE_NAMES_SUITE 0
+#define TEST_DEFINE_NAMES_IMPLICIT 1
+#define TEST_DEFINE_NAMES_COUNT 2
+
+test_define_names_t test_define_names[TEST_DEFINE_NAMES_COUNT] = {
+ [TEST_DEFINE_NAMES_IMPLICIT] = {
+ (const char *const[TEST_IMPLICIT_DEFINE_COUNT]){
+ #define TEST_DEF(k, v) \
+ [k##_i] = #k,
+
+ TEST_IMPLICIT_DEFINES
+ #undef TEST_DEF
+ },
+ TEST_IMPLICIT_DEFINE_COUNT,
+ },
+};
+
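+// defines are lazily evaluated and memoized in test_define_cache, with
+// test_define_cache_mask tracking which entries are valid so a flush is
+// just a memset of the mask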
+intmax_t *test_define_cache;
+size_t test_define_cache_count;
+unsigned *test_define_cache_mask;
+
+const char *test_define_name(size_t define) {
+ // lookup in our test names
+ for (size_t i = 0; i < TEST_DEFINE_NAMES_COUNT; i++) {
+ if (define < test_define_names[i].count
+ && test_define_names[i].names
+ && test_define_names[i].names[define]) {
+ return test_define_names[i].names[define];
+ }
+ }
+
+ return NULL;
+}
+
+bool test_define_ispermutation(size_t define) {
+ // is this define specific to the permutation?
+ for (size_t i = 0; i < TEST_DEFINE_MAP_IMPLICIT; i++) {
+ if (define < test_define_maps[i].count
+ && test_define_maps[i].defines[define].cb) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+intmax_t test_define(size_t define) {
+ // is the define in our cache?
+ if (define < test_define_cache_count
+ && (test_define_cache_mask[define/(8*sizeof(unsigned))]
+ & (1 << (define%(8*sizeof(unsigned)))))) {
+ return test_define_cache[define];
+ }
+
+ // lookup in our test defines
+ for (size_t i = 0; i < TEST_DEFINE_MAP_COUNT; i++) {
+ if (define < test_define_maps[i].count
+ && test_define_maps[i].defines[define].cb) {
+ intmax_t v = test_define_maps[i].defines[define].cb(
+ test_define_maps[i].defines[define].data);
+
+ // insert into cache!
+ test_define_cache[define] = v;
+ test_define_cache_mask[define / (8*sizeof(unsigned))]
+ |= 1 << (define%(8*sizeof(unsigned)));
+
+ return v;
+ }
+ }
+
+ return 0;
+
+ // not found?
+ const char *name = test_define_name(define);
+ fprintf(stderr, "error: undefined define %s (%zd)\n",
+ name ? name : "(unknown)",
+ define);
+ assert(false);
+ exit(-1);
+}
+
+void test_define_flush(void) {
+ // clear cache between permutations
+ memset(test_define_cache_mask, 0,
+ sizeof(unsigned)*(
+ (test_define_cache_count+(8*sizeof(unsigned))-1)
+ / (8*sizeof(unsigned))));
+}
+
+// geometry updates
+const test_geometry_t *test_geometry = NULL;
+
+void test_define_geometry(const test_geometry_t *geometry) {
+ test_define_maps[TEST_DEFINE_MAP_GEOMETRY] = (test_define_map_t){
+ geometry->defines, TEST_GEOMETRY_DEFINE_COUNT};
+}
+
+// override updates
+typedef struct test_override {
+ const char *name;
+ const intmax_t *defines;
+ size_t permutations;
+} test_override_t;
+
+const test_override_t *test_overrides = NULL;
+size_t test_override_count = 0;
+
+test_define_t *test_override_defines = NULL;
+size_t test_override_define_count = 0;
+size_t test_override_define_permutations = 1;
+size_t test_override_define_capacity = 0;
+
+// suite/perm updates
+void test_define_suite(const struct test_suite *suite) {
+ test_define_names[TEST_DEFINE_NAMES_SUITE] = (test_define_names_t){
+ suite->define_names, suite->define_count};
+
+ // make sure our cache is large enough
+ if (lfs_max(suite->define_count, TEST_IMPLICIT_DEFINE_COUNT)
+ > test_define_cache_count) {
+ // align to power of two to avoid any superlinear growth
+ size_t ncount = 1 << lfs_npw2(
+ lfs_max(suite->define_count, TEST_IMPLICIT_DEFINE_COUNT));
+ test_define_cache = realloc(test_define_cache, ncount*sizeof(intmax_t));
+ test_define_cache_mask = realloc(test_define_cache_mask,
+ sizeof(unsigned)*(
+ (ncount+(8*sizeof(unsigned))-1)
+ / (8*sizeof(unsigned))));
+ test_define_cache_count = ncount;
+ }
+
+ // map any overrides
+ if (test_override_count > 0) {
+ // first figure out the total size of override permutations
+ size_t count = 0;
+ size_t permutations = 1;
+ for (size_t i = 0; i < test_override_count; i++) {
+ for (size_t d = 0;
+ d < lfs_max(
+ suite->define_count,
+ TEST_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ // define name match?
+ const char *name = test_define_name(d);
+ if (name && strcmp(name, test_overrides[i].name) == 0) {
+ count = lfs_max(count, d+1);
+ permutations *= test_overrides[i].permutations;
+ break;
+ }
+ }
+ }
+ test_override_define_count = count;
+ test_override_define_permutations = permutations;
+
+ // make sure our override arrays are big enough
+ if (count * permutations > test_override_define_capacity) {
+ // align to power of two to avoid any superlinear growth
+ size_t ncapacity = 1 << lfs_npw2(count * permutations);
+ test_override_defines = realloc(
+ test_override_defines,
+ sizeof(test_define_t)*ncapacity);
+ test_override_define_capacity = ncapacity;
+ }
+
+ // zero unoverridden defines
+ memset(test_override_defines, 0,
+ sizeof(test_define_t) * count * permutations);
+
+ // compute permutations
+ size_t p = 1;
+ for (size_t i = 0; i < test_override_count; i++) {
+ for (size_t d = 0;
+ d < lfs_max(
+ suite->define_count,
+ TEST_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ // define name match?
+ const char *name = test_define_name(d);
+ if (name && strcmp(name, test_overrides[i].name) == 0) {
+ // scatter the define permutations based on already
+ // seen permutations
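+                    // (this enumerates the cross product of all override
+                    // value lists, override i cycling through its values
+                    // with stride p)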
+ for (size_t j = 0; j < permutations; j++) {
+ test_override_defines[j*count + d] = TEST_LIT(
+ test_overrides[i].defines[(j/p)
+ % test_overrides[i].permutations]);
+ }
+
+ // keep track of how many permutations we've seen so far
+ p *= test_overrides[i].permutations;
+ break;
+ }
+ }
+ }
+ }
+}
+
+void test_define_perm(
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ size_t perm) {
+ if (case_->defines) {
+ test_define_maps[TEST_DEFINE_MAP_PERMUTATION] = (test_define_map_t){
+ case_->defines + perm*suite->define_count,
+ suite->define_count};
+ } else {
+ test_define_maps[TEST_DEFINE_MAP_PERMUTATION] = (test_define_map_t){
+ NULL, 0};
+ }
+}
+
+void test_define_override(size_t perm) {
+ test_define_maps[TEST_DEFINE_MAP_OVERRIDE] = (test_define_map_t){
+ test_override_defines + perm*test_override_define_count,
+ test_override_define_count};
+}
+
+void test_define_explicit(
+ const test_define_t *defines,
+ size_t define_count) {
+ test_define_maps[TEST_DEFINE_MAP_EXPLICIT] = (test_define_map_t){
+ defines, define_count};
+}
+
+void test_define_cleanup(void) {
+ // test define management can allocate a few things
+ free(test_define_cache);
+ free(test_define_cache_mask);
+ free(test_override_defines);
+}
+
+
+
+// test state
+extern const test_geometry_t *test_geometries;
+extern size_t test_geometry_count;
+
+extern const test_powerloss_t *test_powerlosses;
+extern size_t test_powerloss_count;
+
+const test_id_t *test_ids = (const test_id_t[]) {
+ {NULL, NULL, 0, NULL, 0},
+};
+size_t test_id_count = 1;
+
+size_t test_step_start = 0;
+size_t test_step_stop = -1;
+size_t test_step_step = 1;
+
+const char *test_disk_path = NULL;
+const char *test_trace_path = NULL;
+bool test_trace_backtrace = false;
+uint32_t test_trace_period = 0;
+uint32_t test_trace_freq = 0;
+FILE *test_trace_file = NULL;
+uint32_t test_trace_cycles = 0;
+uint64_t test_trace_time = 0;
+uint64_t test_trace_open_time = 0;
+lfs_emubd_sleep_t test_read_sleep = 0.0;
+lfs_emubd_sleep_t test_prog_sleep = 0.0;
+lfs_emubd_sleep_t test_erase_sleep = 0.0;
+
+// this determines both the backtrace buffer and the trace printf buffer, if
+// trace ends up interleaved or truncated this may need to be increased
+#ifndef TEST_TRACE_BACKTRACE_BUFFER_SIZE
+#define TEST_TRACE_BACKTRACE_BUFFER_SIZE 8192
+#endif
+void *test_trace_backtrace_buffer[
+ TEST_TRACE_BACKTRACE_BUFFER_SIZE / sizeof(void*)];
+
+// trace printing
+void test_trace(const char *fmt, ...) {
+ if (test_trace_path) {
+ // sample at a specific period?
+ if (test_trace_period) {
+ if (test_trace_cycles % test_trace_period != 0) {
+ test_trace_cycles += 1;
+ return;
+ }
+ test_trace_cycles += 1;
+ }
+
+ // sample at a specific frequency?
+ if (test_trace_freq) {
+ struct timespec t;
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ uint64_t now = (uint64_t)t.tv_sec*1000*1000*1000
+ + (uint64_t)t.tv_nsec;
+ if (now - test_trace_time < (1000*1000*1000) / test_trace_freq) {
+ return;
+ }
+ test_trace_time = now;
+ }
+
+ if (!test_trace_file) {
+            // Tracing output is heavy and retrying the open on every
+            // trace call is slow, so we only try to open the trace file
+            // every so often. Note this doesn't affect already-open files
+ struct timespec t;
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ uint64_t now = (uint64_t)t.tv_sec*1000*1000*1000
+ + (uint64_t)t.tv_nsec;
+ if (now - test_trace_open_time < 100*1000*1000) {
+ return;
+ }
+ test_trace_open_time = now;
+
+ // try to open the trace file
+ int fd;
+ if (strcmp(test_trace_path, "-") == 0) {
+ fd = dup(1);
+ if (fd < 0) {
+ return;
+ }
+ } else {
+ fd = open(
+ test_trace_path,
+ O_WRONLY | O_CREAT | O_APPEND | O_NONBLOCK,
+ 0666);
+ if (fd < 0) {
+ return;
+ }
+ int err = fcntl(fd, F_SETFL, O_WRONLY | O_CREAT | O_APPEND);
+ assert(!err);
+ }
+
+ FILE *f = fdopen(fd, "a");
+ assert(f);
+ int err = setvbuf(f, NULL, _IOFBF,
+ TEST_TRACE_BACKTRACE_BUFFER_SIZE);
+ assert(!err);
+ test_trace_file = f;
+ }
+
+ // print trace
+ va_list va;
+ va_start(va, fmt);
+ int res = vfprintf(test_trace_file, fmt, va);
+ va_end(va);
+ if (res < 0) {
+ fclose(test_trace_file);
+ test_trace_file = NULL;
+ return;
+ }
+
+ if (test_trace_backtrace) {
+ // print backtrace
+ size_t count = backtrace(
+ test_trace_backtrace_buffer,
+ TEST_TRACE_BACKTRACE_BUFFER_SIZE);
+ // note we skip our own stack frame
+ for (size_t i = 1; i < count; i++) {
+ res = fprintf(test_trace_file, "\tat %p\n",
+ test_trace_backtrace_buffer[i]);
+ if (res < 0) {
+ fclose(test_trace_file);
+ test_trace_file = NULL;
+ return;
+ }
+ }
+ }
+
+ // flush immediately
+ fflush(test_trace_file);
+ }
+}
+
+
+// test prng
+uint32_t test_prng(uint32_t *state) {
+ // A simple xorshift32 generator, easily reproducible. Keep in mind
+ // determinism is much more important than actual randomness here.
+ uint32_t x = *state;
+ x ^= x << 13;
+ x ^= x >> 17;
+ x ^= x << 5;
+ *state = x;
+ return x;
+}
+
+
+// encode our permutation into a reusable id
+static void perm_printid(
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count) {
+ (void)suite;
+ // case[:permutation[:powercycles]]
+ printf("%s:", case_->name);
+ for (size_t d = 0;
+ d < lfs_max(
+ suite->define_count,
+ TEST_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ if (test_define_ispermutation(d)) {
+ leb16_print(d);
+ leb16_print(TEST_DEFINE(d));
+ }
+ }
+
+    // only print power-cycles if any occurred
+ if (cycles) {
+ printf(":");
+ for (size_t i = 0; i < cycle_count; i++) {
+ leb16_print(cycles[i]);
+ }
+ }
+}
+
+
+// a quick trie for keeping track of permutations we've seen
+typedef struct test_seen {
+ struct test_seen_branch *branches;
+ size_t branch_count;
+ size_t branch_capacity;
+} test_seen_t;
+
+struct test_seen_branch {
+ intmax_t define;
+ struct test_seen branch;
+};
+
+bool test_seen_insert(
+ test_seen_t *seen,
+ const struct test_suite *suite,
+ const struct test_case *case_) {
+ (void)case_;
+ bool was_seen = true;
+
+ // use the currently set defines
+ for (size_t d = 0;
+ d < lfs_max(
+ suite->define_count,
+ TEST_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ // treat unpermuted defines the same as 0
+ intmax_t define = test_define_ispermutation(d) ? TEST_DEFINE(d) : 0;
+
+ // already seen?
+ struct test_seen_branch *branch = NULL;
+ for (size_t i = 0; i < seen->branch_count; i++) {
+ if (seen->branches[i].define == define) {
+ branch = &seen->branches[i];
+ break;
+ }
+ }
+
+ // need to create a new node
+ if (!branch) {
+ was_seen = false;
+ branch = mappend(
+ (void**)&seen->branches,
+ sizeof(struct test_seen_branch),
+ &seen->branch_count,
+ &seen->branch_capacity);
+ branch->define = define;
+ branch->branch = (test_seen_t){NULL, 0, 0};
+ }
+
+ seen = &branch->branch;
+ }
+
+ return was_seen;
+}
+
+void test_seen_cleanup(test_seen_t *seen) {
+ for (size_t i = 0; i < seen->branch_count; i++) {
+ test_seen_cleanup(&seen->branches[i].branch);
+ }
+ free(seen->branches);
+}
+
+static void run_powerloss_none(
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ const struct test_suite *suite,
+ const struct test_case *case_);
+static void run_powerloss_cycles(
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ const struct test_suite *suite,
+ const struct test_case *case_);
+
+// iterate through permutations in a test case
+static void case_forperm(
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ const test_define_t *defines,
+ size_t define_count,
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ void (*cb)(
+ void *data,
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ const test_powerloss_t *powerloss),
+ void *data) {
+ // explicit permutation?
+ if (defines) {
+ test_define_explicit(defines, define_count);
+
+ for (size_t v = 0; v < test_override_define_permutations; v++) {
+ // define override permutation
+ test_define_override(v);
+ test_define_flush();
+
+ // explicit powerloss cycles?
+ if (cycles) {
+ cb(data, suite, case_, &(test_powerloss_t){
+ .run=run_powerloss_cycles,
+ .cycles=cycles,
+ .cycle_count=cycle_count});
+ } else {
+ for (size_t p = 0; p < test_powerloss_count; p++) {
+ // skip non-reentrant tests when powerloss testing
+ if (test_powerlosses[p].run != run_powerloss_none
+ && !(case_->flags & TEST_REENTRANT)) {
+ continue;
+ }
+
+ cb(data, suite, case_, &test_powerlosses[p]);
+ }
+ }
+ }
+
+ return;
+ }
+
+ test_seen_t seen = {NULL, 0, 0};
+
+ for (size_t k = 0; k < case_->permutations; k++) {
+ // define permutation
+ test_define_perm(suite, case_, k);
+
+ for (size_t v = 0; v < test_override_define_permutations; v++) {
+ // define override permutation
+ test_define_override(v);
+
+ for (size_t g = 0; g < test_geometry_count; g++) {
+ // define geometry
+ test_define_geometry(&test_geometries[g]);
+ test_define_flush();
+
+ // have we seen this permutation before?
+ bool was_seen = test_seen_insert(&seen, suite, case_);
+ if (!(k == 0 && v == 0 && g == 0) && was_seen) {
+ continue;
+ }
+
+ if (cycles) {
+ cb(data, suite, case_, &(test_powerloss_t){
+ .run=run_powerloss_cycles,
+ .cycles=cycles,
+ .cycle_count=cycle_count});
+ } else {
+ for (size_t p = 0; p < test_powerloss_count; p++) {
+ // skip non-reentrant tests when powerloss testing
+ if (test_powerlosses[p].run != run_powerloss_none
+ && !(case_->flags & TEST_REENTRANT)) {
+ continue;
+ }
+
+ cb(data, suite, case_, &test_powerlosses[p]);
+ }
+ }
+ }
+ }
+ }
+
+ test_seen_cleanup(&seen);
+}
+
+
+// how many permutations are there actually in a test case
+struct perm_count_state {
+ size_t total;
+ size_t filtered;
+};
+
+void perm_count(
+ void *data,
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ const test_powerloss_t *powerloss) {
+ struct perm_count_state *state = data;
+ (void)suite;
+ (void)case_;
+ (void)powerloss;
+
+ state->total += 1;
+
+ if (case_->filter && !case_->filter()) {
+ return;
+ }
+
+ state->filtered += 1;
+}
+
+
+// operations we can do
+static void summary(void) {
+ printf("%-23s %7s %7s %7s %11s\n",
+ "", "flags", "suites", "cases", "perms");
+ size_t suites = 0;
+ size_t cases = 0;
+ test_flags_t flags = 0;
+ struct perm_count_state perms = {0, 0};
+
+ for (size_t t = 0; t < test_id_count; t++) {
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ test_define_suite(&test_suites[i]);
+
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (test_ids[t].name && !(
+ strcmp(test_ids[t].name,
+ test_suites[i].name) == 0
+ || strcmp(test_ids[t].name,
+ test_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ cases += 1;
+ case_forperm(
+ &test_suites[i],
+ &test_suites[i].cases[j],
+ test_ids[t].defines,
+ test_ids[t].define_count,
+ test_ids[t].cycles,
+ test_ids[t].cycle_count,
+ perm_count,
+ &perms);
+ }
+
+ suites += 1;
+ flags |= test_suites[i].flags;
+ }
+ }
+
+ char perm_buf[64];
+ sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total);
+ char flag_buf[64];
+ sprintf(flag_buf, "%s%s",
+ (flags & TEST_REENTRANT) ? "r" : "",
+ (!flags) ? "-" : "");
+ printf("%-23s %7s %7zu %7zu %11s\n",
+ "TOTAL",
+ flag_buf,
+ suites,
+ cases,
+ perm_buf);
+}
+
+static void list_suites(void) {
+    // make the name column wide enough to fit the longest name
+ unsigned name_width = 23;
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ size_t len = strlen(test_suites[i].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
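+    // round up so that the name column, plus one space, is a multiple
+    // of 4, this keeps the output columns aligned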
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %7s %7s %11s\n",
+ name_width, "suite", "flags", "cases", "perms");
+ for (size_t t = 0; t < test_id_count; t++) {
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ test_define_suite(&test_suites[i]);
+
+ size_t cases = 0;
+ struct perm_count_state perms = {0, 0};
+
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (test_ids[t].name && !(
+ strcmp(test_ids[t].name,
+ test_suites[i].name) == 0
+ || strcmp(test_ids[t].name,
+ test_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ cases += 1;
+ case_forperm(
+ &test_suites[i],
+ &test_suites[i].cases[j],
+ test_ids[t].defines,
+ test_ids[t].define_count,
+ test_ids[t].cycles,
+ test_ids[t].cycle_count,
+ perm_count,
+ &perms);
+ }
+
+ // no tests found?
+ if (!cases) {
+ continue;
+ }
+
+ char perm_buf[64];
+ sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total);
+ char flag_buf[64];
+ sprintf(flag_buf, "%s%s",
+ (test_suites[i].flags & TEST_REENTRANT) ? "r" : "",
+ (!test_suites[i].flags) ? "-" : "");
+ printf("%-*s %7s %7zu %11s\n",
+ name_width,
+ test_suites[i].name,
+ flag_buf,
+ cases,
+ perm_buf);
+ }
+ }
+}
+
+static void list_cases(void) {
+    // make the name column wide enough to fit the longest name
+ unsigned name_width = 23;
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ size_t len = strlen(test_suites[i].cases[j].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %7s %11s\n", name_width, "case", "flags", "perms");
+ for (size_t t = 0; t < test_id_count; t++) {
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ test_define_suite(&test_suites[i]);
+
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (test_ids[t].name && !(
+ strcmp(test_ids[t].name,
+ test_suites[i].name) == 0
+ || strcmp(test_ids[t].name,
+ test_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ struct perm_count_state perms = {0, 0};
+ case_forperm(
+ &test_suites[i],
+ &test_suites[i].cases[j],
+ test_ids[t].defines,
+ test_ids[t].define_count,
+ test_ids[t].cycles,
+ test_ids[t].cycle_count,
+ perm_count,
+ &perms);
+
+ char perm_buf[64];
+ sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total);
+ char flag_buf[64];
+ sprintf(flag_buf, "%s%s",
+ (test_suites[i].cases[j].flags & TEST_REENTRANT)
+ ? "r" : "",
+ (!test_suites[i].cases[j].flags)
+ ? "-" : "");
+ printf("%-*s %7s %11s\n",
+ name_width,
+ test_suites[i].cases[j].name,
+ flag_buf,
+ perm_buf);
+ }
+ }
+ }
+}
+
+static void list_suite_paths(void) {
+    // make the name column wide enough to fit the longest name
+ unsigned name_width = 23;
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ size_t len = strlen(test_suites[i].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %s\n", name_width, "suite", "path");
+ for (size_t t = 0; t < test_id_count; t++) {
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ size_t cases = 0;
+
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (test_ids[t].name && !(
+ strcmp(test_ids[t].name,
+ test_suites[i].name) == 0
+ || strcmp(test_ids[t].name,
+ test_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ cases += 1;
+ }
+
+ // no tests found?
+ if (!cases) {
+ continue;
+ }
+
+ printf("%-*s %s\n",
+ name_width,
+ test_suites[i].name,
+ test_suites[i].path);
+ }
+ }
+}
+
+static void list_case_paths(void) {
+    // make the name column wide enough to fit the longest name
+ unsigned name_width = 23;
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ size_t len = strlen(test_suites[i].cases[j].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %s\n", name_width, "case", "path");
+ for (size_t t = 0; t < test_id_count; t++) {
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (test_ids[t].name && !(
+ strcmp(test_ids[t].name,
+ test_suites[i].name) == 0
+ || strcmp(test_ids[t].name,
+ test_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ printf("%-*s %s\n",
+ name_width,
+ test_suites[i].cases[j].name,
+ test_suites[i].cases[j].path);
+ }
+ }
+ }
+}
+
+struct list_defines_define {
+ const char *name;
+ intmax_t *values;
+ size_t value_count;
+ size_t value_capacity;
+};
+
+struct list_defines_defines {
+ struct list_defines_define *defines;
+ size_t define_count;
+ size_t define_capacity;
+};
+
+static void list_defines_add(
+ struct list_defines_defines *defines,
+ size_t d) {
+ const char *name = test_define_name(d);
+ intmax_t value = TEST_DEFINE(d);
+
+ // define already in defines?
+ for (size_t i = 0; i < defines->define_count; i++) {
+ if (strcmp(defines->defines[i].name, name) == 0) {
+ // value already in values?
+ for (size_t j = 0; j < defines->defines[i].value_count; j++) {
+ if (defines->defines[i].values[j] == value) {
+ return;
+ }
+ }
+
+ *(intmax_t*)mappend(
+ (void**)&defines->defines[i].values,
+ sizeof(intmax_t),
+ &defines->defines[i].value_count,
+ &defines->defines[i].value_capacity) = value;
+
+ return;
+ }
+ }
+
+ // new define?
+ struct list_defines_define *define = mappend(
+ (void**)&defines->defines,
+ sizeof(struct list_defines_define),
+ &defines->define_count,
+ &defines->define_capacity);
+ define->name = name;
+ define->values = malloc(sizeof(intmax_t));
+ define->values[0] = value;
+ define->value_count = 1;
+ define->value_capacity = 1;
+}
+
+void perm_list_defines(
+ void *data,
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ const test_powerloss_t *powerloss) {
+ struct list_defines_defines *defines = data;
+ (void)suite;
+ (void)case_;
+ (void)powerloss;
+
+ // collect defines
+ for (size_t d = 0;
+ d < lfs_max(suite->define_count,
+ TEST_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ if (d < TEST_IMPLICIT_DEFINE_COUNT
+ || test_define_ispermutation(d)) {
+ list_defines_add(defines, d);
+ }
+ }
+}
+
+void perm_list_permutation_defines(
+ void *data,
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ const test_powerloss_t *powerloss) {
+ struct list_defines_defines *defines = data;
+ (void)suite;
+ (void)case_;
+ (void)powerloss;
+
+ // collect permutation_defines
+ for (size_t d = 0;
+ d < lfs_max(suite->define_count,
+ TEST_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ if (test_define_ispermutation(d)) {
+ list_defines_add(defines, d);
+ }
+ }
+}
+
+extern const test_geometry_t builtin_geometries[];
+
+static void list_defines(void) {
+ struct list_defines_defines defines = {NULL, 0, 0};
+
+ // add defines
+ for (size_t t = 0; t < test_id_count; t++) {
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ test_define_suite(&test_suites[i]);
+
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (test_ids[t].name && !(
+ strcmp(test_ids[t].name,
+ test_suites[i].name) == 0
+ || strcmp(test_ids[t].name,
+ test_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ case_forperm(
+ &test_suites[i],
+ &test_suites[i].cases[j],
+ test_ids[t].defines,
+ test_ids[t].define_count,
+ test_ids[t].cycles,
+ test_ids[t].cycle_count,
+ perm_list_defines,
+ &defines);
+ }
+ }
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ printf("%s=", defines.defines[i].name);
+ for (size_t j = 0; j < defines.defines[i].value_count; j++) {
+ printf("%jd", defines.defines[i].values[j]);
+ if (j != defines.defines[i].value_count-1) {
+ printf(",");
+ }
+ }
+ printf("\n");
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ free(defines.defines[i].values);
+ }
+ free(defines.defines);
+}
+
+static void list_permutation_defines(void) {
+ struct list_defines_defines defines = {NULL, 0, 0};
+
+ // add permutation defines
+ for (size_t t = 0; t < test_id_count; t++) {
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ test_define_suite(&test_suites[i]);
+
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (test_ids[t].name && !(
+ strcmp(test_ids[t].name,
+ test_suites[i].name) == 0
+ || strcmp(test_ids[t].name,
+ test_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ case_forperm(
+ &test_suites[i],
+ &test_suites[i].cases[j],
+ test_ids[t].defines,
+ test_ids[t].define_count,
+ test_ids[t].cycles,
+ test_ids[t].cycle_count,
+ perm_list_permutation_defines,
+ &defines);
+ }
+ }
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ printf("%s=", defines.defines[i].name);
+ for (size_t j = 0; j < defines.defines[i].value_count; j++) {
+ printf("%jd", defines.defines[i].values[j]);
+ if (j != defines.defines[i].value_count-1) {
+ printf(",");
+ }
+ }
+ printf("\n");
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ free(defines.defines[i].values);
+ }
+ free(defines.defines);
+}
+
+static void list_implicit_defines(void) {
+ struct list_defines_defines defines = {NULL, 0, 0};
+
+    // yes we do need to define a suite, this does a bit of bookkeeping
+ // such as setting up the define cache
+ test_define_suite(&(const struct test_suite){0});
+
+ // make sure to include builtin geometries here
+ extern const test_geometry_t builtin_geometries[];
+ for (size_t g = 0; builtin_geometries[g].name; g++) {
+ test_define_geometry(&builtin_geometries[g]);
+ test_define_flush();
+
+ // add implicit defines
+ for (size_t d = 0; d < TEST_IMPLICIT_DEFINE_COUNT; d++) {
+ list_defines_add(&defines, d);
+ }
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ printf("%s=", defines.defines[i].name);
+ for (size_t j = 0; j < defines.defines[i].value_count; j++) {
+ printf("%jd", defines.defines[i].values[j]);
+ if (j != defines.defines[i].value_count-1) {
+ printf(",");
+ }
+ }
+ printf("\n");
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ free(defines.defines[i].values);
+ }
+ free(defines.defines);
+}
+
+
+
+// geometries to test
+
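+// each builtin geometry provides read-size, prog-size, block-size, and
+// block-count defines, any left unset fall back to the implicit
+// defaults in test_runner.h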
+const test_geometry_t builtin_geometries[] = {
+ {"default", {{0}, TEST_CONST(16), TEST_CONST(512), {0}}},
+ {"eeprom", {{0}, TEST_CONST(1), TEST_CONST(512), {0}}},
+ {"emmc", {{0}, {0}, TEST_CONST(512), {0}}},
+ {"nor", {{0}, TEST_CONST(1), TEST_CONST(4096), {0}}},
+ {"nand", {{0}, TEST_CONST(4096), TEST_CONST(32768), {0}}},
+ {NULL, {{0}, {0}, {0}, {0}}},
+};
+
+const test_geometry_t *test_geometries = builtin_geometries;
+size_t test_geometry_count = 5;
+
+static void list_geometries(void) {
+    // make the name column wide enough to fit the longest name
+ unsigned name_width = 23;
+ for (size_t g = 0; builtin_geometries[g].name; g++) {
+ size_t len = strlen(builtin_geometries[g].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+    // yes we do need to define a suite, this does a bit of bookkeeping
+ // such as setting up the define cache
+ test_define_suite(&(const struct test_suite){0});
+
+ printf("%-*s %7s %7s %7s %7s %11s\n",
+ name_width, "geometry", "read", "prog", "erase", "count", "size");
+ for (size_t g = 0; builtin_geometries[g].name; g++) {
+ test_define_geometry(&builtin_geometries[g]);
+ test_define_flush();
+ printf("%-*s %7ju %7ju %7ju %7ju %11ju\n",
+ name_width,
+ builtin_geometries[g].name,
+ READ_SIZE,
+ PROG_SIZE,
+ BLOCK_SIZE,
+ BLOCK_COUNT,
+ BLOCK_SIZE*BLOCK_COUNT);
+ }
+}
+
+
+// scenarios to run tests under power-loss
+
+static void run_powerloss_none(
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ const struct test_suite *suite,
+ const struct test_case *case_) {
+ (void)cycles;
+ (void)cycle_count;
+ (void)suite;
+
+ // create block device and configuration
+ lfs_emubd_t bd;
+
+ struct lfs_config cfg = {
+ .context = &bd,
+ .read = lfs_emubd_read,
+ .prog = lfs_emubd_prog,
+ .erase = lfs_emubd_erase,
+ .sync = lfs_emubd_sync,
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .block_size = BLOCK_SIZE,
+ .block_count = BLOCK_COUNT,
+ .block_cycles = BLOCK_CYCLES,
+ .cache_size = CACHE_SIZE,
+ .lookahead_size = LOOKAHEAD_SIZE,
+ };
+
+ struct lfs_emubd_config bdcfg = {
+ .erase_value = ERASE_VALUE,
+ .erase_cycles = ERASE_CYCLES,
+ .badblock_behavior = BADBLOCK_BEHAVIOR,
+ .disk_path = test_disk_path,
+ .read_sleep = test_read_sleep,
+ .prog_sleep = test_prog_sleep,
+ .erase_sleep = test_erase_sleep,
+ };
+
+ int err = lfs_emubd_createcfg(&cfg, test_disk_path, &bdcfg);
+ if (err) {
+ fprintf(stderr, "error: could not create block device: %d\n", err);
+ exit(-1);
+ }
+
+ // run the test
+ printf("running ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ case_->run(&cfg);
+
+ printf("finished ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ // cleanup
+ err = lfs_emubd_destroy(&cfg);
+ if (err) {
+ fprintf(stderr, "error: could not destroy block device: %d\n", err);
+ exit(-1);
+ }
+}
+
+static void powerloss_longjmp(void *c) {
+ jmp_buf *powerloss_jmp = c;
+ longjmp(*powerloss_jmp, 1);
+}
+
+static void run_powerloss_linear(
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ const struct test_suite *suite,
+ const struct test_case *case_) {
+ (void)cycles;
+ (void)cycle_count;
+ (void)suite;
+
+ // create block device and configuration
+ lfs_emubd_t bd;
+ jmp_buf powerloss_jmp;
+ volatile lfs_emubd_powercycles_t i = 1;
+
+ struct lfs_config cfg = {
+ .context = &bd,
+ .read = lfs_emubd_read,
+ .prog = lfs_emubd_prog,
+ .erase = lfs_emubd_erase,
+ .sync = lfs_emubd_sync,
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .block_size = BLOCK_SIZE,
+ .block_count = BLOCK_COUNT,
+ .block_cycles = BLOCK_CYCLES,
+ .cache_size = CACHE_SIZE,
+ .lookahead_size = LOOKAHEAD_SIZE,
+ };
+
+ struct lfs_emubd_config bdcfg = {
+ .erase_value = ERASE_VALUE,
+ .erase_cycles = ERASE_CYCLES,
+ .badblock_behavior = BADBLOCK_BEHAVIOR,
+ .disk_path = test_disk_path,
+ .read_sleep = test_read_sleep,
+ .prog_sleep = test_prog_sleep,
+ .erase_sleep = test_erase_sleep,
+ .power_cycles = i,
+ .powerloss_behavior = POWERLOSS_BEHAVIOR,
+ .powerloss_cb = powerloss_longjmp,
+ .powerloss_data = &powerloss_jmp,
+ };
+
+ int err = lfs_emubd_createcfg(&cfg, test_disk_path, &bdcfg);
+ if (err) {
+ fprintf(stderr, "error: could not create block device: %d\n", err);
+ exit(-1);
+ }
+
+ // run the test, increasing power-cycles as power-loss events occur
+ printf("running ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
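+    // on power-loss, powerloss_longjmp lands us back here, where we
+    // bump the number of operations allowed before the next power-loss
+    // and rerun the test against the now possibly-dirty disk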
+ while (true) {
+ if (!setjmp(powerloss_jmp)) {
+ // run the test
+ case_->run(&cfg);
+ break;
+ }
+
+ // power-loss!
+ printf("powerloss ");
+ perm_printid(suite, case_, NULL, 0);
+ printf(":");
+ for (lfs_emubd_powercycles_t j = 1; j <= i; j++) {
+ leb16_print(j);
+ }
+ printf("\n");
+
+ i += 1;
+ lfs_emubd_setpowercycles(&cfg, i);
+ }
+
+ printf("finished ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ // cleanup
+ err = lfs_emubd_destroy(&cfg);
+ if (err) {
+ fprintf(stderr, "error: could not destroy block device: %d\n", err);
+ exit(-1);
+ }
+}
+
+static void run_powerloss_log(
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ const struct test_suite *suite,
+ const struct test_case *case_) {
+ (void)cycles;
+ (void)cycle_count;
+ (void)suite;
+
+ // create block device and configuration
+ lfs_emubd_t bd;
+ jmp_buf powerloss_jmp;
+ volatile lfs_emubd_powercycles_t i = 1;
+
+ struct lfs_config cfg = {
+ .context = &bd,
+ .read = lfs_emubd_read,
+ .prog = lfs_emubd_prog,
+ .erase = lfs_emubd_erase,
+ .sync = lfs_emubd_sync,
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .block_size = BLOCK_SIZE,
+ .block_count = BLOCK_COUNT,
+ .block_cycles = BLOCK_CYCLES,
+ .cache_size = CACHE_SIZE,
+ .lookahead_size = LOOKAHEAD_SIZE,
+ };
+
+ struct lfs_emubd_config bdcfg = {
+ .erase_value = ERASE_VALUE,
+ .erase_cycles = ERASE_CYCLES,
+ .badblock_behavior = BADBLOCK_BEHAVIOR,
+ .disk_path = test_disk_path,
+ .read_sleep = test_read_sleep,
+ .prog_sleep = test_prog_sleep,
+ .erase_sleep = test_erase_sleep,
+ .power_cycles = i,
+ .powerloss_behavior = POWERLOSS_BEHAVIOR,
+ .powerloss_cb = powerloss_longjmp,
+ .powerloss_data = &powerloss_jmp,
+ };
+
+ int err = lfs_emubd_createcfg(&cfg, test_disk_path, &bdcfg);
+ if (err) {
+ fprintf(stderr, "error: could not create block device: %d\n", err);
+ exit(-1);
+ }
+
+ // run the test, increasing power-cycles as power-loss events occur
+ printf("running ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ while (true) {
+ if (!setjmp(powerloss_jmp)) {
+ // run the test
+ case_->run(&cfg);
+ break;
+ }
+
+ // power-loss!
+ printf("powerloss ");
+ perm_printid(suite, case_, NULL, 0);
+ printf(":");
+ for (lfs_emubd_powercycles_t j = 1; j <= i; j *= 2) {
+ leb16_print(j);
+ }
+ printf("\n");
+
+ i *= 2;
+ lfs_emubd_setpowercycles(&cfg, i);
+ }
+
+ printf("finished ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ // cleanup
+ err = lfs_emubd_destroy(&cfg);
+ if (err) {
+ fprintf(stderr, "error: could not destroy block device: %d\n", err);
+ exit(-1);
+ }
+}
+
+static void run_powerloss_cycles(
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ const struct test_suite *suite,
+ const struct test_case *case_) {
+ (void)suite;
+
+ // create block device and configuration
+ lfs_emubd_t bd;
+ jmp_buf powerloss_jmp;
+ volatile size_t i = 0;
+
+ struct lfs_config cfg = {
+ .context = &bd,
+ .read = lfs_emubd_read,
+ .prog = lfs_emubd_prog,
+ .erase = lfs_emubd_erase,
+ .sync = lfs_emubd_sync,
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .block_size = BLOCK_SIZE,
+ .block_count = BLOCK_COUNT,
+ .block_cycles = BLOCK_CYCLES,
+ .cache_size = CACHE_SIZE,
+ .lookahead_size = LOOKAHEAD_SIZE,
+ };
+
+ struct lfs_emubd_config bdcfg = {
+ .erase_value = ERASE_VALUE,
+ .erase_cycles = ERASE_CYCLES,
+ .badblock_behavior = BADBLOCK_BEHAVIOR,
+ .disk_path = test_disk_path,
+ .read_sleep = test_read_sleep,
+ .prog_sleep = test_prog_sleep,
+ .erase_sleep = test_erase_sleep,
+ .power_cycles = (i < cycle_count) ? cycles[i] : 0,
+ .powerloss_behavior = POWERLOSS_BEHAVIOR,
+ .powerloss_cb = powerloss_longjmp,
+ .powerloss_data = &powerloss_jmp,
+ };
+
+ int err = lfs_emubd_createcfg(&cfg, test_disk_path, &bdcfg);
+ if (err) {
+ fprintf(stderr, "error: could not create block device: %d\n", err);
+ exit(-1);
+ }
+
+    // run the test, injecting power-losses at the requested cycles
+ printf("running ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ while (true) {
+ if (!setjmp(powerloss_jmp)) {
+ // run the test
+ case_->run(&cfg);
+ break;
+ }
+
+ // power-loss!
+ assert(i <= cycle_count);
+ printf("powerloss ");
+ perm_printid(suite, case_, cycles, i+1);
+ printf("\n");
+
+ i += 1;
+ lfs_emubd_setpowercycles(&cfg,
+ (i < cycle_count) ? cycles[i] : 0);
+ }
+
+ printf("finished ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ // cleanup
+ err = lfs_emubd_destroy(&cfg);
+ if (err) {
+ fprintf(stderr, "error: could not destroy block device: %d\n", err);
+ exit(-1);
+ }
+}
+
+struct powerloss_exhaustive_state {
+ struct lfs_config *cfg;
+
+ lfs_emubd_t *branches;
+ size_t branch_count;
+ size_t branch_capacity;
+};
+
+struct powerloss_exhaustive_cycles {
+ lfs_emubd_powercycles_t *cycles;
+ size_t cycle_count;
+ size_t cycle_capacity;
+};
+
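+// exhaustive power-loss testing is a depth-first traversal, every bd
+// operation that can lose power forks a copy-on-write branch of the
+// block device, which run_powerloss_exhaustive_layer then recurses
+// into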
+static void powerloss_exhaustive_branch(void *c) {
+ struct powerloss_exhaustive_state *state = c;
+ // append to branches
+ lfs_emubd_t *branch = mappend(
+ (void**)&state->branches,
+ sizeof(lfs_emubd_t),
+ &state->branch_count,
+ &state->branch_capacity);
+ if (!branch) {
+ fprintf(stderr, "error: exhaustive: out of memory\n");
+ exit(-1);
+ }
+
+ // create copy-on-write copy
+ int err = lfs_emubd_copy(state->cfg, branch);
+ if (err) {
+ fprintf(stderr, "error: exhaustive: could not create bd copy\n");
+ exit(-1);
+ }
+
+ // also trigger on next power cycle
+ lfs_emubd_setpowercycles(state->cfg, 1);
+}
+
+static void run_powerloss_exhaustive_layer(
+ struct powerloss_exhaustive_cycles *cycles,
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ struct lfs_config *cfg,
+ struct lfs_emubd_config *bdcfg,
+ size_t depth) {
+ (void)suite;
+
+ struct powerloss_exhaustive_state state = {
+ .cfg = cfg,
+ .branches = NULL,
+ .branch_count = 0,
+ .branch_capacity = 0,
+ };
+
+ // run through the test without additional powerlosses, collecting possible
+ // branches as we do so
+ lfs_emubd_setpowercycles(state.cfg, depth > 0 ? 1 : 0);
+ bdcfg->powerloss_data = &state;
+
+ // run the tests
+ case_->run(cfg);
+
+ // aggressively clean up memory here to try to keep our memory usage low
+ int err = lfs_emubd_destroy(cfg);
+ if (err) {
+ fprintf(stderr, "error: could not destroy block device: %d\n", err);
+ exit(-1);
+ }
+
+ // recurse into each branch
+ for (size_t i = 0; i < state.branch_count; i++) {
+ // first push and print the branch
+ lfs_emubd_powercycles_t *cycle = mappend(
+ (void**)&cycles->cycles,
+ sizeof(lfs_emubd_powercycles_t),
+ &cycles->cycle_count,
+ &cycles->cycle_capacity);
+ if (!cycle) {
+ fprintf(stderr, "error: exhaustive: out of memory\n");
+ exit(-1);
+ }
+ *cycle = i+1;
+
+ printf("powerloss ");
+ perm_printid(suite, case_, cycles->cycles, cycles->cycle_count);
+ printf("\n");
+
+ // now recurse
+ cfg->context = &state.branches[i];
+ run_powerloss_exhaustive_layer(cycles,
+ suite, case_,
+ cfg, bdcfg, depth-1);
+
+ // pop the cycle
+ cycles->cycle_count -= 1;
+ }
+
+ // clean up memory
+ free(state.branches);
+}
+
+static void run_powerloss_exhaustive(
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ const struct test_suite *suite,
+ const struct test_case *case_) {
+ (void)cycles;
+ (void)suite;
+
+ // create block device and configuration
+ lfs_emubd_t bd;
+
+ struct lfs_config cfg = {
+ .context = &bd,
+ .read = lfs_emubd_read,
+ .prog = lfs_emubd_prog,
+ .erase = lfs_emubd_erase,
+ .sync = lfs_emubd_sync,
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .block_size = BLOCK_SIZE,
+ .block_count = BLOCK_COUNT,
+ .block_cycles = BLOCK_CYCLES,
+ .cache_size = CACHE_SIZE,
+ .lookahead_size = LOOKAHEAD_SIZE,
+ };
+
+ struct lfs_emubd_config bdcfg = {
+ .erase_value = ERASE_VALUE,
+ .erase_cycles = ERASE_CYCLES,
+ .badblock_behavior = BADBLOCK_BEHAVIOR,
+ .disk_path = test_disk_path,
+ .read_sleep = test_read_sleep,
+ .prog_sleep = test_prog_sleep,
+ .erase_sleep = test_erase_sleep,
+ .powerloss_behavior = POWERLOSS_BEHAVIOR,
+ .powerloss_cb = powerloss_exhaustive_branch,
+ .powerloss_data = NULL,
+ };
+
+ int err = lfs_emubd_createcfg(&cfg, test_disk_path, &bdcfg);
+ if (err) {
+ fprintf(stderr, "error: could not create block device: %d\n", err);
+ exit(-1);
+ }
+
+    // run the test, exhaustively injecting power-losses
+ printf("running ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ // recursively exhaust each layer of powerlosses
+ run_powerloss_exhaustive_layer(
+ &(struct powerloss_exhaustive_cycles){NULL, 0, 0},
+ suite, case_,
+ &cfg, &bdcfg, cycle_count);
+
+ printf("finished ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+}
+
+
+const test_powerloss_t builtin_powerlosses[] = {
+ {"none", run_powerloss_none, NULL, 0},
+ {"log", run_powerloss_log, NULL, 0},
+ {"linear", run_powerloss_linear, NULL, 0},
+ {"exhaustive", run_powerloss_exhaustive, NULL, SIZE_MAX},
+ {NULL, NULL, NULL, 0},
+};
+
+const char *const builtin_powerlosses_help[] = {
+ "Run with no power-losses.",
+ "Run with exponentially-decreasing power-losses.",
+ "Run with linearly-decreasing power-losses.",
+ "Run a all permutations of power-losses, this may take a while.",
+ "Run a all permutations of n power-losses.",
+ "Run a custom comma-separated set of power-losses.",
+ "Run a custom leb16-encoded set of power-losses.",
+};
+
+// default to -Pnone,linear, which provides a good heuristic while still
+// running quickly
+const test_powerloss_t *test_powerlosses = (const test_powerloss_t[]){
+ {"none", run_powerloss_none, NULL, 0},
+ {"linear", run_powerloss_linear, NULL, 0},
+};
+size_t test_powerloss_count = 2;
+
+static void list_powerlosses(void) {
+ // at least size so that names fit
+ unsigned name_width = 23;
+ for (size_t i = 0; builtin_powerlosses[i].name; i++) {
+ size_t len = strlen(builtin_powerlosses[i].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %s\n", name_width, "scenario", "description");
+ size_t i = 0;
+ for (; builtin_powerlosses[i].name; i++) {
+ printf("%-*s %s\n",
+ name_width,
+ builtin_powerlosses[i].name,
+ builtin_powerlosses_help[i]);
+ }
+
+ // a couple more options with special parsing
+ printf("%-*s %s\n", name_width, "1,2,3", builtin_powerlosses_help[i+0]);
+ printf("%-*s %s\n", name_width, "{1,2,3}", builtin_powerlosses_help[i+1]);
+ printf("%-*s %s\n", name_width, ":1248g1", builtin_powerlosses_help[i+2]);
+}
+
+
+// global test step count
+size_t test_step = 0;
+
+void perm_run(
+ void *data,
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ const test_powerloss_t *powerloss) {
+ (void)data;
+
+ // skip this step?
+ if (!(test_step >= test_step_start
+ && test_step < test_step_stop
+ && (test_step-test_step_start) % test_step_step == 0)) {
+ test_step += 1;
+ return;
+ }
+ test_step += 1;
+
+ // filter?
+ if (case_->filter && !case_->filter()) {
+ printf("skipped ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+ return;
+ }
+
+ powerloss->run(
+ powerloss->cycles, powerloss->cycle_count,
+ suite, case_);
+}
+
+static void run(void) {
+ // ignore disconnected pipes
+ signal(SIGPIPE, SIG_IGN);
+
+ for (size_t t = 0; t < test_id_count; t++) {
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ test_define_suite(&test_suites[i]);
+
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (test_ids[t].name && !(
+ strcmp(test_ids[t].name,
+ test_suites[i].name) == 0
+ || strcmp(test_ids[t].name,
+ test_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ case_forperm(
+ &test_suites[i],
+ &test_suites[i].cases[j],
+ test_ids[t].defines,
+ test_ids[t].define_count,
+ test_ids[t].cycles,
+ test_ids[t].cycle_count,
+ perm_run,
+ NULL);
+ }
+ }
+ }
+}
+
+
+
+// option handling
+enum opt_flags {
+ OPT_HELP = 'h',
+ OPT_SUMMARY = 'Y',
+ OPT_LIST_SUITES = 'l',
+ OPT_LIST_CASES = 'L',
+ OPT_LIST_SUITE_PATHS = 1,
+ OPT_LIST_CASE_PATHS = 2,
+ OPT_LIST_DEFINES = 3,
+ OPT_LIST_PERMUTATION_DEFINES = 4,
+ OPT_LIST_IMPLICIT_DEFINES = 5,
+ OPT_LIST_GEOMETRIES = 6,
+ OPT_LIST_POWERLOSSES = 7,
+ OPT_DEFINE = 'D',
+ OPT_GEOMETRY = 'G',
+ OPT_POWERLOSS = 'P',
+ OPT_STEP = 's',
+ OPT_DISK = 'd',
+ OPT_TRACE = 't',
+ OPT_TRACE_BACKTRACE = 8,
+ OPT_TRACE_PERIOD = 9,
+ OPT_TRACE_FREQ = 10,
+ OPT_READ_SLEEP = 11,
+ OPT_PROG_SLEEP = 12,
+ OPT_ERASE_SLEEP = 13,
+};
+
+const char *short_opts = "hYlLD:G:P:s:d:t:";
+
+const struct option long_opts[] = {
+ {"help", no_argument, NULL, OPT_HELP},
+ {"summary", no_argument, NULL, OPT_SUMMARY},
+ {"list-suites", no_argument, NULL, OPT_LIST_SUITES},
+ {"list-cases", no_argument, NULL, OPT_LIST_CASES},
+ {"list-suite-paths", no_argument, NULL, OPT_LIST_SUITE_PATHS},
+ {"list-case-paths", no_argument, NULL, OPT_LIST_CASE_PATHS},
+ {"list-defines", no_argument, NULL, OPT_LIST_DEFINES},
+ {"list-permutation-defines",
+ no_argument, NULL, OPT_LIST_PERMUTATION_DEFINES},
+ {"list-implicit-defines",
+ no_argument, NULL, OPT_LIST_IMPLICIT_DEFINES},
+ {"list-geometries", no_argument, NULL, OPT_LIST_GEOMETRIES},
+ {"list-powerlosses", no_argument, NULL, OPT_LIST_POWERLOSSES},
+ {"define", required_argument, NULL, OPT_DEFINE},
+ {"geometry", required_argument, NULL, OPT_GEOMETRY},
+ {"powerloss", required_argument, NULL, OPT_POWERLOSS},
+ {"step", required_argument, NULL, OPT_STEP},
+ {"disk", required_argument, NULL, OPT_DISK},
+ {"trace", required_argument, NULL, OPT_TRACE},
+ {"trace-backtrace", no_argument, NULL, OPT_TRACE_BACKTRACE},
+ {"trace-period", required_argument, NULL, OPT_TRACE_PERIOD},
+ {"trace-freq", required_argument, NULL, OPT_TRACE_FREQ},
+ {"read-sleep", required_argument, NULL, OPT_READ_SLEEP},
+ {"prog-sleep", required_argument, NULL, OPT_PROG_SLEEP},
+ {"erase-sleep", required_argument, NULL, OPT_ERASE_SLEEP},
+ {NULL, 0, NULL, 0},
+};
+
+const char *const help_text[] = {
+ "Show this help message.",
+ "Show quick summary.",
+ "List test suites.",
+ "List test cases.",
+ "List the path for each test suite.",
+ "List the path and line number for each test case.",
+ "List all defines in this test-runner.",
+ "List explicit defines in this test-runner.",
+ "List implicit defines in this test-runner.",
+ "List the available disk geometries.",
+ "List the available power-loss scenarios.",
+ "Override a test define.",
+ "Comma-separated list of disk geometries to test.",
+ "Comma-separated list of power-loss scenarios to test.",
+ "Comma-separated range of test permutations to run (start,stop,step).",
+ "Direct block device operations to this file.",
+ "Direct trace output to this file.",
+ "Include a backtrace with every trace statement.",
+ "Sample trace output at this period in cycles.",
+ "Sample trace output at this frequency in hz.",
+ "Artificial read delay in seconds.",
+ "Artificial prog delay in seconds.",
+ "Artificial erase delay in seconds.",
+};
+
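+// example invocations, assuming the runner is built at
+// ./runners/test_runner:
+//
+//   ./runners/test_runner -Y            print a one-line summary
+//   ./runners/test_runner -l            list the available test suites
+//   ./runners/test_runner -Gnor -Plinear
+//                                       run under the nor geometry and
+//                                       the linear power-loss scenario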
+int main(int argc, char **argv) {
+ void (*op)(void) = run;
+
+ size_t test_override_capacity = 0;
+ size_t test_geometry_capacity = 0;
+ size_t test_powerloss_capacity = 0;
+ size_t test_id_capacity = 0;
+
+ // parse options
+ while (true) {
+ int c = getopt_long(argc, argv, short_opts, long_opts, NULL);
+ switch (c) {
+ // generate help message
+ case OPT_HELP: {
+ printf("usage: %s [options] [test_id]\n", argv[0]);
+ printf("\n");
+
+ printf("options:\n");
+ size_t i = 0;
+ while (long_opts[i].name) {
+ size_t indent;
+ if (long_opts[i].has_arg == no_argument) {
+ if (long_opts[i].val >= '0' && long_opts[i].val < 'z') {
+ indent = printf(" -%c, --%s ",
+ long_opts[i].val,
+ long_opts[i].name);
+ } else {
+ indent = printf(" --%s ",
+ long_opts[i].name);
+ }
+ } else {
+ if (long_opts[i].val >= '0' && long_opts[i].val < 'z') {
+ indent = printf(" -%c %s, --%s %s ",
+ long_opts[i].val,
+ long_opts[i].name,
+ long_opts[i].name,
+ long_opts[i].name);
+ } else {
+ indent = printf(" --%s %s ",
+ long_opts[i].name,
+ long_opts[i].name);
+ }
+ }
+
+ // a quick, hacky, byte-level method for text wrapping
+ size_t len = strlen(help_text[i]);
+ size_t j = 0;
+ if (indent < 24) {
+ printf("%*s %.80s\n",
+ (int)(24-1-indent),
+ "",
+ &help_text[i][j]);
+ j += 80;
+ } else {
+ printf("\n");
+ }
+
+ while (j < len) {
+ printf("%24s%.80s\n", "", &help_text[i][j]);
+ j += 80;
+ }
+
+ i += 1;
+ }
+
+ printf("\n");
+ exit(0);
+ }
+ // summary/list flags
+ case OPT_SUMMARY:
+ op = summary;
+ break;
+ case OPT_LIST_SUITES:
+ op = list_suites;
+ break;
+ case OPT_LIST_CASES:
+ op = list_cases;
+ break;
+ case OPT_LIST_SUITE_PATHS:
+ op = list_suite_paths;
+ break;
+ case OPT_LIST_CASE_PATHS:
+ op = list_case_paths;
+ break;
+ case OPT_LIST_DEFINES:
+ op = list_defines;
+ break;
+ case OPT_LIST_PERMUTATION_DEFINES:
+ op = list_permutation_defines;
+ break;
+ case OPT_LIST_IMPLICIT_DEFINES:
+ op = list_implicit_defines;
+ break;
+ case OPT_LIST_GEOMETRIES:
+ op = list_geometries;
+ break;
+ case OPT_LIST_POWERLOSSES:
+ op = list_powerlosses;
+ break;
+ // configuration
+ case OPT_DEFINE: {
+ // allocate space
+ test_override_t *override = mappend(
+ (void**)&test_overrides,
+ sizeof(test_override_t),
+ &test_override_count,
+ &test_override_capacity);
+
+ // parse into string key/intmax_t value, cannibalizing the
+ // arg in the process
+ char *sep = strchr(optarg, '=');
+ char *parsed = NULL;
+ if (!sep) {
+ goto invalid_define;
+ }
+ *sep = '\0';
+ override->name = optarg;
+ optarg = sep+1;
+
+ // parse comma-separated permutations
+ {
+ override->defines = NULL;
+ override->permutations = 0;
+ size_t override_capacity = 0;
+ while (true) {
+ optarg += strspn(optarg, " ");
+
+ if (strncmp(optarg, "range", strlen("range")) == 0) {
+ // range of values
+ optarg += strlen("range");
+ optarg += strspn(optarg, " ");
+ if (*optarg != '(') {
+ goto invalid_define;
+ }
+ optarg += 1;
+
+ intmax_t start = strtoumax(optarg, &parsed, 0);
+ intmax_t stop = -1;
+ intmax_t step = 1;
+ // allow empty string for start=0
+ if (parsed == optarg) {
+ start = 0;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ',' && *optarg != ')') {
+ goto invalid_define;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ stop = strtoumax(optarg, &parsed, 0);
+ // allow empty string for stop=end
+ if (parsed == optarg) {
+ stop = -1;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ',' && *optarg != ')') {
+ goto invalid_define;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ step = strtoumax(optarg, &parsed, 0);
+                                // allow empty string for step=1
+ if (parsed == optarg) {
+ step = 1;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ')') {
+ goto invalid_define;
+ }
+ }
+ } else {
+ // single value = stop only
+ stop = start;
+ start = 0;
+ }
+
+ if (*optarg != ')') {
+ goto invalid_define;
+ }
+ optarg += 1;
+
+ // calculate the range of values
+ assert(step != 0);
+ for (intmax_t i = start;
+ (step < 0)
+ ? i > stop
+ : (uintmax_t)i < (uintmax_t)stop;
+ i += step) {
+ *(intmax_t*)mappend(
+ (void**)&override->defines,
+ sizeof(intmax_t),
+ &override->permutations,
+ &override_capacity) = i;
+ }
+ } else if (*optarg != '\0') {
+ // single value
+ intmax_t define = strtoimax(optarg, &parsed, 0);
+ if (parsed == optarg) {
+ goto invalid_define;
+ }
+ optarg = parsed + strspn(parsed, " ");
+ *(intmax_t*)mappend(
+ (void**)&override->defines,
+ sizeof(intmax_t),
+ &override->permutations,
+ &override_capacity) = define;
+ } else {
+ break;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ }
+ }
+ }
+ assert(override->permutations > 0);
+ break;
+
+invalid_define:
+ fprintf(stderr, "error: invalid define: %s\n", optarg);
+ exit(-1);
+ }
+ case OPT_GEOMETRY: {
+ // reset our geometry scenarios
+ if (test_geometry_capacity > 0) {
+ free((test_geometry_t*)test_geometries);
+ }
+ test_geometries = NULL;
+ test_geometry_count = 0;
+ test_geometry_capacity = 0;
+
+ // parse the comma separated list of disk geometries
+ while (*optarg) {
+ // allocate space
+ test_geometry_t *geometry = mappend(
+ (void**)&test_geometries,
+ sizeof(test_geometry_t),
+ &test_geometry_count,
+ &test_geometry_capacity);
+
+ // parse the disk geometry
+ optarg += strspn(optarg, " ");
+
+ // named disk geometry
+ size_t len = strcspn(optarg, " ,");
+ for (size_t i = 0; builtin_geometries[i].name; i++) {
+ if (len == strlen(builtin_geometries[i].name)
+ && memcmp(optarg,
+ builtin_geometries[i].name,
+ len) == 0) {
+ *geometry = builtin_geometries[i];
+ optarg += len;
+ goto geometry_next;
+ }
+ }
+
+ // comma-separated read/prog/erase/count
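+                // e.g. -G{4,32,4096,256} sets read_size=4,
+                // prog_size=32, block_size=4096, block_count=256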
+ if (*optarg == '{') {
+ lfs_size_t sizes[4];
+ size_t count = 0;
+
+ char *s = optarg + 1;
+ while (count < 4) {
+ char *parsed = NULL;
+ sizes[count] = strtoumax(s, &parsed, 0);
+ count += 1;
+
+ s = parsed + strspn(parsed, " ");
+ if (*s == ',') {
+ s += 1;
+ continue;
+ } else if (*s == '}') {
+ s += 1;
+ break;
+ } else {
+ goto geometry_unknown;
+ }
+ }
+
+ // allow implicit r=p and p=e for common geometries
+ memset(geometry, 0, sizeof(test_geometry_t));
+ if (count >= 3) {
+ geometry->defines[READ_SIZE_i]
+ = TEST_LIT(sizes[0]);
+ geometry->defines[PROG_SIZE_i]
+ = TEST_LIT(sizes[1]);
+ geometry->defines[BLOCK_SIZE_i]
+ = TEST_LIT(sizes[2]);
+ } else if (count >= 2) {
+ geometry->defines[PROG_SIZE_i]
+ = TEST_LIT(sizes[0]);
+ geometry->defines[BLOCK_SIZE_i]
+ = TEST_LIT(sizes[1]);
+ } else {
+ geometry->defines[BLOCK_SIZE_i]
+ = TEST_LIT(sizes[0]);
+ }
+ if (count >= 4) {
+ geometry->defines[BLOCK_COUNT_i]
+ = TEST_LIT(sizes[3]);
+ }
+ optarg = s;
+ goto geometry_next;
+ }
+
+ // leb16-encoded read/prog/erase/count
+ if (*optarg == ':') {
+ lfs_size_t sizes[4];
+ size_t count = 0;
+
+ char *s = optarg + 1;
+ while (true) {
+ char *parsed = NULL;
+ uintmax_t x = leb16_parse(s, &parsed);
+ if (parsed == s || count >= 4) {
+ break;
+ }
+
+ sizes[count] = x;
+ count += 1;
+ s = parsed;
+ }
+
+ // allow implicit r=p and p=e for common geometries
+ memset(geometry, 0, sizeof(test_geometry_t));
+ if (count >= 3) {
+ geometry->defines[READ_SIZE_i]
+ = TEST_LIT(sizes[0]);
+ geometry->defines[PROG_SIZE_i]
+ = TEST_LIT(sizes[1]);
+ geometry->defines[BLOCK_SIZE_i]
+ = TEST_LIT(sizes[2]);
+ } else if (count >= 2) {
+ geometry->defines[PROG_SIZE_i]
+ = TEST_LIT(sizes[0]);
+ geometry->defines[BLOCK_SIZE_i]
+ = TEST_LIT(sizes[1]);
+ } else {
+ geometry->defines[BLOCK_SIZE_i]
+ = TEST_LIT(sizes[0]);
+ }
+ if (count >= 4) {
+ geometry->defines[BLOCK_COUNT_i]
+ = TEST_LIT(sizes[3]);
+ }
+ optarg = s;
+ goto geometry_next;
+ }
+
+geometry_unknown:
+ // unknown scenario?
+ fprintf(stderr, "error: unknown disk geometry: %s\n",
+ optarg);
+ exit(-1);
+
+geometry_next:
+ optarg += strspn(optarg, " ");
+ if (*optarg == ',') {
+ optarg += 1;
+ } else if (*optarg == '\0') {
+ break;
+ } else {
+ goto geometry_unknown;
+ }
+ }
+ break;
+ }
+ case OPT_POWERLOSS: {
+ // reset our powerloss scenarios
+ if (test_powerloss_capacity > 0) {
+ free((test_powerloss_t*)test_powerlosses);
+ }
+ test_powerlosses = NULL;
+ test_powerloss_count = 0;
+ test_powerloss_capacity = 0;
+
+ // parse the comma separated list of power-loss scenarios
+ while (*optarg) {
+ // allocate space
+ test_powerloss_t *powerloss = mappend(
+ (void**)&test_powerlosses,
+ sizeof(test_powerloss_t),
+ &test_powerloss_count,
+ &test_powerloss_capacity);
+
+ // parse the power-loss scenario
+ optarg += strspn(optarg, " ");
+
+ // named power-loss scenario
+ size_t len = strcspn(optarg, " ,");
+ for (size_t i = 0; builtin_powerlosses[i].name; i++) {
+ if (len == strlen(builtin_powerlosses[i].name)
+ && memcmp(optarg,
+ builtin_powerlosses[i].name,
+ len) == 0) {
+ *powerloss = builtin_powerlosses[i];
+ optarg += len;
+ goto powerloss_next;
+ }
+ }
+
+ // comma-separated permutation
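+                // e.g. -P{1,2,3} injects the first power-loss after 1
+                // bd operation, the second after 2 more, and so on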
+ if (*optarg == '{') {
+ lfs_emubd_powercycles_t *cycles = NULL;
+ size_t cycle_count = 0;
+ size_t cycle_capacity = 0;
+
+ char *s = optarg + 1;
+ while (true) {
+ char *parsed = NULL;
+ *(lfs_emubd_powercycles_t*)mappend(
+ (void**)&cycles,
+ sizeof(lfs_emubd_powercycles_t),
+ &cycle_count,
+ &cycle_capacity)
+ = strtoumax(s, &parsed, 0);
+
+ s = parsed + strspn(parsed, " ");
+ if (*s == ',') {
+ s += 1;
+ continue;
+ } else if (*s == '}') {
+ s += 1;
+ break;
+ } else {
+ goto powerloss_unknown;
+ }
+ }
+
+ *powerloss = (test_powerloss_t){
+ .run = run_powerloss_cycles,
+ .cycles = cycles,
+ .cycle_count = cycle_count,
+ };
+ optarg = s;
+ goto powerloss_next;
+ }
+
+ // leb16-encoded permutation
+ if (*optarg == ':') {
+ lfs_emubd_powercycles_t *cycles = NULL;
+ size_t cycle_count = 0;
+ size_t cycle_capacity = 0;
+
+ char *s = optarg + 1;
+ while (true) {
+ char *parsed = NULL;
+ uintmax_t x = leb16_parse(s, &parsed);
+ if (parsed == s) {
+ break;
+ }
+
+ *(lfs_emubd_powercycles_t*)mappend(
+ (void**)&cycles,
+ sizeof(lfs_emubd_powercycles_t),
+ &cycle_count,
+ &cycle_capacity) = x;
+ s = parsed;
+ }
+
+ *powerloss = (test_powerloss_t){
+ .run = run_powerloss_cycles,
+ .cycles = cycles,
+ .cycle_count = cycle_count,
+ };
+ optarg = s;
+ goto powerloss_next;
+ }
+
+ // exhaustive permutations
+ {
+ char *parsed = NULL;
+ size_t count = strtoumax(optarg, &parsed, 0);
+ if (parsed == optarg) {
+ goto powerloss_unknown;
+ }
+ *powerloss = (test_powerloss_t){
+ .run = run_powerloss_exhaustive,
+ .cycles = NULL,
+ .cycle_count = count,
+ };
+ optarg = (char*)parsed;
+ goto powerloss_next;
+ }
+
+powerloss_unknown:
+ // unknown scenario?
+ fprintf(stderr, "error: unknown power-loss scenario: %s\n",
+ optarg);
+ exit(-1);
+
+powerloss_next:
+ optarg += strspn(optarg, " ");
+ if (*optarg == ',') {
+ optarg += 1;
+ } else if (*optarg == '\0') {
+ break;
+ } else {
+ goto powerloss_unknown;
+ }
+ }
+ break;
+ }
+ case OPT_STEP: {
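+                // parse a python-style start,stop,step range, e.g.
+                // -s10,100,2 runs every other test permutation in
+                // [10,100)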
+ char *parsed = NULL;
+ test_step_start = strtoumax(optarg, &parsed, 0);
+ test_step_stop = -1;
+ test_step_step = 1;
+ // allow empty string for start=0
+ if (parsed == optarg) {
+ test_step_start = 0;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ',' && *optarg != '\0') {
+ goto step_unknown;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ test_step_stop = strtoumax(optarg, &parsed, 0);
+ // allow empty string for stop=end
+ if (parsed == optarg) {
+ test_step_stop = -1;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ',' && *optarg != '\0') {
+ goto step_unknown;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ test_step_step = strtoumax(optarg, &parsed, 0);
+                    // allow empty string for step=1
+ if (parsed == optarg) {
+ test_step_step = 1;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != '\0') {
+ goto step_unknown;
+ }
+ }
+ } else {
+ // single value = stop only
+ test_step_stop = test_step_start;
+ test_step_start = 0;
+ }
+
+ break;
+step_unknown:
+ fprintf(stderr, "error: invalid step: %s\n", optarg);
+ exit(-1);
+ }
+ case OPT_DISK:
+ test_disk_path = optarg;
+ break;
+ case OPT_TRACE:
+ test_trace_path = optarg;
+ break;
+ case OPT_TRACE_BACKTRACE:
+ test_trace_backtrace = true;
+ break;
+ case OPT_TRACE_PERIOD: {
+ char *parsed = NULL;
+ test_trace_period = strtoumax(optarg, &parsed, 0);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid trace-period: %s\n", optarg);
+ exit(-1);
+ }
+ break;
+ }
+ case OPT_TRACE_FREQ: {
+ char *parsed = NULL;
+ test_trace_freq = strtoumax(optarg, &parsed, 0);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid trace-freq: %s\n", optarg);
+ exit(-1);
+ }
+ break;
+ }
+ case OPT_READ_SLEEP: {
+ char *parsed = NULL;
+ double read_sleep = strtod(optarg, &parsed);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid read-sleep: %s\n", optarg);
+ exit(-1);
+ }
+ test_read_sleep = read_sleep*1.0e9;
+ break;
+ }
+ case OPT_PROG_SLEEP: {
+ char *parsed = NULL;
+ double prog_sleep = strtod(optarg, &parsed);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid prog-sleep: %s\n", optarg);
+ exit(-1);
+ }
+ test_prog_sleep = prog_sleep*1.0e9;
+ break;
+ }
+ case OPT_ERASE_SLEEP: {
+ char *parsed = NULL;
+ double erase_sleep = strtod(optarg, &parsed);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid erase-sleep: %s\n", optarg);
+ exit(-1);
+ }
+ test_erase_sleep = erase_sleep*1.0e9;
+ break;
+ }
+ // done parsing
+ case -1:
+ goto getopt_done;
+ // unknown arg, getopt prints a message for us
+ default:
+ exit(-1);
+ }
+ }
+getopt_done: ;
+
+ if (argc > optind) {
+ // reset our test identifier list
+ test_ids = NULL;
+ test_id_count = 0;
+ test_id_capacity = 0;
+ }
+
+    // parse test identifiers, if any, cannibalizing the args in the
+    // process, each identifier takes the form
+    // name[:leb16-defines[:leb16-cycles]]
+ for (; argc > optind; optind++) {
+ test_define_t *defines = NULL;
+ size_t define_count = 0;
+ lfs_emubd_powercycles_t *cycles = NULL;
+ size_t cycle_count = 0;
+
+ // parse name, can be suite or case
+ char *name = argv[optind];
+ char *defines_ = strchr(name, ':');
+ if (defines_) {
+ *defines_ = '\0';
+ defines_ += 1;
+ }
+
+ // remove optional path and .toml suffix
+ char *slash = strrchr(name, '/');
+ if (slash) {
+ name = slash+1;
+ }
+
+ size_t name_len = strlen(name);
+ if (name_len > 5 && strcmp(&name[name_len-5], ".toml") == 0) {
+ name[name_len-5] = '\0';
+ }
+
+ if (defines_) {
+ // parse defines
+ char *cycles_ = strchr(defines_, ':');
+ if (cycles_) {
+ *cycles_ = '\0';
+ cycles_ += 1;
+ }
+
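+            // defines are encoded as a sequence of leb16-encoded
+            // define-index/value pairs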
+ while (true) {
+ char *parsed;
+ size_t d = leb16_parse(defines_, &parsed);
+ intmax_t v = leb16_parse(parsed, &parsed);
+ if (parsed == defines_) {
+ break;
+ }
+ defines_ = parsed;
+
+ if (d >= define_count) {
+ // align to power of two to avoid any superlinear growth
+ size_t ncount = 1 << lfs_npw2(d+1);
+ defines = realloc(defines,
+ ncount*sizeof(test_define_t));
+ memset(defines+define_count, 0,
+ (ncount-define_count)*sizeof(test_define_t));
+ define_count = ncount;
+ }
+ defines[d] = TEST_LIT(v);
+ }
+
+ if (cycles_) {
+ // parse power cycles
+ size_t cycle_capacity = 0;
+ while (*cycles_ != '\0') {
+ char *parsed = NULL;
+ *(lfs_emubd_powercycles_t*)mappend(
+ (void**)&cycles,
+ sizeof(lfs_emubd_powercycles_t),
+ &cycle_count,
+ &cycle_capacity)
+ = leb16_parse(cycles_, &parsed);
+ if (parsed == cycles_) {
+ fprintf(stderr, "error: "
+ "could not parse test cycles: %s\n",
+ cycles_);
+ exit(-1);
+ }
+ cycles_ = parsed;
+ }
+ }
+ }
+
+ // append to identifier list
+ *(test_id_t*)mappend(
+ (void**)&test_ids,
+ sizeof(test_id_t),
+ &test_id_count,
+ &test_id_capacity) = (test_id_t){
+ .name = name,
+ .defines = defines,
+ .define_count = define_count,
+ .cycles = cycles,
+ .cycle_count = cycle_count,
+ };
+ }
+
+ // do the thing
+ op();
+
+    // cleanup (needs to be done for valgrind testing)
+ test_define_cleanup();
+ if (test_overrides) {
+ for (size_t i = 0; i < test_override_count; i++) {
+ free((void*)test_overrides[i].defines);
+ }
+ free((void*)test_overrides);
+ }
+ if (test_geometry_capacity) {
+ free((void*)test_geometries);
+ }
+ if (test_powerloss_capacity) {
+ for (size_t i = 0; i < test_powerloss_count; i++) {
+ free((void*)test_powerlosses[i].cycles);
+ }
+ free((void*)test_powerlosses);
+ }
+ if (test_id_capacity) {
+ for (size_t i = 0; i < test_id_count; i++) {
+ free((void*)test_ids[i].defines);
+ free((void*)test_ids[i].cycles);
+ }
+ free((void*)test_ids);
+ }
+}
diff --git a/runners/test_runner.h b/runners/test_runner.h
new file mode 100644
index 00000000..9ff1f790
--- /dev/null
+++ b/runners/test_runner.h
@@ -0,0 +1,124 @@
+/*
+ * Runner for littlefs tests
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef TEST_RUNNER_H
+#define TEST_RUNNER_H
+
+
+// override LFS_TRACE
+void test_trace(const char *fmt, ...);
+
+#define LFS_TRACE_(fmt, ...) \
+ test_trace("%s:%d:trace: " fmt "%s\n", \
+ __FILE__, \
+ __LINE__, \
+ __VA_ARGS__)
+#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
+#define LFS_EMUBD_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
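+// note the "" appended to __VA_ARGS__ pairs with the trailing "%s" in
+// the format string, which lets these macros be called without any
+// trailing arguments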
+
+
+// note these are indirectly included in any generated files
+#include "bd/lfs_emubd.h"
+#include <stdio.h>
+
+// give source a chance to define feature macros
+#undef _FEATURES_H
+#undef _STDIO_H
+
+
+// generated test configurations
+struct lfs_config;
+
+enum test_flags {
+ TEST_REENTRANT = 0x1,
+};
+typedef uint8_t test_flags_t;
+
+typedef struct test_define {
+ intmax_t (*cb)(void *data);
+ void *data;
+} test_define_t;
+
+struct test_case {
+ const char *name;
+ const char *path;
+ test_flags_t flags;
+ size_t permutations;
+
+ const test_define_t *defines;
+
+ bool (*filter)(void);
+ void (*run)(struct lfs_config *cfg);
+};
+
+struct test_suite {
+ const char *name;
+ const char *path;
+ test_flags_t flags;
+
+ const char *const *define_names;
+ size_t define_count;
+
+ const struct test_case *cases;
+ size_t case_count;
+};
+
+
+// deterministic prng for pseudo-randomness in tests
+uint32_t test_prng(uint32_t *state);
+
+#define TEST_PRNG(state) test_prng(state)
+
+
+// access generated test defines
+intmax_t test_define(size_t define);
+
+#define TEST_DEFINE(i) test_define(i)
+
+// a few preconfigured defines that control how tests run
+
+#define READ_SIZE_i 0
+#define PROG_SIZE_i 1
+#define BLOCK_SIZE_i 2
+#define BLOCK_COUNT_i 3
+#define CACHE_SIZE_i 4
+#define LOOKAHEAD_SIZE_i 5
+#define BLOCK_CYCLES_i 6
+#define ERASE_VALUE_i 7
+#define ERASE_CYCLES_i 8
+#define BADBLOCK_BEHAVIOR_i 9
+#define POWERLOSS_BEHAVIOR_i 10
+
+#define READ_SIZE TEST_DEFINE(READ_SIZE_i)
+#define PROG_SIZE TEST_DEFINE(PROG_SIZE_i)
+#define BLOCK_SIZE TEST_DEFINE(BLOCK_SIZE_i)
+#define BLOCK_COUNT TEST_DEFINE(BLOCK_COUNT_i)
+#define CACHE_SIZE TEST_DEFINE(CACHE_SIZE_i)
+#define LOOKAHEAD_SIZE TEST_DEFINE(LOOKAHEAD_SIZE_i)
+#define BLOCK_CYCLES TEST_DEFINE(BLOCK_CYCLES_i)
+#define ERASE_VALUE TEST_DEFINE(ERASE_VALUE_i)
+#define ERASE_CYCLES TEST_DEFINE(ERASE_CYCLES_i)
+#define BADBLOCK_BEHAVIOR TEST_DEFINE(BADBLOCK_BEHAVIOR_i)
+#define POWERLOSS_BEHAVIOR TEST_DEFINE(POWERLOSS_BEHAVIOR_i)
+
+#define TEST_IMPLICIT_DEFINES \
+ TEST_DEF(READ_SIZE, PROG_SIZE) \
+ TEST_DEF(PROG_SIZE, BLOCK_SIZE) \
+ TEST_DEF(BLOCK_SIZE, 0) \
+ TEST_DEF(BLOCK_COUNT, (1024*1024)/BLOCK_SIZE) \
+ TEST_DEF(CACHE_SIZE, lfs_max(64,lfs_max(READ_SIZE,PROG_SIZE))) \
+ TEST_DEF(LOOKAHEAD_SIZE, 16) \
+ TEST_DEF(BLOCK_CYCLES, -1) \
+ TEST_DEF(ERASE_VALUE, 0xff) \
+ TEST_DEF(ERASE_CYCLES, 0) \
+ TEST_DEF(BADBLOCK_BEHAVIOR, LFS_EMUBD_BADBLOCK_PROGERROR) \
+ TEST_DEF(POWERLOSS_BEHAVIOR, LFS_EMUBD_POWERLOSS_NOOP)
+
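+// note TEST_IMPLICIT_DEFINES is an X-macro, the consumer defines
+// TEST_DEF(k, v) before expanding it to generate code for each
+// implicit define and its default value
+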
+#define TEST_IMPLICIT_DEFINE_COUNT 11
+#define TEST_GEOMETRY_DEFINE_COUNT 4
+
+
+#endif
diff --git a/scripts/bench.py b/scripts/bench.py
new file mode 100755
index 00000000..f22841ea
--- /dev/null
+++ b/scripts/bench.py
@@ -0,0 +1,1430 @@
+#!/usr/bin/env python3
+#
+# Script to compile and run benches.
+#
+# Example:
+# ./scripts/bench.py runners/bench_runner -b
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import collections as co
+import csv
+import errno
+import glob
+import itertools as it
+import math as m
+import os
+import pty
+import re
+import shlex
+import shutil
+import signal
+import subprocess as sp
+import sys
+import threading as th
+import time
+import toml
+
+
+RUNNER_PATH = './runners/bench_runner'
+HEADER_PATH = 'runners/bench_runner.h'
+
+GDB_PATH = ['gdb']
+VALGRIND_PATH = ['valgrind']
+PERF_SCRIPT = ['./scripts/perf.py']
+
+
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
+
+class BenchCase:
+ # create a BenchCase object from a config
+ def __init__(self, config, args={}):
+ self.name = config.pop('name')
+ self.path = config.pop('path')
+ self.suite = config.pop('suite')
+ self.lineno = config.pop('lineno', None)
+ self.if_ = config.pop('if', None)
+ if isinstance(self.if_, bool):
+ self.if_ = 'true' if self.if_ else 'false'
+ self.code = config.pop('code')
+ self.code_lineno = config.pop('code_lineno', None)
+ self.in_ = config.pop('in',
+ config.pop('suite_in', None))
+
+ # figure out defines and build possible permutations
+ self.defines = set()
+ self.permutations = []
+
+        # defines can be a dict or a list of dicts
+ suite_defines = config.pop('suite_defines', {})
+ if not isinstance(suite_defines, list):
+ suite_defines = [suite_defines]
+ defines = config.pop('defines', {})
+ if not isinstance(defines, list):
+ defines = [defines]
+
+ def csplit(v):
+            # split on commas, but only outside of parens
+ parens = 0
+ i_ = 0
+ for i in range(len(v)):
+ if v[i] == ',' and parens == 0:
+ yield v[i_:i]
+ i_ = i+1
+ elif v[i] in '([{':
+ parens += 1
+ elif v[i] in '}])':
+ parens -= 1
+ if v[i_:].strip():
+ yield v[i_:]
+
+ def parse_define(v):
+ # a define entry can be a list
+ if isinstance(v, list):
+ for v_ in v:
+ yield from parse_define(v_)
+ # or a string
+ elif isinstance(v, str):
+ # which can be comma-separated values, with optional
+ # range statements. This matches the runtime define parser in
+ # the runner itself.
+ for v_ in csplit(v):
+                m = re.search(r'\brange\b\s*\('
+                    r'(?P<start>[^,\s]*)'
+                    r'\s*(?:,\s*(?P<stop>[^,\s]*)'
+                    r'\s*(?:,\s*(?P<step>[^,\s]*)\s*)?)?\)',
+ v_)
+ if m:
+ start = (int(m.group('start'), 0)
+ if m.group('start') else 0)
+ stop = (int(m.group('stop'), 0)
+ if m.group('stop') else None)
+ step = (int(m.group('step'), 0)
+ if m.group('step') else 1)
+ if m.lastindex <= 1:
+ start, stop = 0, start
+ for x in range(start, stop, step):
+ yield from parse_define('%s(%d)%s' % (
+ v_[:m.start()], x, v_[m.end():]))
+ else:
+ yield v_
+ # or a literal value
+ elif isinstance(v, bool):
+ yield 'true' if v else 'false'
+ else:
+ yield v
+
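+        # note a define like 'range(0,10,2)' expands to the values
+        # (0),(2),(4),(6),(8), and 'range(4)' to (0),(1),(2),(3)
+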
+ # build possible permutations
+ for suite_defines_ in suite_defines:
+ self.defines |= suite_defines_.keys()
+ for defines_ in defines:
+ self.defines |= defines_.keys()
+ self.permutations.extend(dict(perm) for perm in it.product(*(
+ [(k, v) for v in parse_define(vs)]
+ for k, vs in sorted((suite_defines_ | defines_).items()))))
+
+ for k in config.keys():
+ print('%swarning:%s in %s, found unused key %r' % (
+ '\x1b[01;33m' if args['color'] else '',
+ '\x1b[m' if args['color'] else '',
+ self.name,
+ k),
+ file=sys.stderr)
+
+
+class BenchSuite:
+ # create a BenchSuite object from a toml file
+ def __init__(self, path, args={}):
+ self.path = path
+ self.name = os.path.basename(path)
+ if self.name.endswith('.toml'):
+ self.name = self.name[:-len('.toml')]
+
+ # load toml file and parse bench cases
+ with open(self.path) as f:
+ # load benches
+ config = toml.load(f)
+
+ # find line numbers
+ f.seek(0)
+ case_linenos = []
+ code_linenos = []
+ for i, line in enumerate(f):
+ match = re.match(
+                r'(?P<case>\[\s*cases\s*\.\s*(?P<name>\w+)\s*\])'
+                '|' r'(?P<code>code\s*=)',
+ line)
+ if match and match.group('case'):
+ case_linenos.append((i+1, match.group('name')))
+ elif match and match.group('code'):
+ code_linenos.append(i+2)
+
+ # sort in case toml parsing did not retain order
+ case_linenos.sort()
+
+ cases = config.pop('cases')
+ for (lineno, name), (nlineno, _) in it.zip_longest(
+ case_linenos, case_linenos[1:],
+ fillvalue=(float('inf'), None)):
+ code_lineno = min(
+ (l for l in code_linenos if l >= lineno and l < nlineno),
+ default=None)
+ cases[name]['lineno'] = lineno
+ cases[name]['code_lineno'] = code_lineno
+
+ self.if_ = config.pop('if', None)
+ if isinstance(self.if_, bool):
+ self.if_ = 'true' if self.if_ else 'false'
+
+ self.code = config.pop('code', None)
+ self.code_lineno = min(
+ (l for l in code_linenos
+ if not case_linenos or l < case_linenos[0][0]),
+ default=None)
+
+ # a couple of these we just forward to all cases
+ defines = config.pop('defines', {})
+ in_ = config.pop('in', None)
+
+ self.cases = []
+ for name, case in sorted(cases.items(),
+ key=lambda c: c[1].get('lineno')):
+ self.cases.append(BenchCase(config={
+ 'name': name,
+ 'path': path + (':%d' % case['lineno']
+ if 'lineno' in case else ''),
+ 'suite': self.name,
+ 'suite_defines': defines,
+ 'suite_in': in_,
+ **case},
+ args=args))
+
+ # combine per-case defines
+ self.defines = set.union(*(
+ set(case.defines) for case in self.cases))
+
+ for k in config.keys():
+ print('%swarning:%s in %s, found unused key %r' % (
+ '\x1b[01;33m' if args['color'] else '',
+ '\x1b[m' if args['color'] else '',
+ self.name,
+ k),
+ file=sys.stderr)
+
+
+
+def compile(bench_paths, **args):
+ # find .toml files
+ paths = []
+ for path in bench_paths:
+ if os.path.isdir(path):
+ path = path + '/*.toml'
+
+ for path in glob.glob(path):
+ paths.append(path)
+
+ if not paths:
+ print('no bench suites found in %r?' % bench_paths)
+ sys.exit(-1)
+
+ # load the suites
+ suites = [BenchSuite(path, args) for path in paths]
+ suites.sort(key=lambda s: s.name)
+
+ # check for name conflicts, these will cause ambiguity problems later
+ # when running benches
+ seen = {}
+ for suite in suites:
+ if suite.name in seen:
+ print('%swarning:%s conflicting suite %r, %s and %s' % (
+ '\x1b[01;33m' if args['color'] else '',
+ '\x1b[m' if args['color'] else '',
+ suite.name,
+ suite.path,
+ seen[suite.name].path),
+ file=sys.stderr)
+ seen[suite.name] = suite
+
+ for case in suite.cases:
+ # only allow conflicts if a case and its suite share a name
+ if case.name in seen and not (
+ isinstance(seen[case.name], BenchSuite)
+ and seen[case.name].cases == [case]):
+ print('%swarning:%s conflicting case %r, %s and %s' % (
+ '\x1b[01;33m' if args['color'] else '',
+ '\x1b[m' if args['color'] else '',
+ case.name,
+ case.path,
+ seen[case.name].path),
+ file=sys.stderr)
+ seen[case.name] = case
+
+ # we can only compile one bench suite at a time
+ if not args.get('source'):
+ if len(suites) > 1:
+ print('more than one bench suite for compilation? (%r)' % bench_paths)
+ sys.exit(-1)
+
+ suite = suites[0]
+
+ # write generated bench source
+ if 'output' in args:
+ with openio(args['output'], 'w') as f:
+ _write = f.write
+ def write(s):
+ f.lineno += s.count('\n')
+ _write(s)
+ def writeln(s=''):
+ f.lineno += s.count('\n') + 1
+ _write(s)
+ _write('\n')
+ f.lineno = 1
+ f.write = write
+ f.writeln = writeln
+
+ f.writeln("// Generated by %s:" % sys.argv[0])
+ f.writeln("//")
+ f.writeln("// %s" % ' '.join(sys.argv))
+ f.writeln("//")
+ f.writeln()
+
+ # include bench_runner.h in every generated file
+ f.writeln("#include \"%s\"" % args['include'])
+ f.writeln()
+
+ # write out generated functions, this can end up in different
+ # files depending on the "in" attribute
+ #
+ # note it's up to the specific generated file to declare
+ # the bench defines
+ def write_case_functions(f, suite, case):
+ # create case define functions
+ if case.defines:
+ # deduplicate defines by value to try to reduce the
+ # number of functions we generate
+ define_cbs = {}
+ for i, defines in enumerate(case.permutations):
+ for k, v in sorted(defines.items()):
+ if v not in define_cbs:
+ name = ('__bench__%s__%s__%d'
+ % (case.name, k, i))
+ define_cbs[v] = name
+ f.writeln('intmax_t %s('
+ '__attribute__((unused)) '
+ 'void *data) {' % name)
+ f.writeln(4*' '+'return %s;' % v)
+ f.writeln('}')
+ f.writeln()
+ f.writeln('const bench_define_t '
+ '__bench__%s__defines[]['
+ 'BENCH_IMPLICIT_DEFINE_COUNT+%d] = {'
+ % (case.name, len(suite.defines)))
+ for defines in case.permutations:
+ f.writeln(4*' '+'{')
+ for k, v in sorted(defines.items()):
+ f.writeln(8*' '+'[%-24s] = {%s, NULL},' % (
+ k+'_i', define_cbs[v]))
+ f.writeln(4*' '+'},')
+ f.writeln('};')
+ f.writeln()
+
+ # create case filter function
+ if suite.if_ is not None or case.if_ is not None:
+ f.writeln('bool __bench__%s__filter(void) {'
+ % (case.name))
+ f.writeln(4*' '+'return %s;'
+ % ' && '.join('(%s)' % if_
+ for if_ in [suite.if_, case.if_]
+ if if_ is not None))
+ f.writeln('}')
+ f.writeln()
+
+ # create case run function
+ f.writeln('void __bench__%s__run('
+ '__attribute__((unused)) struct lfs_config *cfg) {'
+ % (case.name))
+ f.writeln(4*' '+'// bench case %s' % case.name)
+ if case.code_lineno is not None:
+ f.writeln(4*' '+'#line %d "%s"'
+ % (case.code_lineno, suite.path))
+ f.write(case.code)
+ if case.code_lineno is not None:
+ f.writeln(4*' '+'#line %d "%s"'
+ % (f.lineno+1, args['output']))
+ f.writeln('}')
+ f.writeln()
+
+ if not args.get('source'):
+ if suite.code is not None:
+ if suite.code_lineno is not None:
+ f.writeln('#line %d "%s"'
+ % (suite.code_lineno, suite.path))
+ f.write(suite.code)
+ if suite.code_lineno is not None:
+ f.writeln('#line %d "%s"'
+ % (f.lineno+1, args['output']))
+ f.writeln()
+
+ if suite.defines:
+ for i, define in enumerate(sorted(suite.defines)):
+ f.writeln('#ifndef %s' % define)
+ f.writeln('#define %-24s '
+ 'BENCH_IMPLICIT_DEFINE_COUNT+%d' % (define+'_i', i))
+ f.writeln('#define %-24s '
+ 'BENCH_DEFINE(%s)' % (define, define+'_i'))
+ f.writeln('#endif')
+ f.writeln()
+
+ # create case functions
+ for case in suite.cases:
+ if case.in_ is None:
+ write_case_functions(f, suite, case)
+ else:
+ if case.defines:
+ f.writeln('extern const bench_define_t '
+ '__bench__%s__defines[]['
+ 'BENCH_IMPLICIT_DEFINE_COUNT+%d];'
+ % (case.name, len(suite.defines)))
+ if suite.if_ is not None or case.if_ is not None:
+ f.writeln('extern bool __bench__%s__filter('
+ 'void);'
+ % (case.name))
+ f.writeln('extern void __bench__%s__run('
+ 'struct lfs_config *cfg);'
+ % (case.name))
+ f.writeln()
+
+ # create suite struct
+ #
+ # note we place this in the custom bench_suites section with
+ # minimum alignment, otherwise GCC ups the alignment to
+ # 32 bytes for some reason
+ f.writeln('__attribute__((section("_bench_suites"), '
+ 'aligned(1)))')
+ f.writeln('const struct bench_suite __bench__%s__suite = {'
+ % suite.name)
+ f.writeln(4*' '+'.name = "%s",' % suite.name)
+ f.writeln(4*' '+'.path = "%s",' % suite.path)
+ f.writeln(4*' '+'.flags = 0,')
+ if suite.defines:
+ # create suite define names
+ f.writeln(4*' '+'.define_names = (const char *const['
+ 'BENCH_IMPLICIT_DEFINE_COUNT+%d]){' % (
+ len(suite.defines)))
+ for k in sorted(suite.defines):
+ f.writeln(8*' '+'[%-24s] = "%s",' % (k+'_i', k))
+ f.writeln(4*' '+'},')
+ f.writeln(4*' '+'.define_count = '
+ 'BENCH_IMPLICIT_DEFINE_COUNT+%d,' % len(suite.defines))
+ f.writeln(4*' '+'.cases = (const struct bench_case[]){')
+ for case in suite.cases:
+ # create case structs
+ f.writeln(8*' '+'{')
+ f.writeln(12*' '+'.name = "%s",' % case.name)
+ f.writeln(12*' '+'.path = "%s",' % case.path)
+ f.writeln(12*' '+'.flags = 0,')
+ f.writeln(12*' '+'.permutations = %d,'
+ % len(case.permutations))
+ if case.defines:
+ f.writeln(12*' '+'.defines '
+ '= (const bench_define_t*)__bench__%s__defines,'
+ % (case.name))
+ if suite.if_ is not None or case.if_ is not None:
+ f.writeln(12*' '+'.filter = __bench__%s__filter,'
+ % (case.name))
+ f.writeln(12*' '+'.run = __bench__%s__run,'
+ % (case.name))
+ f.writeln(8*' '+'},')
+ f.writeln(4*' '+'},')
+ f.writeln(4*' '+'.case_count = %d,' % len(suite.cases))
+ f.writeln('};')
+ f.writeln()
+
+ else:
+ # copy source
+ f.writeln('#line 1 "%s"' % args['source'])
+ with open(args['source']) as sf:
+ shutil.copyfileobj(sf, f)
+ f.writeln()
+
+ # write any internal benches
+ for suite in suites:
+ for case in suite.cases:
+ if (case.in_ is not None
+ and os.path.normpath(case.in_)
+ == os.path.normpath(args['source'])):
+ # write defines, but note we need to undef any
+ # new defines since we're in someone else's file
+ if suite.defines:
+ for i, define in enumerate(
+ sorted(suite.defines)):
+ f.writeln('#ifndef %s' % define)
+ f.writeln('#define %-24s '
+ 'BENCH_IMPLICIT_DEFINE_COUNT+%d' % (
+ define+'_i', i))
+ f.writeln('#define %-24s '
+ 'BENCH_DEFINE(%s)' % (
+ define, define+'_i'))
+ f.writeln('#define '
+ '__BENCH__%s__NEEDS_UNDEF' % (
+ define))
+ f.writeln('#endif')
+ f.writeln()
+
+ write_case_functions(f, suite, case)
+
+ if suite.defines:
+ for define in sorted(suite.defines):
+ f.writeln('#ifdef __BENCH__%s__NEEDS_UNDEF'
+ % define)
+ f.writeln('#undef __BENCH__%s__NEEDS_UNDEF'
+ % define)
+ f.writeln('#undef %s' % define)
+ f.writeln('#undef %s' % (define+'_i'))
+ f.writeln('#endif')
+ f.writeln()
+
+def find_runner(runner, **args):
+ cmd = runner.copy()
+
+ # run under some external command?
+ if args.get('exec'):
+ cmd[:0] = args['exec']
+
+ # run under valgrind?
+ if args.get('valgrind'):
+ cmd[:0] = args['valgrind_path'] + [
+ '--leak-check=full',
+ '--track-origins=yes',
+ '--error-exitcode=4',
+ '-q']
+
+ # run under perf?
+ if args.get('perf'):
+ cmd[:0] = args['perf_script'] + list(filter(None, [
+ '-R',
+ '--perf-freq=%s' % args['perf_freq']
+ if args.get('perf_freq') else None,
+ '--perf-period=%s' % args['perf_period']
+ if args.get('perf_period') else None,
+ '--perf-events=%s' % args['perf_events']
+ if args.get('perf_events') else None,
+ '--perf-path=%s' % args['perf_path']
+ if args.get('perf_path') else None,
+ '-o%s' % args['perf']]))
+
+ # other context
+ if args.get('geometry'):
+ cmd.append('-G%s' % args['geometry'])
+ if args.get('disk'):
+ cmd.append('-d%s' % args['disk'])
+ if args.get('trace'):
+ cmd.append('-t%s' % args['trace'])
+ if args.get('trace_backtrace'):
+ cmd.append('--trace-backtrace')
+ if args.get('trace_period'):
+ cmd.append('--trace-period=%s' % args['trace_period'])
+ if args.get('trace_freq'):
+ cmd.append('--trace-freq=%s' % args['trace_freq'])
+ if args.get('read_sleep'):
+ cmd.append('--read-sleep=%s' % args['read_sleep'])
+ if args.get('prog_sleep'):
+ cmd.append('--prog-sleep=%s' % args['prog_sleep'])
+ if args.get('erase_sleep'):
+ cmd.append('--erase-sleep=%s' % args['erase_sleep'])
+
+ # defines?
+ if args.get('define'):
+ for define in args.get('define'):
+ cmd.append('-D%s' % define)
+
+ return cmd
+
+def list_(runner, bench_ids=[], **args):
+ cmd = find_runner(runner, **args) + bench_ids
+ if args.get('summary'): cmd.append('--summary')
+ if args.get('list_suites'): cmd.append('--list-suites')
+ if args.get('list_cases'): cmd.append('--list-cases')
+ if args.get('list_suite_paths'): cmd.append('--list-suite-paths')
+ if args.get('list_case_paths'): cmd.append('--list-case-paths')
+ if args.get('list_defines'): cmd.append('--list-defines')
+ if args.get('list_permutation_defines'):
+ cmd.append('--list-permutation-defines')
+ if args.get('list_implicit_defines'):
+ cmd.append('--list-implicit-defines')
+ if args.get('list_geometries'): cmd.append('--list-geometries')
+
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ return sp.call(cmd)
+
+
+def find_perms(runner_, ids=[], **args):
+ case_suites = {}
+ expected_case_perms = co.defaultdict(lambda: 0)
+ expected_perms = 0
+ total_perms = 0
+
+ # query cases from the runner
+ cmd = runner_ + ['--list-cases'] + ids
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
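+ # each line looks like "<case> <flags> <filtered>/<perms>"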
+ pattern = re.compile(
+ '^(?P<case>[^\s]+)'
+ '\s+(?P<flags>[^\s]+)'
+ '\s+(?P<filtered>\d+)/(?P<perms>\d+)')
+ # skip the first line
+ for line in it.islice(proc.stdout, 1, None):
+ m = pattern.match(line)
+ if m:
+ filtered = int(m.group('filtered'))
+ perms = int(m.group('perms'))
+ expected_case_perms[m.group('case')] += filtered
+ expected_perms += filtered
+ total_perms += perms
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ sys.exit(-1)
+
+ # get which suite each case belongs to via paths
+ cmd = runner_ + ['--list-case-paths'] + ids
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
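+ # each line looks like "<case> <path>:<lineno>"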
+ pattern = re.compile(
+ '^(?P<case>[^\s]+)'
+ '\s+(?P<path>[^:]+):(?P<lineno>\d+)')
+ # skip the first line
+ for line in it.islice(proc.stdout, 1, None):
+ m = pattern.match(line)
+ if m:
+ path = m.group('path')
+ # strip path/suffix here
+ suite = os.path.basename(path)
+ if suite.endswith('.toml'):
+ suite = suite[:-len('.toml')]
+ case_suites[m.group('case')] = suite
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ sys.exit(-1)
+
+ # figure out expected suite perms
+ expected_suite_perms = co.defaultdict(lambda: 0)
+ for case, suite in case_suites.items():
+ expected_suite_perms[suite] += expected_case_perms[case]
+
+ return (
+ case_suites,
+ expected_suite_perms,
+ expected_case_perms,
+ expected_perms,
+ total_perms)
+
+def find_path(runner_, id, **args):
+ path = None
+ # query from runner
+ cmd = runner_ + ['--list-case-paths', id]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
+ pattern = re.compile(
+ '^(?P<case>[^\s]+)'
+ '\s+(?P<path>[^:]+):(?P<lineno>\d+)')
+ # skip the first line
+ for line in it.islice(proc.stdout, 1, None):
+ m = pattern.match(line)
+ if m and path is None:
+ path_ = m.group('path')
+ lineno = int(m.group('lineno'))
+ path = (path_, lineno)
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ sys.exit(-1)
+
+ return path
+
+def find_defines(runner_, id, **args):
+ # query permutation defines from runner
+ cmd = runner_ + ['--list-permutation-defines', id]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
+ defines = co.OrderedDict()
+ pattern = re.compile('^(?P<define>\w+)=(?P<value>.+)')
+ for line in proc.stdout:
+ m = pattern.match(line)
+ if m:
+ define = m.group('define')
+ value = m.group('value')
+ defines[define] = value
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ sys.exit(-1)
+
+ return defines
+
+
+# Thread-safe CSV writer
+class BenchOutput:
+ def __init__(self, path, head=None, tail=None):
+ self.f = openio(path, 'w+', 1)
+ self.lock = th.Lock()
+ self.head = head or []
+ self.tail = tail or []
+ self.writer = csv.DictWriter(self.f, self.head + self.tail)
+ self.rows = []
+
+ def close(self):
+ self.f.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *_):
+ self.f.close()
+
+ def writerow(self, row):
+ with self.lock:
+ self.rows.append(row)
+ if all(k in self.head or k in self.tail for k in row.keys()):
+ # can simply append
+ self.writer.writerow(row)
+ else:
+ # need to rewrite the file
+ self.head.extend(row.keys() - (self.head + self.tail))
+ self.f.seek(0)
+ self.f.truncate()
+ self.writer = csv.DictWriter(self.f, self.head + self.tail)
+ self.writer.writeheader()
+ for row in self.rows:
+ self.writer.writerow(row)
+
+# A bench failure
+class BenchFailure(Exception):
+ def __init__(self, id, returncode, stdout, assert_=None):
+ self.id = id
+ self.returncode = returncode
+ self.stdout = stdout
+ self.assert_ = assert_
+
+def run_stage(name, runner_, ids, stdout_, trace_, output_, **args):
+ # get expected suite/case/perm counts
+ (case_suites,
+ expected_suite_perms,
+ expected_case_perms,
+ expected_perms,
+ total_perms) = find_perms(runner_, ids, **args)
+
+ passed_suite_perms = co.defaultdict(lambda: 0)
+ passed_case_perms = co.defaultdict(lambda: 0)
+ passed_perms = 0
+ readed = 0
+ proged = 0
+ erased = 0
+ failures = []
+ killed = False
+
+ pattern = re.compile('^(?:'
+ '(?P<op>running|finished|skipped|powerloss)'
+ ' (?P<id>(?P<case>[^:]+)[^\s]*)'
+ '(?: (?P<readed>\d+))?'
+ '(?: (?P<proged>\d+))?'
+ '(?: (?P<erased>\d+))?'
+ '|' '(?P<path>[^:]+):(?P<lineno>\d+):(?P<op_>assert):'
+ ' *(?P<message>.*)'
+ ')$')
+ locals = th.local()
+ children = set()
+
+ def run_runner(runner_, ids=[]):
+ nonlocal passed_suite_perms
+ nonlocal passed_case_perms
+ nonlocal passed_perms
+ nonlocal readed
+ nonlocal proged
+ nonlocal erased
+ nonlocal locals
+
+ # run the benches!
+ cmd = runner_ + ids
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+
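+ # run the runner under a pty so it thinks it's attached to a
+ # terminal, keeping its output line-buffered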
+ mpty, spty = pty.openpty()
+ proc = sp.Popen(cmd, stdout=spty, stderr=spty, close_fds=False)
+ os.close(spty)
+ children.add(proc)
+ mpty = os.fdopen(mpty, 'r', 1)
+
+ last_id = None
+ last_stdout = co.deque(maxlen=args.get('context', 5) + 1)
+ last_assert = None
+ try:
+ while True:
+ # parse a line for state changes
+ try:
+ line = mpty.readline()
+ except OSError as e:
+ if e.errno != errno.EIO:
+ raise
+ break
+ if not line:
+ break
+ last_stdout.append(line)
+ if stdout_:
+ try:
+ stdout_.write(line)
+ stdout_.flush()
+ except BrokenPipeError:
+ pass
+
+ m = pattern.match(line)
+ if m:
+ op = m.group('op') or m.group('op_')
+ if op == 'running':
+ locals.seen_perms += 1
+ last_id = m.group('id')
+ last_stdout.clear()
+ last_assert = None
+ elif op == 'finished':
+ case = m.group('case')
+ suite = case_suites[case]
+ readed_ = int(m.group('readed'))
+ proged_ = int(m.group('proged'))
+ erased_ = int(m.group('erased'))
+ passed_suite_perms[suite] += 1
+ passed_case_perms[case] += 1
+ passed_perms += 1
+ readed += readed_
+ proged += proged_
+ erased += erased_
+ if output_:
+ # get defines and write to csv
+ defines = find_defines(
+ runner_, m.group('id'), **args)
+ output_.writerow({
+ 'suite': suite,
+ 'case': case,
+ 'bench_readed': readed_,
+ 'bench_proged': proged_,
+ 'bench_erased': erased_,
+ **defines})
+ elif op == 'skipped':
+ locals.seen_perms += 1
+ elif op == 'assert':
+ last_assert = (
+ m.group('path'),
+ int(m.group('lineno')),
+ m.group('message'))
+ # go ahead and kill the process, aborting takes a while
+ if args.get('keep_going'):
+ proc.kill()
+ except KeyboardInterrupt:
+ raise BenchFailure(last_id, 1, list(last_stdout))
+ finally:
+ children.remove(proc)
+ mpty.close()
+
+ proc.wait()
+ if proc.returncode != 0:
+ raise BenchFailure(
+ last_id,
+ proc.returncode,
+ list(last_stdout),
+ last_assert)
+
+ def run_job(runner_, ids=[], start=None, step=None):
+ nonlocal failures
+ nonlocal killed
+ nonlocal locals
+
+ start = start or 0
+ step = step or 1
+ while start < total_perms:
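+ # -s<start>,<stop>,<step> selects a slice of the permutations;
+ # isolate/valgrind get an explicit stop so each process runs
+ # only one permutation before restarting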
+ job_runner = runner_.copy()
+ if args.get('isolate') or args.get('valgrind'):
+ job_runner.append('-s%s,%s,%s' % (start, start+step, step))
+ else:
+ job_runner.append('-s%s,,%s' % (start, step))
+
+ try:
+ # run the benches
+ locals.seen_perms = 0
+ run_runner(job_runner, ids)
+ assert locals.seen_perms > 0
+ start += locals.seen_perms*step
+
+ except BenchFailure as failure:
+ # keep track of failures
+ if output_:
+ case, _ = failure.id.split(':', 1)
+ suite = case_suites[case]
+ # get defines and write to csv
+ defines = find_defines(runner_, failure.id, **args)
+ output_.writerow({
+ 'suite': suite,
+ 'case': case,
+ **defines})
+
+ # race condition for multiple failures?
+ if failures and not args.get('keep_going'):
+ break
+
+ failures.append(failure)
+
+ if args.get('keep_going') and not killed:
+ # resume after failed bench
+ assert locals.seen_perms > 0
+ start += locals.seen_perms*step
+ continue
+ else:
+ # stop other benches
+ killed = True
+ for child in children.copy():
+ child.kill()
+ break
+
+
+ # parallel jobs?
+ runners = []
+ if 'jobs' in args:
+ for job in range(args['jobs']):
+ runners.append(th.Thread(
+ target=run_job, args=(runner_, ids, job, args['jobs']),
+ daemon=True))
+ else:
+ runners.append(th.Thread(
+ target=run_job, args=(runner_, ids, None, None),
+ daemon=True))
+
+ def print_update(done):
+ if not args.get('verbose') and (args['color'] or done):
+ sys.stdout.write('%s%srunning %s%s:%s %s%s' % (
+ '\r\x1b[K' if args['color'] else '',
+ '\x1b[?7l' if not done else '',
+ ('\x1b[34m' if not failures else '\x1b[31m')
+ if args['color'] else '',
+ name,
+ '\x1b[m' if args['color'] else '',
+ ', '.join(filter(None, [
+ '%d/%d suites' % (
+ sum(passed_suite_perms[k] == v
+ for k, v in expected_suite_perms.items()),
+ len(expected_suite_perms))
+ if (not args.get('by_suites')
+ and not args.get('by_cases')) else None,
+ '%d/%d cases' % (
+ sum(passed_case_perms[k] == v
+ for k, v in expected_case_perms.items()),
+ len(expected_case_perms))
+ if not args.get('by_cases') else None,
+ '%d/%d perms' % (passed_perms, expected_perms),
+ '%s%d/%d failures%s' % (
+ '\x1b[31m' if args['color'] else '',
+ len(failures),
+ expected_perms,
+ '\x1b[m' if args['color'] else '')
+ if failures else None])),
+ '\x1b[?7h' if not done else '\n'))
+ sys.stdout.flush()
+
+ for r in runners:
+ r.start()
+
+ try:
+ while any(r.is_alive() for r in runners):
+ time.sleep(0.01)
+ print_update(False)
+ except KeyboardInterrupt:
+ # this is handled by the runner threads, we just
+ # need to not abort here
+ killed = True
+ finally:
+ print_update(True)
+
+ for r in runners:
+ r.join()
+
+ return (
+ expected_perms,
+ passed_perms,
+ readed,
+ proged,
+ erased,
+ failures,
+ killed)
+
+
+def run(runner, bench_ids=[], **args):
+ # query runner for benches
+ runner_ = find_runner(runner, **args)
+ print('using runner: %s' % ' '.join(shlex.quote(c) for c in runner_))
+ (_,
+ expected_suite_perms,
+ expected_case_perms,
+ expected_perms,
+ total_perms) = find_perms(runner_, bench_ids, **args)
+ print('found %d suites, %d cases, %d/%d permutations' % (
+ len(expected_suite_perms),
+ len(expected_case_perms),
+ expected_perms,
+ total_perms))
+ print()
+
+ # automatic job detection?
+ if args.get('jobs') == 0:
+ args['jobs'] = len(os.sched_getaffinity(0))
+
+ # truncate and open logs here so they aren't disconnected between benches
+ stdout = None
+ if args.get('stdout'):
+ stdout = openio(args['stdout'], 'w', 1)
+ trace = None
+ if args.get('trace'):
+ trace = openio(args['trace'], 'w', 1)
+ output = None
+ if args.get('output'):
+ output = BenchOutput(args['output'],
+ ['suite', 'case'],
+ ['bench_readed', 'bench_proged', 'bench_erased'])
+
+ # measure runtime
+ start = time.time()
+
+ # spawn runners
+ expected = 0
+ passed = 0
+ readed = 0
+ proged = 0
+ erased = 0
+ failures = []
+ for by in (bench_ids if bench_ids
+ else expected_case_perms.keys() if args.get('by_cases')
+ else expected_suite_perms.keys() if args.get('by_suites')
+ else [None]):
+ # spawn jobs for stage
+ (expected_,
+ passed_,
+ readed_,
+ proged_,
+ erased_,
+ failures_,
+ killed) = run_stage(
+ by or 'benches',
+ runner_,
+ [by] if by is not None else [],
+ stdout,
+ trace,
+ output,
+ **args)
+ # collect passes/failures
+ expected += expected_
+ passed += passed_
+ readed += readed_
+ proged += proged_
+ erased += erased_
+ failures.extend(failures_)
+ if (failures and not args.get('keep_going')) or killed:
+ break
+
+ stop = time.time()
+
+ if stdout:
+ try:
+ stdout.close()
+ except BrokenPipeError:
+ pass
+ if trace:
+ try:
+ trace.close()
+ except BrokenPipeError:
+ pass
+ if output:
+ output.close()
+
+ # show summary
+ print()
+ print('%sdone:%s %s' % (
+ ('\x1b[34m' if not failures else '\x1b[31m')
+ if args['color'] else '',
+ '\x1b[m' if args['color'] else '',
+ ', '.join(filter(None, [
+ '%d readed' % readed,
+ '%d proged' % proged,
+ '%d erased' % erased,
+ 'in %.2fs' % (stop-start)]))))
+ print()
+
+ # print each failure
+ for failure in failures:
+ assert failure.id is not None, '%s broken? %r' % (
+ ' '.join(shlex.quote(c) for c in runner_),
+ failure)
+
+ # get some extra info from runner
+ path, lineno = find_path(runner_, failure.id, **args)
+ defines = find_defines(runner_, failure.id, **args)
+
+ # show summary of failure
+ print('%s%s:%d:%sfailure:%s %s%s failed' % (
+ '\x1b[01m' if args['color'] else '',
+ path, lineno,
+ '\x1b[01;31m' if args['color'] else '',
+ '\x1b[m' if args['color'] else '',
+ failure.id,
+ ' (%s)' % ', '.join('%s=%s' % (k,v) for k,v in defines.items())
+ if defines else ''))
+
+ if failure.stdout:
+ stdout = failure.stdout
+ if failure.assert_ is not None:
+ stdout = stdout[:-1]
+ for line in stdout[-args.get('context', 5):]:
+ sys.stdout.write(line)
+
+ if failure.assert_ is not None:
+ path, lineno, message = failure.assert_
+ print('%s%s:%d:%sassert:%s %s' % (
+ '\x1b[01m' if args['color'] else '',
+ path, lineno,
+ '\x1b[01;31m' if args['color'] else '',
+ '\x1b[m' if args['color'] else '',
+ message))
+ with open(path) as f:
+ line = next(it.islice(f, lineno-1, None)).strip('\n')
+ print(line)
+ print()
+
+ # drop into gdb?
+ if failures and (args.get('gdb')
+ or args.get('gdb_case')
+ or args.get('gdb_main')):
+ failure = failures[0]
+ cmd = runner_ + [failure.id]
+
+ if args.get('gdb_main'):
+ # we don't really need the case breakpoint here, but it
+ # can be helpful
+ path, lineno = find_path(runner_, failure.id, **args)
+ cmd[:0] = args['gdb_path'] + [
+ '-ex', 'break main',
+ '-ex', 'break %s:%d' % (path, lineno),
+ '-ex', 'run',
+ '--args']
+ elif args.get('gdb_case'):
+ path, lineno = find_path(runner_, failure.id, **args)
+ cmd[:0] = args['gdb_path'] + [
+ '-ex', 'break %s:%d' % (path, lineno),
+ '-ex', 'run',
+ '--args']
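+ # on asserts, stop at the raise and walk up to the
+ # failing frame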
+ elif failure.assert_ is not None:
+ cmd[:0] = args['gdb_path'] + [
+ '-ex', 'run',
+ '-ex', 'frame function raise',
+ '-ex', 'up 2',
+ '--args']
+ else:
+ cmd[:0] = args['gdb_path'] + [
+ '-ex', 'run',
+ '--args']
+
+ # exec gdb interactively
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ os.execvp(cmd[0], cmd)
+
+ return 1 if failures else 0
+
+
+def main(**args):
+ # figure out what color should be
+ if args.get('color') == 'auto':
+ args['color'] = sys.stdout.isatty()
+ elif args.get('color') == 'always':
+ args['color'] = True
+ else:
+ args['color'] = False
+
+ if args.get('compile'):
+ return compile(**args)
+ elif (args.get('summary')
+ or args.get('list_suites')
+ or args.get('list_cases')
+ or args.get('list_suite_paths')
+ or args.get('list_case_paths')
+ or args.get('list_defines')
+ or args.get('list_permutation_defines')
+ or args.get('list_implicit_defines')
+ or args.get('list_geometries')):
+ return list_(**args)
+ else:
+ return run(**args)
+
+
+if __name__ == "__main__":
+ import argparse
+ import sys
+ argparse.ArgumentParser._handle_conflict_ignore = lambda *_: None
+ argparse._ArgumentGroup._handle_conflict_ignore = lambda *_: None
+ parser = argparse.ArgumentParser(
+ description="Build and run benches.",
+ allow_abbrev=False,
+ conflict_handler='ignore')
+ parser.add_argument(
+ '-v', '--verbose',
+ action='store_true',
+ help="Output commands that run behind the scenes.")
+ parser.add_argument(
+ '--color',
+ choices=['never', 'always', 'auto'],
+ default='auto',
+ help="When to use terminal colors. Defaults to 'auto'.")
+
+ # bench flags
+ bench_parser = parser.add_argument_group('bench options')
+ bench_parser.add_argument(
+ 'runner',
+ nargs='?',
+ type=lambda x: x.split(),
+ help="Bench runner to use for benching. Defaults to %r." % RUNNER_PATH)
+ bench_parser.add_argument(
+ 'bench_ids',
+ nargs='*',
+ help="Description of benches to run.")
+ bench_parser.add_argument(
+ '-Y', '--summary',
+ action='store_true',
+ help="Show quick summary.")
+ bench_parser.add_argument(
+ '-l', '--list-suites',
+ action='store_true',
+ help="List bench suites.")
+ bench_parser.add_argument(
+ '-L', '--list-cases',
+ action='store_true',
+ help="List bench cases.")
+ bench_parser.add_argument(
+ '--list-suite-paths',
+ action='store_true',
+ help="List the path for each bench suite.")
+ bench_parser.add_argument(
+ '--list-case-paths',
+ action='store_true',
+ help="List the path and line number for each bench case.")
+ bench_parser.add_argument(
+ '--list-defines',
+ action='store_true',
+ help="List all defines in this bench-runner.")
+ bench_parser.add_argument(
+ '--list-permutation-defines',
+ action='store_true',
+ help="List explicit defines in this bench-runner.")
+ bench_parser.add_argument(
+ '--list-implicit-defines',
+ action='store_true',
+ help="List implicit defines in this bench-runner.")
+ bench_parser.add_argument(
+ '--list-geometries',
+ action='store_true',
+ help="List the available disk geometries.")
+ bench_parser.add_argument(
+ '-D', '--define',
+ action='append',
+ help="Override a bench define.")
+ bench_parser.add_argument(
+ '-G', '--geometry',
+ help="Comma-separated list of disk geometries to bench.")
+ bench_parser.add_argument(
+ '-d', '--disk',
+ help="Direct block device operations to this file.")
+ bench_parser.add_argument(
+ '-t', '--trace',
+ help="Direct trace output to this file.")
+ bench_parser.add_argument(
+ '--trace-backtrace',
+ action='store_true',
+ help="Include a backtrace with every trace statement.")
+ bench_parser.add_argument(
+ '--trace-period',
+ help="Sample trace output at this period in cycles.")
+ bench_parser.add_argument(
+ '--trace-freq',
+ help="Sample trace output at this frequency in hz.")
+ bench_parser.add_argument(
+ '-O', '--stdout',
+ help="Direct stdout to this file. Note stderr is already merged here.")
+ bench_parser.add_argument(
+ '-o', '--output',
+ help="CSV file to store results.")
+ bench_parser.add_argument(
+ '--read-sleep',
+ help="Artificial read delay in seconds.")
+ bench_parser.add_argument(
+ '--prog-sleep',
+ help="Artificial prog delay in seconds.")
+ bench_parser.add_argument(
+ '--erase-sleep',
+ help="Artificial erase delay in seconds.")
+ bench_parser.add_argument(
+ '-j', '--jobs',
+ nargs='?',
+ type=lambda x: int(x, 0),
+ const=0,
+ help="Number of parallel runners to run. 0 runs one runner per core.")
+ bench_parser.add_argument(
+ '-k', '--keep-going',
+ action='store_true',
+ help="Don't stop on first error.")
+ bench_parser.add_argument(
+ '-i', '--isolate',
+ action='store_true',
+ help="Run each bench permutation in a separate process.")
+ bench_parser.add_argument(
+ '-b', '--by-suites',
+ action='store_true',
+ help="Step through benches by suite.")
+ bench_parser.add_argument(
+ '-B', '--by-cases',
+ action='store_true',
+ help="Step through benches by case.")
+ bench_parser.add_argument(
+ '--context',
+ type=lambda x: int(x, 0),
+ default=5,
+ help="Show this many lines of stdout on bench failure. "
+ "Defaults to 5.")
+ bench_parser.add_argument(
+ '--gdb',
+ action='store_true',
+ help="Drop into gdb on bench failure.")
+ bench_parser.add_argument(
+ '--gdb-case',
+ action='store_true',
+ help="Drop into gdb on bench failure but stop at the beginning "
+ "of the failing bench case.")
+ bench_parser.add_argument(
+ '--gdb-main',
+ action='store_true',
+ help="Drop into gdb on bench failure but stop at the beginning "
+ "of main.")
+ bench_parser.add_argument(
+ '--gdb-path',
+ type=lambda x: x.split(),
+ default=GDB_PATH,
+ help="Path to the gdb executable, may include flags. "
+ "Defaults to %r." % GDB_PATH)
+ bench_parser.add_argument(
+ '--exec',
+ type=lambda e: e.split(),
+ help="Run under another executable.")
+ bench_parser.add_argument(
+ '--valgrind',
+ action='store_true',
+ help="Run under Valgrind to find memory errors. Implicitly sets "
+ "--isolate.")
+ bench_parser.add_argument(
+ '--valgrind-path',
+ type=lambda x: x.split(),
+ default=VALGRIND_PATH,
+ help="Path to the Valgrind executable, may include flags. "
+ "Defaults to %r." % VALGRIND_PATH)
+ bench_parser.add_argument(
+ '-p', '--perf',
+ help="Run under Linux's perf to sample performance counters, writing "
+ "samples to this file.")
+ bench_parser.add_argument(
+ '--perf-freq',
+ help="perf sampling frequency. This is passed directly to the perf "
+ "script.")
+ bench_parser.add_argument(
+ '--perf-period',
+ help="perf sampling period. This is passed directly to the perf "
+ "script.")
+ bench_parser.add_argument(
+ '--perf-events',
+ help="perf events to record. This is passed directly to the perf "
+ "script.")
+ bench_parser.add_argument(
+ '--perf-script',
+ type=lambda x: x.split(),
+ default=PERF_SCRIPT,
+ help="Path to the perf script to use. Defaults to %r." % PERF_SCRIPT)
+ bench_parser.add_argument(
+ '--perf-path',
+ type=lambda x: x.split(),
+ help="Path to the perf executable, may include flags. This is passed "
+ "directly to the perf script")
+
+ # compilation flags
+ comp_parser = parser.add_argument_group('compilation options')
+ comp_parser.add_argument(
+ 'bench_paths',
+ nargs='*',
+ help="Description of *.toml files to compile. May be a directory "
+ "or a list of paths.")
+ comp_parser.add_argument(
+ '-c', '--compile',
+ action='store_true',
+ help="Compile a bench suite or source file.")
+ comp_parser.add_argument(
+ '-s', '--source',
+ help="Source file to compile, possibly injecting internal benches.")
+ comp_parser.add_argument(
+ '--include',
+ default=HEADER_PATH,
+ help="Inject this header file into every compiled bench file. "
+ "Defaults to %r." % HEADER_PATH)
+ comp_parser.add_argument(
+ '-o', '--output',
+ help="Output file.")
+
+ # runner/bench_paths overlap, so need to do some munging here
+ args = parser.parse_intermixed_args()
+ args.bench_paths = [' '.join(args.runner or [])] + args.bench_ids
+ args.runner = args.runner or [RUNNER_PATH]
+
+ sys.exit(main(**{k: v
+ for k, v in vars(args).items()
+ if v is not None}))
diff --git a/scripts/changeprefix.py b/scripts/changeprefix.py
new file mode 100755
index 00000000..381a4568
--- /dev/null
+++ b/scripts/changeprefix.py
@@ -0,0 +1,181 @@
+#!/usr/bin/env python3
+#
+# Change prefixes in files/filenames. Useful for creating different versions
+# of a codebase that don't conflict at compile time.
+#
+# Example:
+# $ ./scripts/changeprefix.py lfs lfs3
+#
+# Copyright (c) 2022, The littlefs authors.
+# Copyright (c) 2019, Arm Limited. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import glob
+import itertools
+import os
+import os.path
+import re
+import shlex
+import shutil
+import subprocess
+import tempfile
+
+GIT_PATH = ['git']
+
+
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
+
+def changeprefix(from_prefix, to_prefix, line):
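+ # e.g. changeprefix('lfs', 'lfs3', 'lfs_mount LFS_ERR_IO')
+ # -> ('lfs3_mount LFS3_ERR_IO', 2)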
+ line, count1 = re.subn(
+ '\\b'+from_prefix,
+ to_prefix,
+ line)
+ line, count2 = re.subn(
+ '\\b'+from_prefix.upper(),
+ to_prefix.upper(),
+ line)
+ line, count3 = re.subn(
+ '\\B-D'+from_prefix.upper(),
+ '-D'+to_prefix.upper(),
+ line)
+ return line, count1+count2+count3
+
+def changefile(from_prefix, to_prefix, from_path, to_path, *,
+ no_replacements=False):
+ # rename any prefixes in file
+ count = 0
+
+ # create a temporary file to avoid overwriting ourself
+ if from_path == to_path and to_path != '-':
+ to_path_temp = tempfile.NamedTemporaryFile('w', delete=False)
+ to_path = to_path_temp.name
+ else:
+ to_path_temp = None
+
+ with openio(from_path) as from_f:
+ with openio(to_path, 'w') as to_f:
+ for line in from_f:
+ if not no_replacements:
+ line, n = changeprefix(from_prefix, to_prefix, line)
+ count += n
+ to_f.write(line)
+
+ if from_path != '-' and to_path != '-':
+ shutil.copystat(from_path, to_path)
+
+ if to_path_temp:
+ os.rename(to_path, from_path)
+ elif from_path != '-':
+ os.remove(from_path)
+
+ # Summary
+ print('%s: %d replacements' % (
+ '%s -> %s' % (from_path, to_path) if not to_path_temp else from_path,
+ count))
+
+def main(from_prefix, to_prefix, paths=[], *,
+ verbose=False,
+ output=None,
+ no_replacements=False,
+ no_renames=False,
+ git=False,
+ no_stage=False,
+ git_path=GIT_PATH):
+ if not paths:
+ if git:
+ cmd = git_path + ['ls-tree', '-r', '--name-only', 'HEAD']
+ if verbose:
+ print(' '.join(shlex.quote(c) for c in cmd))
+ paths = subprocess.check_output(cmd, encoding='utf8').split()
+ else:
+ print('no paths?', file=sys.stderr)
+ sys.exit(1)
+
+ for from_path in paths:
+ # rename filename?
+ if output:
+ to_path = output
+ elif no_renames:
+ to_path = from_path
+ else:
+ to_path = os.path.join(
+ os.path.dirname(from_path),
+ changeprefix(from_prefix, to_prefix,
+ os.path.basename(from_path))[0])
+
+ # rename contents
+ changefile(from_prefix, to_prefix, from_path, to_path,
+ no_replacements=no_replacements)
+
+ # stage?
+ if git and not no_stage:
+ if from_path != to_path:
+ cmd = git_path + ['rm', '-q', from_path]
+ if verbose:
+ print(' '.join(shlex.quote(c) for c in cmd))
+ subprocess.check_call(cmd)
+ cmd = git_path + ['add', to_path]
+ if verbose:
+ print(' '.join(shlex.quote(c) for c in cmd))
+ subprocess.check_call(cmd)
+
+
+if __name__ == "__main__":
+ import argparse
+ import sys
+ parser = argparse.ArgumentParser(
+ description="Change prefixes in files/filenames. Useful for creating "
+ "different versions of a codebase that don't conflict at compile "
+ "time.",
+ allow_abbrev=False)
+ parser.add_argument(
+ 'from_prefix',
+ help="Prefix to replace.")
+ parser.add_argument(
+ 'to_prefix',
+ help="Prefix to replace with.")
+ parser.add_argument(
+ 'paths',
+ nargs='*',
+ help="Files to operate on.")
+ parser.add_argument(
+ '-v', '--verbose',
+ action='store_true',
+ help="Output commands that run behind the scenes.")
+ parser.add_argument(
+ '-o', '--output',
+ help="Output file.")
+ parser.add_argument(
+ '-N', '--no-replacements',
+ action='store_true',
+ help="Don't change prefixes in files")
+ parser.add_argument(
+ '-R', '--no-renames',
+ action='store_true',
+ help="Don't rename files")
+ parser.add_argument(
+ '--git',
+ action='store_true',
+ help="Use git to find/update files.")
+ parser.add_argument(
+ '--no-stage',
+ action='store_true',
+ help="Don't stage changes with git.")
+ parser.add_argument(
+ '--git-path',
+ type=lambda x: x.split(),
+ default=GIT_PATH,
+ help="Path to git executable, may include flags. "
+ "Defaults to %r." % GIT_PATH)
+ sys.exit(main(**{k: v
+ for k, v in vars(parser.parse_intermixed_args()).items()
+ if v is not None}))
diff --git a/scripts/code.py b/scripts/code.py
index b394e9cd..ba8bd1e0 100755
--- a/scripts/code.py
+++ b/scripts/code.py
@@ -1,42 +1,188 @@
#!/usr/bin/env python3
#
-# Script to find code size at the function level. Basically just a bit wrapper
+# Script to find code size at the function level. Basically just a big wrapper
# around nm with some extra conveniences for comparing builds. Heavily inspired
# by Linux's Bloat-O-Meter.
#
+# Example:
+# ./scripts/code.py lfs.o lfs_util.o -Ssize
+#
+# Copyright (c) 2022, The littlefs authors.
+# Copyright (c) 2020, Arm Limited. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
-import os
-import glob
+import collections as co
+import csv
+import difflib
import itertools as it
-import subprocess as sp
-import shlex
+import math as m
+import os
import re
-import csv
-import collections as co
+import shlex
+import subprocess as sp
+
+
+NM_PATH = ['nm']
+NM_TYPES = 'tTrRdD'
+OBJDUMP_PATH = ['objdump']
+
+
+# integer fields
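+#
+# note Ints also accept/render ±∞, for measurements that may
+# be unbounded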
+class Int(co.namedtuple('Int', 'x')):
+ __slots__ = ()
+ def __new__(cls, x=0):
+ if isinstance(x, Int):
+ return x
+ if isinstance(x, str):
+ try:
+ x = int(x, 0)
+ except ValueError:
+ # also accept +-∞ and +-inf
+ if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
+ x = m.inf
+ elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
+ x = -m.inf
+ else:
+ raise
+ assert isinstance(x, int) or m.isinf(x), x
+ return super().__new__(cls, x)
+
+ def __str__(self):
+ if self.x == m.inf:
+ return '∞'
+ elif self.x == -m.inf:
+ return '-∞'
+ else:
+ return str(self.x)
+
+ def __int__(self):
+ assert not m.isinf(self.x)
+ return self.x
+
+ def __float__(self):
+ return float(self.x)
+
+ none = '%7s' % '-'
+ def table(self):
+ return '%7s' % (self,)
+
+ diff_none = '%7s' % '-'
+ diff_table = table
+
+ def diff_diff(self, other):
+ new = self.x if self else 0
+ old = other.x if other else 0
+ diff = new - old
+ if diff == +m.inf:
+ return '%7s' % '+∞'
+ elif diff == -m.inf:
+ return '%7s' % '-∞'
+ else:
+ return '%+7d' % diff
+
+ def ratio(self, other):
+ new = self.x if self else 0
+ old = other.x if other else 0
+ if m.isinf(new) and m.isinf(old):
+ return 0.0
+ elif m.isinf(new):
+ return +m.inf
+ elif m.isinf(old):
+ return -m.inf
+ elif not old and not new:
+ return 0.0
+ elif not old:
+ return 1.0
+ else:
+ return (new-old) / old
+
+ def __add__(self, other):
+ return self.__class__(self.x + other.x)
+
+ def __sub__(self, other):
+ return self.__class__(self.x - other.x)
+
+ def __mul__(self, other):
+ return self.__class__(self.x * other.x)
+# code size results
+class CodeResult(co.namedtuple('CodeResult', [
+ 'file', 'function',
+ 'size'])):
+ _by = ['file', 'function']
+ _fields = ['size']
+ _sort = ['size']
+ _types = {'size': Int}
-OBJ_PATHS = ['*.o']
+ __slots__ = ()
+ def __new__(cls, file='', function='', size=0):
+ return super().__new__(cls, file, function,
+ Int(size))
-def collect(paths, **args):
- results = co.defaultdict(lambda: 0)
- pattern = re.compile(
+ def __add__(self, other):
+ return CodeResult(self.file, self.function,
+ self.size + other.size)
+
+
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
+
+def collect(obj_paths, *,
+ nm_path=NM_PATH,
+ nm_types=NM_TYPES,
+ objdump_path=OBJDUMP_PATH,
+ sources=None,
+ everything=False,
+ **args):
+ size_pattern = re.compile(
'^(?P<size>[0-9a-fA-F]+)' +
- ' (?P<type>[%s])' % re.escape(args['type']) +
+ ' (?P<type>[%s])' % re.escape(nm_types) +
' (?P<func>.+?)$')
- for path in paths:
- # note nm-tool may contain extra args
- cmd = args['nm_tool'] + ['--size-sort', path]
+ line_pattern = re.compile(
+ '^\s+(?P<no>[0-9]+)'
+ '(?:\s+(?P<dir>[0-9]+))?'
+ '\s+.*'
+ '\s+(?P<path>[^\s]+)$')
+ info_pattern = re.compile(
+ '^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
+ '|.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
+ '|.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*)$')
+
+ results = []
+ for path in obj_paths:
+ # guess the source, if we have debug-info we'll replace this later
+ file = re.sub('(\.o)?$', '.c', path, 1)
+
+ # find symbol sizes
+ results_ = []
+ # note nm-path may contain extra args
+ cmd = nm_path + ['--size-sort', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
- errors='replace')
+ errors='replace',
+ close_fds=False)
for line in proc.stdout:
- m = pattern.match(line)
+ m = size_pattern.match(line)
if m:
- results[(path, m.group('func'))] += int(m.group('size'), 16)
+ func = m.group('func')
+ # discard internal functions
+ if not everything and func.startswith('__'):
+ continue
+ results_.append(CodeResult(
+ file, func,
+ int(m.group('size'), 16)))
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
@@ -44,241 +190,518 @@ def collect(paths, **args):
sys.stdout.write(line)
sys.exit(-1)
- flat_results = []
- for (file, func), size in results.items():
- # map to source files
- if args.get('build_dir'):
- file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
- # replace .o with .c, different scripts report .o/.c, we need to
- # choose one if we want to deduplicate csv files
- file = re.sub('\.o$', '.c', file)
- # discard internal functions
- if not args.get('everything'):
- if func.startswith('__'):
- continue
- # discard .8449 suffixes created by optimizer
- func = re.sub('\.[0-9]+', '', func)
-
- flat_results.append((file, func, size))
-
- return flat_results
-
-def main(**args):
- def openio(path, mode='r'):
- if path == '-':
- if 'r' in mode:
- return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+
+ # try to figure out the source file if we have debug-info
+ dirs = {}
+ files = {}
+ # note objdump-path may contain extra args
+ cmd = objdump_path + ['--dwarf=rawline', path]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
+ for line in proc.stdout:
+ # note that files contain references to dirs, which we
+ # dereference as soon as we see them as each file table follows a
+ # dir table
+ m = line_pattern.match(line)
+ if m:
+ if not m.group('dir'):
+ # found a directory entry
+ dirs[int(m.group('no'))] = m.group('path')
+ else:
+ # found a file entry
+ dir = int(m.group('dir'))
+ if dir in dirs:
+ files[int(m.group('no'))] = os.path.join(
+ dirs[dir],
+ m.group('path'))
+ else:
+ files[int(m.group('no'))] = m.group('path')
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ # do nothing on error, we don't need objdump to work, source files
+ # may just be inaccurate
+ pass
+
+ defs = {}
+ is_func = False
+ f_name = None
+ f_file = None
+ # note objdump-path may contain extra args
+ cmd = objdump_path + ['--dwarf=info', path]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
+ for line in proc.stdout:
+ # state machine here to find definitions
+ m = info_pattern.match(line)
+ if m:
+ if m.group('tag'):
+ if is_func:
+ defs[f_name] = files.get(f_file, '?')
+ is_func = (m.group('tag') == 'DW_TAG_subprogram')
+ elif m.group('name'):
+ f_name = m.group('name')
+ elif m.group('file'):
+ f_file = int(m.group('file'))
+ if is_func:
+ defs[f_name] = files.get(f_file, '?')
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ # do nothing on error, we don't need objdump to work, source files
+ # may just be inaccurate
+ pass
+
+ for r in results_:
+ # find best matching debug symbol, this may be slightly different
+ # due to optimizations
+ if defs:
+ # exact match? avoid difflib if we can for speed
+ if r.function in defs:
+ file = defs[r.function]
+ else:
+ _, file = max(
+ defs.items(),
+ key=lambda d: difflib.SequenceMatcher(None,
+ d[0],
+ r.function, False).ratio())
else:
- return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
- else:
- return open(path, mode)
+ file = r.file
- # find sizes
- if not args.get('use', None):
- # find .o files
- paths = []
- for path in args['obj_paths']:
- if os.path.isdir(path):
- path = path + '/*.o'
+ # ignore filtered sources
+ if sources is not None:
+ if not any(
+ os.path.abspath(file) == os.path.abspath(s)
+ for s in sources):
+ continue
+ else:
+ # default to only cwd
+ if not everything and not os.path.commonpath([
+ os.getcwd(),
+ os.path.abspath(file)]) == os.getcwd():
+ continue
+
+ # simplify path
+ if os.path.commonpath([
+ os.getcwd(),
+ os.path.abspath(file)]) == os.getcwd():
+ file = os.path.relpath(file)
+ else:
+ file = os.path.abspath(file)
+
+ results.append(r._replace(file=file))
+
+ return results
- for path in glob.glob(path):
- paths.append(path)
- if not paths:
- print('no .obj files found in %r?' % args['obj_paths'])
+def fold(Result, results, *,
+ by=None,
+ defines=None,
+ **_):
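+ # fold filters by matching defines, then sums up results that
+ # share the same "by" fields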
+ if by is None:
+ by = Result._by
+
+ for k in it.chain(by or [], (k for k, _ in defines or [])):
+ if k not in Result._by and k not in Result._fields:
+ print("error: could not find field %r?" % k)
sys.exit(-1)
- results = collect(paths, **args)
- else:
- with openio(args['use']) as f:
- r = csv.DictReader(f)
- results = [
- ( result['file'],
- result['name'],
- int(result['code_size']))
- for result in r
- if result.get('code_size') not in {None, ''}]
-
- total = 0
- for _, _, size in results:
- total += size
+ # filter by matching defines
+ if defines is not None:
+ results_ = []
+ for r in results:
+ if all(getattr(r, k) in vs for k, vs in defines):
+ results_.append(r)
+ results = results_
- # find previous results?
- if args.get('diff'):
- try:
- with openio(args['diff']) as f:
- r = csv.DictReader(f)
- prev_results = [
- ( result['file'],
- result['name'],
- int(result['code_size']))
- for result in r
- if result.get('code_size') not in {None, ''}]
- except FileNotFoundError:
- prev_results = []
+ # organize results into conflicts
+ folding = co.OrderedDict()
+ for r in results:
+ name = tuple(getattr(r, k) for k in by)
+ if name not in folding:
+ folding[name] = []
+ folding[name].append(r)
- prev_total = 0
- for _, _, size in prev_results:
- prev_total += size
+ # merge conflicts
+ folded = []
+ for name, rs in folding.items():
+ folded.append(sum(rs[1:], start=rs[0]))
- # write results to CSV
- if args.get('output'):
- merged_results = co.defaultdict(lambda: {})
- other_fields = []
+ return folded
- # merge?
- if args.get('merge'):
- try:
- with openio(args['merge']) as f:
- r = csv.DictReader(f)
- for result in r:
- file = result.pop('file', '')
- func = result.pop('name', '')
- result.pop('code_size', None)
- merged_results[(file, func)] = result
- other_fields = result.keys()
- except FileNotFoundError:
- pass
-
- for file, func, size in results:
- merged_results[(file, func)]['code_size'] = size
+def table(Result, results, diff_results=None, *,
+ by=None,
+ fields=None,
+ sort=None,
+ summary=False,
+ all=False,
+ percent=False,
+ **_):
+ all_, all = all, __builtins__.all
- with openio(args['output'], 'w') as f:
- w = csv.DictWriter(f, ['file', 'name', *other_fields, 'code_size'])
- w.writeheader()
- for (file, func), result in sorted(merged_results.items()):
- w.writerow({'file': file, 'name': func, **result})
-
- # print results
- def dedup_entries(results, by='name'):
- entries = co.defaultdict(lambda: 0)
- for file, func, size in results:
- entry = (file if by == 'file' else func)
- entries[entry] += size
- return entries
-
- def diff_entries(olds, news):
- diff = co.defaultdict(lambda: (0, 0, 0, 0))
- for name, new in news.items():
- diff[name] = (0, new, new, 1.0)
- for name, old in olds.items():
- _, new, _, _ = diff[name]
- diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
- return diff
-
- def sorted_entries(entries):
- if args.get('size_sort'):
- return sorted(entries, key=lambda x: (-x[1], x))
- elif args.get('reverse_size_sort'):
- return sorted(entries, key=lambda x: (+x[1], x))
- else:
- return sorted(entries)
+ if by is None:
+ by = Result._by
+ if fields is None:
+ fields = Result._fields
+ types = Result._types
- def sorted_diff_entries(entries):
- if args.get('size_sort'):
- return sorted(entries, key=lambda x: (-x[1][1], x))
- elif args.get('reverse_size_sort'):
- return sorted(entries, key=lambda x: (+x[1][1], x))
- else:
- return sorted(entries, key=lambda x: (-x[1][3], x))
+ # fold again
+ results = fold(Result, results, by=by)
+ if diff_results is not None:
+ diff_results = fold(Result, diff_results, by=by)
- def print_header(by=''):
- if not args.get('diff'):
- print('%-36s %7s' % (by, 'size'))
- else:
- print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
+ # organize by name
+ table = {
+ ','.join(str(getattr(r, k) or '') for k in by): r
+ for r in results}
+ diff_table = {
+ ','.join(str(getattr(r, k) or '') for k in by): r
+ for r in diff_results or []}
+ names = list(table.keys() | diff_table.keys())
+
+ # sort again, now with diff info, note that python's sort is stable
+ names.sort()
+ if diff_results is not None:
+ names.sort(key=lambda n: tuple(
+ types[k].ratio(
+ getattr(table.get(n), k, None),
+ getattr(diff_table.get(n), k, None))
+ for k in fields),
+ reverse=True)
+ if sort:
+ for k, reverse in reversed(sort):
+ names.sort(
+ key=lambda n: tuple(
+ (getattr(table[n], k),)
+ if getattr(table.get(n), k, None) is not None else ()
+ for k in ([k] if k else [
+ k for k in Result._sort if k in fields])),
+ reverse=reverse ^ (not k or k in Result._fields))
- def print_entry(name, size):
- print("%-36s %7d" % (name, size))
- def print_diff_entry(name, old, new, diff, ratio):
- print("%-36s %7s %7s %+7d%s" % (name,
- old or "-",
- new or "-",
- diff,
- ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+ # build up our lines
+ lines = []
- def print_entries(by='name'):
- entries = dedup_entries(results, by=by)
+ # header
+ header = []
+ header.append('%s%s' % (
+ ','.join(by),
+ ' (%d added, %d removed)' % (
+ sum(1 for n in table if n not in diff_table),
+ sum(1 for n in diff_table if n not in table))
+ if diff_results is not None and not percent else '')
+ if not summary else '')
+ if diff_results is None:
+ for k in fields:
+ header.append(k)
+ elif percent:
+ for k in fields:
+ header.append(k)
+ else:
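+ # full diffs get old (o), new (n), and diff (d) columns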
+ for k in fields:
+ header.append('o'+k)
+ for k in fields:
+ header.append('n'+k)
+ for k in fields:
+ header.append('d'+k)
+ header.append('')
+ lines.append(header)
- if not args.get('diff'):
- print_header(by=by)
- for name, size in sorted_entries(entries.items()):
- print_entry(name, size)
+ def table_entry(name, r, diff_r=None, ratios=[]):
+ entry = []
+ entry.append(name)
+ if diff_results is None:
+ for k in fields:
+ entry.append(getattr(r, k).table()
+ if getattr(r, k, None) is not None
+ else types[k].none)
+ elif percent:
+ for k in fields:
+ entry.append(getattr(r, k).diff_table()
+ if getattr(r, k, None) is not None
+ else types[k].diff_none)
else:
- prev_entries = dedup_entries(prev_results, by=by)
- diff = diff_entries(prev_entries, entries)
- print_header(by='%s (%d added, %d removed)' % (by,
- sum(1 for old, _, _, _ in diff.values() if not old),
- sum(1 for _, new, _, _ in diff.values() if not new)))
- for name, (old, new, diff, ratio) in sorted_diff_entries(
- diff.items()):
- if ratio or args.get('all'):
- print_diff_entry(name, old, new, diff, ratio)
-
- def print_totals():
- if not args.get('diff'):
- print_entry('TOTAL', total)
+ for k in fields:
+ entry.append(getattr(diff_r, k).diff_table()
+ if getattr(diff_r, k, None) is not None
+ else types[k].diff_none)
+ for k in fields:
+ entry.append(getattr(r, k).diff_table()
+ if getattr(r, k, None) is not None
+ else types[k].diff_none)
+ for k in fields:
+ entry.append(types[k].diff_diff(
+ getattr(r, k, None),
+ getattr(diff_r, k, None)))
+ if diff_results is None:
+ entry.append('')
+ elif percent:
+ entry.append(' (%s)' % ', '.join(
+ '+∞%' if t == +m.inf
+ else '-∞%' if t == -m.inf
+ else '%+.1f%%' % (100*t)
+ for t in ratios))
else:
- ratio = (0.0 if not prev_total and not total
- else 1.0 if not prev_total
- else (total-prev_total)/prev_total)
- print_diff_entry('TOTAL',
- prev_total, total,
- total-prev_total,
- ratio)
-
- if args.get('quiet'):
- pass
- elif args.get('summary'):
- print_header()
- print_totals()
- elif args.get('files'):
- print_entries(by='file')
- print_totals()
+ entry.append(' (%s)' % ', '.join(
+ '+∞%' if t == +m.inf
+ else '-∞%' if t == -m.inf
+ else '%+.1f%%' % (100*t)
+ for t in ratios
+ if t)
+ if any(ratios) else '')
+ return entry
+
+ # entries
+ if not summary:
+ for name in names:
+ r = table.get(name)
+ if diff_results is None:
+ diff_r = None
+ ratios = None
+ else:
+ diff_r = diff_table.get(name)
+ ratios = [
+ types[k].ratio(
+ getattr(r, k, None),
+ getattr(diff_r, k, None))
+ for k in fields]
+ if not all_ and not any(ratios):
+ continue
+ lines.append(table_entry(name, r, diff_r, ratios))
+
+ # total
+ r = next(iter(fold(Result, results, by=[])), None)
+ if diff_results is None:
+ diff_r = None
+ ratios = None
else:
- print_entries(by='name')
- print_totals()
+ diff_r = next(iter(fold(Result, diff_results, by=[])), None)
+ ratios = [
+ types[k].ratio(
+ getattr(r, k, None),
+ getattr(diff_r, k, None))
+ for k in fields]
+ lines.append(table_entry('TOTAL', r, diff_r, ratios))
+
+ # find the best widths, note that column 0 contains the names and column -1
+ # the ratios, so those are handled a bit differently
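+ # each width+1 is rounded up to a multiple of 4 so columns
+ # land on even stops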
+ widths = [
+ ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
+ for w, i in zip(
+ it.chain([23], it.repeat(7)),
+ range(len(lines[0])-1))]
+
+ # print our table
+ for line in lines:
+ print('%-*s %s%s' % (
+ widths[0], line[0],
+ ' '.join('%*s' % (w, x)
+ for w, x in zip(widths[1:], line[1:-1])),
+ line[-1]))
+
+
+def main(obj_paths, *,
+ by=None,
+ fields=None,
+ defines=None,
+ sort=None,
+ **args):
+ # find sizes
+ if not args.get('use', None):
+ results = collect(obj_paths, **args)
+ else:
+ results = []
+ with openio(args['use']) as f:
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ if not any('code_'+k in r and r['code_'+k].strip()
+ for k in CodeResult._fields):
+ continue
+ try:
+ results.append(CodeResult(
+ **{k: r[k] for k in CodeResult._by
+ if k in r and r[k].strip()},
+ **{k: r['code_'+k] for k in CodeResult._fields
+ if 'code_'+k in r and r['code_'+k].strip()}))
+ except TypeError:
+ pass
+
+ # fold
+ results = fold(CodeResult, results, by=by, defines=defines)
+
+ # sort, note that python's sort is stable
+ results.sort()
+ if sort:
+ for k, reverse in reversed(sort):
+ results.sort(
+ key=lambda r: tuple(
+ (getattr(r, k),) if getattr(r, k) is not None else ()
+ for k in ([k] if k else CodeResult._sort)),
+ reverse=reverse ^ (not k or k in CodeResult._fields))
+
+ # write results to CSV
+ if args.get('output'):
+ with openio(args['output'], 'w') as f:
+ writer = csv.DictWriter(f,
+ (by if by is not None else CodeResult._by)
+ + ['code_'+k for k in (
+ fields if fields is not None else CodeResult._fields)])
+ writer.writeheader()
+ for r in results:
+ writer.writerow(
+ {k: getattr(r, k) for k in (
+ by if by is not None else CodeResult._by)}
+ | {'code_'+k: getattr(r, k) for k in (
+ fields if fields is not None else CodeResult._fields)})
+
+ # find previous results?
+ if args.get('diff'):
+ diff_results = []
+ try:
+ with openio(args['diff']) as f:
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ if not any('code_'+k in r and r['code_'+k].strip()
+ for k in CodeResult._fields):
+ continue
+ try:
+ diff_results.append(CodeResult(
+ **{k: r[k] for k in CodeResult._by
+ if k in r and r[k].strip()},
+ **{k: r['code_'+k] for k in CodeResult._fields
+ if 'code_'+k in r and r['code_'+k].strip()}))
+ except TypeError:
+ pass
+ except FileNotFoundError:
+ pass
+
+ # fold
+ diff_results = fold(CodeResult, diff_results, by=by, defines=defines)
+
+ # print table
+ if not args.get('quiet'):
+ table(CodeResult, results,
+ diff_results if args.get('diff') else None,
+ by=by if by is not None else ['function'],
+ fields=fields,
+ sort=sort,
+ **args)
+
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
- description="Find code size at the function level.")
- parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
- help="Description of where to find *.o files. May be a directory \
- or a list of paths. Defaults to %r." % OBJ_PATHS)
- parser.add_argument('-v', '--verbose', action='store_true',
+ description="Find code size at the function level.",
+ allow_abbrev=False)
+ parser.add_argument(
+ 'obj_paths',
+ nargs='*',
+ help="Input *.o files.")
+ parser.add_argument(
+ '-v', '--verbose',
+ action='store_true',
help="Output commands that run behind the scenes.")
- parser.add_argument('-q', '--quiet', action='store_true',
+ parser.add_argument(
+ '-q', '--quiet',
+ action='store_true',
help="Don't show anything, useful with -o.")
- parser.add_argument('-o', '--output',
+ parser.add_argument(
+ '-o', '--output',
help="Specify CSV file to store results.")
- parser.add_argument('-u', '--use',
- help="Don't compile and find code sizes, instead use this CSV file.")
- parser.add_argument('-d', '--diff',
- help="Specify CSV file to diff code size against.")
- parser.add_argument('-m', '--merge',
- help="Merge with an existing CSV file when writing to output.")
- parser.add_argument('-a', '--all', action='store_true',
- help="Show all functions, not just the ones that changed.")
- parser.add_argument('-A', '--everything', action='store_true',
+ parser.add_argument(
+ '-u', '--use',
+ help="Don't parse anything, use this CSV file.")
+ parser.add_argument(
+ '-d', '--diff',
+ help="Specify CSV file to diff against.")
+ parser.add_argument(
+ '-a', '--all',
+ action='store_true',
+ help="Show all, not just the ones that changed.")
+ parser.add_argument(
+ '-p', '--percent',
+ action='store_true',
+ help="Only show percentage change, not a full diff.")
+ parser.add_argument(
+ '-b', '--by',
+ action='append',
+ choices=CodeResult._by,
+ help="Group by this field.")
+ parser.add_argument(
+ '-f', '--field',
+ dest='fields',
+ action='append',
+ choices=CodeResult._fields,
+ help="Show this field.")
+ parser.add_argument(
+ '-D', '--define',
+ dest='defines',
+ action='append',
+ type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
+ help="Only include results where this field is this value.")
+ class AppendSort(argparse.Action):
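+        # accumulate (field, reverse) pairs in command-line order, -S implies
+        # a reversed sort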
+ def __call__(self, parser, namespace, value, option):
+ if namespace.sort is None:
+ namespace.sort = []
+ namespace.sort.append((value, True if option == '-S' else False))
+ parser.add_argument(
+ '-s', '--sort',
+ nargs='?',
+ action=AppendSort,
+ help="Sort by this field.")
+ parser.add_argument(
+ '-S', '--reverse-sort',
+ nargs='?',
+ action=AppendSort,
+ help="Sort by this field, but backwards.")
+ parser.add_argument(
+ '-Y', '--summary',
+ action='store_true',
+ help="Only show the total.")
+ parser.add_argument(
+ '-F', '--source',
+ dest='sources',
+ action='append',
+ help="Only consider definitions in this file. Defaults to anything "
+ "in the current directory.")
+ parser.add_argument(
+ '--everything',
+ action='store_true',
help="Include builtin and libc specific symbols.")
- parser.add_argument('-s', '--size-sort', action='store_true',
- help="Sort by size.")
- parser.add_argument('-S', '--reverse-size-sort', action='store_true',
- help="Sort by size, but backwards.")
- parser.add_argument('-F', '--files', action='store_true',
- help="Show file-level code sizes. Note this does not include padding! "
- "So sizes may differ from other tools.")
- parser.add_argument('-Y', '--summary', action='store_true',
- help="Only show the total code size.")
- parser.add_argument('--type', default='tTrRdD',
+ parser.add_argument(
+ '--nm-types',
+ default=NM_TYPES,
help="Type of symbols to report, this uses the same single-character "
- "type-names emitted by nm. Defaults to %(default)r.")
- parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
- help="Path to the nm tool to use.")
- parser.add_argument('--build-dir',
- help="Specify the relative build directory. Used to map object files \
- to the correct source files.")
- sys.exit(main(**vars(parser.parse_args())))
+ "type-names emitted by nm. Defaults to %r." % NM_TYPES)
+ parser.add_argument(
+ '--nm-path',
+ type=lambda x: x.split(),
+ default=NM_PATH,
+ help="Path to the nm executable, may include flags. "
+ "Defaults to %r." % NM_PATH)
+ parser.add_argument(
+ '--objdump-path',
+ type=lambda x: x.split(),
+ default=OBJDUMP_PATH,
+ help="Path to the objdump executable, may include flags. "
+ "Defaults to %r." % OBJDUMP_PATH)
+ sys.exit(main(**{k: v
+ for k, v in vars(parser.parse_intermixed_args()).items()
+ if v is not None}))
diff --git a/scripts/cov.py b/scripts/cov.py
new file mode 100755
index 00000000..b61b2e52
--- /dev/null
+++ b/scripts/cov.py
@@ -0,0 +1,828 @@
+#!/usr/bin/env python3
+#
+# Script to find coverage info after running tests.
+#
+# Example:
+# ./scripts/cov.py \
+# lfs.t.a.gcda lfs_util.t.a.gcda \
+# -Flfs.c -Flfs_util.c -slines
+#
+# Copyright (c) 2022, The littlefs authors.
+# Copyright (c) 2020, Arm Limited. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import collections as co
+import csv
+import itertools as it
+import json
+import math as m
+import os
+import re
+import shlex
+import subprocess as sp
+
+# TODO use explode_asserts to avoid counting assert branches?
+# TODO use dwarf=info to find functions for inline functions?
+
+GCOV_PATH = ['gcov']
+
+
+# integer fields
+class Int(co.namedtuple('Int', 'x')):
+ __slots__ = ()
+ def __new__(cls, x=0):
+ if isinstance(x, Int):
+ return x
+ if isinstance(x, str):
+ try:
+ x = int(x, 0)
+ except ValueError:
+ # also accept +-∞ and +-inf
+ if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
+ x = m.inf
+ elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
+ x = -m.inf
+ else:
+ raise
+ assert isinstance(x, int) or m.isinf(x), x
+ return super().__new__(cls, x)
+
+ def __str__(self):
+ if self.x == m.inf:
+ return '∞'
+ elif self.x == -m.inf:
+ return '-∞'
+ else:
+ return str(self.x)
+
+ def __int__(self):
+ assert not m.isinf(self.x)
+ return self.x
+
+ def __float__(self):
+ return float(self.x)
+
+ none = '%7s' % '-'
+ def table(self):
+ return '%7s' % (self,)
+
+ diff_none = '%7s' % '-'
+ diff_table = table
+
+ def diff_diff(self, other):
+ new = self.x if self else 0
+ old = other.x if other else 0
+ diff = new - old
+ if diff == +m.inf:
+ return '%7s' % '+∞'
+ elif diff == -m.inf:
+ return '%7s' % '-∞'
+ else:
+ return '%+7d' % diff
+
+ def ratio(self, other):
+ new = self.x if self else 0
+ old = other.x if other else 0
+ if m.isinf(new) and m.isinf(old):
+ return 0.0
+ elif m.isinf(new):
+ return +m.inf
+ elif m.isinf(old):
+ return -m.inf
+ elif not old and not new:
+ return 0.0
+ elif not old:
+ return 1.0
+ else:
+ return (new-old) / old
+
+ def __add__(self, other):
+ return self.__class__(self.x + other.x)
+
+ def __sub__(self, other):
+ return self.__class__(self.x - other.x)
+
+ def __mul__(self, other):
+ return self.__class__(self.x * other.x)
+
+# fractional fields, a/b
+class Frac(co.namedtuple('Frac', 'a,b')):
+ __slots__ = ()
+ def __new__(cls, a=0, b=None):
+ if isinstance(a, Frac) and b is None:
+ return a
+ if isinstance(a, str) and b is None:
+ a, b = a.split('/', 1)
+ if b is None:
+ b = a
+ return super().__new__(cls, Int(a), Int(b))
+
+ def __str__(self):
+ return '%s/%s' % (self.a, self.b)
+
+ def __float__(self):
+ return float(self.a)
+
+ none = '%11s %7s' % ('-', '-')
+ def table(self):
+ t = self.a.x/self.b.x if self.b.x else 1.0
+ return '%11s %7s' % (
+ self,
+ '∞%' if t == +m.inf
+ else '-∞%' if t == -m.inf
+ else '%.1f%%' % (100*t))
+
+ diff_none = '%11s' % '-'
+ def diff_table(self):
+ return '%11s' % (self,)
+
+ def diff_diff(self, other):
+ new_a, new_b = self if self else (Int(0), Int(0))
+ old_a, old_b = other if other else (Int(0), Int(0))
+ return '%11s' % ('%s/%s' % (
+ new_a.diff_diff(old_a).strip(),
+ new_b.diff_diff(old_b).strip()))
+
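+    # note this is the absolute difference between the two coverage ratios,
+    # not a relative change like Int.ratio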
+ def ratio(self, other):
+ new_a, new_b = self if self else (Int(0), Int(0))
+ old_a, old_b = other if other else (Int(0), Int(0))
+ new = new_a.x/new_b.x if new_b.x else 1.0
+ old = old_a.x/old_b.x if old_b.x else 1.0
+ return new - old
+
+ def __add__(self, other):
+ return self.__class__(self.a + other.a, self.b + other.b)
+
+ def __sub__(self, other):
+ return self.__class__(self.a - other.a, self.b - other.b)
+
+ def __mul__(self, other):
+        return self.__class__(self.a * other.a, self.b * other.b)
+
+ def __lt__(self, other):
+ self_t = self.a.x/self.b.x if self.b.x else 1.0
+ other_t = other.a.x/other.b.x if other.b.x else 1.0
+ return (self_t, self.a.x) < (other_t, other.a.x)
+
+ def __gt__(self, other):
+ return self.__class__.__lt__(other, self)
+
+ def __le__(self, other):
+ return not self.__gt__(other)
+
+ def __ge__(self, other):
+ return not self.__lt__(other)
+
+# coverage results
+class CovResult(co.namedtuple('CovResult', [
+ 'file', 'function', 'line',
+ 'calls', 'hits', 'funcs', 'lines', 'branches'])):
+ _by = ['file', 'function', 'line']
+ _fields = ['calls', 'hits', 'funcs', 'lines', 'branches']
+ _sort = ['funcs', 'lines', 'branches', 'hits', 'calls']
+ _types = {
+ 'calls': Int, 'hits': Int,
+ 'funcs': Frac, 'lines': Frac, 'branches': Frac}
+
+ __slots__ = ()
+ def __new__(cls, file='', function='', line=0,
+ calls=0, hits=0, funcs=0, lines=0, branches=0):
+ return super().__new__(cls, file, function, int(Int(line)),
+ Int(calls), Int(hits), Frac(funcs), Frac(lines), Frac(branches))
+
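+    # when folding, calls/hits merge via max (the best run wins), while the
+    # func/line/branch coverage fractions accumulate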
+ def __add__(self, other):
+ return CovResult(self.file, self.function, self.line,
+ max(self.calls, other.calls),
+ max(self.hits, other.hits),
+ self.funcs + other.funcs,
+ self.lines + other.lines,
+ self.branches + other.branches)
+
+
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
+
+def collect(gcda_paths, *,
+ gcov_path=GCOV_PATH,
+ sources=None,
+ everything=False,
+ **args):
+ results = []
+ for path in gcda_paths:
+ # get coverage info through gcov's json output
+ # note, gcov-path may contain extra args
+        cmd = gcov_path + ['-b', '-t', '--json-format', path]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
+ data = json.load(proc.stdout)
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ sys.exit(-1)
+
+ # collect line/branch coverage
+ for file in data['files']:
+ # ignore filtered sources
+ if sources is not None:
+ if not any(
+ os.path.abspath(file['file']) == os.path.abspath(s)
+ for s in sources):
+ continue
+ else:
+ # default to only cwd
+ if not everything and not os.path.commonpath([
+ os.getcwd(),
+ os.path.abspath(file['file'])]) == os.getcwd():
+ continue
+
+ # simplify path
+ if os.path.commonpath([
+ os.getcwd(),
+ os.path.abspath(file['file'])]) == os.getcwd():
+ file_name = os.path.relpath(file['file'])
+ else:
+ file_name = os.path.abspath(file['file'])
+
+ for func in file['functions']:
+ func_name = func.get('name', '(inlined)')
+ # discard internal functions (this includes injected test cases)
+ if not everything:
+ if func_name.startswith('__'):
+ continue
+
+ # go ahead and add functions, later folding will merge this if
+ # there are other hits on this line
+ results.append(CovResult(
+ file_name, func_name, func['start_line'],
+ func['execution_count'], 0,
+ Frac(1 if func['execution_count'] > 0 else 0, 1),
+ 0,
+ 0))
+
+ for line in file['lines']:
+ func_name = line.get('function_name', '(inlined)')
+                # discard internal functions (this includes injected test cases)
+ if not everything:
+ if func_name.startswith('__'):
+ continue
+
+ # go ahead and add lines, later folding will merge this if
+ # there are other hits on this line
+ results.append(CovResult(
+ file_name, func_name, line['line_number'],
+ 0, line['count'],
+ 0,
+ Frac(1 if line['count'] > 0 else 0, 1),
+ Frac(
+ sum(1 if branch['count'] > 0 else 0
+ for branch in line['branches']),
+ len(line['branches']))))
+
+ return results
+
+
+def fold(Result, results, *,
+ by=None,
+ defines=None,
+ **_):
+ if by is None:
+ by = Result._by
+
+ for k in it.chain(by or [], (k for k, _ in defines or [])):
+ if k not in Result._by and k not in Result._fields:
+ print("error: could not find field %r?" % k)
+ sys.exit(-1)
+
+ # filter by matching defines
+ if defines is not None:
+ results_ = []
+ for r in results:
+ if all(getattr(r, k) in vs for k, vs in defines):
+ results_.append(r)
+ results = results_
+
+ # organize results into conflicts
+ folding = co.OrderedDict()
+ for r in results:
+ name = tuple(getattr(r, k) for k in by)
+ if name not in folding:
+ folding[name] = []
+ folding[name].append(r)
+
+ # merge conflicts
+ folded = []
+ for name, rs in folding.items():
+ folded.append(sum(rs[1:], start=rs[0]))
+
+ return folded
+
+def table(Result, results, diff_results=None, *,
+ by=None,
+ fields=None,
+ sort=None,
+ summary=False,
+ all=False,
+ percent=False,
+ **_):
+ all_, all = all, __builtins__.all
+
+ if by is None:
+ by = Result._by
+ if fields is None:
+ fields = Result._fields
+ types = Result._types
+
+ # fold again
+ results = fold(Result, results, by=by)
+ if diff_results is not None:
+ diff_results = fold(Result, diff_results, by=by)
+
+ # organize by name
+ table = {
+ ','.join(str(getattr(r, k) or '') for k in by): r
+ for r in results}
+ diff_table = {
+ ','.join(str(getattr(r, k) or '') for k in by): r
+ for r in diff_results or []}
+ names = list(table.keys() | diff_table.keys())
+
+ # sort again, now with diff info, note that python's sort is stable
+ names.sort()
+ if diff_results is not None:
+ names.sort(key=lambda n: tuple(
+ types[k].ratio(
+ getattr(table.get(n), k, None),
+ getattr(diff_table.get(n), k, None))
+ for k in fields),
+ reverse=True)
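+    # note that numeric fields sort descending by default, while name fields
+    # sort ascending, hence the xor with reverse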
+ if sort:
+ for k, reverse in reversed(sort):
+ names.sort(
+ key=lambda n: tuple(
+ (getattr(table[n], k),)
+ if getattr(table.get(n), k, None) is not None else ()
+ for k in ([k] if k else [
+ k for k in Result._sort if k in fields])),
+ reverse=reverse ^ (not k or k in Result._fields))
+
+
+ # build up our lines
+ lines = []
+
+ # header
+ header = []
+ header.append('%s%s' % (
+ ','.join(by),
+ ' (%d added, %d removed)' % (
+ sum(1 for n in table if n not in diff_table),
+ sum(1 for n in diff_table if n not in table))
+ if diff_results is not None and not percent else '')
+ if not summary else '')
+ if diff_results is None:
+ for k in fields:
+ header.append(k)
+ elif percent:
+ for k in fields:
+ header.append(k)
+ else:
+ for k in fields:
+ header.append('o'+k)
+ for k in fields:
+ header.append('n'+k)
+ for k in fields:
+ header.append('d'+k)
+ header.append('')
+ lines.append(header)
+
+ def table_entry(name, r, diff_r=None, ratios=[]):
+ entry = []
+ entry.append(name)
+ if diff_results is None:
+ for k in fields:
+ entry.append(getattr(r, k).table()
+ if getattr(r, k, None) is not None
+ else types[k].none)
+ elif percent:
+ for k in fields:
+ entry.append(getattr(r, k).diff_table()
+ if getattr(r, k, None) is not None
+ else types[k].diff_none)
+ else:
+ for k in fields:
+ entry.append(getattr(diff_r, k).diff_table()
+ if getattr(diff_r, k, None) is not None
+ else types[k].diff_none)
+ for k in fields:
+ entry.append(getattr(r, k).diff_table()
+ if getattr(r, k, None) is not None
+ else types[k].diff_none)
+ for k in fields:
+ entry.append(types[k].diff_diff(
+ getattr(r, k, None),
+ getattr(diff_r, k, None)))
+ if diff_results is None:
+ entry.append('')
+ elif percent:
+ entry.append(' (%s)' % ', '.join(
+ '+∞%' if t == +m.inf
+ else '-∞%' if t == -m.inf
+ else '%+.1f%%' % (100*t)
+ for t in ratios))
+ else:
+ entry.append(' (%s)' % ', '.join(
+ '+∞%' if t == +m.inf
+ else '-∞%' if t == -m.inf
+ else '%+.1f%%' % (100*t)
+ for t in ratios
+ if t)
+ if any(ratios) else '')
+ return entry
+
+ # entries
+ if not summary:
+ for name in names:
+ r = table.get(name)
+ if diff_results is None:
+ diff_r = None
+ ratios = None
+ else:
+ diff_r = diff_table.get(name)
+ ratios = [
+ types[k].ratio(
+ getattr(r, k, None),
+ getattr(diff_r, k, None))
+ for k in fields]
+ if not all_ and not any(ratios):
+ continue
+ lines.append(table_entry(name, r, diff_r, ratios))
+
+ # total
+ r = next(iter(fold(Result, results, by=[])), None)
+ if diff_results is None:
+ diff_r = None
+ ratios = None
+ else:
+ diff_r = next(iter(fold(Result, diff_results, by=[])), None)
+ ratios = [
+ types[k].ratio(
+ getattr(r, k, None),
+ getattr(diff_r, k, None))
+ for k in fields]
+ lines.append(table_entry('TOTAL', r, diff_r, ratios))
+
+ # find the best widths, note that column 0 contains the names and column -1
+ # the ratios, so those are handled a bit differently
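+    # each width is rounded up so that the column plus its separating space
+    # lands on a multiple of 4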
+ widths = [
+ ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
+ for w, i in zip(
+ it.chain([23], it.repeat(7)),
+ range(len(lines[0])-1))]
+
+ # print our table
+ for line in lines:
+ print('%-*s %s%s' % (
+ widths[0], line[0],
+ ' '.join('%*s' % (w, x)
+ for w, x in zip(widths[1:], line[1:-1])),
+ line[-1]))
+
+
+def annotate(Result, results, *,
+ annotate=False,
+ lines=False,
+ branches=False,
+ **args):
+ # if neither branches/lines specified, color both
+ if annotate and not lines and not branches:
+ lines, branches = True, True
+
+ for path in co.OrderedDict.fromkeys(r.file for r in results).keys():
+ # flatten to line info
+ results = fold(Result, results, by=['file', 'line'])
+ table = {r.line: r for r in results if r.file == path}
+
+ # calculate spans to show
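+        # if we're not annotating everything, find the spans of uncovered
+        # lines, plus a bit of context, and merge any spans that overlap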
+ if not annotate:
+ spans = []
+ last = None
+ func = None
+ for line, r in sorted(table.items()):
+ if ((lines and int(r.hits) == 0)
+ or (branches and r.branches.a < r.branches.b)):
+ if last is not None and line - last.stop <= args['context']:
+ last = range(
+ last.start,
+ line+1+args['context'])
+ else:
+ if last is not None:
+ spans.append((last, func))
+ last = range(
+ line-args['context'],
+ line+1+args['context'])
+ func = r.function
+ if last is not None:
+ spans.append((last, func))
+
+ with open(path) as f:
+ skipped = False
+ for i, line in enumerate(f):
+ # skip lines not in spans?
+ if not annotate and not any(i+1 in s for s, _ in spans):
+ skipped = True
+ continue
+
+ if skipped:
+ skipped = False
+ print('%s@@ %s:%d: %s @@%s' % (
+ '\x1b[36m' if args['color'] else '',
+ path,
+ i+1,
+                        next(f for s, f in spans if i+1 in s),
+ '\x1b[m' if args['color'] else ''))
+
+ # build line
+ if line.endswith('\n'):
+ line = line[:-1]
+
+ if i+1 in table:
+ r = table[i+1]
+ line = '%-*s // %s hits%s' % (
+ args['width'],
+ line,
+ r.hits,
+ ', %s branches' % (r.branches,)
+ if int(r.branches.b) else '')
+
+ if args['color']:
+ if lines and int(r.hits) == 0:
+ line = '\x1b[1;31m%s\x1b[m' % line
+ elif branches and r.branches.a < r.branches.b:
+ line = '\x1b[35m%s\x1b[m' % line
+
+ print(line)
+
+
+def main(gcda_paths, *,
+ by=None,
+ fields=None,
+ defines=None,
+ sort=None,
+ hits=False,
+ **args):
+ # figure out what color should be
+ if args.get('color') == 'auto':
+ args['color'] = sys.stdout.isatty()
+ elif args.get('color') == 'always':
+ args['color'] = True
+ else:
+ args['color'] = False
+
+ # find sizes
+ if not args.get('use', None):
+ results = collect(gcda_paths, **args)
+ else:
+ results = []
+ with openio(args['use']) as f:
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ if not any('cov_'+k in r and r['cov_'+k].strip()
+ for k in CovResult._fields):
+ continue
+ try:
+ results.append(CovResult(
+ **{k: r[k] for k in CovResult._by
+ if k in r and r[k].strip()},
+ **{k: r['cov_'+k]
+ for k in CovResult._fields
+ if 'cov_'+k in r
+ and r['cov_'+k].strip()}))
+ except TypeError:
+ pass
+
+ # fold
+ results = fold(CovResult, results, by=by, defines=defines)
+
+ # sort, note that python's sort is stable
+ results.sort()
+ if sort:
+ for k, reverse in reversed(sort):
+ results.sort(
+ key=lambda r: tuple(
+ (getattr(r, k),) if getattr(r, k) is not None else ()
+ for k in ([k] if k else CovResult._sort)),
+ reverse=reverse ^ (not k or k in CovResult._fields))
+
+ # write results to CSV
+ if args.get('output'):
+ with openio(args['output'], 'w') as f:
+ writer = csv.DictWriter(f,
+ (by if by is not None else CovResult._by)
+ + ['cov_'+k for k in (
+ fields if fields is not None else CovResult._fields)])
+ writer.writeheader()
+ for r in results:
+ writer.writerow(
+ {k: getattr(r, k) for k in (
+ by if by is not None else CovResult._by)}
+ | {'cov_'+k: getattr(r, k) for k in (
+ fields if fields is not None else CovResult._fields)})
+
+ # find previous results?
+ if args.get('diff'):
+ diff_results = []
+ try:
+ with openio(args['diff']) as f:
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ if not any('cov_'+k in r and r['cov_'+k].strip()
+ for k in CovResult._fields):
+ continue
+ try:
+ diff_results.append(CovResult(
+ **{k: r[k] for k in CovResult._by
+ if k in r and r[k].strip()},
+ **{k: r['cov_'+k]
+ for k in CovResult._fields
+ if 'cov_'+k in r
+ and r['cov_'+k].strip()}))
+ except TypeError:
+ pass
+ except FileNotFoundError:
+ pass
+
+ # fold
+ diff_results = fold(CovResult, diff_results,
+ by=by, defines=defines)
+
+ # print table
+ if not args.get('quiet'):
+ if (args.get('annotate')
+ or args.get('lines')
+ or args.get('branches')):
+ # annotate sources
+ annotate(CovResult, results, **args)
+ else:
+ # print table
+ table(CovResult, results,
+ diff_results if args.get('diff') else None,
+ by=by if by is not None else ['function'],
+ fields=fields if fields is not None
+ else ['lines', 'branches'] if not hits
+ else ['calls', 'hits'],
+ sort=sort,
+ **args)
+
+ # catch lack of coverage
+ if args.get('error_on_lines') and any(
+ r.lines.a < r.lines.b for r in results):
+ sys.exit(2)
+ elif args.get('error_on_branches') and any(
+ r.branches.a < r.branches.b for r in results):
+ sys.exit(3)
+
+
+if __name__ == "__main__":
+ import argparse
+ import sys
+ parser = argparse.ArgumentParser(
+ description="Find coverage info after running tests.",
+ allow_abbrev=False)
+ parser.add_argument(
+ 'gcda_paths',
+ nargs='*',
+ help="Input *.gcda files.")
+ parser.add_argument(
+ '-v', '--verbose',
+ action='store_true',
+ help="Output commands that run behind the scenes.")
+ parser.add_argument(
+ '-q', '--quiet',
+ action='store_true',
+ help="Don't show anything, useful with -o.")
+ parser.add_argument(
+ '-o', '--output',
+ help="Specify CSV file to store results.")
+ parser.add_argument(
+ '-u', '--use',
+ help="Don't parse anything, use this CSV file.")
+ parser.add_argument(
+ '-d', '--diff',
+ help="Specify CSV file to diff against.")
+ parser.add_argument(
+ '-a', '--all',
+ action='store_true',
+ help="Show all, not just the ones that changed.")
+ parser.add_argument(
+ '-p', '--percent',
+ action='store_true',
+ help="Only show percentage change, not a full diff.")
+ parser.add_argument(
+ '-b', '--by',
+ action='append',
+ choices=CovResult._by,
+ help="Group by this field.")
+ parser.add_argument(
+ '-f', '--field',
+ dest='fields',
+ action='append',
+ choices=CovResult._fields,
+ help="Show this field.")
+ parser.add_argument(
+ '-D', '--define',
+ dest='defines',
+ action='append',
+ type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
+ help="Only include results where this field is this value.")
+ class AppendSort(argparse.Action):
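+        # accumulate (field, reverse) pairs in command-line order, -S implies
+        # a reversed sort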
+ def __call__(self, parser, namespace, value, option):
+ if namespace.sort is None:
+ namespace.sort = []
+ namespace.sort.append((value, True if option == '-S' else False))
+ parser.add_argument(
+ '-s', '--sort',
+ nargs='?',
+ action=AppendSort,
+ help="Sort by this field.")
+ parser.add_argument(
+ '-S', '--reverse-sort',
+ nargs='?',
+ action=AppendSort,
+ help="Sort by this field, but backwards.")
+ parser.add_argument(
+ '-Y', '--summary',
+ action='store_true',
+ help="Only show the total.")
+ parser.add_argument(
+ '-F', '--source',
+ dest='sources',
+ action='append',
+ help="Only consider definitions in this file. Defaults to anything "
+ "in the current directory.")
+ parser.add_argument(
+ '--everything',
+ action='store_true',
+ help="Include builtin and libc specific symbols.")
+ parser.add_argument(
+ '--hits',
+ action='store_true',
+ help="Show total hits instead of coverage.")
+ parser.add_argument(
+ '-A', '--annotate',
+ action='store_true',
+ help="Show source files annotated with coverage info.")
+ parser.add_argument(
+ '-L', '--lines',
+ action='store_true',
+ help="Show uncovered lines.")
+ parser.add_argument(
+ '-B', '--branches',
+ action='store_true',
+ help="Show uncovered branches.")
+ parser.add_argument(
+ '-c', '--context',
+ type=lambda x: int(x, 0),
+ default=3,
+ help="Show n additional lines of context. Defaults to 3.")
+ parser.add_argument(
+ '-W', '--width',
+ type=lambda x: int(x, 0),
+ default=80,
+ help="Assume source is styled with this many columns. Defaults to 80.")
+ parser.add_argument(
+ '--color',
+ choices=['never', 'always', 'auto'],
+ default='auto',
+ help="When to use terminal colors. Defaults to 'auto'.")
+ parser.add_argument(
+ '-e', '--error-on-lines',
+ action='store_true',
+ help="Error if any lines are not covered.")
+ parser.add_argument(
+ '-E', '--error-on-branches',
+ action='store_true',
+ help="Error if any branches are not covered.")
+ parser.add_argument(
+ '--gcov-path',
+ default=GCOV_PATH,
+ type=lambda x: x.split(),
+ help="Path to the gcov executable, may include paths. "
+ "Defaults to %r." % GCOV_PATH)
+ sys.exit(main(**{k: v
+ for k, v in vars(parser.parse_intermixed_args()).items()
+ if v is not None}))
diff --git a/scripts/coverage.py b/scripts/coverage.py
deleted file mode 100755
index b3a90ed2..00000000
--- a/scripts/coverage.py
+++ /dev/null
@@ -1,323 +0,0 @@
-#!/usr/bin/env python3
-#
-# Parse and report coverage info from .info files generated by lcov
-#
-import os
-import glob
-import csv
-import re
-import collections as co
-import bisect as b
-
-
-INFO_PATHS = ['tests/*.toml.info']
-
-def collect(paths, **args):
- file = None
- funcs = []
- lines = co.defaultdict(lambda: 0)
- pattern = re.compile(
-        '^(?P<file>SF:/?(?P<file_name>.*))$'
-        '|^(?P<func>FN:(?P<func_lineno>[0-9]*),(?P<func_name>.*))$'
-        '|^(?P<line>DA:(?P<line_lineno>[0-9]*),(?P<line_hits>[0-9]*))$')
- for path in paths:
- with open(path) as f:
- for line in f:
- m = pattern.match(line)
- if m and m.group('file'):
- file = m.group('file_name')
- elif m and file and m.group('func'):
- funcs.append((file, int(m.group('func_lineno')),
- m.group('func_name')))
- elif m and file and m.group('line'):
- lines[(file, int(m.group('line_lineno')))] += (
- int(m.group('line_hits')))
-
- # map line numbers to functions
- funcs.sort()
- def func_from_lineno(file, lineno):
- i = b.bisect(funcs, (file, lineno))
- if i and funcs[i-1][0] == file:
- return funcs[i-1][2]
- else:
- return None
-
- # reduce to function info
- reduced_funcs = co.defaultdict(lambda: (0, 0))
- for (file, line_lineno), line_hits in lines.items():
- func = func_from_lineno(file, line_lineno)
- if not func:
- continue
- hits, count = reduced_funcs[(file, func)]
- reduced_funcs[(file, func)] = (hits + (line_hits > 0), count + 1)
-
- results = []
- for (file, func), (hits, count) in reduced_funcs.items():
- # discard internal/testing functions (test_* injected with
- # internal testing)
- if not args.get('everything'):
- if func.startswith('__') or func.startswith('test_'):
- continue
- # discard .8449 suffixes created by optimizer
- func = re.sub('\.[0-9]+', '', func)
- results.append((file, func, hits, count))
-
- return results
-
-
-def main(**args):
- def openio(path, mode='r'):
- if path == '-':
- if 'r' in mode:
- return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
- else:
- return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
- else:
- return open(path, mode)
-
- # find coverage
- if not args.get('use'):
- # find *.info files
- paths = []
- for path in args['info_paths']:
- if os.path.isdir(path):
- path = path + '/*.gcov'
-
- for path in glob.glob(path):
- paths.append(path)
-
- if not paths:
- print('no .info files found in %r?' % args['info_paths'])
- sys.exit(-1)
-
- results = collect(paths, **args)
- else:
- with openio(args['use']) as f:
- r = csv.DictReader(f)
- results = [
- ( result['file'],
- result['name'],
- int(result['coverage_hits']),
- int(result['coverage_count']))
- for result in r
- if result.get('coverage_hits') not in {None, ''}
- if result.get('coverage_count') not in {None, ''}]
-
- total_hits, total_count = 0, 0
- for _, _, hits, count in results:
- total_hits += hits
- total_count += count
-
- # find previous results?
- if args.get('diff'):
- try:
- with openio(args['diff']) as f:
- r = csv.DictReader(f)
- prev_results = [
- ( result['file'],
- result['name'],
- int(result['coverage_hits']),
- int(result['coverage_count']))
- for result in r
- if result.get('coverage_hits') not in {None, ''}
- if result.get('coverage_count') not in {None, ''}]
- except FileNotFoundError:
- prev_results = []
-
- prev_total_hits, prev_total_count = 0, 0
- for _, _, hits, count in prev_results:
- prev_total_hits += hits
- prev_total_count += count
-
- # write results to CSV
- if args.get('output'):
- merged_results = co.defaultdict(lambda: {})
- other_fields = []
-
- # merge?
- if args.get('merge'):
- try:
- with openio(args['merge']) as f:
- r = csv.DictReader(f)
- for result in r:
- file = result.pop('file', '')
- func = result.pop('name', '')
- result.pop('coverage_hits', None)
- result.pop('coverage_count', None)
- merged_results[(file, func)] = result
- other_fields = result.keys()
- except FileNotFoundError:
- pass
-
- for file, func, hits, count in results:
- merged_results[(file, func)]['coverage_hits'] = hits
- merged_results[(file, func)]['coverage_count'] = count
-
- with openio(args['output'], 'w') as f:
- w = csv.DictWriter(f, ['file', 'name', *other_fields, 'coverage_hits', 'coverage_count'])
- w.writeheader()
- for (file, func), result in sorted(merged_results.items()):
- w.writerow({'file': file, 'name': func, **result})
-
- # print results
- def dedup_entries(results, by='name'):
- entries = co.defaultdict(lambda: (0, 0))
- for file, func, hits, count in results:
- entry = (file if by == 'file' else func)
- entry_hits, entry_count = entries[entry]
- entries[entry] = (entry_hits + hits, entry_count + count)
- return entries
-
- def diff_entries(olds, news):
- diff = co.defaultdict(lambda: (0, 0, 0, 0, 0, 0, 0))
- for name, (new_hits, new_count) in news.items():
- diff[name] = (
- 0, 0,
- new_hits, new_count,
- new_hits, new_count,
- (new_hits/new_count if new_count else 1.0) - 1.0)
- for name, (old_hits, old_count) in olds.items():
- _, _, new_hits, new_count, _, _, _ = diff[name]
- diff[name] = (
- old_hits, old_count,
- new_hits, new_count,
- new_hits-old_hits, new_count-old_count,
- ((new_hits/new_count if new_count else 1.0)
- - (old_hits/old_count if old_count else 1.0)))
- return diff
-
- def sorted_entries(entries):
- if args.get('coverage_sort'):
- return sorted(entries, key=lambda x: (-(x[1][0]/x[1][1] if x[1][1] else -1), x))
- elif args.get('reverse_coverage_sort'):
- return sorted(entries, key=lambda x: (+(x[1][0]/x[1][1] if x[1][1] else -1), x))
- else:
- return sorted(entries)
-
- def sorted_diff_entries(entries):
- if args.get('coverage_sort'):
- return sorted(entries, key=lambda x: (-(x[1][2]/x[1][3] if x[1][3] else -1), x))
- elif args.get('reverse_coverage_sort'):
- return sorted(entries, key=lambda x: (+(x[1][2]/x[1][3] if x[1][3] else -1), x))
- else:
- return sorted(entries, key=lambda x: (-x[1][6], x))
-
- def print_header(by=''):
- if not args.get('diff'):
- print('%-36s %19s' % (by, 'hits/line'))
- else:
- print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff'))
-
- def print_entry(name, hits, count):
- print("%-36s %11s %7s" % (name,
- '%d/%d' % (hits, count)
- if count else '-',
- '%.1f%%' % (100*hits/count)
- if count else '-'))
-
- def print_diff_entry(name,
- old_hits, old_count,
- new_hits, new_count,
- diff_hits, diff_count,
- ratio):
- print("%-36s %11s %7s %11s %7s %11s%s" % (name,
- '%d/%d' % (old_hits, old_count)
- if old_count else '-',
- '%.1f%%' % (100*old_hits/old_count)
- if old_count else '-',
- '%d/%d' % (new_hits, new_count)
- if new_count else '-',
- '%.1f%%' % (100*new_hits/new_count)
- if new_count else '-',
- '%+d/%+d' % (diff_hits, diff_count),
- ' (%+.1f%%)' % (100*ratio) if ratio else ''))
-
- def print_entries(by='name'):
- entries = dedup_entries(results, by=by)
-
- if not args.get('diff'):
- print_header(by=by)
- for name, (hits, count) in sorted_entries(entries.items()):
- print_entry(name, hits, count)
- else:
- prev_entries = dedup_entries(prev_results, by=by)
- diff = diff_entries(prev_entries, entries)
- print_header(by='%s (%d added, %d removed)' % (by,
- sum(1 for _, old, _, _, _, _, _ in diff.values() if not old),
- sum(1 for _, _, _, new, _, _, _ in diff.values() if not new)))
- for name, (
- old_hits, old_count,
- new_hits, new_count,
- diff_hits, diff_count, ratio) in sorted_diff_entries(
- diff.items()):
- if ratio or args.get('all'):
- print_diff_entry(name,
- old_hits, old_count,
- new_hits, new_count,
- diff_hits, diff_count,
- ratio)
-
- def print_totals():
- if not args.get('diff'):
- print_entry('TOTAL', total_hits, total_count)
- else:
- ratio = ((total_hits/total_count
- if total_count else 1.0)
- - (prev_total_hits/prev_total_count
- if prev_total_count else 1.0))
- print_diff_entry('TOTAL',
- prev_total_hits, prev_total_count,
- total_hits, total_count,
- total_hits-prev_total_hits, total_count-prev_total_count,
- ratio)
-
- if args.get('quiet'):
- pass
- elif args.get('summary'):
- print_header()
- print_totals()
- elif args.get('files'):
- print_entries(by='file')
- print_totals()
- else:
- print_entries(by='name')
- print_totals()
-
-if __name__ == "__main__":
- import argparse
- import sys
- parser = argparse.ArgumentParser(
- description="Parse and report coverage info from .info files \
- generated by lcov")
- parser.add_argument('info_paths', nargs='*', default=INFO_PATHS,
- help="Description of where to find *.info files. May be a directory \
- or list of paths. *.info files will be merged to show the total \
- coverage. Defaults to %r." % INFO_PATHS)
- parser.add_argument('-v', '--verbose', action='store_true',
- help="Output commands that run behind the scenes.")
- parser.add_argument('-o', '--output',
- help="Specify CSV file to store results.")
- parser.add_argument('-u', '--use',
- help="Don't do any work, instead use this CSV file.")
- parser.add_argument('-d', '--diff',
- help="Specify CSV file to diff code size against.")
- parser.add_argument('-m', '--merge',
- help="Merge with an existing CSV file when writing to output.")
- parser.add_argument('-a', '--all', action='store_true',
- help="Show all functions, not just the ones that changed.")
- parser.add_argument('-A', '--everything', action='store_true',
- help="Include builtin and libc specific symbols.")
- parser.add_argument('-s', '--coverage-sort', action='store_true',
- help="Sort by coverage.")
- parser.add_argument('-S', '--reverse-coverage-sort', action='store_true',
- help="Sort by coverage, but backwards.")
- parser.add_argument('-F', '--files', action='store_true',
- help="Show file-level coverage.")
- parser.add_argument('-Y', '--summary', action='store_true',
- help="Only show the total coverage.")
- parser.add_argument('-q', '--quiet', action='store_true',
- help="Don't show anything, useful with -o.")
- parser.add_argument('--build-dir',
- help="Specify the relative build directory. Used to map object files \
- to the correct source files.")
- sys.exit(main(**vars(parser.parse_args())))
diff --git a/scripts/data.py b/scripts/data.py
index 4b8e00da..e9770aa1 100755
--- a/scripts/data.py
+++ b/scripts/data.py
@@ -1,42 +1,188 @@
#!/usr/bin/env python3
#
-# Script to find data size at the function level. Basically just a bit wrapper
+# Script to find data size at the function level. Basically just a big wrapper
# around nm with some extra conveniences for comparing builds. Heavily inspired
# by Linux's Bloat-O-Meter.
#
+# Example:
+# ./scripts/data.py lfs.o lfs_util.o -Ssize
+#
+# Copyright (c) 2022, The littlefs authors.
+# Copyright (c) 2020, Arm Limited. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
-import os
-import glob
+import collections as co
+import csv
+import difflib
import itertools as it
-import subprocess as sp
-import shlex
+import math as m
+import os
import re
-import csv
-import collections as co
+import shlex
+import subprocess as sp
+
+
+NM_PATH = ['nm']
+NM_TYPES = 'dDbB'
+OBJDUMP_PATH = ['objdump']
+
+
+# integer fields
+class Int(co.namedtuple('Int', 'x')):
+ __slots__ = ()
+ def __new__(cls, x=0):
+ if isinstance(x, Int):
+ return x
+ if isinstance(x, str):
+ try:
+ x = int(x, 0)
+ except ValueError:
+ # also accept +-∞ and +-inf
+ if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
+ x = m.inf
+ elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
+ x = -m.inf
+ else:
+ raise
+ assert isinstance(x, int) or m.isinf(x), x
+ return super().__new__(cls, x)
+
+ def __str__(self):
+ if self.x == m.inf:
+ return '∞'
+ elif self.x == -m.inf:
+ return '-∞'
+ else:
+ return str(self.x)
+
+ def __int__(self):
+ assert not m.isinf(self.x)
+ return self.x
+
+ def __float__(self):
+ return float(self.x)
+
+ none = '%7s' % '-'
+ def table(self):
+ return '%7s' % (self,)
+
+ diff_none = '%7s' % '-'
+ diff_table = table
+
+ def diff_diff(self, other):
+ new = self.x if self else 0
+ old = other.x if other else 0
+ diff = new - old
+ if diff == +m.inf:
+ return '%7s' % '+∞'
+ elif diff == -m.inf:
+ return '%7s' % '-∞'
+ else:
+ return '%+7d' % diff
+
+ def ratio(self, other):
+ new = self.x if self else 0
+ old = other.x if other else 0
+ if m.isinf(new) and m.isinf(old):
+ return 0.0
+ elif m.isinf(new):
+ return +m.inf
+ elif m.isinf(old):
+ return -m.inf
+ elif not old and not new:
+ return 0.0
+ elif not old:
+ return 1.0
+ else:
+ return (new-old) / old
+
+ def __add__(self, other):
+ return self.__class__(self.x + other.x)
+
+ def __sub__(self, other):
+ return self.__class__(self.x - other.x)
+
+ def __mul__(self, other):
+ return self.__class__(self.x * other.x)
+# data size results
+class DataResult(co.namedtuple('DataResult', [
+ 'file', 'function',
+ 'size'])):
+ _by = ['file', 'function']
+ _fields = ['size']
+ _sort = ['size']
+ _types = {'size': Int}
-OBJ_PATHS = ['*.o']
+ __slots__ = ()
+ def __new__(cls, file='', function='', size=0):
+ return super().__new__(cls, file, function,
+ Int(size))
-def collect(paths, **args):
- results = co.defaultdict(lambda: 0)
- pattern = re.compile(
+ def __add__(self, other):
+ return DataResult(self.file, self.function,
+ self.size + other.size)
+
+
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
+
+def collect(obj_paths, *,
+ nm_path=NM_PATH,
+ nm_types=NM_TYPES,
+ objdump_path=OBJDUMP_PATH,
+ sources=None,
+ everything=False,
+ **args):
+ size_pattern = re.compile(
         '^(?P<size>[0-9a-fA-F]+)' +
-        ' (?P<type>[%s])' % re.escape(args['type']) +
+        ' (?P<type>[%s])' % re.escape(nm_types) +
         ' (?P<func>.+?)$')
- for path in paths:
- # note nm-tool may contain extra args
- cmd = args['nm_tool'] + ['--size-sort', path]
+ line_pattern = re.compile(
+        '^\s+(?P<no>[0-9]+)'
+        '(?:\s+(?P<dir>[0-9]+))?'
+        '\s+.*'
+        '\s+(?P<path>[^\s]+)$')
+    info_pattern = re.compile(
+        '^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
+        '|.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
+        '|.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*)$')
+
+ results = []
+ for path in obj_paths:
+ # guess the source, if we have debug-info we'll replace this later
+ file = re.sub('(\.o)?$', '.c', path, 1)
+
+ # find symbol sizes
+ results_ = []
+ # note nm-path may contain extra args
+ cmd = nm_path + ['--size-sort', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
- errors='replace')
+ errors='replace',
+ close_fds=False)
for line in proc.stdout:
- m = pattern.match(line)
+ m = size_pattern.match(line)
if m:
- results[(path, m.group('func'))] += int(m.group('size'), 16)
+ func = m.group('func')
+ # discard internal functions
+ if not everything and func.startswith('__'):
+ continue
+ results_.append(DataResult(
+ file, func,
+ int(m.group('size'), 16)))
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
@@ -44,240 +190,515 @@ def collect(paths, **args):
sys.stdout.write(line)
sys.exit(-1)
- flat_results = []
- for (file, func), size in results.items():
- # map to source files
- if args.get('build_dir'):
- file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
- # replace .o with .c, different scripts report .o/.c, we need to
- # choose one if we want to deduplicate csv files
- file = re.sub('\.o$', '.c', file)
- # discard internal functions
- if not args.get('everything'):
- if func.startswith('__'):
- continue
- # discard .8449 suffixes created by optimizer
- func = re.sub('\.[0-9]+', '', func)
- flat_results.append((file, func, size))
-
- return flat_results
-
-def main(**args):
- def openio(path, mode='r'):
- if path == '-':
- if 'r' in mode:
- return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+
+ # try to figure out the source file if we have debug-info
+ dirs = {}
+ files = {}
+ # note objdump-path may contain extra args
+ cmd = objdump_path + ['--dwarf=rawline', path]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
+ for line in proc.stdout:
+            # note that files contain references to dirs, which we dereference
+            # as soon as we see them, since each file table follows its dir
+            # table
+ m = line_pattern.match(line)
+ if m:
+ if not m.group('dir'):
+ # found a directory entry
+ dirs[int(m.group('no'))] = m.group('path')
+ else:
+ # found a file entry
+ dir = int(m.group('dir'))
+ if dir in dirs:
+ files[int(m.group('no'))] = os.path.join(
+ dirs[dir],
+ m.group('path'))
+ else:
+ files[int(m.group('no'))] = m.group('path')
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ # do nothing on error, we don't need objdump to work, source files
+ # may just be inaccurate
+ pass
+
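+        # now find function definitions in the debug-info, mapping each
+        # function name to the file it was declared in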
+ defs = {}
+ is_func = False
+ f_name = None
+ f_file = None
+ # note objdump-path may contain extra args
+ cmd = objdump_path + ['--dwarf=info', path]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
+ for line in proc.stdout:
+ # state machine here to find definitions
+ m = info_pattern.match(line)
+ if m:
+ if m.group('tag'):
+ if is_func:
+ defs[f_name] = files.get(f_file, '?')
+ is_func = (m.group('tag') == 'DW_TAG_subprogram')
+ elif m.group('name'):
+ f_name = m.group('name')
+ elif m.group('file'):
+ f_file = int(m.group('file'))
+ if is_func:
+ defs[f_name] = files.get(f_file, '?')
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ # do nothing on error, we don't need objdump to work, source files
+ # may just be inaccurate
+ pass
+
+ for r in results_:
+ # find best matching debug symbol, this may be slightly different
+ # due to optimizations
+ if defs:
+ # exact match? avoid difflib if we can for speed
+ if r.function in defs:
+ file = defs[r.function]
+ else:
+ _, file = max(
+ defs.items(),
+ key=lambda d: difflib.SequenceMatcher(None,
+ d[0],
+ r.function, False).ratio())
else:
- return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
- else:
- return open(path, mode)
+ file = r.file
- # find sizes
- if not args.get('use', None):
- # find .o files
- paths = []
- for path in args['obj_paths']:
- if os.path.isdir(path):
- path = path + '/*.o'
+ # ignore filtered sources
+ if sources is not None:
+ if not any(
+ os.path.abspath(file) == os.path.abspath(s)
+ for s in sources):
+ continue
+ else:
+ # default to only cwd
+ if not everything and not os.path.commonpath([
+ os.getcwd(),
+ os.path.abspath(file)]) == os.getcwd():
+ continue
+
+ # simplify path
+ if os.path.commonpath([
+ os.getcwd(),
+ os.path.abspath(file)]) == os.getcwd():
+ file = os.path.relpath(file)
+ else:
+ file = os.path.abspath(file)
+
+ results.append(r._replace(file=file))
+
+ return results
- for path in glob.glob(path):
- paths.append(path)
- if not paths:
- print('no .obj files found in %r?' % args['obj_paths'])
+def fold(Result, results, *,
+ by=None,
+ defines=None,
+ **_):
+ if by is None:
+ by = Result._by
+
+ for k in it.chain(by or [], (k for k, _ in defines or [])):
+ if k not in Result._by and k not in Result._fields:
+ print("error: could not find field %r?" % k)
sys.exit(-1)
- results = collect(paths, **args)
- else:
- with openio(args['use']) as f:
- r = csv.DictReader(f)
- results = [
- ( result['file'],
- result['name'],
- int(result['data_size']))
- for result in r
- if result.get('data_size') not in {None, ''}]
-
- total = 0
- for _, _, size in results:
- total += size
+ # filter by matching defines
+ if defines is not None:
+ results_ = []
+ for r in results:
+ if all(getattr(r, k) in vs for k, vs in defines):
+ results_.append(r)
+ results = results_
- # find previous results?
- if args.get('diff'):
- try:
- with openio(args['diff']) as f:
- r = csv.DictReader(f)
- prev_results = [
- ( result['file'],
- result['name'],
- int(result['data_size']))
- for result in r
- if result.get('data_size') not in {None, ''}]
- except FileNotFoundError:
- prev_results = []
+ # organize results into conflicts
+ folding = co.OrderedDict()
+ for r in results:
+ name = tuple(getattr(r, k) for k in by)
+ if name not in folding:
+ folding[name] = []
+ folding[name].append(r)
- prev_total = 0
- for _, _, size in prev_results:
- prev_total += size
+ # merge conflicts
+ folded = []
+ for name, rs in folding.items():
+ folded.append(sum(rs[1:], start=rs[0]))
- # write results to CSV
- if args.get('output'):
- merged_results = co.defaultdict(lambda: {})
- other_fields = []
+ return folded
- # merge?
- if args.get('merge'):
- try:
- with openio(args['merge']) as f:
- r = csv.DictReader(f)
- for result in r:
- file = result.pop('file', '')
- func = result.pop('name', '')
- result.pop('data_size', None)
- merged_results[(file, func)] = result
- other_fields = result.keys()
- except FileNotFoundError:
- pass
-
- for file, func, size in results:
- merged_results[(file, func)]['data_size'] = size
+def table(Result, results, diff_results=None, *,
+ by=None,
+ fields=None,
+ sort=None,
+ summary=False,
+ all=False,
+ percent=False,
+ **_):
+ all_, all = all, __builtins__.all
- with openio(args['output'], 'w') as f:
- w = csv.DictWriter(f, ['file', 'name', *other_fields, 'data_size'])
- w.writeheader()
- for (file, func), result in sorted(merged_results.items()):
- w.writerow({'file': file, 'name': func, **result})
-
- # print results
- def dedup_entries(results, by='name'):
- entries = co.defaultdict(lambda: 0)
- for file, func, size in results:
- entry = (file if by == 'file' else func)
- entries[entry] += size
- return entries
-
- def diff_entries(olds, news):
- diff = co.defaultdict(lambda: (0, 0, 0, 0))
- for name, new in news.items():
- diff[name] = (0, new, new, 1.0)
- for name, old in olds.items():
- _, new, _, _ = diff[name]
- diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
- return diff
-
- def sorted_entries(entries):
- if args.get('size_sort'):
- return sorted(entries, key=lambda x: (-x[1], x))
- elif args.get('reverse_size_sort'):
- return sorted(entries, key=lambda x: (+x[1], x))
- else:
- return sorted(entries)
+ if by is None:
+ by = Result._by
+ if fields is None:
+ fields = Result._fields
+ types = Result._types
- def sorted_diff_entries(entries):
- if args.get('size_sort'):
- return sorted(entries, key=lambda x: (-x[1][1], x))
- elif args.get('reverse_size_sort'):
- return sorted(entries, key=lambda x: (+x[1][1], x))
- else:
- return sorted(entries, key=lambda x: (-x[1][3], x))
+ # fold again
+ results = fold(Result, results, by=by)
+ if diff_results is not None:
+ diff_results = fold(Result, diff_results, by=by)
- def print_header(by=''):
- if not args.get('diff'):
- print('%-36s %7s' % (by, 'size'))
- else:
- print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
+ # organize by name
+ table = {
+ ','.join(str(getattr(r, k) or '') for k in by): r
+ for r in results}
+ diff_table = {
+ ','.join(str(getattr(r, k) or '') for k in by): r
+ for r in diff_results or []}
+ names = list(table.keys() | diff_table.keys())
+
+ # sort again, now with diff info, note that python's sort is stable
+ names.sort()
+ if diff_results is not None:
+ names.sort(key=lambda n: tuple(
+ types[k].ratio(
+ getattr(table.get(n), k, None),
+ getattr(diff_table.get(n), k, None))
+ for k in fields),
+ reverse=True)
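+    # note that numeric fields sort descending by default, while name fields
+    # sort ascending, hence the xor with reverse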
+ if sort:
+ for k, reverse in reversed(sort):
+ names.sort(
+ key=lambda n: tuple(
+ (getattr(table[n], k),)
+ if getattr(table.get(n), k, None) is not None else ()
+ for k in ([k] if k else [
+ k for k in Result._sort if k in fields])),
+ reverse=reverse ^ (not k or k in Result._fields))
- def print_entry(name, size):
- print("%-36s %7d" % (name, size))
- def print_diff_entry(name, old, new, diff, ratio):
- print("%-36s %7s %7s %+7d%s" % (name,
- old or "-",
- new or "-",
- diff,
- ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+ # build up our lines
+ lines = []
- def print_entries(by='name'):
- entries = dedup_entries(results, by=by)
+ # header
+ header = []
+ header.append('%s%s' % (
+ ','.join(by),
+ ' (%d added, %d removed)' % (
+ sum(1 for n in table if n not in diff_table),
+ sum(1 for n in diff_table if n not in table))
+ if diff_results is not None and not percent else '')
+ if not summary else '')
+ if diff_results is None:
+ for k in fields:
+ header.append(k)
+ elif percent:
+ for k in fields:
+ header.append(k)
+ else:
+ for k in fields:
+ header.append('o'+k)
+ for k in fields:
+ header.append('n'+k)
+ for k in fields:
+ header.append('d'+k)
+ header.append('')
+ lines.append(header)
- if not args.get('diff'):
- print_header(by=by)
- for name, size in sorted_entries(entries.items()):
- print_entry(name, size)
+ def table_entry(name, r, diff_r=None, ratios=[]):
+ entry = []
+ entry.append(name)
+ if diff_results is None:
+ for k in fields:
+ entry.append(getattr(r, k).table()
+ if getattr(r, k, None) is not None
+ else types[k].none)
+ elif percent:
+ for k in fields:
+ entry.append(getattr(r, k).diff_table()
+ if getattr(r, k, None) is not None
+ else types[k].diff_none)
else:
- prev_entries = dedup_entries(prev_results, by=by)
- diff = diff_entries(prev_entries, entries)
- print_header(by='%s (%d added, %d removed)' % (by,
- sum(1 for old, _, _, _ in diff.values() if not old),
- sum(1 for _, new, _, _ in diff.values() if not new)))
- for name, (old, new, diff, ratio) in sorted_diff_entries(
- diff.items()):
- if ratio or args.get('all'):
- print_diff_entry(name, old, new, diff, ratio)
-
- def print_totals():
- if not args.get('diff'):
- print_entry('TOTAL', total)
+ for k in fields:
+ entry.append(getattr(diff_r, k).diff_table()
+ if getattr(diff_r, k, None) is not None
+ else types[k].diff_none)
+ for k in fields:
+ entry.append(getattr(r, k).diff_table()
+ if getattr(r, k, None) is not None
+ else types[k].diff_none)
+ for k in fields:
+ entry.append(types[k].diff_diff(
+ getattr(r, k, None),
+ getattr(diff_r, k, None)))
+ if diff_results is None:
+ entry.append('')
+ elif percent:
+ entry.append(' (%s)' % ', '.join(
+ '+∞%' if t == +m.inf
+ else '-∞%' if t == -m.inf
+ else '%+.1f%%' % (100*t)
+ for t in ratios))
else:
- ratio = (0.0 if not prev_total and not total
- else 1.0 if not prev_total
- else (total-prev_total)/prev_total)
- print_diff_entry('TOTAL',
- prev_total, total,
- total-prev_total,
- ratio)
-
- if args.get('quiet'):
- pass
- elif args.get('summary'):
- print_header()
- print_totals()
- elif args.get('files'):
- print_entries(by='file')
- print_totals()
+ entry.append(' (%s)' % ', '.join(
+ '+∞%' if t == +m.inf
+ else '-∞%' if t == -m.inf
+ else '%+.1f%%' % (100*t)
+ for t in ratios
+ if t)
+ if any(ratios) else '')
+ return entry
+
+ # entries
+ if not summary:
+ for name in names:
+ r = table.get(name)
+ if diff_results is None:
+ diff_r = None
+ ratios = None
+ else:
+ diff_r = diff_table.get(name)
+ ratios = [
+ types[k].ratio(
+ getattr(r, k, None),
+ getattr(diff_r, k, None))
+ for k in fields]
+ if not all_ and not any(ratios):
+ continue
+ lines.append(table_entry(name, r, diff_r, ratios))
+
+ # total
+ r = next(iter(fold(Result, results, by=[])), None)
+ if diff_results is None:
+ diff_r = None
+ ratios = None
else:
- print_entries(by='name')
- print_totals()
+ diff_r = next(iter(fold(Result, diff_results, by=[])), None)
+ ratios = [
+ types[k].ratio(
+ getattr(r, k, None),
+ getattr(diff_r, k, None))
+ for k in fields]
+ lines.append(table_entry('TOTAL', r, diff_r, ratios))
+
+ # find the best widths, note that column 0 contains the names and column -1
+ # the ratios, so those are handled a bit differently
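+    # each width is rounded up so that the column plus its separating space
+    # lands on a multiple of 4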
+ widths = [
+ ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
+ for w, i in zip(
+ it.chain([23], it.repeat(7)),
+ range(len(lines[0])-1))]
+
+ # print our table
+ for line in lines:
+ print('%-*s %s%s' % (
+ widths[0], line[0],
+ ' '.join('%*s' % (w, x)
+ for w, x in zip(widths[1:], line[1:-1])),
+ line[-1]))
+
+
+def main(obj_paths, *,
+ by=None,
+ fields=None,
+ defines=None,
+ sort=None,
+ **args):
+ # find sizes
+ if not args.get('use', None):
+ results = collect(obj_paths, **args)
+ else:
+ results = []
+ with openio(args['use']) as f:
+ reader = csv.DictReader(f, restval='')
+            for r in reader:
+                if not any('data_'+k in r and r['data_'+k].strip()
+                        for k in DataResult._fields):
+                    continue
+                try:
+ results.append(DataResult(
+ **{k: r[k] for k in DataResult._by
+ if k in r and r[k].strip()},
+ **{k: r['data_'+k] for k in DataResult._fields
+ if 'data_'+k in r and r['data_'+k].strip()}))
+ except TypeError:
+ pass
+
+ # fold
+ results = fold(DataResult, results, by=by, defines=defines)
+
+ # sort, note that python's sort is stable
+ results.sort()
+ if sort:
+ for k, reverse in reversed(sort):
+ results.sort(
+ key=lambda r: tuple(
+ (getattr(r, k),) if getattr(r, k) is not None else ()
+ for k in ([k] if k else DataResult._sort)),
+ reverse=reverse ^ (not k or k in DataResult._fields))
+
+ # write results to CSV
+ if args.get('output'):
+ with openio(args['output'], 'w') as f:
+ writer = csv.DictWriter(f,
+ (by if by is not None else DataResult._by)
+ + ['data_'+k for k in (
+ fields if fields is not None else DataResult._fields)])
+ writer.writeheader()
+ for r in results:
+ writer.writerow(
+ {k: getattr(r, k) for k in (
+ by if by is not None else DataResult._by)}
+ | {'data_'+k: getattr(r, k) for k in (
+ fields if fields is not None else DataResult._fields)})
+
+ # find previous results?
+ if args.get('diff'):
+ diff_results = []
+ try:
+ with openio(args['diff']) as f:
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ if not any('data_'+k in r and r['data_'+k].strip()
+ for k in DataResult._fields):
+ continue
+ try:
+ diff_results.append(DataResult(
+ **{k: r[k] for k in DataResult._by
+ if k in r and r[k].strip()},
+ **{k: r['data_'+k] for k in DataResult._fields
+ if 'data_'+k in r and r['data_'+k].strip()}))
+ except TypeError:
+ pass
+ except FileNotFoundError:
+ pass
+
+ # fold
+ diff_results = fold(DataResult, diff_results, by=by, defines=defines)
+
+ # print table
+ if not args.get('quiet'):
+ table(DataResult, results,
+ diff_results if args.get('diff') else None,
+ by=by if by is not None else ['function'],
+ fields=fields,
+ sort=sort,
+ **args)
+
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
- description="Find data size at the function level.")
- parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
- help="Description of where to find *.o files. May be a directory \
- or a list of paths. Defaults to %r." % OBJ_PATHS)
- parser.add_argument('-v', '--verbose', action='store_true',
+ description="Find data size at the function level.",
+ allow_abbrev=False)
+ parser.add_argument(
+ 'obj_paths',
+ nargs='*',
+ help="Input *.o files.")
+ parser.add_argument(
+ '-v', '--verbose',
+ action='store_true',
help="Output commands that run behind the scenes.")
- parser.add_argument('-q', '--quiet', action='store_true',
+ parser.add_argument(
+ '-q', '--quiet',
+ action='store_true',
help="Don't show anything, useful with -o.")
- parser.add_argument('-o', '--output',
+ parser.add_argument(
+ '-o', '--output',
help="Specify CSV file to store results.")
- parser.add_argument('-u', '--use',
- help="Don't compile and find data sizes, instead use this CSV file.")
- parser.add_argument('-d', '--diff',
- help="Specify CSV file to diff data size against.")
- parser.add_argument('-m', '--merge',
- help="Merge with an existing CSV file when writing to output.")
- parser.add_argument('-a', '--all', action='store_true',
- help="Show all functions, not just the ones that changed.")
- parser.add_argument('-A', '--everything', action='store_true',
+ parser.add_argument(
+ '-u', '--use',
+ help="Don't parse anything, use this CSV file.")
+ parser.add_argument(
+ '-d', '--diff',
+ help="Specify CSV file to diff against.")
+ parser.add_argument(
+ '-a', '--all',
+ action='store_true',
+ help="Show all, not just the ones that changed.")
+ parser.add_argument(
+ '-p', '--percent',
+ action='store_true',
+ help="Only show percentage change, not a full diff.")
+ parser.add_argument(
+ '-b', '--by',
+ action='append',
+ choices=DataResult._by,
+ help="Group by this field.")
+ parser.add_argument(
+ '-f', '--field',
+ dest='fields',
+ action='append',
+ choices=DataResult._fields,
+ help="Show this field.")
+ parser.add_argument(
+ '-D', '--define',
+ dest='defines',
+ action='append',
+ type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
+ help="Only include results where this field is this value.")
+ class AppendSort(argparse.Action):
+ def __call__(self, parser, namespace, value, option):
+ if namespace.sort is None:
+ namespace.sort = []
+ namespace.sort.append((value, True if option == '-S' else False))
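+ # note: multiple -s/-S flags may be given; the stable sorts are
+ # applied in reverse, so the first flag becomes the primary key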
+ parser.add_argument(
+ '-s', '--sort',
+ nargs='?',
+ action=AppendSort,
+ help="Sort by this field.")
+ parser.add_argument(
+ '-S', '--reverse-sort',
+ nargs='?',
+ action=AppendSort,
+ help="Sort by this field, but backwards.")
+ parser.add_argument(
+ '-Y', '--summary',
+ action='store_true',
+ help="Only show the total.")
+ parser.add_argument(
+ '-F', '--source',
+ dest='sources',
+ action='append',
+ help="Only consider definitions in this file. Defaults to anything "
+ "in the current directory.")
+ parser.add_argument(
+ '--everything',
+ action='store_true',
help="Include builtin and libc specific symbols.")
- parser.add_argument('-s', '--size-sort', action='store_true',
- help="Sort by size.")
- parser.add_argument('-S', '--reverse-size-sort', action='store_true',
- help="Sort by size, but backwards.")
- parser.add_argument('-F', '--files', action='store_true',
- help="Show file-level data sizes. Note this does not include padding! "
- "So sizes may differ from other tools.")
- parser.add_argument('-Y', '--summary', action='store_true',
- help="Only show the total data size.")
- parser.add_argument('--type', default='dDbB',
+ parser.add_argument(
+ '--nm-types',
+ default=NM_TYPES,
help="Type of symbols to report, this uses the same single-character "
- "type-names emitted by nm. Defaults to %(default)r.")
- parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
- help="Path to the nm tool to use.")
- parser.add_argument('--build-dir',
- help="Specify the relative build directory. Used to map object files \
- to the correct source files.")
- sys.exit(main(**vars(parser.parse_args())))
+ "type-names emitted by nm. Defaults to %r." % NM_TYPES)
+ parser.add_argument(
+ '--nm-path',
+ type=lambda x: x.split(),
+ default=NM_PATH,
+ help="Path to the nm executable, may include flags. "
+ "Defaults to %r." % NM_PATH)
+ parser.add_argument(
+ '--objdump-path',
+ type=lambda x: x.split(),
+ default=OBJDUMP_PATH,
+ help="Path to the objdump executable, may include flags. "
+ "Defaults to %r." % OBJDUMP_PATH)
+ sys.exit(main(**{k: v
+ for k, v in vars(parser.parse_intermixed_args()).items()
+ if v is not None}))
diff --git a/scripts/explode_asserts.py b/scripts/explode_asserts.py
deleted file mode 100755
index 8a8e5b1c..00000000
--- a/scripts/explode_asserts.py
+++ /dev/null
@@ -1,383 +0,0 @@
-#!/usr/bin/env python3
-
-import re
-import sys
-
-PATTERN = ['LFS_ASSERT', 'assert']
-PREFIX = 'LFS'
-MAXWIDTH = 16
-
-ASSERT = "__{PREFIX}_ASSERT_{TYPE}_{COMP}"
-FAIL = """
-__attribute__((unused))
-static void __{prefix}_assert_fail_{type}(
- const char *file, int line, const char *comp,
- {ctype} lh, size_t lsize,
- {ctype} rh, size_t rsize) {{
- printf("%s:%d:assert: assert failed with ", file, line);
- __{prefix}_assert_print_{type}(lh, lsize);
- printf(", expected %s ", comp);
- __{prefix}_assert_print_{type}(rh, rsize);
- printf("\\n");
- fflush(NULL);
- raise(SIGABRT);
-}}
-"""
-
-COMP = {
- '==': 'eq',
- '!=': 'ne',
- '<=': 'le',
- '>=': 'ge',
- '<': 'lt',
- '>': 'gt',
-}
-
-TYPE = {
- 'int': {
- 'ctype': 'intmax_t',
- 'fail': FAIL,
- 'print': """
- __attribute__((unused))
- static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
- (void)size;
- printf("%"PRIiMAX, v);
- }}
- """,
- 'assert': """
- #define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
- do {{
- __typeof__(lh) _lh = lh;
- __typeof__(lh) _rh = (__typeof__(lh))rh;
- if (!(_lh {op} _rh)) {{
- __{prefix}_assert_fail_{type}(file, line, "{comp}",
- (intmax_t)_lh, 0, (intmax_t)_rh, 0);
- }}
- }} while (0)
- """
- },
- 'bool': {
- 'ctype': 'bool',
- 'fail': FAIL,
- 'print': """
- __attribute__((unused))
- static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
- (void)size;
- printf("%s", v ? "true" : "false");
- }}
- """,
- 'assert': """
- #define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
- do {{
- bool _lh = !!(lh);
- bool _rh = !!(rh);
- if (!(_lh {op} _rh)) {{
- __{prefix}_assert_fail_{type}(file, line, "{comp}",
- _lh, 0, _rh, 0);
- }}
- }} while (0)
- """
- },
- 'mem': {
- 'ctype': 'const void *',
- 'fail': FAIL,
- 'print': """
- __attribute__((unused))
- static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
- const uint8_t *s = v;
- printf("\\\"");
- for (size_t i = 0; i < size && i < {maxwidth}; i++) {{
- if (s[i] >= ' ' && s[i] <= '~') {{
- printf("%c", s[i]);
- }} else {{
- printf("\\\\x%02x", s[i]);
- }}
- }}
- if (size > {maxwidth}) {{
- printf("...");
- }}
- printf("\\\"");
- }}
- """,
- 'assert': """
- #define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh, size)
- do {{
- const void *_lh = lh;
- const void *_rh = rh;
- if (!(memcmp(_lh, _rh, size) {op} 0)) {{
- __{prefix}_assert_fail_{type}(file, line, "{comp}",
- _lh, size, _rh, size);
- }}
- }} while (0)
- """
- },
- 'str': {
- 'ctype': 'const char *',
- 'fail': FAIL,
- 'print': """
- __attribute__((unused))
- static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
- __{prefix}_assert_print_mem(v, size);
- }}
- """,
- 'assert': """
- #define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
- do {{
- const char *_lh = lh;
- const char *_rh = rh;
- if (!(strcmp(_lh, _rh) {op} 0)) {{
- __{prefix}_assert_fail_{type}(file, line, "{comp}",
- _lh, strlen(_lh), _rh, strlen(_rh));
- }}
- }} while (0)
- """
- }
-}
-
-def mkdecls(outf, maxwidth=16):
- outf.write("#include \n")
- outf.write("#include \n")
- outf.write("#include \n")
- outf.write("#include \n")
- outf.write("#include \n")
-
- for type, desc in sorted(TYPE.items()):
- format = {
- 'type': type.lower(), 'TYPE': type.upper(),
- 'ctype': desc['ctype'],
- 'prefix': PREFIX.lower(), 'PREFIX': PREFIX.upper(),
- 'maxwidth': maxwidth,
- }
- outf.write(re.sub('\s+', ' ',
- desc['print'].strip().format(**format))+'\n')
- outf.write(re.sub('\s+', ' ',
- desc['fail'].strip().format(**format))+'\n')
-
- for op, comp in sorted(COMP.items()):
- format.update({
- 'comp': comp.lower(), 'COMP': comp.upper(),
- 'op': op,
- })
- outf.write(re.sub('\s+', ' ',
- desc['assert'].strip().format(**format))+'\n')
-
-def mkassert(type, comp, lh, rh, size=None):
- format = {
- 'type': type.lower(), 'TYPE': type.upper(),
- 'comp': comp.lower(), 'COMP': comp.upper(),
- 'prefix': PREFIX.lower(), 'PREFIX': PREFIX.upper(),
- 'lh': lh.strip(' '),
- 'rh': rh.strip(' '),
- 'size': size,
- }
- if size:
- return ((ASSERT + '(__FILE__, __LINE__, {lh}, {rh}, {size})')
- .format(**format))
- else:
- return ((ASSERT + '(__FILE__, __LINE__, {lh}, {rh})')
- .format(**format))
-
-
-# simple recursive descent parser
-LEX = {
- 'ws': [r'(?:\s|\n|#.*?\n|//.*?\n|/\*.*?\*/)+'],
- 'assert': PATTERN,
- 'string': [r'"(?:\\.|[^"])*"', r"'(?:\\.|[^'])\'"],
- 'arrow': ['=>'],
- 'paren': ['\(', '\)'],
- 'op': ['strcmp', 'memcmp', '->'],
- 'comp': ['==', '!=', '<=', '>=', '<', '>'],
- 'logic': ['\&\&', '\|\|'],
- 'sep': [':', ';', '\{', '\}', ','],
-}
-
-class ParseFailure(Exception):
- def __init__(self, expected, found):
- self.expected = expected
- self.found = found
-
- def __str__(self):
- return "expected %r, found %s..." % (
- self.expected, repr(self.found)[:70])
-
-class Parse:
- def __init__(self, inf, lexemes):
- p = '|'.join('(?P<%s>%s)' % (n, '|'.join(l))
- for n, l in lexemes.items())
- p = re.compile(p, re.DOTALL)
- data = inf.read()
- tokens = []
- while True:
- m = p.search(data)
- if m:
- if m.start() > 0:
- tokens.append((None, data[:m.start()]))
- tokens.append((m.lastgroup, m.group()))
- data = data[m.end():]
- else:
- tokens.append((None, data))
- break
- self.tokens = tokens
- self.off = 0
-
- def lookahead(self, *pattern):
- if self.off < len(self.tokens):
- token = self.tokens[self.off]
- if token[0] in pattern or token[1] in pattern:
- self.m = token[1]
- return self.m
- self.m = None
- return self.m
-
- def accept(self, *patterns):
- m = self.lookahead(*patterns)
- if m is not None:
- self.off += 1
- return m
-
- def expect(self, *patterns):
- m = self.accept(*patterns)
- if not m:
- raise ParseFailure(patterns, self.tokens[self.off:])
- return m
-
- def push(self):
- return self.off
-
- def pop(self, state):
- self.off = state
-
-def passert(p):
- def pastr(p):
- p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
- p.expect('strcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
- lh = pexpr(p) ; p.accept('ws')
- p.expect(',') ; p.accept('ws')
- rh = pexpr(p) ; p.accept('ws')
- p.expect(')') ; p.accept('ws')
- comp = p.expect('comp') ; p.accept('ws')
- p.expect('0') ; p.accept('ws')
- p.expect(')')
- return mkassert('str', COMP[comp], lh, rh)
-
- def pamem(p):
- p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
- p.expect('memcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
- lh = pexpr(p) ; p.accept('ws')
- p.expect(',') ; p.accept('ws')
- rh = pexpr(p) ; p.accept('ws')
- p.expect(',') ; p.accept('ws')
- size = pexpr(p) ; p.accept('ws')
- p.expect(')') ; p.accept('ws')
- comp = p.expect('comp') ; p.accept('ws')
- p.expect('0') ; p.accept('ws')
- p.expect(')')
- return mkassert('mem', COMP[comp], lh, rh, size)
-
- def paint(p):
- p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
- lh = pexpr(p) ; p.accept('ws')
- comp = p.expect('comp') ; p.accept('ws')
- rh = pexpr(p) ; p.accept('ws')
- p.expect(')')
- return mkassert('int', COMP[comp], lh, rh)
-
- def pabool(p):
- p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
- lh = pexprs(p) ; p.accept('ws')
- p.expect(')')
- return mkassert('bool', 'eq', lh, 'true')
-
- def pa(p):
- return p.expect('assert')
-
- state = p.push()
- lastf = None
- for pa in [pastr, pamem, paint, pabool, pa]:
- try:
- return pa(p)
- except ParseFailure as f:
- p.pop(state)
- lastf = f
- else:
- raise lastf
-
-def pexpr(p):
- res = []
- while True:
- if p.accept('('):
- res.append(p.m)
- while True:
- res.append(pexprs(p))
- if p.accept('sep'):
- res.append(p.m)
- else:
- break
- res.append(p.expect(')'))
- elif p.lookahead('assert'):
- res.append(passert(p))
- elif p.accept('assert', 'ws', 'string', 'op', None):
- res.append(p.m)
- else:
- return ''.join(res)
-
-def pexprs(p):
- res = []
- while True:
- res.append(pexpr(p))
- if p.accept('comp', 'logic', ','):
- res.append(p.m)
- else:
- return ''.join(res)
-
-def pstmt(p):
- ws = p.accept('ws') or ''
- lh = pexprs(p)
- if p.accept('=>'):
- rh = pexprs(p)
- return ws + mkassert('int', 'eq', lh, rh)
- else:
- return ws + lh
-
-
-def main(args):
- inf = open(args.input, 'r') if args.input else sys.stdin
- outf = open(args.output, 'w') if args.output else sys.stdout
-
- lexemes = LEX.copy()
- if args.pattern:
- lexemes['assert'] = args.pattern
- p = Parse(inf, lexemes)
-
- # write extra verbose asserts
- mkdecls(outf, maxwidth=args.maxwidth)
- if args.input:
- outf.write("#line %d \"%s\"\n" % (1, args.input))
-
- # parse and write out stmt at a time
- try:
- while True:
- outf.write(pstmt(p))
- if p.accept('sep'):
- outf.write(p.m)
- else:
- break
- except ParseFailure as f:
- pass
-
- for i in range(p.off, len(p.tokens)):
- outf.write(p.tokens[i][1])
-
-if __name__ == "__main__":
- import argparse
- parser = argparse.ArgumentParser(
- description="Cpp step that increases assert verbosity")
- parser.add_argument('input', nargs='?',
- help="Input C file after cpp.")
- parser.add_argument('-o', '--output', required=True,
- help="Output C file.")
- parser.add_argument('-p', '--pattern', action='append',
- help="Patterns to search for starting an assert statement.")
- parser.add_argument('--maxwidth', default=MAXWIDTH, type=int,
- help="Maximum number of characters to display for strcmp and memcmp.")
- main(parser.parse_args())
diff --git a/scripts/perf.py b/scripts/perf.py
new file mode 100755
index 00000000..2ee006c0
--- /dev/null
+++ b/scripts/perf.py
@@ -0,0 +1,1344 @@
+#!/usr/bin/env python3
+#
+# Script to aggregate and report Linux perf results.
+#
+# Example:
+# ./scripts/perf.py -R -obench.perf ./runners/bench_runner
+# ./scripts/perf.py bench.perf -j -Flfs.c -Flfs_util.c -Scycles
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import bisect
+import collections as co
+import csv
+import errno
+import fcntl
+import functools as ft
+import itertools as it
+import math as m
+import multiprocessing as mp
+import os
+import re
+import shlex
+import shutil
+import subprocess as sp
+import tempfile
+import zipfile
+
+# TODO support non-zip perf results?
+
+
+PERF_PATH = ['perf']
+PERF_EVENTS = 'cycles,branch-misses,branches,cache-misses,cache-references'
+PERF_FREQ = 100
+OBJDUMP_PATH = ['objdump']
+THRESHOLD = (0.5, 0.85)
+
+
+# integer fields
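+#
+# a rough sketch of the intended behavior (values are illustrative):
+#
+#   Int('0x12')           -> Int(18)  # parsed with int(x, 0)
+#   Int('∞') + Int(1)     -> Int(∞)   # infinities survive arithmetic
+#   Int(10).ratio(Int(5)) -> +1.0     # rendered as +100.0% in diffs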
+class Int(co.namedtuple('Int', 'x')):
+ __slots__ = ()
+ def __new__(cls, x=0):
+ if isinstance(x, Int):
+ return x
+ if isinstance(x, str):
+ try:
+ x = int(x, 0)
+ except ValueError:
+ # also accept +-∞ and +-inf
+ if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
+ x = m.inf
+ elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
+ x = -m.inf
+ else:
+ raise
+ assert isinstance(x, int) or m.isinf(x), x
+ return super().__new__(cls, x)
+
+ def __str__(self):
+ if self.x == m.inf:
+ return '∞'
+ elif self.x == -m.inf:
+ return '-∞'
+ else:
+ return str(self.x)
+
+ def __int__(self):
+ assert not m.isinf(self.x)
+ return self.x
+
+ def __float__(self):
+ return float(self.x)
+
+ none = '%7s' % '-'
+ def table(self):
+ return '%7s' % (self,)
+
+ diff_none = '%7s' % '-'
+ diff_table = table
+
+ def diff_diff(self, other):
+ new = self.x if self else 0
+ old = other.x if other else 0
+ diff = new - old
+ if diff == +m.inf:
+ return '%7s' % '+∞'
+ elif diff == -m.inf:
+ return '%7s' % '-∞'
+ else:
+ return '%+7d' % diff
+
+ def ratio(self, other):
+ new = self.x if self else 0
+ old = other.x if other else 0
+ if m.isinf(new) and m.isinf(old):
+ return 0.0
+ elif m.isinf(new):
+ return +m.inf
+ elif m.isinf(old):
+ return -m.inf
+ elif not old and not new:
+ return 0.0
+ elif not old:
+ return 1.0
+ else:
+ return (new-old) / old
+
+ def __add__(self, other):
+ return self.__class__(self.x + other.x)
+
+ def __sub__(self, other):
+ return self.__class__(self.x - other.x)
+
+ def __mul__(self, other):
+ return self.__class__(self.x * other.x)
+
+# perf results
+class PerfResult(co.namedtuple('PerfResult', [
+ 'file', 'function', 'line',
+ 'cycles', 'bmisses', 'branches', 'cmisses', 'caches',
+ 'children'])):
+ _by = ['file', 'function', 'line']
+ _fields = ['cycles', 'bmisses', 'branches', 'cmisses', 'caches']
+ _sort = ['cycles', 'bmisses', 'cmisses', 'branches', 'caches']
+ _types = {
+ 'cycles': Int,
+ 'bmisses': Int, 'branches': Int,
+ 'cmisses': Int, 'caches': Int}
+
+ __slots__ = ()
+ def __new__(cls, file='', function='', line=0,
+ cycles=0, bmisses=0, branches=0, cmisses=0, caches=0,
+ children=[]):
+ return super().__new__(cls, file, function, int(Int(line)),
+ Int(cycles), Int(bmisses), Int(branches), Int(cmisses), Int(caches),
+ children)
+
+ def __add__(self, other):
+ return PerfResult(self.file, self.function, self.line,
+ self.cycles + other.cycles,
+ self.bmisses + other.bmisses,
+ self.branches + other.branches,
+ self.cmisses + other.cmisses,
+ self.caches + other.caches,
+ self.children + other.children)
+
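+# note: the _by/_fields/_sort/_types attributes form a small protocol
+# shared with fold() and table() below, a hypothetical minimal result
+# type would look something like:
+#
+#   class ExampleResult(co.namedtuple('ExampleResult', [
+#           'name', 'hits', 'children'])):
+#       _by = ['name']
+#       _fields = ['hits']
+#       _sort = ['hits']
+#       _types = {'hits': Int}
+#
+# plus an __add__ for merging measurements when results are folded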
+
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
+
+# run perf as a subprocess, storing measurements into a zip file
+def record(command, *,
+ output=None,
+ perf_freq=PERF_FREQ,
+ perf_period=None,
+ perf_events=PERF_EVENTS,
+ perf_path=PERF_PATH,
+ **args):
+ # create a temporary file for perf to write to, as far as I can tell
+ # this is strictly needed because perf's pipe-mode only works with stdout
+ with tempfile.NamedTemporaryFile('rb') as f:
+ # figure out our perf invocation
+ perf = perf_path + list(filter(None, [
+ 'record',
+ '-F%s' % perf_freq
+ if perf_freq is not None
+ and perf_period is None else None,
+ '-c%s' % perf_period
+ if perf_period is not None else None,
+ '-B',
+ '-g',
+ '--all-user',
+ '-e%s' % perf_events,
+ '-o%s' % f.name]))
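+
+ # the assembled invocation ends up looking roughly like this,
+ # though the exact flags depend on the options above (illustrative):
+ #
+ # perf record -F100 -B -g --all-user -ecycles,... -o/tmp/tmpXXXX \
+ # ./runners/bench_runner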
+
+ # run our command
+ try:
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in perf + command))
+ err = sp.call(perf + command, close_fds=False)
+
+ except KeyboardInterrupt:
+ err = errno.EOWNERDEAD
+
+ # synchronize access
+ z = os.open(output, os.O_RDWR | os.O_CREAT)
+ fcntl.flock(z, fcntl.LOCK_EX)
+
+ # copy measurements into our zip file
+ with os.fdopen(z, 'r+b') as z:
+ with zipfile.ZipFile(z, 'a',
+ compression=zipfile.ZIP_DEFLATED,
+ compresslevel=1) as z:
+ with z.open('perf.%d' % os.getpid(), 'w') as g:
+ shutil.copyfileobj(f, g)
+
+ # forward the return code
+ return err
+
+
+# try to only process each dso once
+#
+# note this only caches with the non-keyword arguments
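+#
+# usage is just:
+#
+#   @multiprocessing_cache
+#   def f(*args, **kwargs): ...
+#
+# the local dict avoids manager round-trips on repeated lookups in one
+# process, while the manager dict shares results across processes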
+def multiprocessing_cache(f):
+ local_cache = {}
+ manager = mp.Manager()
+ global_cache = manager.dict()
+ lock = mp.Lock()
+
+ def multiprocessing_cache(*args, **kwargs):
+ # check local cache?
+ if args in local_cache:
+ return local_cache[args]
+ # check global cache?
+ with lock:
+ if args in global_cache:
+ v = global_cache[args]
+ local_cache[args] = v
+ return v
+ # fall back to calling the function
+ v = f(*args, **kwargs)
+ global_cache[args] = v
+ local_cache[args] = v
+ return v
+
+ return multiprocessing_cache
+
+@multiprocessing_cache
+def collect_syms_and_lines(obj_path, *,
+ objdump_path=None,
+ **args):
+ symbol_pattern = re.compile(
+ '^(?P<addr>[0-9a-fA-F]+)'
+ '\s+.*'
+ '\s+(?P<size>[0-9a-fA-F]+)'
+ '\s+(?P<name>[^\s]+)\s*$')
+ line_pattern = re.compile(
+ '^\s+(?:'
+ # matches dir/file table
+ '(?P<no>[0-9]+)'
+ '(?:\s+(?P<dir>[0-9]+))?'
+ '\s+.*'
+ '\s+(?P<path>[^\s]+)'
+ # matches line opcodes
+ '|' '\[[^\]]*\]\s+'
+ '(?:'
+ '(?P<op_special>Special)'
+ '|' '(?P<op_copy>Copy)'
+ '|' '(?P<op_end>End of Sequence)'
+ '|' 'File .*?to (?:entry )?(?P<op_file>\d+)'
+ '|' 'Line .*?to (?P<op_line>[0-9]+)'
+ '|' '(?:Address|PC) .*?to (?P<op_addr>[0x0-9a-fA-F]+)'
+ '|' '.' ')*'
+ ')$', re.IGNORECASE)
+
+ # figure out symbol addresses and file+line ranges
+ syms = {}
+ sym_at = []
+ cmd = objdump_path + ['-t', obj_path]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
+ for line in proc.stdout:
+ m = symbol_pattern.match(line)
+ if m:
+ name = m.group('name')
+ addr = int(m.group('addr'), 16)
+ size = int(m.group('size'), 16)
+ # ignore zero-sized symbols
+ if not size:
+ continue
+ # note multiple symbols can share a name
+ if name not in syms:
+ syms[name] = set()
+ syms[name].add((addr, size))
+ sym_at.append((addr, name, size))
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ # assume no debug-info on failure
+ pass
+
+ # sort and keep largest/first when duplicates
+ sym_at.sort(key=lambda x: (x[0], -x[2], x[1]))
+ sym_at_ = []
+ for addr, name, size in sym_at:
+ if len(sym_at_) == 0 or sym_at_[-1][0] != addr:
+ sym_at_.append((addr, name, size))
+ sym_at = sym_at_
+
+ # state machine for dwarf line numbers, note that objdump's
+ # decodedline seems to have issues with multiple dir/file
+ # tables, which is why we need this
+ lines = []
+ line_at = []
+ dirs = {}
+ files = {}
+ op_file = 1
+ op_line = 1
+ op_addr = 0
+ cmd = objdump_path + ['--dwarf=rawline', obj_path]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
+ for line in proc.stdout:
+ m = line_pattern.match(line)
+ if m:
+ if m.group('no') and not m.group('dir'):
+ # found a directory entry
+ dirs[int(m.group('no'))] = m.group('path')
+ elif m.group('no'):
+ # found a file entry
+ dir = int(m.group('dir'))
+ if dir in dirs:
+ files[int(m.group('no'))] = os.path.join(
+ dirs[dir],
+ m.group('path'))
+ else:
+ files[int(m.group('no'))] = m.group('path')
+ else:
+ # found a state machine update
+ if m.group('op_file'):
+ op_file = int(m.group('op_file'), 0)
+ if m.group('op_line'):
+ op_line = int(m.group('op_line'), 0)
+ if m.group('op_addr'):
+ op_addr = int(m.group('op_addr'), 0)
+
+ if (m.group('op_special')
+ or m.group('op_copy')
+ or m.group('op_end')):
+ file = os.path.abspath(files.get(op_file, '?'))
+ lines.append((file, op_line, op_addr))
+ line_at.append((op_addr, file, op_line))
+
+ if m.group('op_end'):
+ op_file = 1
+ op_line = 1
+ op_addr = 0
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ # assume no debug-info on failure
+ pass
+
+ # sort and keep first when duplicates
+ lines.sort()
+ lines_ = []
+ for file, line, addr in lines:
+ if len(lines_) == 0 or lines_[-1][0] != file or lines_[-1][1] != line:
+ lines_.append((file, line, addr))
+ lines = lines_
+
+ # sort and keep first when duplicates
+ line_at.sort()
+ line_at_ = []
+ for addr, file, line in line_at:
+ if len(line_at_) == 0 or line_at_[-1][0] != addr:
+ line_at_.append((addr, file, line))
+ line_at = line_at_
+
+ return syms, sym_at, lines, line_at
+
+
+def collect_decompressed(path, *,
+ perf_path=PERF_PATH,
+ sources=None,
+ everything=False,
+ propagate=0,
+ depth=1,
+ **args):
+ sample_pattern = re.compile(
+ '(?P<comm>\w+)'
+ '\s+(?P<pid>\w+)'
+ '\s+(?P<time>[\w.]+):'
+ '\s*(?P<period>\w+)'
+ '\s+(?P<event>[^:]+):')
+ frame_pattern = re.compile(
+ '\s+(?P<addr>\w+)'
+ '\s+(?P<sym>[^\s\+]+)(?:\+(?P<off>\w+))?'
+ '\s+\((?P<dso>[^\)]+)\)')
+ events = {
+ 'cycles': 'cycles',
+ 'branch-misses': 'bmisses',
+ 'branches': 'branches',
+ 'cache-misses': 'cmisses',
+ 'cache-references': 'caches'}
+
+ # note perf_path may contain extra args
+ cmd = perf_path + [
+ 'script',
+ '-i%s' % path]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
+
+ last_filtered = False
+ last_event = ''
+ last_period = 0
+ last_stack = []
+ deltas = co.defaultdict(lambda: {})
+ syms_ = co.defaultdict(lambda: {})
+ at_cache = {}
+ results = {}
+
+ def commit():
+ # tail-recursively propagate measurements
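+ #
+ # e.g. with depth=2 and the stack [bd_read, lfs_mount, main]
+ # (innermost first), each frame gets credited at the top level,
+ # with its immediate callee nested one level beneath it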
+ for i in range(len(last_stack)):
+ results_ = results
+ for j in reversed(range(i+1)):
+ if i+1-j > depth:
+ break
+
+ # propagate
+ name = last_stack[j]
+ if name not in results_:
+ results_[name] = (co.defaultdict(lambda: 0), {})
+ results_[name][0][last_event] += last_period
+
+ # recurse
+ results_ = results_[name][1]
+
+ for line in proc.stdout:
+ # we need to process a lot of data, so put off regex matching as
+ # long as possible
+ if not line.startswith('\t'):
+ if last_filtered:
+ commit()
+ last_filtered = False
+
+ if line:
+ m = sample_pattern.match(line)
+ if m and m.group('event') in events:
+ last_filtered = True
+ last_event = m.group('event')
+ last_period = int(m.group('period'), 0)
+ last_stack = []
+
+ elif last_filtered:
+ m = frame_pattern.match(line)
+ if m:
+ # filter out internal/kernel functions
+ if not everything and (
+ m.group('sym').startswith('__')
+ or m.group('sym').startswith('0')
+ or m.group('sym').startswith('-')
+ or m.group('sym').startswith('[')
+ or m.group('dso').startswith('/usr/lib')):
+ continue
+
+ dso = m.group('dso')
+ sym = m.group('sym')
+ off = int(m.group('off'), 0) if m.group('off') else 0
+ addr_ = int(m.group('addr'), 16)
+
+ # get the syms/lines for the dso, this is cached
+ syms, sym_at, lines, line_at = collect_syms_and_lines(
+ dso,
+ **args)
+
+ # ASLR is tricky, we have symbols+offsets, but static symbols
+ # mean we may have multiple options for each symbol.
+ #
+ # To try to solve this, we use previously seen symbols to build
+ # confidence for the correct ASLR delta. This means we may
+ # guess incorrectly for early symbols, but this will only affect
+ # a few samples.
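+ #
+ # e.g. if lfs_mount sits at 0x1000 in the dso but its frame showed
+ # up at 0x55551000, delta = -0x55550000 becomes a candidate, and
+ # agreement from other symbols makes it the minimum-error choice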
+ if sym in syms:
+ sym_addr_ = addr_ - off
+
+ # track possible deltas?
+ for sym_addr, size in syms[sym]:
+ delta = sym_addr - sym_addr_
+ if delta not in deltas[dso]:
+ deltas[dso][delta] = sum(
+ abs(a_+delta - a)
+ for s, (a_, _) in syms_[dso].items()
+ for a, _ in syms[s])
+ for delta in deltas[dso].keys():
+ deltas[dso][delta] += abs(sym_addr_+delta - sym_addr)
+ syms_[dso][sym] = sym_addr_, size
+
+ # guess the best delta
+ delta, _ = min(deltas[dso].items(),
+ key=lambda x: (x[1], x[0]))
+ addr = addr_ + delta
+
+ # cached?
+ if (dso,addr) in at_cache:
+ cached = at_cache[(dso,addr)]
+ if cached is None:
+ # cache says to skip
+ continue
+ file, line = cached
+ else:
+ # find file+line
+ i = bisect.bisect(line_at, addr, key=lambda x: x[0])
+ if i > 0:
+ _, file, line = line_at[i-1]
+ else:
+ file, line = re.sub('(\.o)?$', '.c', dso, 1), 0
+
+ # ignore filtered sources
+ if sources is not None:
+ if not any(
+ os.path.abspath(file) == os.path.abspath(s)
+ for s in sources):
+ at_cache[(dso,addr)] = None
+ continue
+ else:
+ # default to only cwd
+ if not everything and not os.path.commonpath([
+ os.getcwd(),
+ os.path.abspath(file)]) == os.getcwd():
+ at_cache[(dso,addr)] = None
+ continue
+
+ # simplify path
+ if os.path.commonpath([
+ os.getcwd(),
+ os.path.abspath(file)]) == os.getcwd():
+ file = os.path.relpath(file)
+ else:
+ file = os.path.abspath(file)
+
+ at_cache[(dso,addr)] = file, line
+ else:
+ file, line = re.sub('(\.o)?$', '.c', dso, 1), 0
+
+ last_stack.append((file, sym, line))
+
+ # stop propagating?
+ if propagate and len(last_stack) >= propagate:
+ commit()
+ last_filtered = False
+ if last_filtered:
+ commit()
+
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ sys.exit(-1)
+
+ # rearrange results into result type
+ def to_results(results):
+ results_ = []
+ for name, (r, children) in results.items():
+ results_.append(PerfResult(*name,
+ **{events[k]: v for k, v in r.items()},
+ children=to_results(children)))
+ return results_
+
+ return to_results(results)
+
+def collect_job(path, i, **args):
+ # decompress into a temporary file, this is to work around
+ # some limitations of perf
+ with zipfile.ZipFile(path) as z:
+ with z.open(i) as f:
+ with tempfile.NamedTemporaryFile('wb') as g:
+ shutil.copyfileobj(f, g)
+ g.flush()
+
+ return collect_decompressed(g.name, **args)
+
+def starapply(args):
+ f, args, kwargs = args
+ return f(*args, **kwargs)
+
+def collect(perf_paths, *,
+ jobs=None,
+ **args):
+ # automatic job detection?
+ if jobs == 0:
+ jobs = len(os.sched_getaffinity(0))
+
+ records = []
+ for path in perf_paths:
+ # each .perf file is actually a zip file containing perf files from
+ # multiple runs
+ with zipfile.ZipFile(path) as z:
+ records.extend((path, i) for i in z.infolist())
+
+ # we're dealing with a lot of data, but it's also surprisingly
+ # parallelizable
+ if jobs is not None:
+ results = []
+ with mp.Pool(jobs) as p:
+ for results_ in p.imap_unordered(
+ starapply,
+ ((collect_job, (path, i), args) for path, i in records)):
+ results.extend(results_)
+ else:
+ results = []
+ for path, i in records:
+ results.extend(collect_job(path, i, **args))
+
+ return results
+
+
+def fold(Result, results, *,
+ by=None,
+ defines=None,
+ **_):
+ if by is None:
+ by = Result._by
+
+ for k in it.chain(by or [], (k for k, _ in defines or [])):
+ if k not in Result._by and k not in Result._fields:
+ print("error: could not find field %r?" % k)
+ sys.exit(-1)
+
+ # filter by matching defines
+ if defines is not None:
+ results_ = []
+ for r in results:
+ if all(getattr(r, k) in vs for k, vs in defines):
+ results_.append(r)
+ results = results_
+
+ # organize results into conflicts
+ folding = co.OrderedDict()
+ for r in results:
+ name = tuple(getattr(r, k) for k in by)
+ if name not in folding:
+ folding[name] = []
+ folding[name].append(r)
+
+ # merge conflicts
+ folded = []
+ for name, rs in folding.items():
+ folded.append(sum(rs[1:], start=rs[0]))
+
+ # fold recursively
+ folded_ = []
+ for r in folded:
+ folded_.append(r._replace(children=fold(
+ Result, r.children,
+ by=by,
+ defines=defines)))
+ folded = folded_
+
+ return folded
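+
+# e.g. fold(PerfResult, results, by=['function']) merges every result
+# that shares a function name, summing cycles/branches/caches via the
+# result type's __add__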
+
+def table(Result, results, diff_results=None, *,
+ by=None,
+ fields=None,
+ sort=None,
+ summary=False,
+ all=False,
+ percent=False,
+ depth=1,
+ **_):
+ all_, all = all, __builtins__.all
+
+ if by is None:
+ by = Result._by
+ if fields is None:
+ fields = Result._fields
+ types = Result._types
+
+ # fold again
+ results = fold(Result, results, by=by)
+ if diff_results is not None:
+ diff_results = fold(Result, diff_results, by=by)
+
+ # organize by name
+ table = {
+ ','.join(str(getattr(r, k) or '') for k in by): r
+ for r in results}
+ diff_table = {
+ ','.join(str(getattr(r, k) or '') for k in by): r
+ for r in diff_results or []}
+ names = list(table.keys() | diff_table.keys())
+
+ # sort again, now with diff info, note that python's sort is stable
+ names.sort()
+ if diff_results is not None:
+ names.sort(key=lambda n: tuple(
+ types[k].ratio(
+ getattr(table.get(n), k, None),
+ getattr(diff_table.get(n), k, None))
+ for k in fields),
+ reverse=True)
+ if sort:
+ for k, reverse in reversed(sort):
+ names.sort(
+ key=lambda n: tuple(
+ (getattr(table[n], k),)
+ if getattr(table.get(n), k, None) is not None else ()
+ for k in ([k] if k else [
+ k for k in Result._sort if k in fields])),
+ reverse=reverse ^ (not k or k in Result._fields))
+
+
+ # build up our lines
+ lines = []
+
+ # header
+ header = []
+ header.append('%s%s' % (
+ ','.join(by),
+ ' (%d added, %d removed)' % (
+ sum(1 for n in table if n not in diff_table),
+ sum(1 for n in diff_table if n not in table))
+ if diff_results is not None and not percent else '')
+ if not summary else '')
+ if diff_results is None:
+ for k in fields:
+ header.append(k)
+ elif percent:
+ for k in fields:
+ header.append(k)
+ else:
+ for k in fields:
+ header.append('o'+k)
+ for k in fields:
+ header.append('n'+k)
+ for k in fields:
+ header.append('d'+k)
+ header.append('')
+ lines.append(header)
+
+ def table_entry(name, r, diff_r=None, ratios=[]):
+ entry = []
+ entry.append(name)
+ if diff_results is None:
+ for k in fields:
+ entry.append(getattr(r, k).table()
+ if getattr(r, k, None) is not None
+ else types[k].none)
+ elif percent:
+ for k in fields:
+ entry.append(getattr(r, k).diff_table()
+ if getattr(r, k, None) is not None
+ else types[k].diff_none)
+ else:
+ for k in fields:
+ entry.append(getattr(diff_r, k).diff_table()
+ if getattr(diff_r, k, None) is not None
+ else types[k].diff_none)
+ for k in fields:
+ entry.append(getattr(r, k).diff_table()
+ if getattr(r, k, None) is not None
+ else types[k].diff_none)
+ for k in fields:
+ entry.append(types[k].diff_diff(
+ getattr(r, k, None),
+ getattr(diff_r, k, None)))
+ if diff_results is None:
+ entry.append('')
+ elif percent:
+ entry.append(' (%s)' % ', '.join(
+ '+∞%' if t == +m.inf
+ else '-∞%' if t == -m.inf
+ else '%+.1f%%' % (100*t)
+ for t in ratios))
+ else:
+ entry.append(' (%s)' % ', '.join(
+ '+∞%' if t == +m.inf
+ else '-∞%' if t == -m.inf
+ else '%+.1f%%' % (100*t)
+ for t in ratios
+ if t)
+ if any(ratios) else '')
+ return entry
+
+ # entries
+ if not summary:
+ for name in names:
+ r = table.get(name)
+ if diff_results is None:
+ diff_r = None
+ ratios = None
+ else:
+ diff_r = diff_table.get(name)
+ ratios = [
+ types[k].ratio(
+ getattr(r, k, None),
+ getattr(diff_r, k, None))
+ for k in fields]
+ if not all_ and not any(ratios):
+ continue
+ lines.append(table_entry(name, r, diff_r, ratios))
+
+ # total
+ r = next(iter(fold(Result, results, by=[])), None)
+ if diff_results is None:
+ diff_r = None
+ ratios = None
+ else:
+ diff_r = next(iter(fold(Result, diff_results, by=[])), None)
+ ratios = [
+ types[k].ratio(
+ getattr(r, k, None),
+ getattr(diff_r, k, None))
+ for k in fields]
+ lines.append(table_entry('TOTAL', r, diff_r, ratios))
+
+ # find the best widths, note that column 0 contains the names and column -1
+ # the ratios, so those are handled a bit differently
+ widths = [
+ ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
+ for w, i in zip(
+ it.chain([23], it.repeat(7)),
+ range(len(lines[0])-1))]
+
+ # adjust the name width based on the expected call depth, though
+ # note this doesn't really work with unbounded recursion
+ if not summary and not m.isinf(depth):
+ widths[0] += 4*(depth-1)
+
+ # print the tree recursively
+ print('%-*s %s%s' % (
+ widths[0], lines[0][0],
+ ' '.join('%*s' % (w, x)
+ for w, x in zip(widths[1:], lines[0][1:-1])),
+ lines[0][-1]))
+
+ if not summary:
+ def recurse(results_, depth_, prefixes=('', '', '', '')):
+ # rebuild our tables at each layer
+ table_ = {
+ ','.join(str(getattr(r, k) or '') for k in by): r
+ for r in results_}
+ names_ = list(table_.keys())
+
+ # sort again at each layer, keep in mind the numbers are
+ # changing as we descend
+ names_.sort()
+ if sort:
+ for k, reverse in reversed(sort):
+ names_.sort(
+ key=lambda n: tuple(
+ (getattr(table_[n], k),)
+ if getattr(table_.get(n), k, None) is not None
+ else ()
+ for k in ([k] if k else [
+ k for k in Result._sort if k in fields])),
+ reverse=reverse ^ (not k or k in Result._fields))
+
+ for i, name in enumerate(names_):
+ r = table_[name]
+ is_last = (i == len(names_)-1)
+
+ print('%s%-*s %s' % (
+ prefixes[0+is_last],
+ widths[0] - (
+ len(prefixes[0+is_last])
+ if not m.isinf(depth) else 0),
+ name,
+ ' '.join('%*s' % (w, x)
+ for w, x in zip(
+ widths[1:],
+ table_entry(name, r)[1:]))))
+
+ # recurse?
+ if depth_ > 1:
+ recurse(
+ r.children,
+ depth_-1,
+ (prefixes[2+is_last] + "|-> ",
+ prefixes[2+is_last] + "'-> ",
+ prefixes[2+is_last] + "| ",
+ prefixes[2+is_last] + " "))
+
+ # we have enough going on with diffing to make the top layer
+ # a special case
+ for name, line in zip(names, lines[1:-1]):
+ print('%-*s %s%s' % (
+ widths[0], line[0],
+ ' '.join('%*s' % (w, x)
+ for w, x in zip(widths[1:], line[1:-1])),
+ line[-1]))
+
+ if name in table and depth > 1:
+ recurse(
+ table[name].children,
+ depth-1,
+ ("|-> ",
+ "'-> ",
+ "| ",
+ " "))
+
+ print('%-*s %s%s' % (
+ widths[0], lines[-1][0],
+ ' '.join('%*s' % (w, x)
+ for w, x in zip(widths[1:], lines[-1][1:-1])),
+ lines[-1][-1]))
+
+
+def annotate(Result, results, *,
+ annotate=None,
+ threshold=None,
+ branches=False,
+ caches=False,
+ **args):
+ # figure out the threshold
+ if threshold is None:
+ t0, t1 = THRESHOLD
+ elif len(threshold) == 1:
+ t0, t1 = threshold[0], threshold[0]
+ else:
+ t0, t1 = threshold
+ t0, t1 = min(t0, t1), max(t0, t1)
+
+ if not branches and not caches:
+ tk = 'cycles'
+ elif branches:
+ tk = 'bmisses'
+ else:
+ tk = 'cmisses'
+
+ # find the max of the field the threshold applies to
+ max_ = max(it.chain((float(getattr(r, tk)) for r in results), [1]))
+
+ for path in co.OrderedDict.fromkeys(r.file for r in results).keys():
+ # flatten to line info
+ results = fold(Result, results, by=['file', 'line'])
+ table = {r.line: r for r in results if r.file == path}
+
+ # calculate spans to show
+ if not annotate:
+ spans = []
+ last = None
+ func = None
+ for line, r in sorted(table.items()):
+ if float(getattr(r, tk)) / max_ >= t0:
+ if last is not None and line - last.stop <= args['context']:
+ last = range(
+ last.start,
+ line+1+args['context'])
+ else:
+ if last is not None:
+ spans.append((last, func))
+ last = range(
+ line-args['context'],
+ line+1+args['context'])
+ func = r.function
+ if last is not None:
+ spans.append((last, func))
+
+ with open(path) as f:
+ skipped = False
+ for i, line in enumerate(f):
+ # skip lines not in spans?
+ if not annotate and not any(i+1 in s for s, _ in spans):
+ skipped = True
+ continue
+
+ if skipped:
+ skipped = False
+ print('%s@@ %s:%d: %s @@%s' % (
+ '\x1b[36m' if args['color'] else '',
+ path,
+ i+1,
+ next(iter(f for _, f in spans)),
+ '\x1b[m' if args['color'] else ''))
+
+ # build line
+ if line.endswith('\n'):
+ line = line[:-1]
+
+ r = table.get(i+1)
+ if r is not None and (
+ float(r.cycles) > 0
+ if not branches and not caches
+ else float(r.bmisses) > 0 or float(r.branches) > 0
+ if branches
+ else float(r.cmisses) > 0 or float(r.caches) > 0):
+ line = '%-*s // %s' % (
+ args['width'],
+ line,
+ '%s cycles' % r.cycles
+ if not branches and not caches
+ else '%s bmisses, %s branches' % (r.bmisses, r.branches)
+ if branches
+ else '%s cmisses, %s caches' % (r.cmisses, r.caches))
+
+ if args['color']:
+ if float(getattr(r, tk)) / max_ >= t1:
+ line = '\x1b[1;31m%s\x1b[m' % line
+ elif float(getattr(r, tk)) / max_ >= t0:
+ line = '\x1b[35m%s\x1b[m' % line
+
+ print(line)
+
+
+def report(perf_paths, *,
+ by=None,
+ fields=None,
+ defines=None,
+ sort=None,
+ branches=False,
+ caches=False,
+ **args):
+ # figure out what color should be
+ if args.get('color') == 'auto':
+ args['color'] = sys.stdout.isatty()
+ elif args.get('color') == 'always':
+ args['color'] = True
+ else:
+ args['color'] = False
+
+ # depth of 0 == m.inf
+ if args.get('depth') == 0:
+ args['depth'] = m.inf
+
+ # find perf results
+ if not args.get('use', None):
+ results = collect(perf_paths, **args)
+ else:
+ results = []
+ with openio(args['use']) as f:
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ if not any('perf_'+k in r and r['perf_'+k].strip()
+ for k in PerfResult._fields):
+ continue
+ try:
+ results.append(PerfResult(
+ **{k: r[k] for k in PerfResult._by
+ if k in r and r[k].strip()},
+ **{k: r['perf_'+k] for k in PerfResult._fields
+ if 'perf_'+k in r and r['perf_'+k].strip()}))
+ except TypeError:
+ pass
+
+ # fold
+ results = fold(PerfResult, results, by=by, defines=defines)
+
+ # sort, note that python's sort is stable
+ results.sort()
+ if sort:
+ for k, reverse in reversed(sort):
+ results.sort(
+ key=lambda r: tuple(
+ (getattr(r, k),) if getattr(r, k) is not None else ()
+ for k in ([k] if k else PerfResult._sort)),
+ reverse=reverse ^ (not k or k in PerfResult._fields))
+
+ # write results to CSV
+ if args.get('output'):
+ with openio(args['output'], 'w') as f:
+ writer = csv.DictWriter(f,
+ (by if by is not None else PerfResult._by)
+ + ['perf_'+k for k in (
+ fields if fields is not None else PerfResult._fields)])
+ writer.writeheader()
+ for r in results:
+ writer.writerow(
+ {k: getattr(r, k) for k in (
+ by if by is not None else PerfResult._by)}
+ | {'perf_'+k: getattr(r, k) for k in (
+ fields if fields is not None else PerfResult._fields)})
+
+ # find previous results?
+ if args.get('diff'):
+ diff_results = []
+ try:
+ with openio(args['diff']) as f:
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ if not any('perf_'+k in r and r['perf_'+k].strip()
+ for k in PerfResult._fields):
+ continue
+ try:
+ diff_results.append(PerfResult(
+ **{k: r[k] for k in PerfResult._by
+ if k in r and r[k].strip()},
+ **{k: r['perf_'+k] for k in PerfResult._fields
+ if 'perf_'+k in r and r['perf_'+k].strip()}))
+ except TypeError:
+ pass
+ except FileNotFoundError:
+ pass
+
+ # fold
+ diff_results = fold(PerfResult, diff_results, by=by, defines=defines)
+
+ # print table
+ if not args.get('quiet'):
+ if args.get('annotate') or args.get('threshold'):
+ # annotate sources
+ annotate(PerfResult, results,
+ branches=branches,
+ caches=caches,
+ **args)
+ else:
+ # print table
+ table(PerfResult, results,
+ diff_results if args.get('diff') else None,
+ by=by if by is not None else ['function'],
+ fields=fields if fields is not None
+ else ['cycles'] if not branches and not caches
+ else ['bmisses', 'branches'] if branches
+ else ['cmisses', 'caches'],
+ sort=sort,
+ **args)
+
+
+def main(**args):
+ if args.get('record'):
+ return record(**args)
+ else:
+ return report(**args)
+
+
+if __name__ == "__main__":
+ import argparse
+ import sys
+
+ # bit of a hack, but parse_intermixed_args and REMAINDER are
+ # incompatible, so we need to figure out what we want before running
+ # argparse
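+ #
+ # with -R, REMAINDER makes everything after the command's name flow
+ # through to the command verbatim, e.g. in
+ #
+ # ./scripts/perf.py -R -obench.perf ./runners/bench_runner -j
+ #
+ # the trailing -j belongs to bench_runner, not to perf.py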
+ if '-R' in sys.argv or '--record' in sys.argv:
+ nargs = argparse.REMAINDER
+ else:
+ nargs = '*'
+
+ argparse.ArgumentParser._handle_conflict_ignore = lambda *_: None
+ argparse._ArgumentGroup._handle_conflict_ignore = lambda *_: None
+ parser = argparse.ArgumentParser(
+ description="Aggregate and report Linux perf results.",
+ allow_abbrev=False,
+ conflict_handler='ignore')
+ parser.add_argument(
+ 'perf_paths',
+ nargs=nargs,
+ help="Input *.perf files.")
+ parser.add_argument(
+ '-v', '--verbose',
+ action='store_true',
+ help="Output commands that run behind the scenes.")
+ parser.add_argument(
+ '-q', '--quiet',
+ action='store_true',
+ help="Don't show anything, useful with -o.")
+ parser.add_argument(
+ '-o', '--output',
+ help="Specify CSV file to store results.")
+ parser.add_argument(
+ '-u', '--use',
+ help="Don't parse anything, use this CSV file.")
+ parser.add_argument(
+ '-d', '--diff',
+ help="Specify CSV file to diff against.")
+ parser.add_argument(
+ '-a', '--all',
+ action='store_true',
+ help="Show all, not just the ones that changed.")
+ parser.add_argument(
+ '-p', '--percent',
+ action='store_true',
+ help="Only show percentage change, not a full diff.")
+ parser.add_argument(
+ '-b', '--by',
+ action='append',
+ choices=PerfResult._by,
+ help="Group by this field.")
+ parser.add_argument(
+ '-f', '--field',
+ dest='fields',
+ action='append',
+ choices=PerfResult._fields,
+ help="Show this field.")
+ parser.add_argument(
+ '-D', '--define',
+ dest='defines',
+ action='append',
+ type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
+ help="Only include results where this field is this value.")
+ class AppendSort(argparse.Action):
+ def __call__(self, parser, namespace, value, option):
+ if namespace.sort is None:
+ namespace.sort = []
+ namespace.sort.append((value, True if option == '-S' else False))
+ parser.add_argument(
+ '-s', '--sort',
+ nargs='?',
+ action=AppendSort,
+ help="Sort by this field.")
+ parser.add_argument(
+ '-S', '--reverse-sort',
+ nargs='?',
+ action=AppendSort,
+ help="Sort by this field, but backwards.")
+ parser.add_argument(
+ '-Y', '--summary',
+ action='store_true',
+ help="Only show the total.")
+ parser.add_argument(
+ '-F', '--source',
+ dest='sources',
+ action='append',
+ help="Only consider definitions in this file. Defaults to anything "
+ "in the current directory.")
+ parser.add_argument(
+ '--everything',
+ action='store_true',
+ help="Include builtin and libc specific symbols.")
+ parser.add_argument(
+ '--branches',
+ action='store_true',
+ help="Show branches and branch misses.")
+ parser.add_argument(
+ '--caches',
+ action='store_true',
+ help="Show cache accesses and cache misses.")
+ parser.add_argument(
+ '-P', '--propagate',
+ type=lambda x: int(x, 0),
+ help="Depth to propagate samples up the call-stack. 0 propagates up "
+ "to the entry point, 1 does no propagation. Defaults to 0.")
+ parser.add_argument(
+ '-Z', '--depth',
+ nargs='?',
+ type=lambda x: int(x, 0),
+ const=0,
+ help="Depth of function calls to show. 0 shows all calls but may not "
+ "terminate!")
+ parser.add_argument(
+ '-A', '--annotate',
+ action='store_true',
+ help="Show source files annotated with coverage info.")
+ parser.add_argument(
+ '-T', '--threshold',
+ nargs='?',
+ type=lambda x: tuple(float(x) for x in x.split(',')),
+ const=THRESHOLD,
+ help="Show lines with samples above this threshold as a percent of "
+ "all lines. Defaults to %s." % ','.join(str(t) for t in THRESHOLD))
+ parser.add_argument(
+ '-c', '--context',
+ type=lambda x: int(x, 0),
+ default=3,
+ help="Show n additional lines of context. Defaults to 3.")
+ parser.add_argument(
+ '-W', '--width',
+ type=lambda x: int(x, 0),
+ default=80,
+ help="Assume source is styled with this many columns. Defaults to 80.")
+ parser.add_argument(
+ '--color',
+ choices=['never', 'always', 'auto'],
+ default='auto',
+ help="When to use terminal colors. Defaults to 'auto'.")
+ parser.add_argument(
+ '-j', '--jobs',
+ nargs='?',
+ type=lambda x: int(x, 0),
+ const=0,
+ help="Number of processes to use. 0 spawns one process per core.")
+ parser.add_argument(
+ '--perf-path',
+ type=lambda x: x.split(),
+ help="Path to the perf executable, may include flags. "
+ "Defaults to %r." % PERF_PATH)
+ parser.add_argument(
+ '--objdump-path',
+ type=lambda x: x.split(),
+ default=OBJDUMP_PATH,
+ help="Path to the objdump executable, may include flags. "
+ "Defaults to %r." % OBJDUMP_PATH)
+
+ # record flags
+ record_parser = parser.add_argument_group('record options')
+ record_parser.add_argument(
+ 'command',
+ nargs=nargs,
+ help="Command to run.")
+ record_parser.add_argument(
+ '-R', '--record',
+ action='store_true',
+ help="Run a command and aggregate perf measurements.")
+ record_parser.add_argument(
+ '-o', '--output',
+ help="Output file. Uses flock to synchronize. This is stored as a "
+ "zip-file of multiple perf results.")
+ record_parser.add_argument(
+ '--perf-freq',
+ help="perf sampling frequency. This is passed directly to perf. "
+ "Defaults to %r." % PERF_FREQ)
+ record_parser.add_argument(
+ '--perf-period',
+ help="perf sampling period. This is passed directly to perf.")
+ record_parser.add_argument(
+ '--perf-events',
+ help="perf events to record. This is passed directly to perf. "
+ "Defaults to %r." % PERF_EVENTS)
+ record_parser.add_argument(
+ '--perf-path',
+ type=lambda x: x.split(),
+ help="Path to the perf executable, may include flags. "
+ "Defaults to %r." % PERF_PATH)
+
+ # avoid intermixed/REMAINDER conflict, see above
+ if nargs == argparse.REMAINDER:
+ args = parser.parse_args()
+ else:
+ args = parser.parse_intermixed_args()
+
+ # perf_paths/command overlap, so need to do some munging here
+ args.command = args.perf_paths
+ if args.record:
+ if not args.command:
+ print('error: no command specified?')
+ sys.exit(-1)
+ if not args.output:
+ print('error: no output file specified?')
+ sys.exit(-1)
+
+ sys.exit(main(**{k: v
+ for k, v in vars(args).items()
+ if v is not None}))
diff --git a/scripts/perfbd.py b/scripts/perfbd.py
new file mode 100755
index 00000000..bf57f601
--- /dev/null
+++ b/scripts/perfbd.py
@@ -0,0 +1,1276 @@
+#!/usr/bin/env python3
+#
+# Aggregate and report call-stack propagated block-device operations
+# from trace output.
+#
+# Example:
+# ./scripts/bench.py -ttrace
+# ./scripts/perfbd.py trace -j -Flfs.c -Flfs_util.c -Serased -Sproged -Sreaded
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import bisect
+import collections as co
+import csv
+import functools as ft
+import itertools as it
+import math as m
+import multiprocessing as mp
+import os
+import re
+import shlex
+import subprocess as sp
+
+
+OBJDUMP_PATH = ['objdump']
+THRESHOLD = (0.5, 0.85)
+
+
+# integer fields
+class Int(co.namedtuple('Int', 'x')):
+ __slots__ = ()
+ def __new__(cls, x=0):
+ if isinstance(x, Int):
+ return x
+ if isinstance(x, str):
+ try:
+ x = int(x, 0)
+ except ValueError:
+ # also accept +-∞ and +-inf
+ if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
+ x = m.inf
+ elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
+ x = -m.inf
+ else:
+ raise
+ assert isinstance(x, int) or m.isinf(x), x
+ return super().__new__(cls, x)
+
+ def __str__(self):
+ if self.x == m.inf:
+ return '∞'
+ elif self.x == -m.inf:
+ return '-∞'
+ else:
+ return str(self.x)
+
+ def __int__(self):
+ assert not m.isinf(self.x)
+ return self.x
+
+ def __float__(self):
+ return float(self.x)
+
+ none = '%7s' % '-'
+ def table(self):
+ return '%7s' % (self,)
+
+ diff_none = '%7s' % '-'
+ diff_table = table
+
+ def diff_diff(self, other):
+ new = self.x if self else 0
+ old = other.x if other else 0
+ diff = new - old
+ if diff == +m.inf:
+ return '%7s' % '+∞'
+ elif diff == -m.inf:
+ return '%7s' % '-∞'
+ else:
+ return '%+7d' % diff
+
+ def ratio(self, other):
+ new = self.x if self else 0
+ old = other.x if other else 0
+ if m.isinf(new) and m.isinf(old):
+ return 0.0
+ elif m.isinf(new):
+ return +m.inf
+ elif m.isinf(old):
+ return -m.inf
+ elif not old and not new:
+ return 0.0
+ elif not old:
+ return 1.0
+ else:
+ return (new-old) / old
+
+ def __add__(self, other):
+ return self.__class__(self.x + other.x)
+
+ def __sub__(self, other):
+ return self.__class__(self.x - other.x)
+
+ def __mul__(self, other):
+ return self.__class__(self.x * other.x)
+
+# perf results
+class PerfBdResult(co.namedtuple('PerfBdResult', [
+ 'file', 'function', 'line',
+ 'readed', 'proged', 'erased',
+ 'children'])):
+ _by = ['file', 'function', 'line']
+ _fields = ['readed', 'proged', 'erased']
+ _sort = ['erased', 'proged', 'readed']
+ _types = {'readed': Int, 'proged': Int, 'erased': Int}
+
+ __slots__ = ()
+ def __new__(cls, file='', function='', line=0,
+ readed=0, proged=0, erased=0,
+ children=[]):
+ return super().__new__(cls, file, function, int(Int(line)),
+ Int(readed), Int(proged), Int(erased),
+ children)
+
+ def __add__(self, other):
+ return PerfBdResult(self.file, self.function, self.line,
+ self.readed + other.readed,
+ self.proged + other.proged,
+ self.erased + other.erased,
+ self.children + other.children)
+
+
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
+
+def collect_syms_and_lines(obj_path, *,
+ objdump_path=None,
+ **args):
+ symbol_pattern = re.compile(
+ '^(?P<addr>[0-9a-fA-F]+)'
+ '\s+.*'
+ '\s+(?P<size>[0-9a-fA-F]+)'
+ '\s+(?P<name>[^\s]+)\s*$')
+ line_pattern = re.compile(
+ '^\s+(?:'
+ # matches dir/file table
+ '(?P<no>[0-9]+)'
+ '(?:\s+(?P<dir>[0-9]+))?'
+ '\s+.*'
+ '\s+(?P<path>[^\s]+)'
+ # matches line opcodes
+ '|' '\[[^\]]*\]\s+'
+ '(?:'
+ '(?P<op_special>Special)'
+ '|' '(?P<op_copy>Copy)'
+ '|' '(?P<op_end>End of Sequence)'
+ '|' 'File .*?to (?:entry )?(?P<op_file>\d+)'
+ '|' 'Line .*?to (?P<op_line>[0-9]+)'
+ '|' '(?:Address|PC) .*?to (?P<op_addr>[0x0-9a-fA-F]+)'
+ '|' '.' ')*'
+ ')$', re.IGNORECASE)
+
+ # figure out symbol addresses
+ syms = {}
+ sym_at = []
+ cmd = objdump_path + ['-t', obj_path]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
+ for line in proc.stdout:
+ m = symbol_pattern.match(line)
+ if m:
+ name = m.group('name')
+ addr = int(m.group('addr'), 16)
+ size = int(m.group('size'), 16)
+ # ignore zero-sized symbols
+ if not size:
+ continue
+ # note multiple symbols can share a name
+ if name not in syms:
+ syms[name] = set()
+ syms[name].add((addr, size))
+ sym_at.append((addr, name, size))
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ # assume no debug-info on failure
+ pass
+
+ # sort and keep largest/first when duplicates
+ sym_at.sort(key=lambda x: (x[0], -x[2], x[1]))
+ sym_at_ = []
+ for addr, name, size in sym_at:
+ if len(sym_at_) == 0 or sym_at_[-1][0] != addr:
+ sym_at_.append((addr, name, size))
+ sym_at = sym_at_
+
+ # state machine for dwarf line numbers, note that objdump's
+ # decodedline seems to have issues with multiple dir/file
+ # tables, which is why we need this
+ lines = []
+ line_at = []
+ dirs = {}
+ files = {}
+ op_file = 1
+ op_line = 1
+ op_addr = 0
+ cmd = objdump_path + ['--dwarf=rawline', obj_path]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
+ for line in proc.stdout:
+ m = line_pattern.match(line)
+ if m:
+ if m.group('no') and not m.group('dir'):
+ # found a directory entry
+ dirs[int(m.group('no'))] = m.group('path')
+ elif m.group('no'):
+ # found a file entry
+ dir = int(m.group('dir'))
+ if dir in dirs:
+ files[int(m.group('no'))] = os.path.join(
+ dirs[dir],
+ m.group('path'))
+ else:
+ files[int(m.group('no'))] = m.group('path')
+ else:
+ # found a state machine update
+ if m.group('op_file'):
+ op_file = int(m.group('op_file'), 0)
+ if m.group('op_line'):
+ op_line = int(m.group('op_line'), 0)
+ if m.group('op_addr'):
+ op_addr = int(m.group('op_addr'), 0)
+
+ if (m.group('op_special')
+ or m.group('op_copy')
+ or m.group('op_end')):
+ file = os.path.abspath(files.get(op_file, '?'))
+ lines.append((file, op_line, op_addr))
+ line_at.append((op_addr, file, op_line))
+
+ if m.group('op_end'):
+ op_file = 1
+ op_line = 1
+ op_addr = 0
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ # assume no debug-info on failure
+ pass
+
+ # sort and keep first when duplicates
+ lines.sort()
+ lines_ = []
+ for file, line, addr in lines:
+ if len(lines_) == 0 or lines_[-1][0] != file or lines_[-1][1] != line:
+ lines_.append((file, line, addr))
+ lines = lines_
+
+ # sort and keep first when duplicates
+ line_at.sort()
+ line_at_ = []
+ for addr, file, line in line_at:
+ if len(line_at_) == 0 or line_at_[-1][0] != addr:
+ line_at_.append((addr, file, line))
+ line_at = line_at_
+
+ return syms, sym_at, lines, line_at
+
+
+def collect_job(path, start, stop, syms, sym_at, lines, line_at, *,
+ sources=None,
+ everything=False,
+ propagate=0,
+ depth=1,
+ **args):
+ trace_pattern = re.compile(
+ '^(?P<file>[^:]*):(?P<line>[0-9]+):trace:\s*(?P<prefix>[^\s]*?bd_)(?:'
+ '(?P<read>read)\('
+ '\s*(?P<read_ctx>\w+)' '\s*,'
+ '\s*(?P<read_block>\w+)' '\s*,'
+ '\s*(?P<read_off>\w+)' '\s*,'
+ '\s*(?P<read_buffer>\w+)' '\s*,'
+ '\s*(?P<read_size>\w+)' '\s*\)'
+ '|' '(?P<prog>prog)\('
+ '\s*(?P<prog_ctx>\w+)' '\s*,'
+ '\s*(?P<prog_block>\w+)' '\s*,'
+ '\s*(?P<prog_off>\w+)' '\s*,'
+ '\s*(?P<prog_buffer>\w+)' '\s*,'
+ '\s*(?P<prog_size>\w+)' '\s*\)'
+ '|' '(?P<erase>erase)\('
+ '\s*(?P<erase_ctx>\w+)' '\s*,'
+ '\s*(?P<erase_block>\w+)'
+ '\s*\(\s*(?P<erase_size>\w+)\s*\)' '\s*\)' ')\s*$')
+ frame_pattern = re.compile(
+ '^\s+at (?P<addr>\w+)\s*$')
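+
+ # e.g. these match trace lines of roughly this shape (illustrative):
+ #
+ # lfs.c:1234:trace: lfs_bd_read(0x55555555ae90, 0x4, 0x10, 0x7fffd0e20, 0x10)
+ # lfs.c:5678:trace: lfs_bd_erase(0x55555555ae90, 0x2 (0x1000))
+ #
+ # with backtrace frames following as:
+ #
+ # at 0x55555555f008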
+
+ # parse all of the trace files for read/prog/erase operations
+ last_filtered = False
+ last_file = None
+ last_line = None
+ last_sym = None
+ last_readed = 0
+ last_proged = 0
+ last_erased = 0
+ last_stack = []
+ last_delta = None
+ at_cache = {}
+ results = {}
+
+ def commit():
+ # fallback to just capturing top-level measurements
+ if not last_stack:
+ file = last_file
+ sym = last_sym
+ line = last_line
+
+ # ignore filtered sources
+ if sources is not None:
+ if not any(
+ os.path.abspath(file)
+ == os.path.abspath(s)
+ for s in sources):
+ return
+ else:
+ # default to only cwd
+ if not everything and not os.path.commonpath([
+ os.getcwd(),
+ os.path.abspath(file)]) == os.getcwd():
+ return
+
+ # simplify path
+ if os.path.commonpath([
+ os.getcwd(),
+ os.path.abspath(file)]) == os.getcwd():
+ file = os.path.relpath(file)
+ else:
+ file = os.path.abspath(file)
+
+ results[(file, sym, line)] = (
+ last_readed,
+ last_proged,
+ last_erased,
+ {})
+ else:
+ # tail-recursively propagate measurements
+ for i in range(len(last_stack)):
+ results_ = results
+ for j in reversed(range(i+1)):
+ if i+1-j > depth:
+ break
+
+ # propagate
+ name = last_stack[j]
+ if name in results_:
+ r, p, e, children = results_[name]
+ else:
+ r, p, e, children = 0, 0, 0, {}
+ results_[name] = (
+ r+last_readed,
+ p+last_proged,
+ e+last_erased,
+ children)
+
+ # recurse
+ results_ = results_[name][-1]
+
+ with openio(path) as f:
+ # try to jump to the middle of the file? we need to step out of
+ # utf8-safe mode and then resync with the next newline to avoid
+ # parsing half a line
+ if start is not None and start > 0:
+ fd = f.fileno()
+ os.lseek(fd, start, os.SEEK_SET)
+ while os.read(fd, 1) not in {b'\n', b'\r', b''}:
+ pass
+ f = os.fdopen(fd)
+
+ for line in f:
+ # we have a lot of data, so take a few shortcuts; string search
+ # is much faster than regex, so use regex as late as possible.
+ if not line.startswith('\t'):
+ if last_filtered:
+ commit()
+ last_filtered = False
+
+ # done processing our slice?
+ if stop is not None:
+ if os.lseek(f.fileno(), 0, os.SEEK_CUR) > stop:
+ break
+
+ if 'trace' in line and 'bd' in line:
+ m = trace_pattern.match(line)
+ if m:
+ last_filtered = True
+ last_file = os.path.abspath(m.group('file'))
+ last_line = int(m.group('line'), 0)
+ last_sym = m.group('prefix')
+ last_readed = 0
+ last_proged = 0
+ last_erased = 0
+ last_stack = []
+ last_delta = None
+
+ if m.group('read'):
+ last_sym += m.group('read')
+ last_readed += int(m.group('read_size'))
+ elif m.group('prog'):
+ last_sym += m.group('prog')
+ last_proged += int(m.group('prog_size'))
+ elif m.group('erase'):
+ last_sym += m.group('erase')
+ last_erased += int(m.group('erase_size'))
+
+ elif last_filtered:
+ m = frame_pattern.match(line)
+ if m:
+ addr_ = int(m.group('addr'), 0)
+
+ # before we can do anything with addr, we need to
+ # reverse ASLR, fortunately we know the file+line of
+ # the first stack frame, so we can use that as a point
+ # of reference
+ if last_delta is None:
+ i = bisect.bisect(lines, (last_file, last_line),
+ key=lambda x: (x[0], x[1]))
+ if i > 0:
+ last_delta = lines[i-1][2] - addr_
+ else:
+ # can't reverse ASLR, give up on backtrace
+ commit()
+ last_filtered = False
+ continue
+
+ addr = addr_ + last_delta
+
+ # cached?
+ if addr in at_cache:
+ cached = at_cache[addr]
+ if cached is None:
+ # cache says to skip
+ continue
+ file, sym, line = cached
+ else:
+ # find sym
+ i = bisect.bisect(sym_at, addr, key=lambda x: x[0])
+ # check that we're actually in the sym's size
+ if i > 0 and addr < sym_at[i-1][0] + sym_at[i-1][2]:
+ _, sym, _ = sym_at[i-1]
+ else:
+ sym = hex(addr)
+
+ # filter out internal/unknown functions
+ if not everything and (
+ sym.startswith('__')
+ or sym.startswith('0')
+ or sym.startswith('-')
+ or sym == '_start'):
+ at_cache[addr] = None
+ continue
+
+ # find file+line
+ i = bisect.bisect(line_at, addr, key=lambda x: x[0])
+ if i > 0:
+ _, file, line = line_at[i-1]
+ elif len(last_stack) == 0:
+ file, line = last_file, last_line
+ else:
+ file, line = re.sub('(\.o)?$', '.c', obj_path, 1), 0
+
+ # ignore filtered sources
+ if sources is not None:
+ if not any(
+ os.path.abspath(file)
+ == os.path.abspath(s)
+ for s in sources):
+ at_cache[addr] = None
+ continue
+ else:
+ # default to only cwd
+ if not everything and not os.path.commonpath([
+ os.getcwd(),
+ os.path.abspath(file)]) == os.getcwd():
+ at_cache[addr] = None
+ continue
+
+ # simplify path
+ if os.path.commonpath([
+ os.getcwd(),
+ os.path.abspath(file)]) == os.getcwd():
+ file = os.path.relpath(file)
+ else:
+ file = os.path.abspath(file)
+
+ at_cache[addr] = file, sym, line
+
+ last_stack.append((file, sym, line))
+
+ # stop propagating?
+ if propagate and len(last_stack) >= propagate:
+ commit()
+ last_filtered = False
+ if last_filtered:
+ commit()
+
+ # rearrange results into result type
+ def to_results(results):
+ results_ = []
+ for name, (r, p, e, children) in results.items():
+ results_.append(PerfBdResult(*name,
+ r, p, e,
+ children=to_results(children)))
+ return results_
+
+ return to_results(results)
+
+def starapply(args):
+ f, args, kwargs = args
+ return f(*args, **kwargs)
+
+def collect(obj_path, trace_paths, *,
+ jobs=None,
+ **args):
+ # automatic job detection?
+ if jobs == 0:
+ jobs = len(os.sched_getaffinity(0))
+
+ # find sym/line info to reverse ASLR
+ syms, sym_at, lines, line_at = collect_syms_and_lines(obj_path, **args)
+
+ if jobs is not None:
+ # try to split up files so that even single files can be processed
+ # in parallel
+ #
+        # this looks naive, since we're splitting up text files by bytes, but
+        # collect_job resyncs to line boundaries, so no backtrace is split
+        # across jobs
+ trace_ranges = []
+ for path in trace_paths:
+ if path == '-':
+ trace_ranges.append([(None, None)])
+ continue
+
+ size = os.path.getsize(path)
+ if size == 0:
+ trace_ranges.append([(None, None)])
+ continue
+
+            perjob = m.ceil(size / jobs)
+ trace_ranges.append([(i, i+perjob) for i in range(0, size, perjob)])
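+            # e.g. a 1000-byte file with jobs=4 gives perjob=250 and
+            # ranges [(0,250), (250,500), (500,750), (750,1000)]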
+
+ results = []
+ with mp.Pool(jobs) as p:
+ for results_ in p.imap_unordered(
+ starapply,
+ ((collect_job, (path, start, stop,
+ syms, sym_at, lines, line_at),
+ args)
+ for path, ranges in zip(trace_paths, trace_ranges)
+ for start, stop in ranges)):
+ results.extend(results_)
+
+ else:
+ results = []
+ for path in trace_paths:
+ results.extend(collect_job(path, None, None,
+ syms, sym_at, lines, line_at,
+ **args))
+
+ return results
+
+
+def fold(Result, results, *,
+ by=None,
+ defines=None,
+ **_):
+ if by is None:
+ by = Result._by
+
+ for k in it.chain(by or [], (k for k, _ in defines or [])):
+ if k not in Result._by and k not in Result._fields:
+ print("error: could not find field %r?" % k)
+ sys.exit(-1)
+
+ # filter by matching defines
+ if defines is not None:
+ results_ = []
+ for r in results:
+ if all(getattr(r, k) in vs for k, vs in defines):
+ results_.append(r)
+ results = results_
+
+ # organize results into conflicts
+ folding = co.OrderedDict()
+ for r in results:
+ name = tuple(getattr(r, k) for k in by)
+ if name not in folding:
+ folding[name] = []
+ folding[name].append(r)
+
+ # merge conflicts
+ folded = []
+ for name, rs in folding.items():
+ folded.append(sum(rs[1:], start=rs[0]))
+
+ # fold recursively
+ folded_ = []
+ for r in folded:
+ folded_.append(r._replace(children=fold(
+ Result, r.children,
+ by=by,
+ defines=defines)))
+ folded = folded_
+
+ return folded
+
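+# for example, fold(PerfBdResult, results, by=['function']) merges all
+# entries that share a function name, summing their readed/proged/erased
+# fields and recursively folding their children
+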
+def table(Result, results, diff_results=None, *,
+ by=None,
+ fields=None,
+ sort=None,
+ summary=False,
+ all=False,
+ percent=False,
+ depth=1,
+ **_):
+    all_, all = all, __builtins__.all  # our --all flag shadows the builtin
+
+ if by is None:
+ by = Result._by
+ if fields is None:
+ fields = Result._fields
+ types = Result._types
+
+ # fold again
+ results = fold(Result, results, by=by)
+ if diff_results is not None:
+ diff_results = fold(Result, diff_results, by=by)
+
+ # organize by name
+ table = {
+ ','.join(str(getattr(r, k) or '') for k in by): r
+ for r in results}
+ diff_table = {
+ ','.join(str(getattr(r, k) or '') for k in by): r
+ for r in diff_results or []}
+ names = list(table.keys() | diff_table.keys())
+
+ # sort again, now with diff info, note that python's sort is stable
+ names.sort()
+ if diff_results is not None:
+ names.sort(key=lambda n: tuple(
+ types[k].ratio(
+ getattr(table.get(n), k, None),
+ getattr(diff_table.get(n), k, None))
+ for k in fields),
+ reverse=True)
+ if sort:
+ for k, reverse in reversed(sort):
+ names.sort(
+ key=lambda n: tuple(
+ (getattr(table[n], k),)
+ if getattr(table.get(n), k, None) is not None else ()
+ for k in ([k] if k else [
+ k for k in Result._sort if k in fields])),
+ reverse=reverse ^ (not k or k in Result._fields))
+
+
+ # build up our lines
+ lines = []
+
+ # header
+ header = []
+ header.append('%s%s' % (
+ ','.join(by),
+ ' (%d added, %d removed)' % (
+ sum(1 for n in table if n not in diff_table),
+ sum(1 for n in diff_table if n not in table))
+ if diff_results is not None and not percent else '')
+ if not summary else '')
+ if diff_results is None:
+ for k in fields:
+ header.append(k)
+ elif percent:
+ for k in fields:
+ header.append(k)
+ else:
+ for k in fields:
+ header.append('o'+k)
+ for k in fields:
+ header.append('n'+k)
+ for k in fields:
+ header.append('d'+k)
+ header.append('')
+ lines.append(header)
+
+ def table_entry(name, r, diff_r=None, ratios=[]):
+ entry = []
+ entry.append(name)
+ if diff_results is None:
+ for k in fields:
+ entry.append(getattr(r, k).table()
+ if getattr(r, k, None) is not None
+ else types[k].none)
+ elif percent:
+ for k in fields:
+ entry.append(getattr(r, k).diff_table()
+ if getattr(r, k, None) is not None
+ else types[k].diff_none)
+ else:
+ for k in fields:
+ entry.append(getattr(diff_r, k).diff_table()
+ if getattr(diff_r, k, None) is not None
+ else types[k].diff_none)
+ for k in fields:
+ entry.append(getattr(r, k).diff_table()
+ if getattr(r, k, None) is not None
+ else types[k].diff_none)
+ for k in fields:
+ entry.append(types[k].diff_diff(
+ getattr(r, k, None),
+ getattr(diff_r, k, None)))
+ if diff_results is None:
+ entry.append('')
+ elif percent:
+ entry.append(' (%s)' % ', '.join(
+ '+∞%' if t == +m.inf
+ else '-∞%' if t == -m.inf
+ else '%+.1f%%' % (100*t)
+ for t in ratios))
+ else:
+ entry.append(' (%s)' % ', '.join(
+ '+∞%' if t == +m.inf
+ else '-∞%' if t == -m.inf
+ else '%+.1f%%' % (100*t)
+ for t in ratios
+ if t)
+ if any(ratios) else '')
+ return entry
+
+ # entries
+ if not summary:
+ for name in names:
+ r = table.get(name)
+ if diff_results is None:
+ diff_r = None
+ ratios = None
+ else:
+ diff_r = diff_table.get(name)
+ ratios = [
+ types[k].ratio(
+ getattr(r, k, None),
+ getattr(diff_r, k, None))
+ for k in fields]
+ if not all_ and not any(ratios):
+ continue
+ lines.append(table_entry(name, r, diff_r, ratios))
+
+ # total
+ r = next(iter(fold(Result, results, by=[])), None)
+ if diff_results is None:
+ diff_r = None
+ ratios = None
+ else:
+ diff_r = next(iter(fold(Result, diff_results, by=[])), None)
+ ratios = [
+ types[k].ratio(
+ getattr(r, k, None),
+ getattr(diff_r, k, None))
+ for k in fields]
+ lines.append(table_entry('TOTAL', r, diff_r, ratios))
+
+ # find the best widths, note that column 0 contains the names and column -1
+ # the ratios, so those are handled a bit differently
+ widths = [
+ ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
+ for w, i in zip(
+ it.chain([23], it.repeat(7)),
+ range(len(lines[0])-1))]
+
+ # adjust the name width based on the expected call depth, though
+ # note this doesn't really work with unbounded recursion
+ if not summary and not m.isinf(depth):
+ widths[0] += 4*(depth-1)
+
+ # print the tree recursively
+ print('%-*s %s%s' % (
+ widths[0], lines[0][0],
+ ' '.join('%*s' % (w, x)
+ for w, x in zip(widths[1:], lines[0][1:-1])),
+ lines[0][-1]))
+
+ if not summary:
+ def recurse(results_, depth_, prefixes=('', '', '', '')):
+ # rebuild our tables at each layer
+ table_ = {
+ ','.join(str(getattr(r, k) or '') for k in by): r
+ for r in results_}
+ names_ = list(table_.keys())
+
+ # sort again at each layer, keep in mind the numbers are
+ # changing as we descend
+ names_.sort()
+ if sort:
+ for k, reverse in reversed(sort):
+ names_.sort(
+ key=lambda n: tuple(
+ (getattr(table_[n], k),)
+ if getattr(table_.get(n), k, None) is not None
+ else ()
+ for k in ([k] if k else [
+ k for k in Result._sort if k in fields])),
+ reverse=reverse ^ (not k or k in Result._fields))
+
+ for i, name in enumerate(names_):
+ r = table_[name]
+ is_last = (i == len(names_)-1)
+
+ print('%s%-*s %s' % (
+ prefixes[0+is_last],
+ widths[0] - (
+ len(prefixes[0+is_last])
+ if not m.isinf(depth) else 0),
+ name,
+ ' '.join('%*s' % (w, x)
+ for w, x in zip(
+ widths[1:],
+ table_entry(name, r)[1:]))))
+
+ # recurse?
+ if depth_ > 1:
+ recurse(
+ r.children,
+ depth_-1,
+ (prefixes[2+is_last] + "|-> ",
+ prefixes[2+is_last] + "'-> ",
+ prefixes[2+is_last] + "| ",
+ prefixes[2+is_last] + " "))
+
+ # we have enough going on with diffing to make the top layer
+ # a special case
+ for name, line in zip(names, lines[1:-1]):
+ print('%-*s %s%s' % (
+ widths[0], line[0],
+ ' '.join('%*s' % (w, x)
+ for w, x in zip(widths[1:], line[1:-1])),
+ line[-1]))
+
+ if name in table and depth > 1:
+ recurse(
+ table[name].children,
+ depth-1,
+ ("|-> ",
+ "'-> ",
+ "| ",
+ " "))
+
+ print('%-*s %s%s' % (
+ widths[0], lines[-1][0],
+ ' '.join('%*s' % (w, x)
+ for w, x in zip(widths[1:], lines[-1][1:-1])),
+ lines[-1][-1]))
+
+
+def annotate(Result, results, *,
+ annotate=None,
+ threshold=None,
+ read_threshold=None,
+ prog_threshold=None,
+ erase_threshold=None,
+ **args):
+ # figure out the thresholds
+ if threshold is None:
+ threshold = THRESHOLD
+ elif len(threshold) == 1:
+ threshold = threshold[0], threshold[0]
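+    # e.g. -T0.5 gives thresholds (0.5, 0.5), -T0.5,0.9 gives (0.5, 0.9)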
+
+ if read_threshold is None:
+ read_t0, read_t1 = threshold
+ elif len(read_threshold) == 1:
+ read_t0, read_t1 = read_threshold[0], read_threshold[0]
+ else:
+ read_t0, read_t1 = read_threshold
+ read_t0, read_t1 = min(read_t0, read_t1), max(read_t0, read_t1)
+
+ if prog_threshold is None:
+ prog_t0, prog_t1 = threshold
+ elif len(prog_threshold) == 1:
+ prog_t0, prog_t1 = prog_threshold[0], prog_threshold[0]
+ else:
+ prog_t0, prog_t1 = prog_threshold
+ prog_t0, prog_t1 = min(prog_t0, prog_t1), max(prog_t0, prog_t1)
+
+ if erase_threshold is None:
+ erase_t0, erase_t1 = threshold
+ elif len(erase_threshold) == 1:
+ erase_t0, erase_t1 = erase_threshold[0], erase_threshold[0]
+ else:
+ erase_t0, erase_t1 = erase_threshold
+ erase_t0, erase_t1 = min(erase_t0, erase_t1), max(erase_t0, erase_t1)
+
+ # find maxs
+ max_readed = max(it.chain((float(r.readed) for r in results), [1]))
+ max_proged = max(it.chain((float(r.proged) for r in results), [1]))
+ max_erased = max(it.chain((float(r.erased) for r in results), [1]))
+
+ for path in co.OrderedDict.fromkeys(r.file for r in results).keys():
+ # flatten to line info
+ results = fold(Result, results, by=['file', 'line'])
+ table = {r.line: r for r in results if r.file == path}
+
+ # calculate spans to show
+ if not annotate:
+ spans = []
+ last = None
+ func = None
+ for line, r in sorted(table.items()):
+ if (float(r.readed) / max_readed >= read_t0
+ or float(r.proged) / max_proged >= prog_t0
+ or float(r.erased) / max_erased >= erase_t0):
+ if last is not None and line - last.stop <= args['context']:
+ last = range(
+ last.start,
+ line+1+args['context'])
+ else:
+ if last is not None:
+ spans.append((last, func))
+ last = range(
+ line-args['context'],
+ line+1+args['context'])
+ func = r.function
+ if last is not None:
+ spans.append((last, func))
+
+ with open(path) as f:
+ skipped = False
+ for i, line in enumerate(f):
+ # skip lines not in spans?
+ if not annotate and not any(i+1 in s for s, _ in spans):
+ skipped = True
+ continue
+
+ if skipped:
+ skipped = False
+ print('%s@@ %s:%d: %s @@%s' % (
+ '\x1b[36m' if args['color'] else '',
+ path,
+ i+1,
+                        next((f for s, f in spans if i+1 in s), '?'),
+ '\x1b[m' if args['color'] else ''))
+
+ # build line
+ if line.endswith('\n'):
+ line = line[:-1]
+
+ if i+1 in table:
+ r = table[i+1]
+ line = '%-*s // %s readed, %s proged, %s erased' % (
+ args['width'],
+ line,
+ r.readed,
+ r.proged,
+ r.erased)
+
+ if args['color']:
+ if (float(r.readed) / max_readed >= read_t1
+ or float(r.proged) / max_proged >= prog_t1
+ or float(r.erased) / max_erased >= erase_t1):
+ line = '\x1b[1;31m%s\x1b[m' % line
+ elif (float(r.readed) / max_readed >= read_t0
+ or float(r.proged) / max_proged >= prog_t0
+ or float(r.erased) / max_erased >= erase_t0):
+ line = '\x1b[35m%s\x1b[m' % line
+
+ print(line)
+
+
+def report(obj_path='', trace_paths=[], *,
+ by=None,
+ fields=None,
+ defines=None,
+ sort=None,
+ **args):
+ # figure out what color should be
+ if args.get('color') == 'auto':
+ args['color'] = sys.stdout.isatty()
+ elif args.get('color') == 'always':
+ args['color'] = True
+ else:
+ args['color'] = False
+
+ # depth of 0 == m.inf
+ if args.get('depth') == 0:
+ args['depth'] = m.inf
+
+ # find sizes
+ if not args.get('use', None):
+ results = collect(obj_path, trace_paths, **args)
+ else:
+ results = []
+ with openio(args['use']) as f:
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ if not any('perfbd_'+k in r and r['perfbd_'+k].strip()
+ for k in PerfBdResult._fields):
+ continue
+ try:
+ results.append(PerfBdResult(
+ **{k: r[k] for k in PerfBdResult._by
+ if k in r and r[k].strip()},
+ **{k: r['perfbd_'+k] for k in PerfBdResult._fields
+ if 'perfbd_'+k in r and r['perfbd_'+k].strip()}))
+ except TypeError:
+ pass
+
+ # fold
+ results = fold(PerfBdResult, results, by=by, defines=defines)
+
+ # sort, note that python's sort is stable
+ results.sort()
+ if sort:
+ for k, reverse in reversed(sort):
+ results.sort(
+ key=lambda r: tuple(
+ (getattr(r, k),) if getattr(r, k) is not None else ()
+ for k in ([k] if k else PerfBdResult._sort)),
+ reverse=reverse ^ (not k or k in PerfBdResult._fields))
+
+ # write results to CSV
+ if args.get('output'):
+ with openio(args['output'], 'w') as f:
+ writer = csv.DictWriter(f,
+ (by if by is not None else PerfBdResult._by)
+ + ['perfbd_'+k for k in (
+ fields if fields is not None else PerfBdResult._fields)])
+ writer.writeheader()
+ for r in results:
+ writer.writerow(
+ {k: getattr(r, k) for k in (
+ by if by is not None else PerfBdResult._by)}
+ | {'perfbd_'+k: getattr(r, k) for k in (
+ fields if fields is not None else PerfBdResult._fields)})
+
+ # find previous results?
+ if args.get('diff'):
+ diff_results = []
+ try:
+ with openio(args['diff']) as f:
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ if not any('perfbd_'+k in r and r['perfbd_'+k].strip()
+ for k in PerfBdResult._fields):
+ continue
+ try:
+ diff_results.append(PerfBdResult(
+ **{k: r[k] for k in PerfBdResult._by
+ if k in r and r[k].strip()},
+ **{k: r['perfbd_'+k] for k in PerfBdResult._fields
+ if 'perfbd_'+k in r
+ and r['perfbd_'+k].strip()}))
+ except TypeError:
+ pass
+ except FileNotFoundError:
+ pass
+
+ # fold
+ diff_results = fold(PerfBdResult, diff_results, by=by, defines=defines)
+
+ # print table
+ if not args.get('quiet'):
+ if (args.get('annotate')
+ or args.get('threshold')
+ or args.get('read_threshold')
+ or args.get('prog_threshold')
+ or args.get('erase_threshold')):
+ # annotate sources
+ annotate(PerfBdResult, results, **args)
+ else:
+ # print table
+ table(PerfBdResult, results,
+ diff_results if args.get('diff') else None,
+ by=by if by is not None else ['function'],
+ fields=fields,
+ sort=sort,
+ **args)
+
+
+def main(**args):
+ if args.get('record'):
+ return record(**args)
+ else:
+ return report(**args)
+
+
+if __name__ == "__main__":
+ import argparse
+ import sys
+ parser = argparse.ArgumentParser(
+ description="Aggregate and report call-stack propagated "
+ "block-device operations from trace output.",
+ allow_abbrev=False)
+ parser.add_argument(
+ 'obj_path',
+ nargs='?',
+ help="Input executable for mapping addresses to symbols.")
+ parser.add_argument(
+ 'trace_paths',
+ nargs='*',
+ help="Input *.trace files.")
+ parser.add_argument(
+ '-v', '--verbose',
+ action='store_true',
+ help="Output commands that run behind the scenes.")
+ parser.add_argument(
+ '-q', '--quiet',
+ action='store_true',
+ help="Don't show anything, useful with -o.")
+ parser.add_argument(
+ '-o', '--output',
+ help="Specify CSV file to store results.")
+ parser.add_argument(
+ '-u', '--use',
+ help="Don't parse anything, use this CSV file.")
+ parser.add_argument(
+ '-d', '--diff',
+ help="Specify CSV file to diff against.")
+ parser.add_argument(
+ '-a', '--all',
+ action='store_true',
+ help="Show all, not just the ones that changed.")
+ parser.add_argument(
+ '-p', '--percent',
+ action='store_true',
+ help="Only show percentage change, not a full diff.")
+ parser.add_argument(
+ '-b', '--by',
+ action='append',
+ choices=PerfBdResult._by,
+ help="Group by this field.")
+ parser.add_argument(
+ '-f', '--field',
+ dest='fields',
+ action='append',
+ choices=PerfBdResult._fields,
+ help="Show this field.")
+ parser.add_argument(
+ '-D', '--define',
+ dest='defines',
+ action='append',
+ type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
+ help="Only include results where this field is this value.")
+ class AppendSort(argparse.Action):
+ def __call__(self, parser, namespace, value, option):
+ if namespace.sort is None:
+ namespace.sort = []
+ namespace.sort.append((value, True if option == '-S' else False))
+ parser.add_argument(
+ '-s', '--sort',
+ nargs='?',
+ action=AppendSort,
+ help="Sort by this field.")
+ parser.add_argument(
+ '-S', '--reverse-sort',
+ nargs='?',
+ action=AppendSort,
+ help="Sort by this field, but backwards.")
+ parser.add_argument(
+ '-Y', '--summary',
+ action='store_true',
+ help="Only show the total.")
+ parser.add_argument(
+ '-F', '--source',
+ dest='sources',
+ action='append',
+ help="Only consider definitions in this file. Defaults to anything "
+ "in the current directory.")
+ parser.add_argument(
+ '--everything',
+ action='store_true',
+ help="Include builtin and libc specific symbols.")
+ parser.add_argument(
+ '-P', '--propagate',
+ type=lambda x: int(x, 0),
+ help="Depth to propagate samples up the call-stack. 0 propagates up "
+ "to the entry point, 1 does no propagation. Defaults to 0.")
+ parser.add_argument(
+ '-Z', '--depth',
+ nargs='?',
+ type=lambda x: int(x, 0),
+ const=0,
+ help="Depth of function calls to show. 0 shows all calls but may not "
+ "terminate!")
+ parser.add_argument(
+ '-A', '--annotate',
+ action='store_true',
+ help="Show source files annotated with coverage info.")
+ parser.add_argument(
+ '-T', '--threshold',
+ nargs='?',
+ type=lambda x: tuple(float(x) for x in x.split(',')),
+ const=THRESHOLD,
+ help="Show lines with any ops above this threshold as a percent of "
+ "all lines. Defaults to %s." % ','.join(str(t) for t in THRESHOLD))
+ parser.add_argument(
+ '--read-threshold',
+ nargs='?',
+ type=lambda x: tuple(float(x) for x in x.split(',')),
+ const=THRESHOLD,
+ help="Show lines with reads above this threshold as a percent of "
+ "all lines. Defaults to %s." % ','.join(str(t) for t in THRESHOLD))
+ parser.add_argument(
+ '--prog-threshold',
+ nargs='?',
+ type=lambda x: tuple(float(x) for x in x.split(',')),
+ const=THRESHOLD,
+ help="Show lines with progs above this threshold as a percent of "
+ "all lines. Defaults to %s." % ','.join(str(t) for t in THRESHOLD))
+ parser.add_argument(
+ '--erase-threshold',
+ nargs='?',
+ type=lambda x: tuple(float(x) for x in x.split(',')),
+ const=THRESHOLD,
+ help="Show lines with erases above this threshold as a percent of "
+ "all lines. Defaults to %s." % ','.join(str(t) for t in THRESHOLD))
+ parser.add_argument(
+ '-c', '--context',
+ type=lambda x: int(x, 0),
+ default=3,
+ help="Show n additional lines of context. Defaults to 3.")
+ parser.add_argument(
+ '-W', '--width',
+ type=lambda x: int(x, 0),
+ default=80,
+ help="Assume source is styled with this many columns. Defaults to 80.")
+ parser.add_argument(
+ '--color',
+ choices=['never', 'always', 'auto'],
+ default='auto',
+ help="When to use terminal colors. Defaults to 'auto'.")
+ parser.add_argument(
+ '-j', '--jobs',
+ nargs='?',
+ type=lambda x: int(x, 0),
+ const=0,
+ help="Number of processes to use. 0 spawns one process per core.")
+ parser.add_argument(
+ '--objdump-path',
+ type=lambda x: x.split(),
+ default=OBJDUMP_PATH,
+ help="Path to the objdump executable, may include flags. "
+ "Defaults to %r." % OBJDUMP_PATH)
+ sys.exit(main(**{k: v
+ for k, v in vars(parser.parse_intermixed_args()).items()
+ if v is not None}))
diff --git a/scripts/plot.py b/scripts/plot.py
new file mode 100755
index 00000000..2f980f4f
--- /dev/null
+++ b/scripts/plot.py
@@ -0,0 +1,1592 @@
+#!/usr/bin/env python3
+#
+# Plot CSV files in terminal.
+#
+# Example:
+# ./scripts/plot.py bench.csv -xSIZE -ybench_read -W80 -H17
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import bisect
+import codecs
+import collections as co
+import csv
+import io
+import itertools as it
+import math as m
+import os
+import shlex
+import shutil
+import sys
+import time
+
+try:
+ import inotify_simple
+except ModuleNotFoundError:
+ inotify_simple = None
+
+
+COLORS = [
+ '1;34', # bold blue
+ '1;31', # bold red
+ '1;32', # bold green
+ '1;35', # bold purple
+ '1;33', # bold yellow
+ '1;36', # bold cyan
+ '34', # blue
+ '31', # red
+ '32', # green
+ '35', # purple
+ '33', # yellow
+ '36', # cyan
+]
+
+CHARS_DOTS = " .':"
+CHARS_BRAILLE = (
+ '⠀⢀⡀⣀⠠⢠⡠⣠⠄⢄⡄⣄⠤⢤⡤⣤' '⠐⢐⡐⣐⠰⢰⡰⣰⠔⢔⡔⣔⠴⢴⡴⣴'
+ '⠂⢂⡂⣂⠢⢢⡢⣢⠆⢆⡆⣆⠦⢦⡦⣦' '⠒⢒⡒⣒⠲⢲⡲⣲⠖⢖⡖⣖⠶⢶⡶⣶'
+ '⠈⢈⡈⣈⠨⢨⡨⣨⠌⢌⡌⣌⠬⢬⡬⣬' '⠘⢘⡘⣘⠸⢸⡸⣸⠜⢜⡜⣜⠼⢼⡼⣼'
+ '⠊⢊⡊⣊⠪⢪⡪⣪⠎⢎⡎⣎⠮⢮⡮⣮' '⠚⢚⡚⣚⠺⢺⡺⣺⠞⢞⡞⣞⠾⢾⡾⣾'
+ '⠁⢁⡁⣁⠡⢡⡡⣡⠅⢅⡅⣅⠥⢥⡥⣥' '⠑⢑⡑⣑⠱⢱⡱⣱⠕⢕⡕⣕⠵⢵⡵⣵'
+ '⠃⢃⡃⣃⠣⢣⡣⣣⠇⢇⡇⣇⠧⢧⡧⣧' '⠓⢓⡓⣓⠳⢳⡳⣳⠗⢗⡗⣗⠷⢷⡷⣷'
+ '⠉⢉⡉⣉⠩⢩⡩⣩⠍⢍⡍⣍⠭⢭⡭⣭' '⠙⢙⡙⣙⠹⢹⡹⣹⠝⢝⡝⣝⠽⢽⡽⣽'
+ '⠋⢋⡋⣋⠫⢫⡫⣫⠏⢏⡏⣏⠯⢯⡯⣯' '⠛⢛⡛⣛⠻⢻⡻⣻⠟⢟⡟⣟⠿⢿⡿⣿')
+CHARS_POINTS_AND_LINES = 'o'
+
+SI_PREFIXES = {
+ 18: 'E',
+ 15: 'P',
+ 12: 'T',
+ 9: 'G',
+ 6: 'M',
+ 3: 'K',
+ 0: '',
+ -3: 'm',
+ -6: 'u',
+ -9: 'n',
+ -12: 'p',
+ -15: 'f',
+ -18: 'a',
+}
+
+SI2_PREFIXES = {
+ 60: 'Ei',
+ 50: 'Pi',
+ 40: 'Ti',
+ 30: 'Gi',
+ 20: 'Mi',
+ 10: 'Ki',
+ 0: '',
+ -10: 'mi',
+ -20: 'ui',
+ -30: 'ni',
+ -40: 'pi',
+ -50: 'fi',
+ -60: 'ai',
+}
+
+
+# format a number to a strict character width using SI prefixes
+def si(x, w=4):
+ if x == 0:
+ return '0'
+ # figure out prefix and scale
+ #
+ # note we adjust this so that 100K = .1M, which has more info
+ # per character
+ p = 3*int(m.log(abs(x)*10, 10**3))
+ p = min(18, max(-18, p))
+ # format with enough digits
+ s = '%.*f' % (w, abs(x) / (10.0**p))
+ s = s.lstrip('0')
+ # truncate but only digits that follow the dot
+ if '.' in s:
+ s = s[:max(s.find('.'), w-(2 if x < 0 else 1))]
+ s = s.rstrip('0')
+ s = s.rstrip('.')
+ return '%s%s%s' % ('-' if x < 0 else '', s, SI_PREFIXES[p])
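+# for example, si(1500) gives '1.5K' and si(123456) gives '.12M'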
+
+def si2(x, w=5):
+ if x == 0:
+ return '0'
+ # figure out prefix and scale
+ #
+ # note we adjust this so that 128Ki = .1Mi, which has more info
+ # per character
+ p = 10*int(m.log(abs(x)*10, 2**10))
+ p = min(30, max(-30, p))
+ # format with enough digits
+ s = '%.*f' % (w, abs(x) / (2.0**p))
+ s = s.lstrip('0')
+ # truncate but only digits that follow the dot
+ if '.' in s:
+ s = s[:max(s.find('.'), w-(3 if x < 0 else 2))]
+ s = s.rstrip('0')
+ s = s.rstrip('.')
+ return '%s%s%s' % ('-' if x < 0 else '', s, SI2_PREFIXES[p])
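+# for example, si2(1536) gives '1.5Ki' and si2(131072) gives '.12Mi'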
+
+# parse escape strings
+def escape(s):
+ return codecs.escape_decode(s.encode('utf8'))[0].decode('utf8')
+
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
+
+def inotifywait(paths):
+ # wait for interesting events
+ inotify = inotify_simple.INotify()
+ flags = (inotify_simple.flags.ATTRIB
+ | inotify_simple.flags.CREATE
+ | inotify_simple.flags.DELETE
+ | inotify_simple.flags.DELETE_SELF
+ | inotify_simple.flags.MODIFY
+ | inotify_simple.flags.MOVED_FROM
+ | inotify_simple.flags.MOVED_TO
+ | inotify_simple.flags.MOVE_SELF)
+
+ # recurse into directories
+ for path in paths:
+ if os.path.isdir(path):
+ for dir, _, files in os.walk(path):
+ inotify.add_watch(dir, flags)
+ for f in files:
+ inotify.add_watch(os.path.join(dir, f), flags)
+ else:
+ inotify.add_watch(path, flags)
+
+ # wait for event
+ inotify.read()
+
+class LinesIO:
+ def __init__(self, maxlen=None):
+ self.maxlen = maxlen
+ self.lines = co.deque(maxlen=maxlen)
+ self.tail = io.StringIO()
+
+ # trigger automatic sizing
+ if maxlen == 0:
+ self.resize(0)
+
+ def write(self, s):
+ # note using split here ensures the trailing string has no newline
+ lines = s.split('\n')
+
+ if len(lines) > 1 and self.tail.getvalue():
+ self.tail.write(lines[0])
+ lines[0] = self.tail.getvalue()
+ self.tail = io.StringIO()
+
+ self.lines.extend(lines[:-1])
+
+ if lines[-1]:
+ self.tail.write(lines[-1])
+
+ def resize(self, maxlen):
+ self.maxlen = maxlen
+ if maxlen == 0:
+ maxlen = shutil.get_terminal_size((80, 5))[1]
+ if maxlen != self.lines.maxlen:
+ self.lines = co.deque(self.lines, maxlen=maxlen)
+
+ canvas_lines = 1
+ def draw(self):
+ # did terminal size change?
+ if self.maxlen == 0:
+ self.resize(0)
+
+ # first thing first, give ourself a canvas
+ while LinesIO.canvas_lines < len(self.lines):
+ sys.stdout.write('\n')
+ LinesIO.canvas_lines += 1
+
+ # clear the bottom of the canvas if we shrink
+ shrink = LinesIO.canvas_lines - len(self.lines)
+ if shrink > 0:
+ for i in range(shrink):
+ sys.stdout.write('\r')
+ if shrink-1-i > 0:
+ sys.stdout.write('\x1b[%dA' % (shrink-1-i))
+ sys.stdout.write('\x1b[K')
+ if shrink-1-i > 0:
+ sys.stdout.write('\x1b[%dB' % (shrink-1-i))
+ sys.stdout.write('\x1b[%dA' % shrink)
+ LinesIO.canvas_lines = len(self.lines)
+
+ for i, line in enumerate(self.lines):
+ # move cursor, clear line, disable/reenable line wrapping
+ sys.stdout.write('\r')
+ if len(self.lines)-1-i > 0:
+ sys.stdout.write('\x1b[%dA' % (len(self.lines)-1-i))
+ sys.stdout.write('\x1b[K')
+ sys.stdout.write('\x1b[?7l')
+ sys.stdout.write(line)
+ sys.stdout.write('\x1b[?7h')
+ if len(self.lines)-1-i > 0:
+ sys.stdout.write('\x1b[%dB' % (len(self.lines)-1-i))
+ sys.stdout.flush()
+
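+# a minimal LinesIO usage sketch, assuming an ANSI terminal; draw()
+# repaints in place, so repeated calls animate without scrolling:
+#
+#   ring = LinesIO(maxlen=0)  # 0 autosizes to the terminal height
+#   ring.write('hello\nworld\n')
+#   ring.draw()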
+
+# parse different data representations
+def dat(x):
+ # allow the first part of an a/b fraction
+ if '/' in x:
+ x, _ = x.split('/', 1)
+
+ # first try as int
+ try:
+ return int(x, 0)
+ except ValueError:
+ pass
+
+    # then try as float
+    try:
+        x = float(x)
+        # just don't allow infinity or nan
+        if m.isinf(x) or m.isnan(x):
+            raise ValueError("invalid dat %r" % x)
+        return x
+    except ValueError:
+        pass
+
+ # else give up
+ raise ValueError("invalid dat %r" % x)
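+
+# for example, dat('0x10') gives 16, dat('1.5') gives 1.5, and
+# dat('3/4') gives 3 (the first part of the fraction)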
+
+
+# a hack log that preserves sign, with a linear region between -1 and 1
+def symlog(x):
+ if x > 1:
+ return m.log(x)+1
+ elif x < -1:
+ return -m.log(-x)-1
+ else:
+ return x
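+
+# for example, symlog(0.5) gives 0.5, while symlog(100) gives
+# ln(100)+1 ~= 5.6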
+
+class Plot:
+ def __init__(self, width, height, *,
+ xlim=None,
+ ylim=None,
+ xlog=False,
+ ylog=False,
+ braille=False,
+ dots=False):
+ # scale if we're printing with dots or braille
+ self.width = 2*width if braille else width
+ self.height = (4*height if braille
+ else 2*height if dots
+ else height)
+
+ self.xlim = xlim or (0, width)
+ self.ylim = ylim or (0, height)
+ self.xlog = xlog
+ self.ylog = ylog
+ self.braille = braille
+ self.dots = dots
+
+ self.grid = [('',False)]*(self.width*self.height)
+
+ def scale(self, x, y):
+ # scale and clamp
+ try:
+ if self.xlog:
+ x = int(self.width * (
+ (symlog(x)-symlog(self.xlim[0]))
+ / (symlog(self.xlim[1])-symlog(self.xlim[0]))))
+ else:
+ x = int(self.width * (
+ (x-self.xlim[0])
+ / (self.xlim[1]-self.xlim[0])))
+ if self.ylog:
+ y = int(self.height * (
+ (symlog(y)-symlog(self.ylim[0]))
+ / (symlog(self.ylim[1])-symlog(self.ylim[0]))))
+ else:
+ y = int(self.height * (
+ (y-self.ylim[0])
+ / (self.ylim[1]-self.ylim[0])))
+ except ZeroDivisionError:
+ x = 0
+ y = 0
+ return x, y
+
+ def point(self, x, y, *,
+ color=COLORS[0],
+ char=True):
+ # scale
+ x, y = self.scale(x, y)
+
+ # ignore out of bounds points
+ if x >= 0 and x < self.width and y >= 0 and y < self.height:
+ self.grid[x + y*self.width] = (color, char)
+
+ def line(self, x1, y1, x2, y2, *,
+ color=COLORS[0],
+ char=True):
+ # scale
+ x1, y1 = self.scale(x1, y1)
+ x2, y2 = self.scale(x2, y2)
+
+ # incremental error line algorithm
+ ex = abs(x2 - x1)
+ ey = -abs(y2 - y1)
+ dx = +1 if x1 < x2 else -1
+ dy = +1 if y1 < y2 else -1
+ e = ex + ey
+
+ while True:
+ if x1 >= 0 and x1 < self.width and y1 >= 0 and y1 < self.height:
+ self.grid[x1 + y1*self.width] = (color, char)
+ e2 = 2*e
+
+ if x1 == x2 and y1 == y2:
+ break
+
+ if e2 > ey:
+ e += ey
+ x1 += dx
+
+ if x1 == x2 and y1 == y2:
+ break
+
+ if e2 < ex:
+ e += ex
+ y1 += dy
+
+ if x2 >= 0 and x2 < self.width and y2 >= 0 and y2 < self.height:
+ self.grid[x2 + y2*self.width] = (color, char)
+
+ def plot(self, coords, *,
+ color=COLORS[0],
+ char=True,
+ line_char=True):
+ # draw lines
+ if line_char:
+ for (x1, y1), (x2, y2) in zip(coords, coords[1:]):
+ if y1 is not None and y2 is not None:
+ self.line(x1, y1, x2, y2,
+ color=color,
+ char=line_char)
+
+ # draw points
+ if char and (not line_char or char is not True):
+ for x, y in coords:
+ if y is not None:
+ self.point(x, y,
+ color=color,
+ char=char)
+
+ def draw(self, row, *,
+ color=False):
+ # scale if needed
+ if self.braille:
+ xscale, yscale = 2, 4
+ elif self.dots:
+ xscale, yscale = 1, 2
+ else:
+ xscale, yscale = 1, 1
+
+ y = self.height//yscale-1 - row
+ row_ = []
+ for x in range(self.width//xscale):
+ best_f = ''
+ best_c = False
+
+ # encode into a byte
+ b = 0
+ for i in range(xscale*yscale):
+ f, c = self.grid[x*xscale+(xscale-1-(i%xscale))
+ + (y*yscale+(i//xscale))*self.width]
+ if c:
+ b |= 1 << i
+
+ if f:
+ best_f = f
+ if c and c is not True:
+ best_c = c
+
+ # use byte to lookup character
+ if b:
+ if best_c:
+ c = best_c
+ elif self.braille:
+ c = CHARS_BRAILLE[b]
+ else:
+ c = CHARS_DOTS[b]
+ else:
+ c = ' '
+
+ # color?
+ if b and color and best_f:
+ c = '\x1b[%sm%s\x1b[m' % (best_f, c)
+
+ # draw axis in blank spaces
+ if not b:
+ if x == 0 and y == 0:
+ c = '+'
+ elif x == 0 and y == self.height//yscale-1:
+ c = '^'
+ elif x == self.width//xscale-1 and y == 0:
+ c = '>'
+ elif x == 0:
+ c = '|'
+ elif y == 0:
+ c = '-'
+
+ row_.append(c)
+
+ return ''.join(row_)
+
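+# a minimal Plot usage sketch with hypothetical data:
+#
+#   plot = Plot(40, 10, xlim=(0, 100), ylim=(0, 1))
+#   plot.plot([(0, 0.0), (50, 0.5), (100, 1.0)])
+#   for row in range(10):
+#       print(plot.draw(row))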
+
+def collect(csv_paths, renames=[]):
+ # collect results from CSV files
+ results = []
+ for path in csv_paths:
+ try:
+ with openio(path) as f:
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ results.append(r)
+ except FileNotFoundError:
+ pass
+
+ if renames:
+ for r in results:
+ # make a copy so renames can overlap
+ r_ = {}
+ for new_k, old_k in renames:
+ if old_k in r:
+ r_[new_k] = r[old_k]
+ r.update(r_)
+
+ return results
+
+def dataset(results, x=None, y=None, define=[]):
+ # organize by 'by', x, and y
+ dataset = {}
+ i = 0
+ for r in results:
+ # filter results by matching defines
+ if not all(k in r and r[k] in vs for k, vs in define):
+ continue
+
+ # find xs
+ if x is not None:
+ if x not in r:
+ continue
+ try:
+ x_ = dat(r[x])
+ except ValueError:
+ continue
+ else:
+ x_ = i
+ i += 1
+
+ # find ys
+ if y is not None:
+ if y not in r:
+ continue
+ try:
+ y_ = dat(r[y])
+ except ValueError:
+ continue
+ else:
+ y_ = None
+
+ if y_ is not None:
+ dataset[x_] = y_ + dataset.get(x_, 0)
+ else:
+ dataset[x_] = y_ or dataset.get(x_, None)
+
+ return dataset
+
+def datasets(results, by=None, x=None, y=None, define=[]):
+ # filter results by matching defines
+ results_ = []
+ for r in results:
+ if all(k in r and r[k] in vs for k, vs in define):
+ results_.append(r)
+ results = results_
+
+ # if y not specified, try to guess from data
+ if y is None:
+ y = co.OrderedDict()
+ for r in results:
+ for k, v in r.items():
+ if (by is None or k not in by) and v.strip():
+ try:
+ dat(v)
+ y[k] = True
+ except ValueError:
+ y[k] = False
+ y = list(k for k,v in y.items() if v)
+
+ if by is not None:
+ # find all 'by' values
+ ks = set()
+ for r in results:
+ ks.add(tuple(r.get(k, '') for k in by))
+ ks = sorted(ks)
+
+ # collect all datasets
+ datasets = co.OrderedDict()
+ for ks_ in (ks if by is not None else [()]):
+ for x_ in (x if x is not None else [None]):
+ for y_ in y:
+ # hide x/y if there is only one field
+ k_x = x_ if len(x or []) > 1 else ''
+ k_y = y_ if len(y or []) > 1 or (not ks_ and not k_x) else ''
+
+ datasets[ks_ + (k_x, k_y)] = dataset(
+ results,
+ x_,
+ y_,
+ [(by_, {k_}) for by_, k_ in zip(by, ks_)]
+ if by is not None else [])
+
+ return datasets
+
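+# for example, datasets(results, ['case'], ['SIZE'], ['bench_read'])
+# returns roughly {('case_a', '', ''): {size: reads, ...}, ...}, with
+# the x/y parts of the key left blank when only one field is given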
+
+# some classes for organizing subplots into a grid
+class Subplot:
+ def __init__(self, **args):
+ self.x = 0
+ self.y = 0
+ self.xspan = 1
+ self.yspan = 1
+ self.args = args
+
+class Grid:
+ def __init__(self, subplot, width=1.0, height=1.0):
+ self.xweights = [width]
+ self.yweights = [height]
+ self.map = {(0,0): subplot}
+ self.subplots = [subplot]
+
+ def __repr__(self):
+ return 'Grid(%r, %r)' % (self.xweights, self.yweights)
+
+ @property
+ def width(self):
+ return len(self.xweights)
+
+ @property
+ def height(self):
+ return len(self.yweights)
+
+ def __iter__(self):
+ return iter(self.subplots)
+
+ def __getitem__(self, i):
+ x, y = i
+ if x < 0:
+ x += len(self.xweights)
+ if y < 0:
+ y += len(self.yweights)
+
+ return self.map[(x,y)]
+
+ def merge(self, other, dir):
+ if dir in ['above', 'below']:
+ # first scale the two grids so they line up
+ self_xweights = self.xweights
+ other_xweights = other.xweights
+ self_w = sum(self_xweights)
+ other_w = sum(other_xweights)
+ ratio = self_w / other_w
+ other_xweights = [s*ratio for s in other_xweights]
+
+ # now interleave xweights as needed
+ new_xweights = []
+ self_map = {}
+ other_map = {}
+ self_i = 0
+ other_i = 0
+ self_xweight = (self_xweights[self_i]
+ if self_i < len(self_xweights) else m.inf)
+ other_xweight = (other_xweights[other_i]
+ if other_i < len(other_xweights) else m.inf)
+ while self_i < len(self_xweights) and other_i < len(other_xweights):
+ if other_xweight - self_xweight > 0.0000001:
+ new_xweights.append(self_xweight)
+ other_xweight -= self_xweight
+
+ new_i = len(new_xweights)-1
+ for j in range(len(self.yweights)):
+ self_map[(new_i, j)] = self.map[(self_i, j)]
+ for j in range(len(other.yweights)):
+ other_map[(new_i, j)] = other.map[(other_i, j)]
+ for s in other.subplots:
+ if s.x+s.xspan-1 == new_i:
+ s.xspan += 1
+ elif s.x > new_i:
+ s.x += 1
+
+ self_i += 1
+ self_xweight = (self_xweights[self_i]
+ if self_i < len(self_xweights) else m.inf)
+ elif self_xweight - other_xweight > 0.0000001:
+ new_xweights.append(other_xweight)
+ self_xweight -= other_xweight
+
+ new_i = len(new_xweights)-1
+ for j in range(len(other.yweights)):
+ other_map[(new_i, j)] = other.map[(other_i, j)]
+ for j in range(len(self.yweights)):
+ self_map[(new_i, j)] = self.map[(self_i, j)]
+ for s in self.subplots:
+ if s.x+s.xspan-1 == new_i:
+ s.xspan += 1
+ elif s.x > new_i:
+ s.x += 1
+
+ other_i += 1
+ other_xweight = (other_xweights[other_i]
+ if other_i < len(other_xweights) else m.inf)
+ else:
+ new_xweights.append(self_xweight)
+
+ new_i = len(new_xweights)-1
+ for j in range(len(self.yweights)):
+ self_map[(new_i, j)] = self.map[(self_i, j)]
+ for j in range(len(other.yweights)):
+ other_map[(new_i, j)] = other.map[(other_i, j)]
+
+ self_i += 1
+ self_xweight = (self_xweights[self_i]
+ if self_i < len(self_xweights) else m.inf)
+ other_i += 1
+ other_xweight = (other_xweights[other_i]
+ if other_i < len(other_xweights) else m.inf)
+
+ # squish so ratios are preserved
+ self_h = sum(self.yweights)
+ other_h = sum(other.yweights)
+ ratio = (self_h-other_h) / self_h
+ self_yweights = [s*ratio for s in self.yweights]
+
+ # finally concatenate the two grids
+ if dir == 'above':
+ for s in other.subplots:
+ s.y += len(self_yweights)
+ self.subplots.extend(other.subplots)
+
+ self.xweights = new_xweights
+ self.yweights = self_yweights + other.yweights
+ self.map = self_map | {(x, y+len(self_yweights)): s
+ for (x, y), s in other_map.items()}
+ else:
+ for s in self.subplots:
+ s.y += len(other.yweights)
+ self.subplots.extend(other.subplots)
+
+ self.xweights = new_xweights
+ self.yweights = other.yweights + self_yweights
+ self.map = other_map | {(x, y+len(other.yweights)): s
+ for (x, y), s in self_map.items()}
+
+ if dir in ['right', 'left']:
+ # first scale the two grids so they line up
+ self_yweights = self.yweights
+ other_yweights = other.yweights
+ self_h = sum(self_yweights)
+ other_h = sum(other_yweights)
+ ratio = self_h / other_h
+ other_yweights = [s*ratio for s in other_yweights]
+
+ # now interleave yweights as needed
+ new_yweights = []
+ self_map = {}
+ other_map = {}
+ self_i = 0
+ other_i = 0
+ self_yweight = (self_yweights[self_i]
+ if self_i < len(self_yweights) else m.inf)
+ other_yweight = (other_yweights[other_i]
+ if other_i < len(other_yweights) else m.inf)
+ while self_i < len(self_yweights) and other_i < len(other_yweights):
+ if other_yweight - self_yweight > 0.0000001:
+ new_yweights.append(self_yweight)
+ other_yweight -= self_yweight
+
+ new_i = len(new_yweights)-1
+ for j in range(len(self.xweights)):
+ self_map[(j, new_i)] = self.map[(j, self_i)]
+ for j in range(len(other.xweights)):
+ other_map[(j, new_i)] = other.map[(j, other_i)]
+ for s in other.subplots:
+ if s.y+s.yspan-1 == new_i:
+ s.yspan += 1
+ elif s.y > new_i:
+ s.y += 1
+
+ self_i += 1
+ self_yweight = (self_yweights[self_i]
+ if self_i < len(self_yweights) else m.inf)
+ elif self_yweight - other_yweight > 0.0000001:
+ new_yweights.append(other_yweight)
+ self_yweight -= other_yweight
+
+ new_i = len(new_yweights)-1
+ for j in range(len(other.xweights)):
+ other_map[(j, new_i)] = other.map[(j, other_i)]
+ for j in range(len(self.xweights)):
+ self_map[(j, new_i)] = self.map[(j, self_i)]
+ for s in self.subplots:
+ if s.y+s.yspan-1 == new_i:
+ s.yspan += 1
+ elif s.y > new_i:
+ s.y += 1
+
+ other_i += 1
+ other_yweight = (other_yweights[other_i]
+ if other_i < len(other_yweights) else m.inf)
+ else:
+ new_yweights.append(self_yweight)
+
+ new_i = len(new_yweights)-1
+ for j in range(len(self.xweights)):
+ self_map[(j, new_i)] = self.map[(j, self_i)]
+ for j in range(len(other.xweights)):
+ other_map[(j, new_i)] = other.map[(j, other_i)]
+
+ self_i += 1
+ self_yweight = (self_yweights[self_i]
+ if self_i < len(self_yweights) else m.inf)
+ other_i += 1
+ other_yweight = (other_yweights[other_i]
+ if other_i < len(other_yweights) else m.inf)
+
+ # squish so ratios are preserved
+ self_w = sum(self.xweights)
+ other_w = sum(other.xweights)
+ ratio = (self_w-other_w) / self_w
+ self_xweights = [s*ratio for s in self.xweights]
+
+ # finally concatenate the two grids
+ if dir == 'right':
+ for s in other.subplots:
+ s.x += len(self_xweights)
+ self.subplots.extend(other.subplots)
+
+ self.xweights = self_xweights + other.xweights
+ self.yweights = new_yweights
+ self.map = self_map | {(x+len(self_xweights), y): s
+ for (x, y), s in other_map.items()}
+ else:
+ for s in self.subplots:
+ s.x += len(other.xweights)
+ self.subplots.extend(other.subplots)
+
+ self.xweights = other.xweights + self_xweights
+ self.yweights = new_yweights
+ self.map = other_map | {(x+len(other.xweights), y): s
+ for (x, y), s in self_map.items()}
+
+
+ def scale(self, width, height):
+ self.xweights = [s*width for s in self.xweights]
+ self.yweights = [s*height for s in self.yweights]
+
+ @classmethod
+ def fromargs(cls, width=1.0, height=1.0, *,
+ subplots=[],
+ **args):
+ grid = cls(Subplot(**args))
+
+ for dir, subargs in subplots:
+ subgrid = cls.fromargs(
+ width=subargs.pop('width',
+ 0.5 if dir in ['right', 'left'] else width),
+ height=subargs.pop('height',
+ 0.5 if dir in ['above', 'below'] else height),
+ **subargs)
+ grid.merge(subgrid, dir)
+
+ grid.scale(width, height)
+ return grid
+
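+# for example, merging two single-subplot grids with dir='right' rescales
+# the original grid's x-weights by half and concatenates, giving
+# xweights [0.5, 0.5]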
+
+def main(csv_paths, *,
+ by=None,
+ x=None,
+ y=None,
+ define=[],
+ color=False,
+ braille=False,
+ colors=None,
+ chars=None,
+ line_chars=None,
+ points=False,
+ points_and_lines=False,
+ width=None,
+ height=None,
+ xlim=(None,None),
+ ylim=(None,None),
+ xlog=False,
+ ylog=False,
+ x2=False,
+ y2=False,
+ xunits='',
+ yunits='',
+ xlabel=None,
+ ylabel=None,
+ xticklabels=None,
+ yticklabels=None,
+ title=None,
+ legend_right=False,
+ legend_above=False,
+ legend_below=False,
+ subplot={},
+ subplots=[],
+ cat=False,
+ keep_open=False,
+ sleep=None,
+ **args):
+ # figure out what color should be
+ if color == 'auto':
+ color = sys.stdout.isatty()
+ elif color == 'always':
+ color = True
+ else:
+ color = False
+
+ # what colors to use?
+ if colors is not None:
+ colors_ = colors
+ else:
+ colors_ = COLORS
+
+ if chars is not None:
+ chars_ = chars
+ elif points_and_lines:
+ chars_ = CHARS_POINTS_AND_LINES
+ else:
+ chars_ = [True]
+
+ if line_chars is not None:
+ line_chars_ = line_chars
+ elif points_and_lines or not points:
+ line_chars_ = [True]
+ else:
+ line_chars_ = [False]
+
+ # allow escape codes in labels/titles
+ title = escape(title).splitlines() if title is not None else []
+ xlabel = escape(xlabel).splitlines() if xlabel is not None else []
+ ylabel = escape(ylabel).splitlines() if ylabel is not None else []
+
+ # separate out renames
+ renames = list(it.chain.from_iterable(
+ ((k, v) for v in vs)
+ for k, vs in it.chain(by or [], x or [], y or [])))
+ if by is not None:
+ by = [k for k, _ in by]
+ if x is not None:
+ x = [k for k, _ in x]
+ if y is not None:
+ y = [k for k, _ in y]
+
+ # create a grid of subplots
+ grid = Grid.fromargs(
+ subplots=subplots + subplot.pop('subplots', []),
+ **subplot)
+
+ for s in grid:
+ # allow subplot params to override global params
+ x2_ = s.args.get('x2', False) or x2
+ y2_ = s.args.get('y2', False) or y2
+ xunits_ = s.args.get('xunits', xunits)
+ yunits_ = s.args.get('yunits', yunits)
+ xticklabels_ = s.args.get('xticklabels', xticklabels)
+ yticklabels_ = s.args.get('yticklabels', yticklabels)
+
+ # label/titles are handled a bit differently in subplots
+ subtitle = s.args.get('title')
+ xsublabel = s.args.get('xlabel')
+ ysublabel = s.args.get('ylabel')
+
+ # allow escape codes in sublabels/subtitles
+ subtitle = (escape(subtitle).splitlines()
+ if subtitle is not None else [])
+ xsublabel = (escape(xsublabel).splitlines()
+ if xsublabel is not None else [])
+ ysublabel = (escape(ysublabel).splitlines()
+ if ysublabel is not None else [])
+
+ # don't allow >2 ticklabels and render single ticklabels only once
+ if xticklabels_ is not None:
+ if len(xticklabels_) == 1:
+ xticklabels_ = ["", xticklabels_[0]]
+ elif len(xticklabels_) > 2:
+ xticklabels_ = [xticklabels_[0], xticklabels_[-1]]
+ if yticklabels_ is not None:
+ if len(yticklabels_) == 1:
+ yticklabels_ = ["", yticklabels_[0]]
+ elif len(yticklabels_) > 2:
+ yticklabels_ = [yticklabels_[0], yticklabels_[-1]]
+
+ s.x2 = x2_
+ s.y2 = y2_
+ s.xunits = xunits_
+ s.yunits = yunits_
+ s.xticklabels = xticklabels_
+ s.yticklabels = yticklabels_
+ s.title = subtitle
+ s.xlabel = xsublabel
+ s.ylabel = ysublabel
+
+ # preprocess margins so they can be shared
+ for s in grid:
+ s.xmargin = (
+ len(s.ylabel) + (1 if s.ylabel else 0) # fit ysublabel
+ + (1 if s.x > 0 else 0), # space between
+ ((5 if s.y2 else 4) + len(s.yunits) # fit yticklabels
+ if s.yticklabels is None
+ else max((len(t) for t in s.yticklabels), default=0))
+ + (1 if s.yticklabels != [] else 0),
+ )
+ s.ymargin = (
+ len(s.xlabel), # fit xsublabel
+ 1 if s.xticklabels != [] else 0, # fit xticklabels
+ len(s.title), # fit subtitle
+ )
+
+ for s in grid:
+ # share margins so everything aligns nicely
+ s.xmargin = (
+ max(s_.xmargin[0] for s_ in grid if s_.x == s.x),
+ max(s_.xmargin[1] for s_ in grid if s_.x == s.x),
+ )
+ s.ymargin = (
+ max(s_.ymargin[0] for s_ in grid if s_.y == s.y),
+ max(s_.ymargin[1] for s_ in grid if s_.y == s.y),
+ max(s_.ymargin[-1] for s_ in grid if s_.y+s_.yspan == s.y+s.yspan),
+ )
+
+
+ def draw(f):
+ def writeln(s=''):
+ f.write(s)
+ f.write('\n')
+ f.writeln = writeln
+
+ # first collect results from CSV files
+ results = collect(csv_paths, renames)
+
+ # then extract the requested datasets
+ datasets_ = datasets(results, by, x, y, define)
+
+ # figure out colors/chars here so that subplot defines
+ # don't change them later, that'd be bad
+ datacolors_ = {
+ name: colors_[i % len(colors_)]
+ for i, name in enumerate(datasets_.keys())}
+ datachars_ = {
+ name: chars_[i % len(chars_)]
+ for i, name in enumerate(datasets_.keys())}
+ dataline_chars_ = {
+ name: line_chars_[i % len(line_chars_)]
+ for i, name in enumerate(datasets_.keys())}
+
+ # build legend?
+ legend_width = 0
+ if legend_right or legend_above or legend_below:
+ legend_ = []
+ for i, k in enumerate(datasets_.keys()):
+ label = '%s%s' % (
+ '%s ' % chars_[i % len(chars_)]
+ if chars is not None
+ else '%s ' % line_chars_[i % len(line_chars_)]
+ if line_chars is not None
+ else '',
+ ','.join(k_ for k_ in k if k_))
+
+ if label:
+ legend_.append(label)
+ legend_width = max(legend_width, len(label)+1)
+
+ # figure out our canvas size
+ if width is None:
+ width_ = min(80, shutil.get_terminal_size((80, None))[0])
+ elif width:
+ width_ = width
+ else:
+ width_ = shutil.get_terminal_size((80, None))[0]
+
+ if height is None:
+ height_ = 17 + len(title) + len(xlabel)
+ elif height:
+ height_ = height
+ else:
+ height_ = shutil.get_terminal_size((None,
+ 17 + len(title) + len(xlabel)))[1]
+ # make space for shell prompt
+ if not keep_open:
+ height_ -= 1
+
+ # carve out space for the xlabel
+ height_ -= len(xlabel)
+ # carve out space for the ylabel
+ width_ -= len(ylabel) + (1 if ylabel else 0)
+ # carve out space for title
+ height_ -= len(title)
+
+ # carve out space for the legend
+ if legend_right and legend_:
+ width_ -= legend_width
+ if legend_above and legend_:
+ legend_cols = len(legend_)
+ while True:
+ legend_widths = [
+ max(len(l) for l in legend_[i::legend_cols])
+ for i in range(legend_cols)]
+ if (legend_cols <= 1
+ or sum(legend_widths)+2*(legend_cols-1)
+ + max(sum(s.xmargin[:2]) for s in grid if s.x == 0)
+ <= width_):
+ break
+ legend_cols -= 1
+ height_ -= (len(legend_)+legend_cols-1) // legend_cols
+ if legend_below and legend_:
+ legend_cols = len(legend_)
+ while True:
+ legend_widths = [
+ max(len(l) for l in legend_[i::legend_cols])
+ for i in range(legend_cols)]
+ if (legend_cols <= 1
+ or sum(legend_widths)+2*(legend_cols-1)
+ + max(sum(s.xmargin[:2]) for s in grid if s.x == 0)
+ <= width_):
+ break
+ legend_cols -= 1
+ height_ -= (len(legend_)+legend_cols-1) // legend_cols
+
+ # figure out the grid dimensions
+ #
+ # note we floor to give the dimension tweaks the best chance of not
+ # exceeding the requested dimensions, this means we usually are less
+ # than the requested dimensions by quite a bit when we have many
+ # subplots, but it's a tradeoff for a relatively simple implementation
+ widths = [m.floor(w*width_) for w in grid.xweights]
+ heights = [m.floor(w*height_) for w in grid.yweights]
+
+ # tweak dimensions to allow all plots to have a minimum width,
+ # this may force the plot to be larger than the requested dimensions,
+ # but that's the best we can do
+ for s in grid:
+ # fit xunits
+ minwidth = sum(s.xmargin) + max(2,
+ 2*((5 if s.x2 else 4)+len(s.xunits))
+ if s.xticklabels is None
+ else sum(len(t) for t in s.xticklabels))
+ # fit yunits
+ minheight = sum(s.ymargin) + 2
+
+ i = 0
+ while minwidth > sum(widths[s.x:s.x+s.xspan]):
+ widths[s.x+i] += 1
+ i = (i + 1) % s.xspan
+
+ i = 0
+ while minheight > sum(heights[s.y:s.y+s.yspan]):
+ heights[s.y+i] += 1
+ i = (i + 1) % s.yspan
+
+ width_ = sum(widths)
+ height_ = sum(heights)
+
+ # create a plot for each subplot
+ for s in grid:
+ # allow subplot params to override global params
+ define_ = define + s.args.get('define', [])
+ xlim_ = s.args.get('xlim', xlim)
+ ylim_ = s.args.get('ylim', ylim)
+ xlog_ = s.args.get('xlog', False) or xlog
+ ylog_ = s.args.get('ylog', False) or ylog
+
+ # allow shortened ranges
+ if len(xlim_) == 1:
+ xlim_ = (0, xlim_[0])
+ if len(ylim_) == 1:
+ ylim_ = (0, ylim_[0])
+
+ # data can be constrained by subplot-specific defines,
+ # so re-extract for each plot
+ subdatasets = datasets(results, by, x, y, define_)
+
+ # find actual xlim/ylim
+ xlim_ = (
+ xlim_[0] if xlim_[0] is not None
+ else min(it.chain([0], (k
+ for r in subdatasets.values()
+ for k, v in r.items()
+ if v is not None))),
+ xlim_[1] if xlim_[1] is not None
+ else max(it.chain([0], (k
+ for r in subdatasets.values()
+ for k, v in r.items()
+ if v is not None))))
+
+ ylim_ = (
+ ylim_[0] if ylim_[0] is not None
+ else min(it.chain([0], (v
+ for r in subdatasets.values()
+ for _, v in r.items()
+ if v is not None))),
+ ylim_[1] if ylim_[1] is not None
+ else max(it.chain([0], (v
+ for r in subdatasets.values()
+ for _, v in r.items()
+ if v is not None))))
+
+ # find actual width/height
+ subwidth = sum(widths[s.x:s.x+s.xspan]) - sum(s.xmargin)
+ subheight = sum(heights[s.y:s.y+s.yspan]) - sum(s.ymargin)
+
+ # plot!
+ plot = Plot(
+ subwidth,
+ subheight,
+ xlim=xlim_,
+ ylim=ylim_,
+ xlog=xlog_,
+ ylog=ylog_,
+ braille=line_chars is None and braille,
+ dots=line_chars is None and not braille)
+
+ for name, dataset in subdatasets.items():
+ plot.plot(
+ sorted((x,y) for x,y in dataset.items()),
+ color=datacolors_[name],
+ char=datachars_[name],
+ line_char=dataline_chars_[name])
+
+ s.plot = plot
+ s.width = subwidth
+ s.height = subheight
+ s.xlim = xlim_
+ s.ylim = ylim_
+
+
+ # now that everything's plotted, let's render things to the terminal
+
+ # figure out margin
+ xmargin = (
+ len(ylabel) + (1 if ylabel else 0),
+ sum(grid[0,0].xmargin[:2]),
+ )
+ ymargin = (
+ sum(grid[0,0].ymargin[:2]),
+ grid[-1,-1].ymargin[-1],
+ )
+
+ # draw title?
+ for line in title:
+ f.writeln('%*s%s' % (
+ sum(xmargin[:2]), '',
+ line.center(width_-xmargin[1])))
+
+ # draw legend_above?
+ if legend_above and legend_:
+ for i in range(0, len(legend_), legend_cols):
+ f.writeln('%*s%s' % (
+ max(sum(xmargin[:2])
+ + (width_-xmargin[1]
+ - (sum(legend_widths)+2*(legend_cols-1)))
+ // 2,
+ 0), '',
+ ' '.join('%s%s%s' % (
+ '\x1b[%sm' % colors_[(i+j) % len(colors_)]
+ if color else '',
+ '%-*s' % (legend_widths[j], legend_[i+j]),
+ '\x1b[m'
+ if color else '')
+ for j in range(min(legend_cols, len(legend_)-i)))))
+
+ for row in range(height_):
+ # draw ylabel?
+ f.write(
+ '%s ' % ''.join(
+ ('%*s%s%*s' % (
+ ymargin[-1], '',
+ line.center(height_-sum(ymargin)),
+ ymargin[0], ''))[row]
+ for line in ylabel)
+ if ylabel else '')
+
+ for x_ in range(grid.width):
+ # figure out the grid x/y position
+ subrow = row
+ y_ = len(heights)-1
+ while subrow >= heights[y_]:
+ subrow -= heights[y_]
+ y_ -= 1
+
+ s = grid[x_, y_]
+ subrow = row - sum(heights[s.y+s.yspan:])
+
+ # header
+ if subrow < s.ymargin[-1]:
+ # draw subtitle?
+ if subrow < len(s.title):
+ f.write('%*s%s' % (
+ sum(s.xmargin[:2]), '',
+ s.title[subrow].center(s.width)))
+ else:
+ f.write('%*s%*s' % (
+ sum(s.xmargin[:2]), '',
+ s.width, ''))
+ # draw plot?
+ elif subrow-s.ymargin[-1] < s.height:
+ subrow = subrow-s.ymargin[-1]
+
+ # draw ysublabel?
+ f.write('%-*s' % (
+ s.xmargin[0],
+ '%s ' % ''.join(
+ line.center(s.height)[subrow]
+ for line in s.ylabel)
+ if s.ylabel else ''))
+
+ # draw yunits?
+ if subrow == 0 and s.yticklabels != []:
+ f.write('%*s' % (
+ s.xmargin[1],
+ ((si2 if s.y2 else si)(s.ylim[1]) + s.yunits
+ if s.yticklabels is None
+ else s.yticklabels[1])
+ + ' '))
+ elif subrow == s.height-1 and s.yticklabels != []:
+ f.write('%*s' % (
+ s.xmargin[1],
+ ((si2 if s.y2 else si)(s.ylim[0]) + s.yunits
+ if s.yticklabels is None
+ else s.yticklabels[0])
+ + ' '))
+ else:
+ f.write('%*s' % (
+ s.xmargin[1], ''))
+
+ # draw plot!
+ f.write(s.plot.draw(subrow, color=color))
+
+ # footer
+ else:
+ subrow = subrow-s.ymargin[-1]-s.height
+
+ # draw xunits?
+ if subrow < (1 if s.xticklabels != [] else 0):
+ f.write('%*s%-*s%*s%*s' % (
+ sum(s.xmargin[:2]), '',
+ (5 if s.x2 else 4) + len(s.xunits)
+ if s.xticklabels is None
+ else len(s.xticklabels[0]),
+ (si2 if s.x2 else si)(s.xlim[0]) + s.xunits
+ if s.xticklabels is None
+ else s.xticklabels[0],
+ s.width - (2*((5 if s.x2 else 4)+len(s.xunits))
+ if s.xticklabels is None
+ else sum(len(t) for t in s.xticklabels)), '',
+ (5 if s.x2 else 4) + len(s.xunits)
+ if s.xticklabels is None
+ else len(s.xticklabels[1]),
+ (si2 if s.x2 else si)(s.xlim[1]) + s.xunits
+ if s.xticklabels is None
+ else s.xticklabels[1]))
+ # draw xsublabel?
+ elif (subrow < s.ymargin[1]
+ or subrow-s.ymargin[1] >= len(s.xlabel)):
+ f.write('%*s%*s' % (
+ sum(s.xmargin[:2]), '',
+ s.width, ''))
+ else:
+ f.write('%*s%s' % (
+ sum(s.xmargin[:2]), '',
+ s.xlabel[subrow-s.ymargin[1]].center(s.width)))
+
+ # draw legend_right?
+ if (legend_right and legend_
+ and row >= ymargin[-1]
+ and row-ymargin[-1] < len(legend_)):
+ j = row-ymargin[-1]
+ f.write(' %s%s%s' % (
+ '\x1b[%sm' % colors_[j % len(colors_)] if color else '',
+ legend_[j],
+ '\x1b[m' if color else ''))
+
+ f.writeln()
+
+ # draw xlabel?
+ for line in xlabel:
+ f.writeln('%*s%s' % (
+ sum(xmargin[:2]), '',
+ line.center(width_-xmargin[1])))
+
+ # draw legend below?
+ if legend_below and legend_:
+ for i in range(0, len(legend_), legend_cols):
+ f.writeln('%*s%s' % (
+ max(sum(xmargin[:2])
+ + (width_-xmargin[1]
+ - (sum(legend_widths)+2*(legend_cols-1)))
+ // 2,
+ 0), '',
+ ' '.join('%s%s%s' % (
+ '\x1b[%sm' % colors_[(i+j) % len(colors_)]
+ if color else '',
+ '%-*s' % (legend_widths[j], legend_[i+j]),
+ '\x1b[m'
+ if color else '')
+ for j in range(min(legend_cols, len(legend_)-i)))))
+
+
+ if keep_open:
+ try:
+ while True:
+ if cat:
+ draw(sys.stdout)
+ else:
+ ring = LinesIO()
+ draw(ring)
+ ring.draw()
+
+ # try to inotifywait
+ if inotify_simple is not None:
+ ptime = time.time()
+ inotifywait(csv_paths)
+                    # sleep for a minimum amount of time to avoid thrashing
+                    # on rapidly updating files
+ time.sleep(max(0, (sleep or 0.01) - (time.time()-ptime)))
+ else:
+ time.sleep(sleep or 0.1)
+ except KeyboardInterrupt:
+ pass
+
+ if cat:
+ draw(sys.stdout)
+ else:
+ ring = LinesIO()
+ draw(ring)
+ ring.draw()
+ sys.stdout.write('\n')
+ else:
+ draw(sys.stdout)
+
+
+if __name__ == "__main__":
+ import sys
+ import argparse
+ parser = argparse.ArgumentParser(
+ description="Plot CSV files in terminal.",
+ allow_abbrev=False)
+ parser.add_argument(
+ 'csv_paths',
+ nargs='*',
+ help="Input *.csv files.")
+ parser.add_argument(
+ '-b', '--by',
+ action='append',
+ type=lambda x: (
+ lambda k,v=None: (k, v.split(',') if v is not None else ())
+ )(*x.split('=', 1)),
+ help="Group by this field. Can rename fields with new_name=old_name.")
+ parser.add_argument(
+ '-x',
+ action='append',
+ type=lambda x: (
+ lambda k,v=None: (k, v.split(',') if v is not None else ())
+ )(*x.split('=', 1)),
+ help="Field to use for the x-axis. Can rename fields with "
+ "new_name=old_name.")
+ parser.add_argument(
+ '-y',
+ action='append',
+ type=lambda x: (
+ lambda k,v=None: (k, v.split(',') if v is not None else ())
+ )(*x.split('=', 1)),
+ help="Field to use for the y-axis. Can rename fields with "
+ "new_name=old_name.")
+ parser.add_argument(
+ '-D', '--define',
+ type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
+ action='append',
+ help="Only include results where this field is this value. May include "
+ "comma-separated options.")
+ parser.add_argument(
+ '--color',
+ choices=['never', 'always', 'auto'],
+ default='auto',
+ help="When to use terminal colors. Defaults to 'auto'.")
+ parser.add_argument(
+ '-⣿', '--braille',
+ action='store_true',
+ help="Use 2x4 unicode braille characters. Note that braille characters "
+ "sometimes suffer from inconsistent widths.")
+ parser.add_argument(
+ '-.', '--points',
+ action='store_true',
+ help="Only draw data points.")
+ parser.add_argument(
+ '-!', '--points-and-lines',
+ action='store_true',
+ help="Draw data points and lines.")
+ parser.add_argument(
+ '--colors',
+ type=lambda x: [x.strip() for x in x.split(',')],
+ help="Comma-separated colors to use.")
+ parser.add_argument(
+ '--chars',
+ help="Characters to use for points.")
+ parser.add_argument(
+ '--line-chars',
+ help="Characters to use for lines.")
+ parser.add_argument(
+ '-W', '--width',
+ nargs='?',
+ type=lambda x: int(x, 0),
+ const=0,
+ help="Width in columns. 0 uses the terminal width. Defaults to "
+ "min(terminal, 80).")
+ parser.add_argument(
+ '-H', '--height',
+ nargs='?',
+ type=lambda x: int(x, 0),
+ const=0,
+ help="Height in rows. 0 uses the terminal height. Defaults to 17.")
+ parser.add_argument(
+ '-X', '--xlim',
+ type=lambda x: tuple(
+ dat(x) if x.strip() else None
+ for x in x.split(',')),
+ help="Range for the x-axis.")
+ parser.add_argument(
+ '-Y', '--ylim',
+ type=lambda x: tuple(
+ dat(x) if x.strip() else None
+ for x in x.split(',')),
+ help="Range for the y-axis.")
+ parser.add_argument(
+ '--xlog',
+ action='store_true',
+ help="Use a logarithmic x-axis.")
+ parser.add_argument(
+ '--ylog',
+ action='store_true',
+ help="Use a logarithmic y-axis.")
+ parser.add_argument(
+ '--x2',
+ action='store_true',
+ help="Use base-2 prefixes for the x-axis.")
+ parser.add_argument(
+ '--y2',
+ action='store_true',
+ help="Use base-2 prefixes for the y-axis.")
+ parser.add_argument(
+ '--xunits',
+ help="Units for the x-axis.")
+ parser.add_argument(
+ '--yunits',
+ help="Units for the y-axis.")
+ parser.add_argument(
+ '--xlabel',
+ help="Add a label to the x-axis.")
+ parser.add_argument(
+ '--ylabel',
+ help="Add a label to the y-axis.")
+ parser.add_argument(
+ '--xticklabels',
+ type=lambda x:
+ [x.strip() for x in x.split(',')]
+ if x.strip() else [],
+ help="Comma separated xticklabels.")
+ parser.add_argument(
+ '--yticklabels',
+ type=lambda x:
+ [x.strip() for x in x.split(',')]
+ if x.strip() else [],
+ help="Comma separated yticklabels.")
+ parser.add_argument(
+ '-t', '--title',
+ help="Add a title.")
+ parser.add_argument(
+ '-l', '--legend-right',
+ action='store_true',
+ help="Place a legend to the right.")
+ parser.add_argument(
+ '--legend-above',
+ action='store_true',
+ help="Place a legend above.")
+ parser.add_argument(
+ '--legend-below',
+ action='store_true',
+ help="Place a legend below.")
+ class AppendSubplot(argparse.Action):
+ @staticmethod
+ def parse(value):
+ import copy
+ subparser = copy.deepcopy(parser)
+ next(a for a in subparser._actions
+ if '--width' in a.option_strings).type = float
+ next(a for a in subparser._actions
+ if '--height' in a.option_strings).type = float
+ return subparser.parse_intermixed_args(shlex.split(value or ""))
+ def __call__(self, parser, namespace, value, option):
+ if not hasattr(namespace, 'subplots'):
+ namespace.subplots = []
+ namespace.subplots.append((
+ option.split('-')[-1],
+ self.__class__.parse(value)))
+ parser.add_argument(
+ '--subplot-above',
+ action=AppendSubplot,
+ help="Add subplot above with the same dataset. Takes an arg string to "
+ "control the subplot which supports most (but not all) of the "
+ "parameters listed here. The relative dimensions of the subplot "
+ "can be controlled with -W/-H which now take a percentage.")
+ parser.add_argument(
+ '--subplot-below',
+ action=AppendSubplot,
+ help="Add subplot below with the same dataset.")
+ parser.add_argument(
+ '--subplot-left',
+ action=AppendSubplot,
+ help="Add subplot left with the same dataset.")
+ parser.add_argument(
+ '--subplot-right',
+ action=AppendSubplot,
+ help="Add subplot right with the same dataset.")
+ parser.add_argument(
+ '--subplot',
+ type=AppendSubplot.parse,
+ help="Add subplot-specific arguments to the main plot.")
+ parser.add_argument(
+ '-z', '--cat',
+ action='store_true',
+ help="Pipe directly to stdout.")
+ parser.add_argument(
+ '-k', '--keep-open',
+ action='store_true',
+ help="Continue to open and redraw the CSV files in a loop.")
+ parser.add_argument(
+ '-s', '--sleep',
+ type=float,
+ help="Time in seconds to sleep between redraws when running with -k. "
+ "Defaults to 0.01.")
+
+ def dictify(ns):
+ if hasattr(ns, 'subplots'):
+ ns.subplots = [(dir, dictify(subplot_ns))
+ for dir, subplot_ns in ns.subplots]
+ if ns.subplot is not None:
+ ns.subplot = dictify(ns.subplot)
+ return {k: v
+ for k, v in vars(ns).items()
+ if v is not None}
+
+ sys.exit(main(**dictify(parser.parse_intermixed_args())))
diff --git a/scripts/plotmpl.py b/scripts/plotmpl.py
new file mode 100755
index 00000000..1bc1158b
--- /dev/null
+++ b/scripts/plotmpl.py
@@ -0,0 +1,1262 @@
+#!/usr/bin/env python3
+#
+# Plot CSV files with matplotlib.
+#
+# Example:
+# ./scripts/plotmpl.py bench.csv -xSIZE -ybench_read -obench.svg
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import codecs
+import collections as co
+import csv
+import io
+import itertools as it
+import logging
+import math as m
+import numpy as np
+import os
+import shlex
+import shutil
+import time
+
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+
+# some nicer colors borrowed from Seaborn
+# note these include a non-opaque alpha
+COLORS = [
+ '#4c72b0bf', # blue
+ '#dd8452bf', # orange
+ '#55a868bf', # green
+ '#c44e52bf', # red
+ '#8172b3bf', # purple
+ '#937860bf', # brown
+ '#da8bc3bf', # pink
+ '#8c8c8cbf', # gray
+ '#ccb974bf', # yellow
+ '#64b5cdbf', # cyan
+]
+COLORS_DARK = [
+ '#a1c9f4bf', # blue
+ '#ffb482bf', # orange
+ '#8de5a1bf', # green
+ '#ff9f9bbf', # red
+ '#d0bbffbf', # purple
+ '#debb9bbf', # brown
+ '#fab0e4bf', # pink
+ '#cfcfcfbf', # gray
+ '#fffea3bf', # yellow
+ '#b9f2f0bf', # cyan
+]
+ALPHAS = [0.75]
+FORMATS = ['-']
+FORMATS_POINTS = ['.']
+FORMATS_POINTS_AND_LINES = ['.-']
+
+WIDTH = 750
+HEIGHT = 350
+FONT_SIZE = 11
+
+SI_PREFIXES = {
+ 18: 'E',
+ 15: 'P',
+ 12: 'T',
+ 9: 'G',
+ 6: 'M',
+ 3: 'K',
+ 0: '',
+ -3: 'm',
+ -6: 'u',
+ -9: 'n',
+ -12: 'p',
+ -15: 'f',
+ -18: 'a',
+}
+
+SI2_PREFIXES = {
+ 60: 'Ei',
+ 50: 'Pi',
+ 40: 'Ti',
+ 30: 'Gi',
+ 20: 'Mi',
+ 10: 'Ki',
+ 0: '',
+ -10: 'mi',
+ -20: 'ui',
+ -30: 'ni',
+ -40: 'pi',
+ -50: 'fi',
+ -60: 'ai',
+}
+
+
+# formatter for matplotlib
+def si(x):
+ if x == 0:
+ return '0'
+ # figure out prefix and scale
+ p = 3*int(m.log(abs(x), 10**3))
+ p = min(18, max(-18, p))
+ # format with 3 digits of precision
+ s = '%.3f' % (abs(x) / (10.0**p))
+ s = s[:3+1]
+ # truncate but only digits that follow the dot
+ if '.' in s:
+ s = s.rstrip('0')
+ s = s.rstrip('.')
+ return '%s%s%s' % ('-' if x < 0 else '', s, SI_PREFIXES[p])
+
+# formatter for matplotlib
+def si2(x):
+ if x == 0:
+ return '0'
+ # figure out prefix and scale
+ p = 10*int(m.log(abs(x), 2**10))
+ p = min(30, max(-30, p))
+ # format with 3 digits of precision
+ s = '%.3f' % (abs(x) / (2.0**p))
+ s = s[:3+1]
+ # truncate but only digits that follow the dot
+ if '.' in s:
+ s = s.rstrip('0')
+ s = s.rstrip('.')
+ return '%s%s%s' % ('-' if x < 0 else '', s, SI2_PREFIXES[p])
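+
+# for example, tracing the formatters above: si(1500) renders as '1.5K',
+# si(123456) as '123K', and si2(1536) as '1.5Ki'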
+
+# parse escape strings
+def escape(s):
+ return codecs.escape_decode(s.encode('utf8'))[0].decode('utf8')
+
+# we want to use MaxNLocator, but MaxNLocator forces multiples of 10 to be
+# an option, so we can't really use it here...
+class AutoMultipleLocator(mpl.ticker.MultipleLocator):
+ def __init__(self, base, nbins=None):
+        # note base needs to be a float to avoid integer pow issues
+ self.base = float(base)
+ self.nbins = nbins
+ super().__init__(self.base)
+
+ def __call__(self):
+ # find best tick count, conveniently matplotlib has a function for this
+ vmin, vmax = self.axis.get_view_interval()
+ vmin, vmax = mpl.transforms.nonsingular(vmin, vmax, 1e-12, 1e-13)
+ if self.nbins is not None:
+ nbins = self.nbins
+ else:
+ nbins = np.clip(self.axis.get_tick_space(), 1, 9)
+
+ # find the best power, use this as our locator's actual base
+ scale = self.base ** (m.ceil(m.log((vmax-vmin) / (nbins+1), self.base)))
+ self.set_params(scale)
+
+ return super().__call__()
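+
+# for example, with base=2, a 0..100 view, and ~5 bins, the locator above
+# chooses 2**ceil(log2(100/6)) = 32 as the tick spacing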
+
+
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
+
+
+# parse different data representations
+def dat(x):
+ # allow the first part of an a/b fraction
+ if '/' in x:
+ x, _ = x.split('/', 1)
+
+ # first try as int
+ try:
+ return int(x, 0)
+ except ValueError:
+ pass
+
+    # then try as float, but don't allow infinity or nan
+    try:
+        x_ = float(x)
+        if not m.isinf(x_) and not m.isnan(x_):
+            return x_
+    except ValueError:
+        pass
+
+ # else give up
+ raise ValueError("invalid dat %r" % x)
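+
+# for example: dat('0x10') gives 16, dat('1.5') gives 1.5, and dat('3/4')
+# gives 3, since only the numerator of a fraction is kept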
+
+def collect(csv_paths, renames=[]):
+ # collect results from CSV files
+ results = []
+ for path in csv_paths:
+ try:
+ with openio(path) as f:
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ results.append(r)
+ except FileNotFoundError:
+ pass
+
+ if renames:
+ for r in results:
+ # make a copy so renames can overlap
+ r_ = {}
+ for new_k, old_k in renames:
+ if old_k in r:
+ r_[new_k] = r[old_k]
+ r.update(r_)
+
+ return results
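+
+# note renames are (new_name, old_name) pairs, so with the hypothetical
+# fields [('size', 'code_size')] each row's 'code_size' value would also be
+# exposed as 'size'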
+
+def dataset(results, x=None, y=None, define=[]):
+ # organize by 'by', x, and y
+ dataset = {}
+ i = 0
+ for r in results:
+ # filter results by matching defines
+ if not all(k in r and r[k] in vs for k, vs in define):
+ continue
+
+ # find xs
+ if x is not None:
+ if x not in r:
+ continue
+ try:
+ x_ = dat(r[x])
+ except ValueError:
+ continue
+ else:
+ x_ = i
+ i += 1
+
+ # find ys
+ if y is not None:
+ if y not in r:
+ continue
+ try:
+ y_ = dat(r[y])
+ except ValueError:
+ continue
+ else:
+ y_ = None
+
+ if y_ is not None:
+ dataset[x_] = y_ + dataset.get(x_, 0)
+ else:
+ dataset[x_] = y_ or dataset.get(x_, None)
+
+ return dataset
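+
+# note that when multiple rows map to the same x, their y values are summed
+# above, so duplicate entries accumulate rather than overwrite each other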
+
+def datasets(results, by=None, x=None, y=None, define=[]):
+ # filter results by matching defines
+ results_ = []
+ for r in results:
+ if all(k in r and r[k] in vs for k, vs in define):
+ results_.append(r)
+ results = results_
+
+ # if y not specified, try to guess from data
+ if y is None:
+ y = co.OrderedDict()
+ for r in results:
+ for k, v in r.items():
+ if (by is None or k not in by) and v.strip():
+ try:
+ dat(v)
+ y[k] = True
+ except ValueError:
+ y[k] = False
+ y = list(k for k,v in y.items() if v)
+
+ if by is not None:
+ # find all 'by' values
+ ks = set()
+ for r in results:
+ ks.add(tuple(r.get(k, '') for k in by))
+ ks = sorted(ks)
+
+ # collect all datasets
+ datasets = co.OrderedDict()
+ for ks_ in (ks if by is not None else [()]):
+ for x_ in (x if x is not None else [None]):
+ for y_ in y:
+ # hide x/y if there is only one field
+ k_x = x_ if len(x or []) > 1 else ''
+ k_y = y_ if len(y or []) > 1 or (not ks_ and not k_x) else ''
+
+ datasets[ks_ + (k_x, k_y)] = dataset(
+ results,
+ x_,
+ y_,
+ [(by_, {k_}) for by_, k_ in zip(by, ks_)]
+ if by is not None else [])
+
+ return datasets
+
+
+# some classes for organizing subplots into a grid
+class Subplot:
+ def __init__(self, **args):
+ self.x = 0
+ self.y = 0
+ self.xspan = 1
+ self.yspan = 1
+ self.args = args
+
+class Grid:
+ def __init__(self, subplot, width=1.0, height=1.0):
+ self.xweights = [width]
+ self.yweights = [height]
+ self.map = {(0,0): subplot}
+ self.subplots = [subplot]
+
+ def __repr__(self):
+ return 'Grid(%r, %r)' % (self.xweights, self.yweights)
+
+ @property
+ def width(self):
+ return len(self.xweights)
+
+ @property
+ def height(self):
+ return len(self.yweights)
+
+ def __iter__(self):
+ return iter(self.subplots)
+
+ def __getitem__(self, i):
+ x, y = i
+ if x < 0:
+ x += len(self.xweights)
+ if y < 0:
+ y += len(self.yweights)
+
+ return self.map[(x,y)]
+
+ def merge(self, other, dir):
+ if dir in ['above', 'below']:
+ # first scale the two grids so they line up
+ self_xweights = self.xweights
+ other_xweights = other.xweights
+ self_w = sum(self_xweights)
+ other_w = sum(other_xweights)
+ ratio = self_w / other_w
+ other_xweights = [s*ratio for s in other_xweights]
+
+ # now interleave xweights as needed
+ new_xweights = []
+ self_map = {}
+ other_map = {}
+ self_i = 0
+ other_i = 0
+ self_xweight = (self_xweights[self_i]
+ if self_i < len(self_xweights) else m.inf)
+ other_xweight = (other_xweights[other_i]
+ if other_i < len(other_xweights) else m.inf)
+ while self_i < len(self_xweights) and other_i < len(other_xweights):
+ if other_xweight - self_xweight > 0.0000001:
+ new_xweights.append(self_xweight)
+ other_xweight -= self_xweight
+
+ new_i = len(new_xweights)-1
+ for j in range(len(self.yweights)):
+ self_map[(new_i, j)] = self.map[(self_i, j)]
+ for j in range(len(other.yweights)):
+ other_map[(new_i, j)] = other.map[(other_i, j)]
+ for s in other.subplots:
+ if s.x+s.xspan-1 == new_i:
+ s.xspan += 1
+ elif s.x > new_i:
+ s.x += 1
+
+ self_i += 1
+ self_xweight = (self_xweights[self_i]
+ if self_i < len(self_xweights) else m.inf)
+ elif self_xweight - other_xweight > 0.0000001:
+ new_xweights.append(other_xweight)
+ self_xweight -= other_xweight
+
+ new_i = len(new_xweights)-1
+ for j in range(len(other.yweights)):
+ other_map[(new_i, j)] = other.map[(other_i, j)]
+ for j in range(len(self.yweights)):
+ self_map[(new_i, j)] = self.map[(self_i, j)]
+ for s in self.subplots:
+ if s.x+s.xspan-1 == new_i:
+ s.xspan += 1
+ elif s.x > new_i:
+ s.x += 1
+
+ other_i += 1
+ other_xweight = (other_xweights[other_i]
+ if other_i < len(other_xweights) else m.inf)
+ else:
+ new_xweights.append(self_xweight)
+
+ new_i = len(new_xweights)-1
+ for j in range(len(self.yweights)):
+ self_map[(new_i, j)] = self.map[(self_i, j)]
+ for j in range(len(other.yweights)):
+ other_map[(new_i, j)] = other.map[(other_i, j)]
+
+ self_i += 1
+ self_xweight = (self_xweights[self_i]
+ if self_i < len(self_xweights) else m.inf)
+ other_i += 1
+ other_xweight = (other_xweights[other_i]
+ if other_i < len(other_xweights) else m.inf)
+
+ # squish so ratios are preserved
+ self_h = sum(self.yweights)
+ other_h = sum(other.yweights)
+ ratio = (self_h-other_h) / self_h
+ self_yweights = [s*ratio for s in self.yweights]
+
+ # finally concatenate the two grids
+ if dir == 'above':
+ for s in other.subplots:
+ s.y += len(self_yweights)
+ self.subplots.extend(other.subplots)
+
+ self.xweights = new_xweights
+ self.yweights = self_yweights + other.yweights
+ self.map = self_map | {(x, y+len(self_yweights)): s
+ for (x, y), s in other_map.items()}
+ else:
+ for s in self.subplots:
+ s.y += len(other.yweights)
+ self.subplots.extend(other.subplots)
+
+ self.xweights = new_xweights
+ self.yweights = other.yweights + self_yweights
+ self.map = other_map | {(x, y+len(other.yweights)): s
+ for (x, y), s in self_map.items()}
+
+ if dir in ['right', 'left']:
+ # first scale the two grids so they line up
+ self_yweights = self.yweights
+ other_yweights = other.yweights
+ self_h = sum(self_yweights)
+ other_h = sum(other_yweights)
+ ratio = self_h / other_h
+ other_yweights = [s*ratio for s in other_yweights]
+
+ # now interleave yweights as needed
+ new_yweights = []
+ self_map = {}
+ other_map = {}
+ self_i = 0
+ other_i = 0
+ self_yweight = (self_yweights[self_i]
+ if self_i < len(self_yweights) else m.inf)
+ other_yweight = (other_yweights[other_i]
+ if other_i < len(other_yweights) else m.inf)
+ while self_i < len(self_yweights) and other_i < len(other_yweights):
+ if other_yweight - self_yweight > 0.0000001:
+ new_yweights.append(self_yweight)
+ other_yweight -= self_yweight
+
+ new_i = len(new_yweights)-1
+ for j in range(len(self.xweights)):
+ self_map[(j, new_i)] = self.map[(j, self_i)]
+ for j in range(len(other.xweights)):
+ other_map[(j, new_i)] = other.map[(j, other_i)]
+ for s in other.subplots:
+ if s.y+s.yspan-1 == new_i:
+ s.yspan += 1
+ elif s.y > new_i:
+ s.y += 1
+
+ self_i += 1
+ self_yweight = (self_yweights[self_i]
+ if self_i < len(self_yweights) else m.inf)
+ elif self_yweight - other_yweight > 0.0000001:
+ new_yweights.append(other_yweight)
+ self_yweight -= other_yweight
+
+ new_i = len(new_yweights)-1
+ for j in range(len(other.xweights)):
+ other_map[(j, new_i)] = other.map[(j, other_i)]
+ for j in range(len(self.xweights)):
+ self_map[(j, new_i)] = self.map[(j, self_i)]
+ for s in self.subplots:
+ if s.y+s.yspan-1 == new_i:
+ s.yspan += 1
+ elif s.y > new_i:
+ s.y += 1
+
+ other_i += 1
+ other_yweight = (other_yweights[other_i]
+ if other_i < len(other_yweights) else m.inf)
+ else:
+ new_yweights.append(self_yweight)
+
+ new_i = len(new_yweights)-1
+ for j in range(len(self.xweights)):
+ self_map[(j, new_i)] = self.map[(j, self_i)]
+ for j in range(len(other.xweights)):
+ other_map[(j, new_i)] = other.map[(j, other_i)]
+
+ self_i += 1
+ self_yweight = (self_yweights[self_i]
+ if self_i < len(self_yweights) else m.inf)
+ other_i += 1
+ other_yweight = (other_yweights[other_i]
+ if other_i < len(other_yweights) else m.inf)
+
+ # squish so ratios are preserved
+ self_w = sum(self.xweights)
+ other_w = sum(other.xweights)
+ ratio = (self_w-other_w) / self_w
+ self_xweights = [s*ratio for s in self.xweights]
+
+ # finally concatenate the two grids
+ if dir == 'right':
+ for s in other.subplots:
+ s.x += len(self_xweights)
+ self.subplots.extend(other.subplots)
+
+ self.xweights = self_xweights + other.xweights
+ self.yweights = new_yweights
+ self.map = self_map | {(x+len(self_xweights), y): s
+ for (x, y), s in other_map.items()}
+ else:
+ for s in self.subplots:
+ s.x += len(other.xweights)
+ self.subplots.extend(other.subplots)
+
+ self.xweights = other.xweights + self_xweights
+ self.yweights = new_yweights
+ self.map = other_map | {(x+len(other.xweights), y): s
+ for (x, y), s in self_map.items()}
+
+
+ def scale(self, width, height):
+ self.xweights = [s*width for s in self.xweights]
+ self.yweights = [s*height for s in self.yweights]
+
+ @classmethod
+ def fromargs(cls, width=1.0, height=1.0, *,
+ subplots=[],
+ **args):
+ grid = cls(Subplot(**args))
+
+ for dir, subargs in subplots:
+ subgrid = cls.fromargs(
+ width=subargs.pop('width',
+ 0.5 if dir in ['right', 'left'] else width),
+ height=subargs.pop('height',
+ 0.5 if dir in ['above', 'below'] else height),
+ **subargs)
+ grid.merge(subgrid, dir)
+
+ grid.scale(width, height)
+ return grid
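+
+# for example, Grid.fromargs(subplots=[('right', {'width': 0.5})]) squishes
+# the main plot to make room for the subplot, ending with xweights
+# [0.5, 0.5] and the two plots side by side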
+
+
+def main(csv_paths, output, *,
+ svg=False,
+ png=False,
+ quiet=False,
+ by=None,
+ x=None,
+ y=None,
+ define=[],
+ points=False,
+ points_and_lines=False,
+ colors=None,
+ formats=None,
+ width=WIDTH,
+ height=HEIGHT,
+ xlim=(None,None),
+ ylim=(None,None),
+ xlog=False,
+ ylog=False,
+ x2=False,
+ y2=False,
+ xticks=None,
+ yticks=None,
+ xunits=None,
+ yunits=None,
+ xlabel=None,
+ ylabel=None,
+ xticklabels=None,
+ yticklabels=None,
+ title=None,
+ legend_right=False,
+ legend_above=False,
+ legend_below=False,
+ dark=False,
+ ggplot=False,
+ xkcd=False,
+ github=False,
+ font=None,
+ font_size=FONT_SIZE,
+ font_color=None,
+ foreground=None,
+ background=None,
+ subplot={},
+ subplots=[],
+ **args):
+ # guess the output format
+ if not png and not svg:
+ if output.endswith('.png'):
+ png = True
+ else:
+ svg = True
+
+ # some shortcuts for color schemes
+ if github:
+ ggplot = True
+ if font_color is None:
+ if dark:
+ font_color = '#c9d1d9'
+ else:
+ font_color = '#24292f'
+ if foreground is None:
+ if dark:
+ foreground = '#343942'
+ else:
+ foreground = '#eff1f3'
+ if background is None:
+ if dark:
+ background = '#0d1117'
+ else:
+ background = '#ffffff'
+
+ # what colors/alphas/formats to use?
+ if colors is not None:
+ colors_ = colors
+ elif dark:
+ colors_ = COLORS_DARK
+ else:
+ colors_ = COLORS
+
+ if formats is not None:
+ formats_ = formats
+ elif points_and_lines:
+ formats_ = FORMATS_POINTS_AND_LINES
+ elif points:
+ formats_ = FORMATS_POINTS
+ else:
+ formats_ = FORMATS
+
+ if font_color is not None:
+ font_color_ = font_color
+ elif dark:
+ font_color_ = '#ffffff'
+ else:
+ font_color_ = '#000000'
+
+ if foreground is not None:
+ foreground_ = foreground
+ elif dark:
+ foreground_ = '#333333'
+ else:
+ foreground_ = '#e5e5e5'
+
+ if background is not None:
+ background_ = background
+ elif dark:
+ background_ = '#000000'
+ else:
+ background_ = '#ffffff'
+
+ # configure some matplotlib settings
+ if xkcd:
+ # the font search here prints a bunch of unhelpful warnings
+ logging.getLogger('matplotlib.font_manager').setLevel(logging.ERROR)
+ plt.xkcd()
+ # turn off the white outline, this breaks some things
+ plt.rc('path', effects=[])
+ if ggplot:
+ plt.style.use('ggplot')
+ plt.rc('patch', linewidth=0)
+ plt.rc('axes', facecolor=foreground_, edgecolor=background_)
+ plt.rc('grid', color=background_)
+        # fix the gridlines when ggplot+xkcd
+ if xkcd:
+ plt.rc('grid', linewidth=1)
+ plt.rc('axes.spines', bottom=False, left=False)
+ if dark:
+ plt.style.use('dark_background')
+ plt.rc('savefig', facecolor='auto', edgecolor='auto')
+ # fix ggplot when dark
+ if ggplot:
+ plt.rc('axes',
+ facecolor=foreground_,
+ edgecolor=background_)
+ plt.rc('grid', color=background_)
+
+ if font is not None:
+ plt.rc('font', family=font)
+ plt.rc('font', size=font_size)
+ plt.rc('text', color=font_color_)
+ plt.rc('figure',
+ titlesize='medium',
+ labelsize='small')
+ plt.rc('axes',
+ titlesize='small',
+ labelsize='small',
+ labelcolor=font_color_)
+ if not ggplot:
+ plt.rc('axes', edgecolor=font_color_)
+ plt.rc('xtick', labelsize='small', color=font_color_)
+ plt.rc('ytick', labelsize='small', color=font_color_)
+ plt.rc('legend',
+ fontsize='small',
+ fancybox=False,
+ framealpha=None,
+ edgecolor=foreground_,
+ borderaxespad=0)
+ plt.rc('axes.spines', top=False, right=False)
+
+ plt.rc('figure', facecolor=background_, edgecolor=background_)
+ if not ggplot:
+ plt.rc('axes', facecolor='#00000000')
+
+ # I think the svg backend just ignores DPI, but seems to use something
+ # equivalent to 96, maybe this is the default for SVG rendering?
+ plt.rc('figure', dpi=96)
+
+ # separate out renames
+ renames = list(it.chain.from_iterable(
+ ((k, v) for v in vs)
+ for k, vs in it.chain(by or [], x or [], y or [])))
+ if by is not None:
+ by = [k for k, _ in by]
+ if x is not None:
+ x = [k for k, _ in x]
+ if y is not None:
+ y = [k for k, _ in y]
+
+ # first collect results from CSV files
+ results = collect(csv_paths, renames)
+
+ # then extract the requested datasets
+ datasets_ = datasets(results, by, x, y, define)
+
+ # figure out formats/colors here so that subplot defines
+ # don't change them later, that'd be bad
+ dataformats_ = {
+ name: formats_[i % len(formats_)]
+ for i, name in enumerate(datasets_.keys())}
+ datacolors_ = {
+ name: colors_[i % len(colors_)]
+ for i, name in enumerate(datasets_.keys())}
+
+ # create a grid of subplots
+ grid = Grid.fromargs(
+ subplots=subplots + subplot.pop('subplots', []),
+ **subplot)
+
+ # create a matplotlib plot
+ fig = plt.figure(figsize=(
+ width/plt.rcParams['figure.dpi'],
+ height/plt.rcParams['figure.dpi']),
+ layout='constrained',
+ # we need a linewidth to keep xkcd mode happy
+ linewidth=8 if xkcd else 0)
+
+ gs = fig.add_gridspec(
+ grid.height
+ + (1 if legend_above else 0)
+ + (1 if legend_below else 0),
+ grid.width
+ + (1 if legend_right else 0),
+ height_ratios=([0.001] if legend_above else [])
+ + [max(s, 0.01) for s in reversed(grid.yweights)]
+ + ([0.001] if legend_below else []),
+ width_ratios=[max(s, 0.01) for s in grid.xweights]
+ + ([0.001] if legend_right else []))
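+    # note the 0.001 ratios above reserve near-zero rows/columns that exist
+    # only to hang the legend axes off of, without noticeably squishing the
+    # actual plots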
+
+ # first create axes so that plots can interact with each other
+ for s in grid:
+ s.ax = fig.add_subplot(gs[
+ grid.height-(s.y+s.yspan) + (1 if legend_above else 0)
+ : grid.height-s.y + (1 if legend_above else 0),
+ s.x
+ : s.x+s.xspan])
+
+ # now plot each subplot
+ for s in grid:
+ # allow subplot params to override global params
+ define_ = define + s.args.get('define', [])
+ xlim_ = s.args.get('xlim', xlim)
+ ylim_ = s.args.get('ylim', ylim)
+ xlog_ = s.args.get('xlog', False) or xlog
+ ylog_ = s.args.get('ylog', False) or ylog
+ x2_ = s.args.get('x2', False) or x2
+ y2_ = s.args.get('y2', False) or y2
+ xticks_ = s.args.get('xticks', xticks)
+ yticks_ = s.args.get('yticks', yticks)
+ xunits_ = s.args.get('xunits', xunits)
+ yunits_ = s.args.get('yunits', yunits)
+ xticklabels_ = s.args.get('xticklabels', xticklabels)
+ yticklabels_ = s.args.get('yticklabels', yticklabels)
+
+ # label/titles are handled a bit differently in subplots
+ subtitle = s.args.get('title')
+ xsublabel = s.args.get('xlabel')
+ ysublabel = s.args.get('ylabel')
+
+ # allow shortened ranges
+ if len(xlim_) == 1:
+ xlim_ = (0, xlim_[0])
+ if len(ylim_) == 1:
+ ylim_ = (0, ylim_[0])
+
+ # data can be constrained by subplot-specific defines,
+ # so re-extract for each plot
+ subdatasets = datasets(results, by, x, y, define_)
+
+ # plot!
+ ax = s.ax
+ for name, dataset in subdatasets.items():
+ dats = sorted((x,y) for x,y in dataset.items())
+ ax.plot([x for x,_ in dats], [y for _,y in dats],
+ dataformats_[name],
+ color=datacolors_[name],
+ label=','.join(k for k in name if k))
+
+ # axes scaling
+ if xlog_:
+ ax.set_xscale('symlog')
+ ax.xaxis.set_minor_locator(mpl.ticker.NullLocator())
+ if ylog_:
+ ax.set_yscale('symlog')
+ ax.yaxis.set_minor_locator(mpl.ticker.NullLocator())
+ # axes limits
+ ax.set_xlim(
+ xlim_[0] if xlim_[0] is not None
+ else min(it.chain([0], (k
+ for r in subdatasets.values()
+ for k, v in r.items()
+ if v is not None))),
+ xlim_[1] if xlim_[1] is not None
+ else max(it.chain([0], (k
+ for r in subdatasets.values()
+ for k, v in r.items()
+ if v is not None))))
+ ax.set_ylim(
+ ylim_[0] if ylim_[0] is not None
+ else min(it.chain([0], (v
+ for r in subdatasets.values()
+ for _, v in r.items()
+ if v is not None))),
+ ylim_[1] if ylim_[1] is not None
+ else max(it.chain([0], (v
+ for r in subdatasets.values()
+ for _, v in r.items()
+ if v is not None))))
+ # axes ticks
+ if x2_:
+ ax.xaxis.set_major_formatter(lambda x, pos:
+ si2(x)+(xunits_ if xunits_ else ''))
+ if xticklabels_ is not None:
+ ax.xaxis.set_ticklabels(xticklabels_)
+ if xticks_ is None:
+ ax.xaxis.set_major_locator(AutoMultipleLocator(2))
+ elif isinstance(xticks_, list):
+ ax.xaxis.set_major_locator(mpl.ticker.FixedLocator(xticks_))
+ elif xticks_ != 0:
+ ax.xaxis.set_major_locator(AutoMultipleLocator(2, xticks_-1))
+ else:
+ ax.xaxis.set_major_locator(mpl.ticker.NullLocator())
+ else:
+ ax.xaxis.set_major_formatter(lambda x, pos:
+ si(x)+(xunits_ if xunits_ else ''))
+ if xticklabels_ is not None:
+ ax.xaxis.set_ticklabels(xticklabels_)
+ if xticks_ is None:
+ ax.xaxis.set_major_locator(mpl.ticker.AutoLocator())
+ elif isinstance(xticks_, list):
+ ax.xaxis.set_major_locator(mpl.ticker.FixedLocator(xticks_))
+ elif xticks_ != 0:
+ ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(xticks_-1))
+ else:
+ ax.xaxis.set_major_locator(mpl.ticker.NullLocator())
+ if y2_:
+ ax.yaxis.set_major_formatter(lambda x, pos:
+ si2(x)+(yunits_ if yunits_ else ''))
+ if yticklabels_ is not None:
+ ax.yaxis.set_ticklabels(yticklabels_)
+ if yticks_ is None:
+ ax.yaxis.set_major_locator(AutoMultipleLocator(2))
+ elif isinstance(yticks_, list):
+ ax.yaxis.set_major_locator(mpl.ticker.FixedLocator(yticks_))
+ elif yticks_ != 0:
+ ax.yaxis.set_major_locator(AutoMultipleLocator(2, yticks_-1))
+ else:
+ ax.yaxis.set_major_locator(mpl.ticker.NullLocator())
+ else:
+ ax.yaxis.set_major_formatter(lambda x, pos:
+ si(x)+(yunits_ if yunits_ else ''))
+ if yticklabels_ is not None:
+ ax.yaxis.set_ticklabels(yticklabels_)
+ if yticks_ is None:
+ ax.yaxis.set_major_locator(mpl.ticker.AutoLocator())
+ elif isinstance(yticks_, list):
+ ax.yaxis.set_major_locator(mpl.ticker.FixedLocator(yticks_))
+ elif yticks_ != 0:
+ ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(yticks_-1))
+ else:
+ ax.yaxis.set_major_locator(mpl.ticker.NullLocator())
+ if ggplot:
+ ax.grid(sketch_params=None)
+
+ # axes subplot labels
+ if xsublabel is not None:
+ ax.set_xlabel(escape(xsublabel))
+ if ysublabel is not None:
+ ax.set_ylabel(escape(ysublabel))
+ if subtitle is not None:
+ ax.set_title(escape(subtitle))
+
+    # add a legend? this is a bit tricky with matplotlib
+    #
+    # the best solution I've found is a dedicated, invisible axes for the
+    # legend. hacky, but it works
+    #
+    # note this was written before constrained_layout supported legend
+    # collisions; hopefully such support is added in the future
+ labels = co.OrderedDict()
+ for s in grid:
+ for h, l in zip(*s.ax.get_legend_handles_labels()):
+ labels[l] = h
+
+ if legend_right:
+ ax = fig.add_subplot(gs[(1 if legend_above else 0):,-1])
+ ax.set_axis_off()
+ ax.legend(
+ labels.values(),
+ labels.keys(),
+ loc='upper left',
+ fancybox=False,
+ borderaxespad=0)
+
+ if legend_above:
+ ax = fig.add_subplot(gs[0, :grid.width])
+ ax.set_axis_off()
+
+ # try different column counts until we fit in the axes
+ for ncol in reversed(range(1, len(labels)+1)):
+ legend_ = ax.legend(
+ labels.values(),
+ labels.keys(),
+ loc='upper center',
+ ncol=ncol,
+ fancybox=False,
+ borderaxespad=0)
+
+ if (legend_.get_window_extent().width
+ <= ax.get_window_extent().width):
+ break
+
+ if legend_below:
+ ax = fig.add_subplot(gs[-1, :grid.width])
+ ax.set_axis_off()
+
+ # big hack to get xlabel above the legend! but hey this
+ # works really well actually
+ if xlabel:
+ ax.set_title(escape(xlabel),
+ size=plt.rcParams['axes.labelsize'],
+ weight=plt.rcParams['axes.labelweight'])
+
+ # try different column counts until we fit in the axes
+ for ncol in reversed(range(1, len(labels)+1)):
+ legend_ = ax.legend(
+ labels.values(),
+ labels.keys(),
+ loc='upper center',
+ ncol=ncol,
+ fancybox=False,
+ borderaxespad=0)
+
+ if (legend_.get_window_extent().width
+ <= ax.get_window_extent().width):
+ break
+
+
+ # axes labels, NOTE we reposition these below
+ if xlabel is not None and not legend_below:
+ fig.supxlabel(escape(xlabel))
+ if ylabel is not None:
+ fig.supylabel(escape(ylabel))
+ if title is not None:
+ fig.suptitle(escape(title))
+
+ # precompute constrained layout and find midpoints to adjust things
+ # that should be centered so they are actually centered
+ fig.canvas.draw()
+ xmid = (grid[0,0].ax.get_position().x0 + grid[-1,0].ax.get_position().x1)/2
+ ymid = (grid[0,0].ax.get_position().y0 + grid[0,-1].ax.get_position().y1)/2
+
+ if xlabel is not None and not legend_below:
+ fig.supxlabel(escape(xlabel), x=xmid)
+ if ylabel is not None:
+ fig.supylabel(escape(ylabel), y=ymid)
+ if title is not None:
+ fig.suptitle(escape(title), x=xmid)
+
+
+ # write the figure!
+ plt.savefig(output, format='png' if png else 'svg')
+
+ # some stats
+ if not quiet:
+ print('updated %s, %s datasets, %s points' % (
+ output,
+ len(datasets_),
+ sum(len(dataset) for dataset in datasets_.values())))
+
+
+if __name__ == "__main__":
+ import sys
+ import argparse
+ parser = argparse.ArgumentParser(
+ description="Plot CSV files with matplotlib.",
+ allow_abbrev=False)
+ parser.add_argument(
+ 'csv_paths',
+ nargs='*',
+ help="Input *.csv files.")
+    parser.add_argument(
+ '-o', '--output',
+ required=True,
+ help="Output *.svg/*.png file.")
+ parser.add_argument(
+ '--svg',
+ action='store_true',
+ help="Output an svg file. By default this is infered.")
+ parser.add_argument(
+ '--png',
+ action='store_true',
+ help="Output a png file. By default this is infered.")
+ parser.add_argument(
+ '-q', '--quiet',
+ action='store_true',
+ help="Don't print info.")
+ parser.add_argument(
+ '-b', '--by',
+ action='append',
+ type=lambda x: (
+ lambda k,v=None: (k, v.split(',') if v is not None else ())
+ )(*x.split('=', 1)),
+ help="Group by this field. Can rename fields with new_name=old_name.")
+ parser.add_argument(
+ '-x',
+ action='append',
+ type=lambda x: (
+ lambda k,v=None: (k, v.split(',') if v is not None else ())
+ )(*x.split('=', 1)),
+ help="Field to use for the x-axis. Can rename fields with "
+ "new_name=old_name.")
+ parser.add_argument(
+ '-y',
+ action='append',
+ type=lambda x: (
+ lambda k,v=None: (k, v.split(',') if v is not None else ())
+ )(*x.split('=', 1)),
+ help="Field to use for the y-axis. Can rename fields with "
+ "new_name=old_name.")
+ parser.add_argument(
+ '-D', '--define',
+ type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
+ action='append',
+ help="Only include results where this field is this value. May include "
+ "comma-separated options.")
+ parser.add_argument(
+ '-.', '--points',
+ action='store_true',
+ help="Only draw data points.")
+ parser.add_argument(
+ '-!', '--points-and-lines',
+ action='store_true',
+ help="Draw data points and lines.")
+ parser.add_argument(
+ '--colors',
+ type=lambda x: [x.strip() for x in x.split(',')],
+ help="Comma-separated hex colors to use.")
+ parser.add_argument(
+ '--formats',
+ type=lambda x: [x.strip().replace('0',',') for x in x.split(',')],
+ help="Comma-separated matplotlib formats to use. Allows '0' as an "
+ "alternative for ','.")
+ parser.add_argument(
+ '-W', '--width',
+ type=lambda x: int(x, 0),
+ help="Width in pixels. Defaults to %r." % WIDTH)
+ parser.add_argument(
+ '-H', '--height',
+ type=lambda x: int(x, 0),
+ help="Height in pixels. Defaults to %r." % HEIGHT)
+ parser.add_argument(
+ '-X', '--xlim',
+ type=lambda x: tuple(
+ dat(x) if x.strip() else None
+ for x in x.split(',')),
+ help="Range for the x-axis.")
+ parser.add_argument(
+ '-Y', '--ylim',
+ type=lambda x: tuple(
+ dat(x) if x.strip() else None
+ for x in x.split(',')),
+ help="Range for the y-axis.")
+ parser.add_argument(
+ '--xlog',
+ action='store_true',
+ help="Use a logarithmic x-axis.")
+ parser.add_argument(
+ '--ylog',
+ action='store_true',
+ help="Use a logarithmic y-axis.")
+ parser.add_argument(
+ '--x2',
+ action='store_true',
+ help="Use base-2 prefixes for the x-axis.")
+ parser.add_argument(
+ '--y2',
+ action='store_true',
+ help="Use base-2 prefixes for the y-axis.")
+ parser.add_argument(
+ '--xticks',
+ type=lambda x: int(x, 0) if ',' not in x
+ else [dat(x) for x in x.split(',')],
+ help="Ticks for the x-axis. This can be explicit comma-separated "
+ "ticks, the number of ticks, or 0 to disable.")
+ parser.add_argument(
+ '--yticks',
+ type=lambda x: int(x, 0) if ',' not in x
+ else [dat(x) for x in x.split(',')],
+ help="Ticks for the y-axis. This can be explicit comma-separated "
+ "ticks, the number of ticks, or 0 to disable.")
+ parser.add_argument(
+ '--xunits',
+ help="Units for the x-axis.")
+ parser.add_argument(
+ '--yunits',
+ help="Units for the y-axis.")
+ parser.add_argument(
+ '--xlabel',
+ help="Add a label to the x-axis.")
+ parser.add_argument(
+ '--ylabel',
+ help="Add a label to the y-axis.")
+ parser.add_argument(
+ '--xticklabels',
+ type=lambda x:
+ [x.strip() for x in x.split(',')]
+ if x.strip() else [],
+ help="Comma separated xticklabels.")
+ parser.add_argument(
+ '--yticklabels',
+ type=lambda x:
+ [x.strip() for x in x.split(',')]
+ if x.strip() else [],
+ help="Comma separated yticklabels.")
+ parser.add_argument(
+ '-t', '--title',
+ help="Add a title.")
+ parser.add_argument(
+ '-l', '--legend-right',
+ action='store_true',
+ help="Place a legend to the right.")
+ parser.add_argument(
+ '--legend-above',
+ action='store_true',
+ help="Place a legend above.")
+ parser.add_argument(
+ '--legend-below',
+ action='store_true',
+ help="Place a legend below.")
+ parser.add_argument(
+ '--dark',
+ action='store_true',
+ help="Use the dark style.")
+ parser.add_argument(
+ '--ggplot',
+ action='store_true',
+ help="Use the ggplot style.")
+ parser.add_argument(
+ '--xkcd',
+ action='store_true',
+ help="Use the xkcd style.")
+ parser.add_argument(
+ '--github',
+ action='store_true',
+ help="Use the ggplot style with GitHub colors.")
+ parser.add_argument(
+ '--font',
+ type=lambda x: [x.strip() for x in x.split(',')],
+ help="Font family for matplotlib.")
+ parser.add_argument(
+ '--font-size',
+ help="Font size for matplotlib. Defaults to %r." % FONT_SIZE)
+ parser.add_argument(
+ '--font-color',
+ help="Color for the font and other line elements.")
+ parser.add_argument(
+ '--foreground',
+ help="Foreground color to use.")
+ parser.add_argument(
+ '--background',
+ help="Background color to use.")
+ class AppendSubplot(argparse.Action):
+ @staticmethod
+ def parse(value):
+ import copy
+ subparser = copy.deepcopy(parser)
+ next(a for a in subparser._actions
+ if '--output' in a.option_strings).required = False
+ next(a for a in subparser._actions
+ if '--width' in a.option_strings).type = float
+ next(a for a in subparser._actions
+ if '--height' in a.option_strings).type = float
+ return subparser.parse_intermixed_args(shlex.split(value or ""))
+ def __call__(self, parser, namespace, value, option):
+ if not hasattr(namespace, 'subplots'):
+ namespace.subplots = []
+ namespace.subplots.append((
+ option.split('-')[-1],
+ self.__class__.parse(value)))
+ parser.add_argument(
+ '--subplot-above',
+ action=AppendSubplot,
+ help="Add subplot above with the same dataset. Takes an arg string to "
+ "control the subplot which supports most (but not all) of the "
+ "parameters listed here. The relative dimensions of the subplot "
+ "can be controlled with -W/-H which now take a percentage.")
+ parser.add_argument(
+ '--subplot-below',
+ action=AppendSubplot,
+ help="Add subplot below with the same dataset.")
+ parser.add_argument(
+ '--subplot-left',
+ action=AppendSubplot,
+ help="Add subplot left with the same dataset.")
+ parser.add_argument(
+ '--subplot-right',
+ action=AppendSubplot,
+ help="Add subplot right with the same dataset.")
+ parser.add_argument(
+ '--subplot',
+ type=AppendSubplot.parse,
+ help="Add subplot-specific arguments to the main plot.")
+
+ def dictify(ns):
+ if hasattr(ns, 'subplots'):
+ ns.subplots = [(dir, dictify(subplot_ns))
+ for dir, subplot_ns in ns.subplots]
+ if ns.subplot is not None:
+ ns.subplot = dictify(ns.subplot)
+ return {k: v
+ for k, v in vars(ns).items()
+ if v is not None}
+
+ sys.exit(main(**dictify(parser.parse_intermixed_args())))
diff --git a/scripts/prefix.py b/scripts/prefix.py
deleted file mode 100755
index 4c33ad48..00000000
--- a/scripts/prefix.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python2
-
-# This script replaces prefixes of files, and symbols in that file.
-# Useful for creating different versions of the codebase that don't
-# conflict at compile time.
-#
-# example:
-# $ ./scripts/prefix.py lfs2
-
-import os
-import os.path
-import re
-import glob
-import itertools
-import tempfile
-import shutil
-import subprocess
-
-DEFAULT_PREFIX = "lfs"
-
-def subn(from_prefix, to_prefix, name):
- name, count1 = re.subn('\\b'+from_prefix, to_prefix, name)
- name, count2 = re.subn('\\b'+from_prefix.upper(), to_prefix.upper(), name)
- name, count3 = re.subn('\\B-D'+from_prefix.upper(),
- '-D'+to_prefix.upper(), name)
- return name, count1+count2+count3
-
-def main(from_prefix, to_prefix=None, files=None):
- if not to_prefix:
- from_prefix, to_prefix = DEFAULT_PREFIX, from_prefix
-
- if not files:
- files = subprocess.check_output([
- 'git', 'ls-tree', '-r', '--name-only', 'HEAD']).split()
-
- for oldname in files:
- # Rename any matching file names
- newname, namecount = subn(from_prefix, to_prefix, oldname)
- if namecount:
- subprocess.check_call(['git', 'mv', oldname, newname])
-
- # Rename any prefixes in file
- count = 0
- with open(newname+'~', 'w') as tempf:
- with open(newname) as newf:
- for line in newf:
- line, n = subn(from_prefix, to_prefix, line)
- count += n
- tempf.write(line)
- shutil.copystat(newname, newname+'~')
- os.rename(newname+'~', newname)
- subprocess.check_call(['git', 'add', newname])
-
- # Summary
- print '%s: %d replacements' % (
- '%s -> %s' % (oldname, newname) if namecount else oldname,
- count)
-
-if __name__ == "__main__":
- import sys
- sys.exit(main(*sys.argv[1:]))
diff --git a/scripts/prettyasserts.py b/scripts/prettyasserts.py
new file mode 100755
index 00000000..3a62d360
--- /dev/null
+++ b/scripts/prettyasserts.py
@@ -0,0 +1,452 @@
+#!/usr/bin/env python3
+#
+# Preprocessor that makes asserts easier to debug.
+#
+# Example:
+# ./scripts/prettyasserts.py -p LFS_ASSERT lfs.c -o lfs.a.c
+#
+# Copyright (c) 2022, The littlefs authors.
+# Copyright (c) 2020, Arm Limited. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import os
+import re
+import sys
+
+# NOTE the use of macros here helps keep a consistent stack depth which
+# tools may rely on.
+#
+# If compilation errors are noisy, consider using -ftrack-macro-expansion=0.
+#
+
+LIMIT = 16
+
+CMP = {
+ '==': 'eq',
+ '!=': 'ne',
+ '<=': 'le',
+ '>=': 'ge',
+ '<': 'lt',
+ '>': 'gt',
+}
+
+LEXEMES = {
+ 'ws': [r'(?:\s|\n|#.*?\n|//.*?\n|/\*.*?\*/)+'],
+ 'assert': ['assert'],
+ 'arrow': ['=>'],
+    'string': [r'"(?:\\.|[^"])*"', r"'(?:\\.|[^'])'"],
+    'paren': [r'\(', r'\)'],
+    'cmp': CMP.keys(),
+    'logic': [r'\&\&', r'\|\|'],
+    'sep': [':', ';', r'\{', r'\}', ','],
+ 'op': ['->'], # specifically ops that conflict with cmp
+}
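+
+# the parser below rewrites recognized asserts into calls to the generated
+# __PRETTY_ASSERT_<TYPE>_<CMP> macros, e.g. assert(a == b) becomes
+# __PRETTY_ASSERT_INT_EQ(a, b)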
+
+
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
+
+def write_header(f, limit=LIMIT):
+ f.writeln("// Generated by %s:" % sys.argv[0])
+ f.writeln("//")
+ f.writeln("// %s" % ' '.join(sys.argv))
+ f.writeln("//")
+ f.writeln()
+
+ f.writeln("#include ")
+ f.writeln("#include ")
+ f.writeln("#include ")
+ f.writeln("#include ")
+ f.writeln("#include ")
+ f.writeln("#include ")
+ # give source a chance to define feature macros
+ f.writeln("#undef _FEATURES_H")
+ f.writeln()
+
+ # write print macros
+ f.writeln("__attribute__((unused))")
+ f.writeln("static void __pretty_assert_print_bool(")
+ f.writeln(" const void *v, size_t size) {")
+ f.writeln(" (void)size;")
+ f.writeln(" printf(\"%s\", *(const bool*)v ? \"true\" : \"false\");")
+ f.writeln("}")
+ f.writeln()
+ f.writeln("__attribute__((unused))")
+ f.writeln("static void __pretty_assert_print_int(")
+ f.writeln(" const void *v, size_t size) {")
+ f.writeln(" (void)size;")
+ f.writeln(" printf(\"%\"PRIiMAX, *(const intmax_t*)v);")
+ f.writeln("}")
+ f.writeln()
+ f.writeln("__attribute__((unused))")
+ f.writeln("static void __pretty_assert_print_mem(")
+ f.writeln(" const void *v, size_t size) {")
+ f.writeln(" const uint8_t *v_ = v;")
+ f.writeln(" printf(\"\\\"\");")
+ f.writeln(" for (size_t i = 0; i < size && i < %d; i++) {" % limit)
+ f.writeln(" if (v_[i] >= ' ' && v_[i] <= '~') {")
+ f.writeln(" printf(\"%c\", v_[i]);")
+ f.writeln(" } else {")
+ f.writeln(" printf(\"\\\\x%02x\", v_[i]);")
+ f.writeln(" }")
+ f.writeln(" }")
+ f.writeln(" if (size > %d) {" % limit)
+ f.writeln(" printf(\"...\");")
+ f.writeln(" }")
+ f.writeln(" printf(\"\\\"\");")
+ f.writeln("}")
+ f.writeln()
+ f.writeln("__attribute__((unused))")
+ f.writeln("static void __pretty_assert_print_str(")
+ f.writeln(" const void *v, size_t size) {")
+ f.writeln(" __pretty_assert_print_mem(v, size);")
+ f.writeln("}")
+ f.writeln()
+ f.writeln("__attribute__((unused, noinline))")
+ f.writeln("static void __pretty_assert_fail(")
+ f.writeln(" const char *file, int line,")
+ f.writeln(" void (*type_print_cb)(const void*, size_t),")
+ f.writeln(" const char *cmp,")
+ f.writeln(" const void *lh, size_t lsize,")
+ f.writeln(" const void *rh, size_t rsize) {")
+ f.writeln(" printf(\"%s:%d:assert: assert failed with \", file, line);")
+ f.writeln(" type_print_cb(lh, lsize);")
+ f.writeln(" printf(\", expected %s \", cmp);")
+ f.writeln(" type_print_cb(rh, rsize);")
+ f.writeln(" printf(\"\\n\");")
+ f.writeln(" fflush(NULL);")
+ f.writeln(" raise(SIGABRT);")
+ f.writeln("}")
+ f.writeln()
+
+ # write assert macros
+ for op, cmp in sorted(CMP.items()):
+ f.writeln("#define __PRETTY_ASSERT_BOOL_%s(lh, rh) do { \\"
+ % cmp.upper())
+ f.writeln(" bool _lh = !!(lh); \\")
+ f.writeln(" bool _rh = !!(rh); \\")
+ f.writeln(" if (!(_lh %s _rh)) { \\" % op)
+ f.writeln(" __pretty_assert_fail( \\")
+ f.writeln(" __FILE__, __LINE__, \\")
+ f.writeln(" __pretty_assert_print_bool, \"%s\", \\"
+ % cmp)
+ f.writeln(" &_lh, 0, \\")
+ f.writeln(" &_rh, 0); \\")
+ f.writeln(" } \\")
+ f.writeln("} while (0)")
+ for op, cmp in sorted(CMP.items()):
+ f.writeln("#define __PRETTY_ASSERT_INT_%s(lh, rh) do { \\"
+ % cmp.upper())
+ f.writeln(" __typeof__(lh) _lh = lh; \\")
+ f.writeln(" __typeof__(lh) _rh = rh; \\")
+ f.writeln(" if (!(_lh %s _rh)) { \\" % op)
+ f.writeln(" __pretty_assert_fail( \\")
+ f.writeln(" __FILE__, __LINE__, \\")
+ f.writeln(" __pretty_assert_print_int, \"%s\", \\"
+ % cmp)
+ f.writeln(" &(intmax_t){_lh}, 0, \\")
+ f.writeln(" &(intmax_t){_rh}, 0); \\")
+ f.writeln(" } \\")
+ f.writeln("} while (0)")
+ for op, cmp in sorted(CMP.items()):
+ f.writeln("#define __PRETTY_ASSERT_MEM_%s(lh, rh, size) do { \\"
+ % cmp.upper())
+ f.writeln(" const void *_lh = lh; \\")
+ f.writeln(" const void *_rh = rh; \\")
+ f.writeln(" if (!(memcmp(_lh, _rh, size) %s 0)) { \\" % op)
+ f.writeln(" __pretty_assert_fail( \\")
+ f.writeln(" __FILE__, __LINE__, \\")
+ f.writeln(" __pretty_assert_print_mem, \"%s\", \\"
+ % cmp)
+ f.writeln(" _lh, size, \\")
+ f.writeln(" _rh, size); \\")
+ f.writeln(" } \\")
+ f.writeln("} while (0)")
+ for op, cmp in sorted(CMP.items()):
+ f.writeln("#define __PRETTY_ASSERT_STR_%s(lh, rh) do { \\"
+ % cmp.upper())
+ f.writeln(" const char *_lh = lh; \\")
+ f.writeln(" const char *_rh = rh; \\")
+ f.writeln(" if (!(strcmp(_lh, _rh) %s 0)) { \\" % op)
+ f.writeln(" __pretty_assert_fail( \\")
+ f.writeln(" __FILE__, __LINE__, \\")
+ f.writeln(" __pretty_assert_print_str, \"%s\", \\"
+ % cmp)
+ f.writeln(" _lh, strlen(_lh), \\")
+ f.writeln(" _rh, strlen(_rh)); \\")
+ f.writeln(" } \\")
+ f.writeln("} while (0)")
+ f.writeln()
+ f.writeln()
+
+def mkassert(type, cmp, lh, rh, size=None):
+ if size is not None:
+ return ("__PRETTY_ASSERT_%s_%s(%s, %s, %s)"
+ % (type.upper(), cmp.upper(), lh, rh, size))
+ else:
+ return ("__PRETTY_ASSERT_%s_%s(%s, %s)"
+ % (type.upper(), cmp.upper(), lh, rh))
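+
+# for example: mkassert('mem', 'eq', 'a', 'b', '8') gives
+# '__PRETTY_ASSERT_MEM_EQ(a, b, 8)'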
+
+
+# simple recursive descent parser
+class ParseFailure(Exception):
+ def __init__(self, expected, found):
+ self.expected = expected
+ self.found = found
+
+ def __str__(self):
+ return "expected %r, found %s..." % (
+ self.expected, repr(self.found)[:70])
+
+class Parser:
+ def __init__(self, in_f, lexemes=LEXEMES):
+ p = '|'.join('(?P<%s>%s)' % (n, '|'.join(l))
+ for n, l in lexemes.items())
+ p = re.compile(p, re.DOTALL)
+ data = in_f.read()
+ tokens = []
+ line = 1
+ col = 0
+ while True:
+ m = p.search(data)
+ if m:
+ if m.start() > 0:
+ tokens.append((None, data[:m.start()], line, col))
+ tokens.append((m.lastgroup, m.group(), line, col))
+ data = data[m.end():]
+ else:
+ tokens.append((None, data, line, col))
+ break
+ self.tokens = tokens
+ self.off = 0
+
+ def lookahead(self, *pattern):
+ if self.off < len(self.tokens):
+ token = self.tokens[self.off]
+ if token[0] in pattern or token[1] in pattern:
+ self.m = token[1]
+ return self.m
+ self.m = None
+ return self.m
+
+ def accept(self, *patterns):
+ m = self.lookahead(*patterns)
+ if m is not None:
+ self.off += 1
+ return m
+
+ def expect(self, *patterns):
+ m = self.accept(*patterns)
+ if not m:
+ raise ParseFailure(patterns, self.tokens[self.off:])
+ return m
+
+ def push(self):
+ return self.off
+
+ def pop(self, state):
+ self.off = state
+
+def p_assert(p):
+ state = p.push()
+
+ # assert(memcmp(a,b,size) cmp 0)?
+ try:
+ p.expect('assert') ; p.accept('ws')
+ p.expect('(') ; p.accept('ws')
+ p.expect('memcmp') ; p.accept('ws')
+ p.expect('(') ; p.accept('ws')
+ lh = p_expr(p) ; p.accept('ws')
+ p.expect(',') ; p.accept('ws')
+ rh = p_expr(p) ; p.accept('ws')
+ p.expect(',') ; p.accept('ws')
+ size = p_expr(p) ; p.accept('ws')
+ p.expect(')') ; p.accept('ws')
+ cmp = p.expect('cmp') ; p.accept('ws')
+ p.expect('0') ; p.accept('ws')
+ p.expect(')')
+ return mkassert('mem', CMP[cmp], lh, rh, size)
+ except ParseFailure:
+ p.pop(state)
+
+ # assert(strcmp(a,b) cmp 0)?
+ try:
+ p.expect('assert') ; p.accept('ws')
+ p.expect('(') ; p.accept('ws')
+ p.expect('strcmp') ; p.accept('ws')
+ p.expect('(') ; p.accept('ws')
+ lh = p_expr(p) ; p.accept('ws')
+ p.expect(',') ; p.accept('ws')
+ rh = p_expr(p) ; p.accept('ws')
+ p.expect(')') ; p.accept('ws')
+ cmp = p.expect('cmp') ; p.accept('ws')
+ p.expect('0') ; p.accept('ws')
+ p.expect(')')
+ return mkassert('str', CMP[cmp], lh, rh)
+ except ParseFailure:
+ p.pop(state)
+
+ # assert(a cmp b)?
+ try:
+ p.expect('assert') ; p.accept('ws')
+ p.expect('(') ; p.accept('ws')
+ lh = p_expr(p) ; p.accept('ws')
+ cmp = p.expect('cmp') ; p.accept('ws')
+ rh = p_expr(p) ; p.accept('ws')
+ p.expect(')')
+ return mkassert('int', CMP[cmp], lh, rh)
+ except ParseFailure:
+ p.pop(state)
+
+ # assert(a)?
+ p.expect('assert') ; p.accept('ws')
+ p.expect('(') ; p.accept('ws')
+ lh = p_exprs(p) ; p.accept('ws')
+ p.expect(')')
+ return mkassert('bool', 'eq', lh, 'true')
+
+def p_expr(p):
+ res = []
+ while True:
+ if p.accept('('):
+ res.append(p.m)
+ while True:
+ res.append(p_exprs(p))
+ if p.accept('sep'):
+ res.append(p.m)
+ else:
+ break
+ res.append(p.expect(')'))
+ elif p.lookahead('assert'):
+ state = p.push()
+ try:
+ res.append(p_assert(p))
+ except ParseFailure:
+ p.pop(state)
+ res.append(p.expect('assert'))
+ elif p.accept('string', 'op', 'ws', None):
+ res.append(p.m)
+ else:
+ return ''.join(res)
+
+def p_exprs(p):
+ res = []
+ while True:
+ res.append(p_expr(p))
+ if p.accept('cmp', 'logic', ','):
+ res.append(p.m)
+ else:
+ return ''.join(res)
+
+def p_stmt(p):
+ ws = p.accept('ws') or ''
+
+ # memcmp(lh,rh,size) => 0?
+ if p.lookahead('memcmp'):
+ state = p.push()
+ try:
+ p.expect('memcmp') ; p.accept('ws')
+ p.expect('(') ; p.accept('ws')
+ lh = p_expr(p) ; p.accept('ws')
+ p.expect(',') ; p.accept('ws')
+ rh = p_expr(p) ; p.accept('ws')
+ p.expect(',') ; p.accept('ws')
+ size = p_expr(p) ; p.accept('ws')
+ p.expect(')') ; p.accept('ws')
+ p.expect('=>') ; p.accept('ws')
+ p.expect('0') ; p.accept('ws')
+ return ws + mkassert('mem', 'eq', lh, rh, size)
+ except ParseFailure:
+ p.pop(state)
+
+ # strcmp(lh,rh) => 0?
+ if p.lookahead('strcmp'):
+ state = p.push()
+ try:
+            p.expect('strcmp') ; p.accept('ws')
+            p.expect('(') ; p.accept('ws')
+ lh = p_expr(p) ; p.accept('ws')
+ p.expect(',') ; p.accept('ws')
+ rh = p_expr(p) ; p.accept('ws')
+ p.expect(')') ; p.accept('ws')
+ p.expect('=>') ; p.accept('ws')
+ p.expect('0') ; p.accept('ws')
+ return ws + mkassert('str', 'eq', lh, rh)
+ except ParseFailure:
+ p.pop(state)
+
+ # lh => rh?
+ lh = p_exprs(p)
+ if p.accept('=>'):
+ rh = p_exprs(p)
+ return ws + mkassert('int', 'eq', lh, rh)
+ else:
+ return ws + lh
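+
+# for example, a test-style statement `strcmp(buf, "hi") => 0`, with buf a
+# hypothetical expression, is rewritten by p_stmt above into
+# __PRETTY_ASSERT_STR_EQ(buf, "hi")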
+
+def main(input=None, output=None, pattern=[], limit=LIMIT):
+ with openio(input or '-', 'r') as in_f:
+ # create parser
+ lexemes = LEXEMES.copy()
+ lexemes['assert'] += pattern
+ p = Parser(in_f, lexemes)
+
+ with openio(output or '-', 'w') as f:
+ def writeln(s=''):
+ f.write(s)
+ f.write('\n')
+ f.writeln = writeln
+
+ # write extra verbose asserts
+ write_header(f, limit=limit)
+ if input is not None:
+ f.writeln("#line %d \"%s\"" % (1, input))
+
+ # parse and write out stmt at a time
+ try:
+ while True:
+ f.write(p_stmt(p))
+ if p.accept('sep'):
+ f.write(p.m)
+ else:
+ break
+ except ParseFailure as e:
+ print('warning: %s' % e)
+ pass
+
+ for i in range(p.off, len(p.tokens)):
+ f.write(p.tokens[i][1])
+
+
+if __name__ == "__main__":
+ import argparse
+ import sys
+ parser = argparse.ArgumentParser(
+ description="Preprocessor that makes asserts easier to debug.",
+ allow_abbrev=False)
+ parser.add_argument(
+ 'input',
+ help="Input C file.")
+ parser.add_argument(
+ '-o', '--output',
+ required=True,
+ help="Output C file.")
+ parser.add_argument(
+ '-p', '--pattern',
+ action='append',
+ help="Regex patterns to search for starting an assert statement. This"
+ " implicitly includes \"assert\" and \"=>\".")
+ parser.add_argument(
+ '-l', '--limit',
+ type=lambda x: int(x, 0),
+ default=LIMIT,
+ help="Maximum number of characters to display in strcmp and memcmp. "
+ "Defaults to %r." % LIMIT)
+ sys.exit(main(**{k: v
+ for k, v in vars(parser.parse_intermixed_args()).items()
+ if v is not None}))
diff --git a/scripts/readmdir.py b/scripts/readmdir.py
index b6c3dcca..98816df9 100755
--- a/scripts/readmdir.py
+++ b/scripts/readmdir.py
@@ -24,6 +24,8 @@
'gstate': (0x700, 0x700),
'movestate': (0x7ff, 0x7ff),
'crc': (0x700, 0x500),
+ 'ccrc': (0x780, 0x500),
+ 'fcrc': (0x7ff, 0x5ff),
}
class Tag:
@@ -99,7 +101,16 @@ def schunk(self):
return struct.unpack('b', struct.pack('B', self.chunk))[0]
def is_(self, type):
- return (self.type & TAG_TYPES[type][0]) == TAG_TYPES[type][1]
+ try:
+ if ' ' in type:
+ type1, type3 = type.split()
+ return (self.is_(type1) and
+ (self.type & ~TAG_TYPES[type1][0]) == int(type3, 0))
+
+ return self.type == int(type, 0)
+
+ except (ValueError, KeyError):
+ return (self.type & TAG_TYPES[type][0]) == TAG_TYPES[type][1]
def mkmask(self):
return Tag(
@@ -109,14 +120,20 @@ def mkmask(self):
def chid(self, nid):
ntag = Tag(self.type, nid, self.size)
- if hasattr(self, 'off'): ntag.off = self.off
- if hasattr(self, 'data'): ntag.data = self.data
- if hasattr(self, 'crc'): ntag.crc = self.crc
+ if hasattr(self, 'off'): ntag.off = self.off
+ if hasattr(self, 'data'): ntag.data = self.data
+ if hasattr(self, 'ccrc'): ntag.crc = self.crc
+ if hasattr(self, 'erased'): ntag.erased = self.erased
return ntag
def typerepr(self):
- if self.is_('crc') and getattr(self, 'crc', 0xffffffff) != 0xffffffff:
- return 'crc (bad)'
+ if (self.is_('ccrc')
+ and getattr(self, 'ccrc', 0xffffffff) != 0xffffffff):
+ crc_status = ' (bad)'
+ elif self.is_('fcrc') and getattr(self, 'erased', False):
+ crc_status = ' (era)'
+ else:
+ crc_status = ''
reverse_types = {v: k for k, v in TAG_TYPES.items()}
for prefix in range(12):
@@ -124,12 +141,12 @@ def typerepr(self):
if (mask, self.type & mask) in reverse_types:
type = reverse_types[mask, self.type & mask]
if prefix > 0:
- return '%s %#0*x' % (
- type, prefix//4, self.type & ((1 << prefix)-1))
+ return '%s %#x%s' % (
+ type, self.type & ((1 << prefix)-1), crc_status)
else:
- return type
+ return '%s%s' % (type, crc_status)
else:
- return '%02x' % self.type
+ return '%02x%s' % (self.type, crc_status)
def idrepr(self):
return repr(self.id) if self.id != 0x3ff else '.'
@@ -172,6 +189,8 @@ def __init__(self, blocks):
         self.rev, = struct.unpack('<I', block[0:4])
         crc = binascii.crc32(block[0:4])
 
         # parse tags
         corrupt = False
         tag = Tag(0xffffffff)
         off = 4
         self.log = []
         self.all_ = []
         while len(block) - off >= 4:
ntag, = struct.unpack('>I', block[off:off+4])
- tag = Tag(int(tag) ^ ntag)
+ tag = Tag((int(tag) ^ ntag) & 0x7fffffff)
tag.off = off + 4
tag.data = block[off+4:off+tag.dsize]
- if tag.is_('crc'):
- crc = binascii.crc32(block[off:off+4+4], crc)
+ if tag.is_('ccrc'):
+ crc = binascii.crc32(block[off:off+2*4], crc)
else:
crc = binascii.crc32(block[off:off+tag.dsize], crc)
tag.crc = crc
@@ -194,16 +213,29 @@ def __init__(self, blocks):
self.all_.append(tag)
- if tag.is_('crc'):
+ if tag.is_('fcrc') and len(tag.data) == 8:
+ fcrctag = tag
+ fcrcdata = struct.unpack(' 1:
+ children = {
+ ','.join(str(getattr(Result(*c), k) or '') for k in by)
+ for c in table[name].children}
+ recurse(
+ # note we're maintaining sort order
+ [n for n in names if n in children],
+ depth_-1,
+ (prefixes[2+is_last] + "|-> ",
+ prefixes[2+is_last] + "'-> ",
+ prefixes[2+is_last] + "| ",
+ prefixes[2+is_last] + " "))
+
+ recurse(names, depth)
+
+ if not tree:
+ print('%-*s %s%s' % (
+ widths[0], lines[-1][0],
+ ' '.join('%*s' % (w, x)
+ for w, x in zip(widths[1:], lines[-1][1:-1])),
+ lines[-1][-1]))
+
+
+def main(ci_paths,
+ by=None,
+ fields=None,
+ defines=None,
+ sort=None,
+ **args):
+ # it doesn't really make sense to not have a depth with tree,
+ # so assume depth=inf if tree by default
+ if args.get('depth') is None:
+ args['depth'] = m.inf if args['tree'] else 1
+ elif args.get('depth') == 0:
+ args['depth'] = m.inf
+
+ # find sizes
+ if not args.get('use', None):
+ results = collect(ci_paths, **args)
+ else:
+ results = []
with openio(args['use']) as f:
- r = csv.DictReader(f)
- results = [
- ( result['file'],
- result['name'],
- int(result['stack_frame']),
- float(result['stack_limit']), # note limit can be inf
- set())
- for result in r
- if result.get('stack_frame') not in {None, ''}
- if result.get('stack_limit') not in {None, ''}]
-
- total_frame = 0
- total_limit = 0
- for _, _, frame, limit, _ in results:
- total_frame += frame
- total_limit = max(total_limit, limit)
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ if not any('stack_'+k in r and r['stack_'+k].strip()
+ for k in StackResult._fields):
+ continue
+ try:
+ results.append(StackResult(
+ **{k: r[k] for k in StackResult._by
+ if k in r and r[k].strip()},
+ **{k: r['stack_'+k] for k in StackResult._fields
+ if 'stack_'+k in r and r['stack_'+k].strip()}))
+ except TypeError:
+ pass
+
+ # fold
+ results = fold(StackResult, results, by=by, defines=defines)
+
+ # sort, note that python's sort is stable
+ results.sort()
+ if sort:
+ for k, reverse in reversed(sort):
+ results.sort(
+ key=lambda r: tuple(
+ (getattr(r, k),) if getattr(r, k) is not None else ()
+ for k in ([k] if k else StackResult._sort)),
+ reverse=reverse ^ (not k or k in StackResult._fields))
+
+ # write results to CSV
+ if args.get('output'):
+ with openio(args['output'], 'w') as f:
+ writer = csv.DictWriter(f,
+ (by if by is not None else StackResult._by)
+ + ['stack_'+k for k in (
+ fields if fields is not None else StackResult._fields)])
+ writer.writeheader()
+ for r in results:
+ writer.writerow(
+ {k: getattr(r, k) for k in (
+ by if by is not None else StackResult._by)}
+ | {'stack_'+k: getattr(r, k) for k in (
+ fields if fields is not None else StackResult._fields)})
# find previous results?
if args.get('diff'):
+ diff_results = []
try:
with openio(args['diff']) as f:
- r = csv.DictReader(f)
- prev_results = [
- ( result['file'],
- result['name'],
- int(result['stack_frame']),
- float(result['stack_limit']),
- set())
- for result in r
- if result.get('stack_frame') not in {None, ''}
- if result.get('stack_limit') not in {None, ''}]
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ if not any('stack_'+k in r and r['stack_'+k].strip()
+ for k in StackResult._fields):
+ continue
+ try:
+ diff_results.append(StackResult(
+ **{k: r[k] for k in StackResult._by
+ if k in r and r[k].strip()},
+ **{k: r['stack_'+k] for k in StackResult._fields
+ if 'stack_'+k in r and r['stack_'+k].strip()}))
+ except TypeError:
+ raise
except FileNotFoundError:
- prev_results = []
+ pass
- prev_total_frame = 0
- prev_total_limit = 0
- for _, _, frame, limit, _ in prev_results:
- prev_total_frame += frame
- prev_total_limit = max(prev_total_limit, limit)
-
- # write results to CSV
- if args.get('output'):
- merged_results = co.defaultdict(lambda: {})
- other_fields = []
-
- # merge?
- if args.get('merge'):
- try:
- with openio(args['merge']) as f:
- r = csv.DictReader(f)
- for result in r:
- file = result.pop('file', '')
- func = result.pop('name', '')
- result.pop('stack_frame', None)
- result.pop('stack_limit', None)
- merged_results[(file, func)] = result
- other_fields = result.keys()
- except FileNotFoundError:
- pass
-
- for file, func, frame, limit, _ in results:
- merged_results[(file, func)]['stack_frame'] = frame
- merged_results[(file, func)]['stack_limit'] = limit
+ # fold
+ diff_results = fold(StackResult, diff_results, by=by, defines=defines)
- with openio(args['output'], 'w') as f:
- w = csv.DictWriter(f, ['file', 'name', *other_fields, 'stack_frame', 'stack_limit'])
- w.writeheader()
- for (file, func), result in sorted(merged_results.items()):
- w.writerow({'file': file, 'name': func, **result})
-
- # print results
- def dedup_entries(results, by='name'):
- entries = co.defaultdict(lambda: (0, 0, set()))
- for file, func, frame, limit, deps in results:
- entry = (file if by == 'file' else func)
- entry_frame, entry_limit, entry_deps = entries[entry]
- entries[entry] = (
- entry_frame + frame,
- max(entry_limit, limit),
- entry_deps | {file if by == 'file' else func
- for file, func in deps})
- return entries
-
- def diff_entries(olds, news):
- diff = co.defaultdict(lambda: (None, None, None, None, 0, 0, 0, set()))
- for name, (new_frame, new_limit, deps) in news.items():
- diff[name] = (
- None, None,
- new_frame, new_limit,
- new_frame, new_limit,
- 1.0,
- deps)
- for name, (old_frame, old_limit, _) in olds.items():
- _, _, new_frame, new_limit, _, _, _, deps = diff[name]
- diff[name] = (
- old_frame, old_limit,
- new_frame, new_limit,
- (new_frame or 0) - (old_frame or 0),
- 0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
- else (new_limit or 0) - (old_limit or 0),
- 0.0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
- else +float('inf') if m.isinf(new_limit or 0)
- else -float('inf') if m.isinf(old_limit or 0)
- else +0.0 if not old_limit and not new_limit
- else +1.0 if not old_limit
- else ((new_limit or 0) - (old_limit or 0))/(old_limit or 0),
- deps)
- return diff
-
- def sorted_entries(entries):
- if args.get('limit_sort'):
- return sorted(entries, key=lambda x: (-x[1][1], x))
- elif args.get('reverse_limit_sort'):
- return sorted(entries, key=lambda x: (+x[1][1], x))
- elif args.get('frame_sort'):
- return sorted(entries, key=lambda x: (-x[1][0], x))
- elif args.get('reverse_frame_sort'):
- return sorted(entries, key=lambda x: (+x[1][0], x))
- else:
- return sorted(entries)
-
- def sorted_diff_entries(entries):
- if args.get('limit_sort'):
- return sorted(entries, key=lambda x: (-(x[1][3] or 0), x))
- elif args.get('reverse_limit_sort'):
- return sorted(entries, key=lambda x: (+(x[1][3] or 0), x))
- elif args.get('frame_sort'):
- return sorted(entries, key=lambda x: (-(x[1][2] or 0), x))
- elif args.get('reverse_frame_sort'):
- return sorted(entries, key=lambda x: (+(x[1][2] or 0), x))
- else:
- return sorted(entries, key=lambda x: (-x[1][6], x))
+ # print table
+ if not args.get('quiet'):
+ table(StackResult, results,
+ diff_results if args.get('diff') else None,
+ by=by if by is not None else ['function'],
+ fields=fields,
+ sort=sort,
+ **args)
- def print_header(by=''):
- if not args.get('diff'):
- print('%-36s %7s %7s' % (by, 'frame', 'limit'))
- else:
- print('%-36s %15s %15s %15s' % (by, 'old', 'new', 'diff'))
-
- def print_entry(name, frame, limit):
- print("%-36s %7d %7s" % (name,
- frame, '∞' if m.isinf(limit) else int(limit)))
-
- def print_diff_entry(name,
- old_frame, old_limit,
- new_frame, new_limit,
- diff_frame, diff_limit,
- ratio):
- print('%-36s %7s %7s %7s %7s %+7d %7s%s' % (name,
- old_frame if old_frame is not None else "-",
- ('∞' if m.isinf(old_limit) else int(old_limit))
- if old_limit is not None else "-",
- new_frame if new_frame is not None else "-",
- ('∞' if m.isinf(new_limit) else int(new_limit))
- if new_limit is not None else "-",
- diff_frame,
- ('+∞' if diff_limit > 0 and m.isinf(diff_limit)
- else '-∞' if diff_limit < 0 and m.isinf(diff_limit)
- else '%+d' % diff_limit),
- '' if not ratio
- else ' (+∞%)' if ratio > 0 and m.isinf(ratio)
- else ' (-∞%)' if ratio < 0 and m.isinf(ratio)
- else ' (%+.1f%%)' % (100*ratio)))
-
- def print_entries(by='name'):
- # build optional tree of dependencies
- def print_deps(entries, depth, print,
- filter=lambda _: True,
- prefixes=('', '', '', '')):
- entries = entries if isinstance(entries, list) else list(entries)
- filtered_entries = [(name, entry)
- for name, entry in entries
- if filter(name)]
- for i, (name, entry) in enumerate(filtered_entries):
- last = (i == len(filtered_entries)-1)
- print(prefixes[0+last] + name, entry)
-
- if depth > 0:
- deps = entry[-1]
- print_deps(entries, depth-1, print,
- lambda name: name in deps,
- ( prefixes[2+last] + "|-> ",
- prefixes[2+last] + "'-> ",
- prefixes[2+last] + "| ",
- prefixes[2+last] + " "))
-
- entries = dedup_entries(results, by=by)
-
- if not args.get('diff'):
- print_header(by=by)
- print_deps(
- sorted_entries(entries.items()),
- args.get('depth') or 0,
- lambda name, entry: print_entry(name, *entry[:-1]))
- else:
- prev_entries = dedup_entries(prev_results, by=by)
- diff = diff_entries(prev_entries, entries)
-
- print_header(by='%s (%d added, %d removed)' % (by,
- sum(1 for _, old, _, _, _, _, _, _ in diff.values() if old is None),
- sum(1 for _, _, _, new, _, _, _, _ in diff.values() if new is None)))
- print_deps(
- filter(
- lambda x: x[1][6] or args.get('all'),
- sorted_diff_entries(diff.items())),
- args.get('depth') or 0,
- lambda name, entry: print_diff_entry(name, *entry[:-1]))
-
- def print_totals():
- if not args.get('diff'):
- print_entry('TOTAL', total_frame, total_limit)
- else:
- diff_frame = total_frame - prev_total_frame
- diff_limit = (
- 0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
- else (total_limit or 0) - (prev_total_limit or 0))
- ratio = (
- 0.0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
- else +float('inf') if m.isinf(total_limit or 0)
- else -float('inf') if m.isinf(prev_total_limit or 0)
- else 0.0 if not prev_total_limit and not total_limit
- else 1.0 if not prev_total_limit
- else ((total_limit or 0) - (prev_total_limit or 0))/(prev_total_limit or 0))
- print_diff_entry('TOTAL',
- prev_total_frame, prev_total_limit,
- total_frame, total_limit,
- diff_frame, diff_limit,
- ratio)
-
- if args.get('quiet'):
- pass
- elif args.get('summary'):
- print_header()
- print_totals()
- elif args.get('files'):
- print_entries(by='file')
- print_totals()
- else:
- print_entries(by='name')
- print_totals()
+ # error on recursion
+ if args.get('error_on_recursion') and any(
+ m.isinf(float(r.limit)) for r in results):
+ sys.exit(2)
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
- description="Find stack usage at the function level.")
- parser.add_argument('ci_paths', nargs='*', default=CI_PATHS,
- help="Description of where to find *.ci files. May be a directory \
- or a list of paths. Defaults to %r." % CI_PATHS)
- parser.add_argument('-v', '--verbose', action='store_true',
+ description="Find stack usage at the function level.",
+ allow_abbrev=False)
+ parser.add_argument(
+ 'ci_paths',
+ nargs='*',
+ help="Input *.ci files.")
+ parser.add_argument(
+ '-v', '--verbose',
+ action='store_true',
help="Output commands that run behind the scenes.")
- parser.add_argument('-q', '--quiet', action='store_true',
+ parser.add_argument(
+ '-q', '--quiet',
+ action='store_true',
help="Don't show anything, useful with -o.")
- parser.add_argument('-o', '--output',
+ parser.add_argument(
+ '-o', '--output',
help="Specify CSV file to store results.")
- parser.add_argument('-u', '--use',
- help="Don't parse callgraph files, instead use this CSV file.")
- parser.add_argument('-d', '--diff',
+ parser.add_argument(
+ '-u', '--use',
+ help="Don't parse anything, use this CSV file.")
+ parser.add_argument(
+ '-d', '--diff',
help="Specify CSV file to diff against.")
- parser.add_argument('-m', '--merge',
- help="Merge with an existing CSV file when writing to output.")
- parser.add_argument('-a', '--all', action='store_true',
- help="Show all functions, not just the ones that changed.")
- parser.add_argument('-A', '--everything', action='store_true',
+ parser.add_argument(
+ '-a', '--all',
+ action='store_true',
+ help="Show all, not just the ones that changed.")
+ parser.add_argument(
+ '-p', '--percent',
+ action='store_true',
+ help="Only show percentage change, not a full diff.")
+ parser.add_argument(
+ '-b', '--by',
+ action='append',
+ choices=StackResult._by,
+ help="Group by this field.")
+ parser.add_argument(
+ '-f', '--field',
+ dest='fields',
+ action='append',
+ choices=StackResult._fields,
+ help="Show this field.")
+ parser.add_argument(
+ '-D', '--define',
+ dest='defines',
+ action='append',
+ type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
+ help="Only include results where this field is this value.")
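+    # small argparse action that collects -s/-S fields in order,
+    # remembering which direction each was requested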
+ class AppendSort(argparse.Action):
+ def __call__(self, parser, namespace, value, option):
+ if namespace.sort is None:
+ namespace.sort = []
+ namespace.sort.append((value, True if option == '-S' else False))
+ parser.add_argument(
+ '-s', '--sort',
+ nargs='?',
+ action=AppendSort,
+ help="Sort by this field.")
+ parser.add_argument(
+ '-S', '--reverse-sort',
+ nargs='?',
+ action=AppendSort,
+ help="Sort by this field, but backwards.")
+ parser.add_argument(
+ '-Y', '--summary',
+ action='store_true',
+ help="Only show the total.")
+ parser.add_argument(
+ '-F', '--source',
+ dest='sources',
+ action='append',
+ help="Only consider definitions in this file. Defaults to anything "
+ "in the current directory.")
+ parser.add_argument(
+ '--everything',
+ action='store_true',
help="Include builtin and libc specific symbols.")
- parser.add_argument('-s', '--limit-sort', action='store_true',
- help="Sort by stack limit.")
- parser.add_argument('-S', '--reverse-limit-sort', action='store_true',
- help="Sort by stack limit, but backwards.")
- parser.add_argument('--frame-sort', action='store_true',
- help="Sort by stack frame size.")
- parser.add_argument('--reverse-frame-sort', action='store_true',
- help="Sort by stack frame size, but backwards.")
- parser.add_argument('-L', '--depth', default=0, type=lambda x: int(x, 0),
- nargs='?', const=float('inf'),
- help="Depth of dependencies to show.")
- parser.add_argument('-F', '--files', action='store_true',
- help="Show file-level calls.")
- parser.add_argument('-Y', '--summary', action='store_true',
- help="Only show the total stack size.")
- parser.add_argument('--build-dir',
- help="Specify the relative build directory. Used to map object files \
- to the correct source files.")
- sys.exit(main(**vars(parser.parse_args())))
+ parser.add_argument(
+ '--tree',
+ action='store_true',
+ help="Only show the function call tree.")
+ parser.add_argument(
+ '-Z', '--depth',
+ nargs='?',
+ type=lambda x: int(x, 0),
+ const=0,
+ help="Depth of function calls to show. 0 shows all calls but may not "
+ "terminate!")
+ parser.add_argument(
+ '-e', '--error-on-recursion',
+ action='store_true',
+ help="Error if any functions are recursive.")
+ sys.exit(main(**{k: v
+ for k, v in vars(parser.parse_intermixed_args()).items()
+ if v is not None}))
diff --git a/scripts/structs.py b/scripts/structs.py
index e8d7193e..e9b97e7d 100755
--- a/scripts/structs.py
+++ b/scripts/structs.py
@@ -2,49 +2,183 @@
#
# Script to find struct sizes.
#
+# Example:
+# ./scripts/structs.py lfs.o lfs_util.o -Ssize
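+# A couple more invocations using the flags defined below (illustrative):
+# ./scripts/structs.py lfs.o -q -o lfs.structs.csv
+# ./scripts/structs.py lfs.o -d lfs.structs.csv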
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
-import os
-import glob
+import collections as co
+import csv
+import difflib
import itertools as it
-import subprocess as sp
-import shlex
+import math as m
+import os
import re
-import csv
-import collections as co
+import shlex
+import subprocess as sp
+
+
+OBJDUMP_PATH = ['objdump']
+
+
+# integer fields
+class Int(co.namedtuple('Int', 'x')):
+ __slots__ = ()
+ def __new__(cls, x=0):
+ if isinstance(x, Int):
+ return x
+ if isinstance(x, str):
+ try:
+ x = int(x, 0)
+ except ValueError:
+ # also accept +-∞ and +-inf
+ if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
+ x = m.inf
+ elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
+ x = -m.inf
+ else:
+ raise
+ assert isinstance(x, int) or m.isinf(x), x
+ return super().__new__(cls, x)
+
+ def __str__(self):
+ if self.x == m.inf:
+ return '∞'
+ elif self.x == -m.inf:
+ return '-∞'
+ else:
+ return str(self.x)
+
+ def __int__(self):
+ assert not m.isinf(self.x)
+ return self.x
+
+ def __float__(self):
+ return float(self.x)
+
+ none = '%7s' % '-'
+ def table(self):
+ return '%7s' % (self,)
+
+ diff_none = '%7s' % '-'
+ diff_table = table
+
+ def diff_diff(self, other):
+ new = self.x if self else 0
+ old = other.x if other else 0
+ diff = new - old
+ if diff == +m.inf:
+ return '%7s' % '+∞'
+ elif diff == -m.inf:
+ return '%7s' % '-∞'
+ else:
+ return '%+7d' % diff
+
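+    # ratio of change relative to other: ±∞ marks transitions to/from
+    # unbounded values, and 1.0 marks values that appeared from nothing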
+ def ratio(self, other):
+ new = self.x if self else 0
+ old = other.x if other else 0
+ if m.isinf(new) and m.isinf(old):
+ return 0.0
+ elif m.isinf(new):
+ return +m.inf
+ elif m.isinf(old):
+ return -m.inf
+ elif not old and not new:
+ return 0.0
+ elif not old:
+ return 1.0
+ else:
+ return (new-old) / old
+
+ def __add__(self, other):
+ return self.__class__(self.x + other.x)
+
+ def __sub__(self, other):
+ return self.__class__(self.x - other.x)
+
+ def __mul__(self, other):
+ return self.__class__(self.x * other.x)
+
+# struct size results
+class StructResult(co.namedtuple('StructResult', ['file', 'struct', 'size'])):
+ _by = ['file', 'struct']
+ _fields = ['size']
+ _sort = ['size']
+ _types = {'size': Int}
-OBJ_PATHS = ['*.o']
+ __slots__ = ()
+ def __new__(cls, file='', struct='', size=0):
+ return super().__new__(cls, file, struct,
+ Int(size))
-def collect(paths, **args):
- decl_pattern = re.compile(
+ def __add__(self, other):
+ return StructResult(self.file, self.struct,
+ self.size + other.size)
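+    # fold() uses this to merge duplicate (file, struct) entries by
+    # summing their sizes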
+
+
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
+
+def collect(obj_paths, *,
+ objdump_path=OBJDUMP_PATH,
+ sources=None,
+ everything=False,
+ internal=False,
+ **args):
+ line_pattern = re.compile(
        '^\s+(?P<no>[0-9]+)'
-        '\s+(?P<dir>[0-9]+)'
+        '(?:\s+(?P<dir>[0-9]+))?'
'\s+.*'
-        '\s+(?P<file>[^\s]+)$')
-    struct_pattern = re.compile(
-        '^(?:.*DW_TAG_(?P<tag>[a-z_]+).*'
-        '|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
-        '|^.*DW_AT_decl_file.*:\s*(?P<decl>[0-9]+)\s*'
-        '|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')
-
- results = co.defaultdict(lambda: 0)
- for path in paths:
- # find decl, we want to filter by structs in .h files
- decls = {}
- # note objdump-tool may contain extra args
- cmd = args['objdump_tool'] + ['--dwarf=rawline', path]
+        '\s+(?P<path>[^\s]+)$')
+    info_pattern = re.compile(
+        '^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
+        '|.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
+        '|.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*'
+        '|.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')
+
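+    # (for reference, these match objdump's DWARF dump, which contains
+    # lines roughly like "<1><2a>: Abbrev Number: 5 (DW_TAG_structure_type)")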
+ results = []
+ for path in obj_paths:
+ # find files, we want to filter by structs in .h files
+ dirs = {}
+ files = {}
+ # note objdump-path may contain extra args
+ cmd = objdump_path + ['--dwarf=rawline', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
- errors='replace')
+ errors='replace',
+ close_fds=False)
for line in proc.stdout:
- # find file numbers
- m = decl_pattern.match(line)
+            # note that files contain references to dirs, which we
+            # dereference as soon as we see them, since each file table
+            # follows its dir table
+ m = line_pattern.match(line)
if m:
- decls[int(m.group('no'))] = m.group('file')
+ if not m.group('dir'):
+ # found a directory entry
+ dirs[int(m.group('no'))] = m.group('path')
+ else:
+ # found a file entry
+ dir = int(m.group('dir'))
+ if dir in dirs:
+ files[int(m.group('no'))] = os.path.join(
+ dirs[dir],
+ m.group('path'))
+ else:
+ files[int(m.group('no'))] = m.group('path')
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
@@ -53,40 +187,39 @@ def collect(paths, **args):
sys.exit(-1)
# collect structs as we parse dwarf info
- found = False
- name = None
- decl = None
- size = None
-
- # note objdump-tool may contain extra args
- cmd = args['objdump_tool'] + ['--dwarf=info', path]
+ results_ = []
+ is_struct = False
+ s_name = None
+ s_file = None
+ s_size = None
+ # note objdump-path may contain extra args
+ cmd = objdump_path + ['--dwarf=info', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
- errors='replace')
+ errors='replace',
+ close_fds=False)
for line in proc.stdout:
# state machine here to find structs
- m = struct_pattern.match(line)
+ m = info_pattern.match(line)
if m:
if m.group('tag'):
- if (name is not None
- and decl is not None
- and size is not None):
- decl = decls.get(decl, '?')
- results[(decl, name)] = size
- found = (m.group('tag') == 'structure_type')
- name = None
- decl = None
- size = None
- elif found and m.group('name'):
- name = m.group('name')
- elif found and name and m.group('decl'):
- decl = int(m.group('decl'))
- elif found and name and m.group('size'):
- size = int(m.group('size'))
+ if is_struct:
+ file = files.get(s_file, '?')
+ results_.append(StructResult(file, s_name, s_size))
+ is_struct = (m.group('tag') == 'DW_TAG_structure_type')
+ elif m.group('name'):
+ s_name = m.group('name')
+ elif m.group('file'):
+ s_file = int(m.group('file'))
+ elif m.group('size'):
+ s_size = int(m.group('size'))
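+        # flush the last struct, since there's no following tag to
+        # terminate it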
+ if is_struct:
+ file = files.get(s_file, '?')
+ results_.append(StructResult(file, s_name, s_size))
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
@@ -94,238 +227,426 @@ def collect(paths, **args):
sys.stdout.write(line)
sys.exit(-1)
- flat_results = []
- for (file, struct), size in results.items():
- # map to source files
- if args.get('build_dir'):
- file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
- # only include structs declared in header files in the current
- # directory, ignore internal-only # structs (these are represented
- # in other measurements)
- if not args.get('everything'):
- if not file.endswith('.h'):
- continue
- # replace .o with .c, different scripts report .o/.c, we need to
- # choose one if we want to deduplicate csv files
- file = re.sub('\.o$', '.c', file)
-
- flat_results.append((file, struct, size))
-
- return flat_results
-
-
-def main(**args):
- def openio(path, mode='r'):
- if path == '-':
- if 'r' in mode:
- return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+ for r in results_:
+ # ignore filtered sources
+ if sources is not None:
+ if not any(
+ os.path.abspath(r.file) == os.path.abspath(s)
+ for s in sources):
+ continue
else:
- return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
- else:
- return open(path, mode)
+ # default to only cwd
+ if not everything and not os.path.commonpath([
+ os.getcwd(),
+ os.path.abspath(r.file)]) == os.getcwd():
+ continue
+
+ # limit to .h files unless --internal
+ if not internal and not r.file.endswith('.h'):
+ continue
+
+ # simplify path
+ if os.path.commonpath([
+ os.getcwd(),
+ os.path.abspath(r.file)]) == os.getcwd():
+ file = os.path.relpath(r.file)
+ else:
+ file = os.path.abspath(r.file)
- # find sizes
- if not args.get('use', None):
- # find .o files
- paths = []
- for path in args['obj_paths']:
- if os.path.isdir(path):
- path = path + '/*.o'
+ results.append(r._replace(file=file))
+
+ return results
- for path in glob.glob(path):
- paths.append(path)
- if not paths:
- print('no .obj files found in %r?' % args['obj_paths'])
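+
+# fold combines results that share the same 'by' fields by summing them
+# via __add__, e.g. by=['file'] collapses every struct in a file into a
+# single row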
+def fold(Result, results, *,
+ by=None,
+ defines=None,
+ **_):
+ if by is None:
+ by = Result._by
+
+ for k in it.chain(by or [], (k for k, _ in defines or [])):
+ if k not in Result._by and k not in Result._fields:
+ print("error: could not find field %r?" % k)
sys.exit(-1)
- results = collect(paths, **args)
+ # filter by matching defines
+ if defines is not None:
+ results_ = []
+ for r in results:
+ if all(getattr(r, k) in vs for k, vs in defines):
+ results_.append(r)
+ results = results_
+
+ # organize results into conflicts
+ folding = co.OrderedDict()
+ for r in results:
+ name = tuple(getattr(r, k) for k in by)
+ if name not in folding:
+ folding[name] = []
+ folding[name].append(r)
+
+ # merge conflicts
+ folded = []
+ for name, rs in folding.items():
+ folded.append(sum(rs[1:], start=rs[0]))
+
+ return folded
+
+def table(Result, results, diff_results=None, *,
+ by=None,
+ fields=None,
+ sort=None,
+ summary=False,
+ all=False,
+ percent=False,
+ **_):
+ all_, all = all, __builtins__.all
+
+ if by is None:
+ by = Result._by
+ if fields is None:
+ fields = Result._fields
+ types = Result._types
+
+ # fold again
+ results = fold(Result, results, by=by)
+ if diff_results is not None:
+ diff_results = fold(Result, diff_results, by=by)
+
+ # organize by name
+ table = {
+ ','.join(str(getattr(r, k) or '') for k in by): r
+ for r in results}
+ diff_table = {
+ ','.join(str(getattr(r, k) or '') for k in by): r
+ for r in diff_results or []}
+ names = list(table.keys() | diff_table.keys())
+
+ # sort again, now with diff info, note that python's sort is stable
+ names.sort()
+ if diff_results is not None:
+ names.sort(key=lambda n: tuple(
+ types[k].ratio(
+ getattr(table.get(n), k, None),
+ getattr(diff_table.get(n), k, None))
+ for k in fields),
+ reverse=True)
+ if sort:
+ for k, reverse in reversed(sort):
+ names.sort(
+ key=lambda n: tuple(
+ (getattr(table[n], k),)
+ if getattr(table.get(n), k, None) is not None else ()
+ for k in ([k] if k else [
+ k for k in Result._sort if k in fields])),
+ reverse=reverse ^ (not k or k in Result._fields))
+
+
+ # build up our lines
+ lines = []
+
+ # header
+ header = []
+ header.append('%s%s' % (
+ ','.join(by),
+ ' (%d added, %d removed)' % (
+ sum(1 for n in table if n not in diff_table),
+ sum(1 for n in diff_table if n not in table))
+ if diff_results is not None and not percent else '')
+ if not summary else '')
+ if diff_results is None:
+ for k in fields:
+ header.append(k)
+ elif percent:
+ for k in fields:
+ header.append(k)
+ else:
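+        # a full diff shows old/new/diff column triplets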
+ for k in fields:
+ header.append('o'+k)
+ for k in fields:
+ header.append('n'+k)
+ for k in fields:
+ header.append('d'+k)
+ header.append('')
+ lines.append(header)
+
+ def table_entry(name, r, diff_r=None, ratios=[]):
+ entry = []
+ entry.append(name)
+ if diff_results is None:
+ for k in fields:
+ entry.append(getattr(r, k).table()
+ if getattr(r, k, None) is not None
+ else types[k].none)
+ elif percent:
+ for k in fields:
+ entry.append(getattr(r, k).diff_table()
+ if getattr(r, k, None) is not None
+ else types[k].diff_none)
+ else:
+ for k in fields:
+ entry.append(getattr(diff_r, k).diff_table()
+ if getattr(diff_r, k, None) is not None
+ else types[k].diff_none)
+ for k in fields:
+ entry.append(getattr(r, k).diff_table()
+ if getattr(r, k, None) is not None
+ else types[k].diff_none)
+ for k in fields:
+ entry.append(types[k].diff_diff(
+ getattr(r, k, None),
+ getattr(diff_r, k, None)))
+ if diff_results is None:
+ entry.append('')
+ elif percent:
+ entry.append(' (%s)' % ', '.join(
+ '+∞%' if t == +m.inf
+ else '-∞%' if t == -m.inf
+ else '%+.1f%%' % (100*t)
+ for t in ratios))
+ else:
+ entry.append(' (%s)' % ', '.join(
+ '+∞%' if t == +m.inf
+ else '-∞%' if t == -m.inf
+ else '%+.1f%%' % (100*t)
+ for t in ratios
+ if t)
+ if any(ratios) else '')
+ return entry
+
+ # entries
+ if not summary:
+ for name in names:
+ r = table.get(name)
+ if diff_results is None:
+ diff_r = None
+ ratios = None
+ else:
+ diff_r = diff_table.get(name)
+ ratios = [
+ types[k].ratio(
+ getattr(r, k, None),
+ getattr(diff_r, k, None))
+ for k in fields]
+ if not all_ and not any(ratios):
+ continue
+ lines.append(table_entry(name, r, diff_r, ratios))
+
+ # total
+ r = next(iter(fold(Result, results, by=[])), None)
+ if diff_results is None:
+ diff_r = None
+ ratios = None
+ else:
+ diff_r = next(iter(fold(Result, diff_results, by=[])), None)
+ ratios = [
+ types[k].ratio(
+ getattr(r, k, None),
+ getattr(diff_r, k, None))
+ for k in fields]
+ lines.append(table_entry('TOTAL', r, diff_r, ratios))
+
+ # find the best widths, note that column 0 contains the names and column -1
+ # the ratios, so those are handled a bit differently
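+    # (each width rounds up to a multiple of 4, minus 1 for the space
+    # that ' '.join inserts between columns)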
+ widths = [
+ ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
+ for w, i in zip(
+ it.chain([23], it.repeat(7)),
+ range(len(lines[0])-1))]
+
+ # print our table
+ for line in lines:
+ print('%-*s %s%s' % (
+ widths[0], line[0],
+ ' '.join('%*s' % (w, x)
+ for w, x in zip(widths[1:], line[1:-1])),
+ line[-1]))
+
+
+def main(obj_paths, *,
+ by=None,
+ fields=None,
+ defines=None,
+ sort=None,
+ **args):
+ # find sizes
+ if not args.get('use', None):
+ results = collect(obj_paths, **args)
else:
+ results = []
with openio(args['use']) as f:
- r = csv.DictReader(f)
- results = [
- ( result['file'],
- result['name'],
- int(result['struct_size']))
- for result in r
- if result.get('struct_size') not in {None, ''}]
-
- total = 0
- for _, _, size in results:
- total += size
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ if not any('struct_'+k in r and r['struct_'+k].strip()
+ for k in StructResult._fields):
+ continue
+ try:
+ results.append(StructResult(
+ **{k: r[k] for k in StructResult._by
+ if k in r and r[k].strip()},
+ **{k: r['struct_'+k]
+ for k in StructResult._fields
+ if 'struct_'+k in r
+ and r['struct_'+k].strip()}))
+ except TypeError:
+ pass
+
+ # fold
+ results = fold(StructResult, results, by=by, defines=defines)
+
+ # sort, note that python's sort is stable
+ results.sort()
+ if sort:
+ for k, reverse in reversed(sort):
+ results.sort(
+ key=lambda r: tuple(
+ (getattr(r, k),) if getattr(r, k) is not None else ()
+ for k in ([k] if k else StructResult._sort)),
+ reverse=reverse ^ (not k or k in StructResult._fields))
+
+ # write results to CSV
+ if args.get('output'):
+ with openio(args['output'], 'w') as f:
+ writer = csv.DictWriter(f,
+ (by if by is not None else StructResult._by)
+ + ['struct_'+k for k in (
+ fields if fields is not None else StructResult._fields)])
+ writer.writeheader()
+ for r in results:
+ writer.writerow(
+ {k: getattr(r, k) for k in (
+ by if by is not None else StructResult._by)}
+ | {'struct_'+k: getattr(r, k) for k in (
+ fields if fields is not None else StructResult._fields)})
# find previous results?
if args.get('diff'):
+ diff_results = []
try:
with openio(args['diff']) as f:
- r = csv.DictReader(f)
- prev_results = [
- ( result['file'],
- result['name'],
- int(result['struct_size']))
- for result in r
- if result.get('struct_size') not in {None, ''}]
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ if not any('struct_'+k in r and r['struct_'+k].strip()
+ for k in StructResult._fields):
+ continue
+ try:
+ diff_results.append(StructResult(
+ **{k: r[k] for k in StructResult._by
+ if k in r and r[k].strip()},
+ **{k: r['struct_'+k]
+ for k in StructResult._fields
+ if 'struct_'+k in r
+ and r['struct_'+k].strip()}))
+ except TypeError:
+ pass
except FileNotFoundError:
- prev_results = []
+ pass
- prev_total = 0
- for _, _, size in prev_results:
- prev_total += size
+ # fold
+ diff_results = fold(StructResult, diff_results, by=by, defines=defines)
- # write results to CSV
- if args.get('output'):
- merged_results = co.defaultdict(lambda: {})
- other_fields = []
+ # print table
+ if not args.get('quiet'):
+ table(StructResult, results,
+ diff_results if args.get('diff') else None,
+ by=by if by is not None else ['struct'],
+ fields=fields,
+ sort=sort,
+ **args)
- # merge?
- if args.get('merge'):
- try:
- with openio(args['merge']) as f:
- r = csv.DictReader(f)
- for result in r:
- file = result.pop('file', '')
- struct = result.pop('name', '')
- result.pop('struct_size', None)
- merged_results[(file, struct)] = result
- other_fields = result.keys()
- except FileNotFoundError:
- pass
-
- for file, struct, size in results:
- merged_results[(file, struct)]['struct_size'] = size
-
- with openio(args['output'], 'w') as f:
- w = csv.DictWriter(f, ['file', 'name', *other_fields, 'struct_size'])
- w.writeheader()
- for (file, struct), result in sorted(merged_results.items()):
- w.writerow({'file': file, 'name': struct, **result})
-
- # print results
- def dedup_entries(results, by='name'):
- entries = co.defaultdict(lambda: 0)
- for file, struct, size in results:
- entry = (file if by == 'file' else struct)
- entries[entry] += size
- return entries
-
- def diff_entries(olds, news):
- diff = co.defaultdict(lambda: (0, 0, 0, 0))
- for name, new in news.items():
- diff[name] = (0, new, new, 1.0)
- for name, old in olds.items():
- _, new, _, _ = diff[name]
- diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
- return diff
-
- def sorted_entries(entries):
- if args.get('size_sort'):
- return sorted(entries, key=lambda x: (-x[1], x))
- elif args.get('reverse_size_sort'):
- return sorted(entries, key=lambda x: (+x[1], x))
- else:
- return sorted(entries)
-
- def sorted_diff_entries(entries):
- if args.get('size_sort'):
- return sorted(entries, key=lambda x: (-x[1][1], x))
- elif args.get('reverse_size_sort'):
- return sorted(entries, key=lambda x: (+x[1][1], x))
- else:
- return sorted(entries, key=lambda x: (-x[1][3], x))
-
- def print_header(by=''):
- if not args.get('diff'):
- print('%-36s %7s' % (by, 'size'))
- else:
- print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
-
- def print_entry(name, size):
- print("%-36s %7d" % (name, size))
-
- def print_diff_entry(name, old, new, diff, ratio):
- print("%-36s %7s %7s %+7d%s" % (name,
- old or "-",
- new or "-",
- diff,
- ' (%+.1f%%)' % (100*ratio) if ratio else ''))
-
- def print_entries(by='name'):
- entries = dedup_entries(results, by=by)
-
- if not args.get('diff'):
- print_header(by=by)
- for name, size in sorted_entries(entries.items()):
- print_entry(name, size)
- else:
- prev_entries = dedup_entries(prev_results, by=by)
- diff = diff_entries(prev_entries, entries)
- print_header(by='%s (%d added, %d removed)' % (by,
- sum(1 for old, _, _, _ in diff.values() if not old),
- sum(1 for _, new, _, _ in diff.values() if not new)))
- for name, (old, new, diff, ratio) in sorted_diff_entries(
- diff.items()):
- if ratio or args.get('all'):
- print_diff_entry(name, old, new, diff, ratio)
-
- def print_totals():
- if not args.get('diff'):
- print_entry('TOTAL', total)
- else:
- ratio = (0.0 if not prev_total and not total
- else 1.0 if not prev_total
- else (total-prev_total)/prev_total)
- print_diff_entry('TOTAL',
- prev_total, total,
- total-prev_total,
- ratio)
-
- if args.get('quiet'):
- pass
- elif args.get('summary'):
- print_header()
- print_totals()
- elif args.get('files'):
- print_entries(by='file')
- print_totals()
- else:
- print_entries(by='name')
- print_totals()
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
- description="Find struct sizes.")
- parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
- help="Description of where to find *.o files. May be a directory \
- or a list of paths. Defaults to %r." % OBJ_PATHS)
- parser.add_argument('-v', '--verbose', action='store_true',
+ description="Find struct sizes.",
+ allow_abbrev=False)
+ parser.add_argument(
+ 'obj_paths',
+ nargs='*',
+ help="Input *.o files.")
+ parser.add_argument(
+ '-v', '--verbose',
+ action='store_true',
help="Output commands that run behind the scenes.")
- parser.add_argument('-q', '--quiet', action='store_true',
+ parser.add_argument(
+ '-q', '--quiet',
+ action='store_true',
help="Don't show anything, useful with -o.")
- parser.add_argument('-o', '--output',
+ parser.add_argument(
+ '-o', '--output',
help="Specify CSV file to store results.")
- parser.add_argument('-u', '--use',
- help="Don't compile and find struct sizes, instead use this CSV file.")
- parser.add_argument('-d', '--diff',
- help="Specify CSV file to diff struct size against.")
- parser.add_argument('-m', '--merge',
- help="Merge with an existing CSV file when writing to output.")
- parser.add_argument('-a', '--all', action='store_true',
- help="Show all functions, not just the ones that changed.")
- parser.add_argument('-A', '--everything', action='store_true',
+ parser.add_argument(
+ '-u', '--use',
+ help="Don't parse anything, use this CSV file.")
+ parser.add_argument(
+ '-d', '--diff',
+ help="Specify CSV file to diff against.")
+ parser.add_argument(
+ '-a', '--all',
+ action='store_true',
+ help="Show all, not just the ones that changed.")
+ parser.add_argument(
+ '-p', '--percent',
+ action='store_true',
+ help="Only show percentage change, not a full diff.")
+ parser.add_argument(
+ '-b', '--by',
+ action='append',
+ choices=StructResult._by,
+ help="Group by this field.")
+ parser.add_argument(
+ '-f', '--field',
+ dest='fields',
+ action='append',
+ choices=StructResult._fields,
+ help="Show this field.")
+ parser.add_argument(
+ '-D', '--define',
+ dest='defines',
+ action='append',
+ type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
+ help="Only include results where this field is this value.")
+ class AppendSort(argparse.Action):
+ def __call__(self, parser, namespace, value, option):
+ if namespace.sort is None:
+ namespace.sort = []
+ namespace.sort.append((value, True if option == '-S' else False))
+ parser.add_argument(
+ '-s', '--sort',
+ nargs='?',
+ action=AppendSort,
+ help="Sort by this field.")
+ parser.add_argument(
+ '-S', '--reverse-sort',
+ nargs='?',
+ action=AppendSort,
+ help="Sort by this field, but backwards.")
+ parser.add_argument(
+ '-Y', '--summary',
+ action='store_true',
+ help="Only show the total.")
+ parser.add_argument(
+ '-F', '--source',
+ dest='sources',
+ action='append',
+ help="Only consider definitions in this file. Defaults to anything "
+ "in the current directory.")
+ parser.add_argument(
+ '--everything',
+ action='store_true',
help="Include builtin and libc specific symbols.")
- parser.add_argument('-s', '--size-sort', action='store_true',
- help="Sort by size.")
- parser.add_argument('-S', '--reverse-size-sort', action='store_true',
- help="Sort by size, but backwards.")
- parser.add_argument('-F', '--files', action='store_true',
- help="Show file-level struct sizes.")
- parser.add_argument('-Y', '--summary', action='store_true',
- help="Only show the total struct size.")
- parser.add_argument('--objdump-tool', default=['objdump'], type=lambda x: x.split(),
- help="Path to the objdump tool to use.")
- parser.add_argument('--build-dir',
- help="Specify the relative build directory. Used to map object files \
- to the correct source files.")
- sys.exit(main(**vars(parser.parse_args())))
+ parser.add_argument(
+ '--internal',
+ action='store_true',
+ help="Also show structs in .c files.")
+ parser.add_argument(
+ '--objdump-path',
+ type=lambda x: x.split(),
+ default=OBJDUMP_PATH,
+ help="Path to the objdump executable, may include flags. "
+ "Defaults to %r." % OBJDUMP_PATH)
+ sys.exit(main(**{k: v
+ for k, v in vars(parser.parse_intermixed_args()).items()
+ if v is not None}))
diff --git a/scripts/summary.py b/scripts/summary.py
index 7ce769bf..445368ec 100755
--- a/scripts/summary.py
+++ b/scripts/summary.py
@@ -2,278 +2,828 @@
#
# Script to summarize the outputs of other scripts. Operates on CSV files.
#
+# Example:
+# ./scripts/code.py lfs.o lfs_util.o -q -o lfs.code.csv
+# ./scripts/data.py lfs.o lfs_util.o -q -o lfs.data.csv
+# ./scripts/summary.py lfs.code.csv lfs.data.csv -q -o lfs.csv
+# ./scripts/summary.py -Y lfs.csv -f code=code_size,data=data_size
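+# (the field=old_field form renames CSV fields on the fly, so several
+# differently-named inputs can land in one summary column)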
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
-import functools as ft
import collections as co
-import os
import csv
-import re
+import functools as ft
+import itertools as it
import math as m
+import os
+import re
+
+
+# supported merge operations
+#
+# this is a terrible way to express these
+#
+OPS = {
+ 'sum': lambda xs: sum(xs[1:], start=xs[0]),
+ 'prod': lambda xs: m.prod(xs[1:], start=xs[0]),
+ 'min': min,
+ 'max': max,
+ 'mean': lambda xs: Float(sum(float(x) for x in xs) / len(xs)),
+ 'stddev': lambda xs: (
+ lambda mean: Float(
+ m.sqrt(sum((float(x) - mean)**2 for x in xs) / len(xs)))
+ )(sum(float(x) for x in xs) / len(xs)),
+ 'gmean': lambda xs: Float(m.prod(float(x) for x in xs)**(1/len(xs))),
+ 'gstddev': lambda xs: (
+ lambda gmean: Float(
+ m.exp(m.sqrt(sum(m.log(float(x)/gmean)**2 for x in xs) / len(xs)))
+ if gmean else m.inf)
+ )(m.prod(float(x) for x in xs)**(1/len(xs))),
+}
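+
+# a quick sanity check: OPS['mean']([Int(1), Int(2), Int(6)]) -> Float(3.0),
+# while OPS['max']([Int(1), Int(2), Int(6)]) -> Int(6)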
+
+
+# integer fields
+class Int(co.namedtuple('Int', 'x')):
+ __slots__ = ()
+ def __new__(cls, x=0):
+ if isinstance(x, Int):
+ return x
+ if isinstance(x, str):
+ try:
+ x = int(x, 0)
+ except ValueError:
+ # also accept +-∞ and +-inf
+ if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
+ x = m.inf
+ elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
+ x = -m.inf
+ else:
+ raise
+ assert isinstance(x, int) or m.isinf(x), x
+ return super().__new__(cls, x)
+
+ def __str__(self):
+ if self.x == m.inf:
+ return '∞'
+ elif self.x == -m.inf:
+ return '-∞'
+ else:
+ return str(self.x)
+
+ def __int__(self):
+ assert not m.isinf(self.x)
+ return self.x
+
+ def __float__(self):
+ return float(self.x)
+
+ none = '%7s' % '-'
+ def table(self):
+ return '%7s' % (self,)
+
+ diff_none = '%7s' % '-'
+ diff_table = table
+
+ def diff_diff(self, other):
+ new = self.x if self else 0
+ old = other.x if other else 0
+ diff = new - old
+ if diff == +m.inf:
+ return '%7s' % '+∞'
+ elif diff == -m.inf:
+ return '%7s' % '-∞'
+ else:
+ return '%+7d' % diff
+
+ def ratio(self, other):
+ new = self.x if self else 0
+ old = other.x if other else 0
+ if m.isinf(new) and m.isinf(old):
+ return 0.0
+ elif m.isinf(new):
+ return +m.inf
+ elif m.isinf(old):
+ return -m.inf
+ elif not old and not new:
+ return 0.0
+ elif not old:
+ return 1.0
+ else:
+ return (new-old) / old
+
+ def __add__(self, other):
+ return self.__class__(self.x + other.x)
+
+ def __sub__(self, other):
+ return self.__class__(self.x - other.x)
+
+ def __mul__(self, other):
+ return self.__class__(self.x * other.x)
+
+# float fields
+class Float(co.namedtuple('Float', 'x')):
+ __slots__ = ()
+ def __new__(cls, x=0.0):
+ if isinstance(x, Float):
+ return x
+ if isinstance(x, str):
+ try:
+ x = float(x)
+ except ValueError:
+ # also accept +-∞ and +-inf
+ if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
+ x = m.inf
+ elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
+ x = -m.inf
+ else:
+ raise
+ assert isinstance(x, float), x
+ return super().__new__(cls, x)
+
+ def __str__(self):
+ if self.x == m.inf:
+ return '∞'
+ elif self.x == -m.inf:
+ return '-∞'
+ else:
+ return '%.1f' % self.x
+
+ def __float__(self):
+ return float(self.x)
+
+ none = Int.none
+ table = Int.table
+ diff_none = Int.diff_none
+ diff_table = Int.diff_table
+ diff_diff = Int.diff_diff
+ ratio = Int.ratio
+ __add__ = Int.__add__
+ __sub__ = Int.__sub__
+ __mul__ = Int.__mul__
+
+# fractional fields, a/b
+class Frac(co.namedtuple('Frac', 'a,b')):
+ __slots__ = ()
+ def __new__(cls, a=0, b=None):
+ if isinstance(a, Frac) and b is None:
+ return a
+ if isinstance(a, str) and b is None:
+ a, b = a.split('/', 1)
+ if b is None:
+ b = a
+ return super().__new__(cls, Int(a), Int(b))
+
+ def __str__(self):
+ return '%s/%s' % (self.a, self.b)
+
+ def __float__(self):
+ return float(self.a)
+
+ none = '%11s %7s' % ('-', '-')
+ def table(self):
+ t = self.a.x/self.b.x if self.b.x else 1.0
+ return '%11s %7s' % (
+ self,
+ '∞%' if t == +m.inf
+ else '-∞%' if t == -m.inf
+ else '%.1f%%' % (100*t))
+
+ diff_none = '%11s' % '-'
+ def diff_table(self):
+ return '%11s' % (self,)
+
+ def diff_diff(self, other):
+ new_a, new_b = self if self else (Int(0), Int(0))
+ old_a, old_b = other if other else (Int(0), Int(0))
+ return '%11s' % ('%s/%s' % (
+ new_a.diff_diff(old_a).strip(),
+ new_b.diff_diff(old_b).strip()))
+
+ def ratio(self, other):
+ new_a, new_b = self if self else (Int(0), Int(0))
+ old_a, old_b = other if other else (Int(0), Int(0))
+ new = new_a.x/new_b.x if new_b.x else 1.0
+ old = old_a.x/old_b.x if old_b.x else 1.0
+ return new - old
+
+ def __add__(self, other):
+ return self.__class__(self.a + other.a, self.b + other.b)
+
+ def __sub__(self, other):
+ return self.__class__(self.a - other.a, self.b - other.b)
+
+ def __mul__(self, other):
+ return self.__class__(self.a * other.a, self.b + other.b)
+
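+    # fracs order by their ratio first, then by their numerator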
+ def __lt__(self, other):
+ self_t = self.a.x/self.b.x if self.b.x else 1.0
+ other_t = other.a.x/other.b.x if other.b.x else 1.0
+ return (self_t, self.a.x) < (other_t, other.a.x)
+
+ def __gt__(self, other):
+ return self.__class__.__lt__(other, self)
+
+ def __le__(self, other):
+ return not self.__gt__(other)
+
+ def __ge__(self, other):
+ return not self.__lt__(other)
+
+# available types
+TYPES = co.OrderedDict([
+ ('int', Int),
+ ('float', Float),
+ ('frac', Frac)
+])
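+
+# all three parse from CSV strings, e.g. Int('0x10') -> 16,
+# Float('inf') -> ∞, and Frac('1/2') -> 1/2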
+
+
+def infer(results, *,
+ by=None,
+ fields=None,
+ types={},
+ ops={},
+ renames=[],
+ **_):
+ # if fields not specified, try to guess from data
+ if fields is None:
+ fields = co.OrderedDict()
+ for r in results:
+ for k, v in r.items():
+ if (by is None or k not in by) and v.strip():
+ types_ = []
+ for t in fields.get(k, TYPES.values()):
+ try:
+ t(v)
+ types_.append(t)
+ except ValueError:
+ pass
+ fields[k] = types_
+ fields = list(k for k, v in fields.items() if v)
+
+ # deduplicate fields
+ fields = list(co.OrderedDict.fromkeys(fields).keys())
+
+ # if by not specified, guess it's anything not in fields and not a
+ # source of a rename
+ if by is None:
+ by = co.OrderedDict()
+ for r in results:
+ # also ignore None keys, these are introduced by csv.DictReader
+ # when header + row mismatch
+ by.update((k, True) for k in r.keys()
+ if k is not None
+ and k not in fields
+ and not any(k == old_k for _, old_k in renames))
+ by = list(by.keys())
+
+ # deduplicate fields
+ by = list(co.OrderedDict.fromkeys(by).keys())
+
+ # find best type for all fields
+ types_ = {}
+ for k in fields:
+ if k in types:
+ types_[k] = types[k]
+ else:
+ for t in TYPES.values():
+ for r in results:
+ if k in r and r[k].strip():
+ try:
+ t(r[k])
+ except ValueError:
+ break
+ else:
+ types_[k] = t
+ break
+ else:
+ print("error: no type matches field %r?" % k)
+ sys.exit(-1)
+ types = types_
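+    # (the for-else above reaches the error only when no type could
+    # parse every value seen for the field)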
+
+ # does folding change the type?
+ types_ = {}
+ for k, t in types.items():
+ types_[k] = ops.get(k, OPS['sum'])([t()]).__class__
+
+
+ # create result class
+ def __new__(cls, **r):
+ return cls.__mro__[1].__new__(cls,
+ **{k: r.get(k, '') for k in by},
+ **{k: r[k] if k in r and isinstance(r[k], list)
+ else [types[k](r[k])] if k in r
+ else []
+ for k in fields})
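+
+    # note fields are stored as lists of raw values and only reduced by
+    # their op (sum by default) on access, see __getattribute__ below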
-# displayable fields
-Field = co.namedtuple('Field', 'name,parse,acc,key,fmt,repr,null,ratio')
-FIELDS = [
- # name, parse, accumulate, fmt, print, null
- Field('code',
- lambda r: int(r['code_size']),
- sum,
- lambda r: r,
- '%7s',
- lambda r: r,
- '-',
- lambda old, new: (new-old)/old),
- Field('data',
- lambda r: int(r['data_size']),
- sum,
- lambda r: r,
- '%7s',
- lambda r: r,
- '-',
- lambda old, new: (new-old)/old),
- Field('stack',
- lambda r: float(r['stack_limit']),
- max,
- lambda r: r,
- '%7s',
- lambda r: '∞' if m.isinf(r) else int(r),
- '-',
- lambda old, new: (new-old)/old),
- Field('structs',
- lambda r: int(r['struct_size']),
- sum,
- lambda r: r,
- '%8s',
- lambda r: r,
- '-',
- lambda old, new: (new-old)/old),
- Field('coverage',
- lambda r: (int(r['coverage_hits']), int(r['coverage_count'])),
- lambda rs: ft.reduce(lambda a, b: (a[0]+b[0], a[1]+b[1]), rs),
- lambda r: r[0]/r[1],
- '%19s',
- lambda r: '%11s %7s' % ('%d/%d' % (r[0], r[1]), '%.1f%%' % (100*r[0]/r[1])),
- '%11s %7s' % ('-', '-'),
- lambda old, new: ((new[0]/new[1]) - (old[0]/old[1])))
-]
-
-
-def main(**args):
- def openio(path, mode='r'):
- if path == '-':
- if 'r' in mode:
- return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+ def __add__(self, other):
+ return self.__class__(
+ **{k: getattr(self, k) for k in by},
+ **{k: object.__getattribute__(self, k)
+ + object.__getattribute__(other, k)
+ for k in fields})
+
+ def __getattribute__(self, k):
+ if k in fields:
+ if object.__getattribute__(self, k):
+ return ops.get(k, OPS['sum'])(object.__getattribute__(self, k))
else:
- return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
+ return None
+ return object.__getattribute__(self, k)
+
+ return type('Result', (co.namedtuple('Result', by + fields),), {
+ '__slots__': (),
+ '__new__': __new__,
+ '__add__': __add__,
+ '__getattribute__': __getattribute__,
+ '_by': by,
+ '_fields': fields,
+ '_sort': fields,
+ '_types': types_,
+ })
+
+
+def fold(Result, results, *,
+ by=None,
+ defines=None,
+ **_):
+ if by is None:
+ by = Result._by
+
+ for k in it.chain(by or [], (k for k, _ in defines or [])):
+ if k not in Result._by and k not in Result._fields:
+ print("error: could not find field %r?" % k)
+ sys.exit(-1)
+
+ # filter by matching defines
+ if defines is not None:
+ results_ = []
+ for r in results:
+ if all(getattr(r, k) in vs for k, vs in defines):
+ results_.append(r)
+ results = results_
+
+ # organize results into conflicts
+ folding = co.OrderedDict()
+ for r in results:
+ name = tuple(getattr(r, k) for k in by)
+ if name not in folding:
+ folding[name] = []
+ folding[name].append(r)
+
+ # merge conflicts
+ folded = []
+ for name, rs in folding.items():
+ folded.append(sum(rs[1:], start=rs[0]))
+
+ return folded
+
+def table(Result, results, diff_results=None, *,
+ by=None,
+ fields=None,
+ sort=None,
+ summary=False,
+ all=False,
+ percent=False,
+ **_):
+ all_, all = all, __builtins__.all
+
+ if by is None:
+ by = Result._by
+ if fields is None:
+ fields = Result._fields
+ types = Result._types
+
+ # fold again
+ results = fold(Result, results, by=by)
+ if diff_results is not None:
+ diff_results = fold(Result, diff_results, by=by)
+
+ # organize by name
+ table = {
+ ','.join(str(getattr(r, k) or '') for k in by): r
+ for r in results}
+ diff_table = {
+ ','.join(str(getattr(r, k) or '') for k in by): r
+ for r in diff_results or []}
+ names = list(table.keys() | diff_table.keys())
+
+ # sort again, now with diff info, note that python's sort is stable
+ names.sort()
+ if diff_results is not None:
+ names.sort(key=lambda n: tuple(
+ types[k].ratio(
+ getattr(table.get(n), k, None),
+ getattr(diff_table.get(n), k, None))
+ for k in fields),
+ reverse=True)
+ if sort:
+ for k, reverse in reversed(sort):
+ names.sort(
+ key=lambda n: tuple(
+ (getattr(table[n], k),)
+ if getattr(table.get(n), k, None) is not None else ()
+ for k in ([k] if k else [
+ k for k in Result._sort if k in fields])),
+ reverse=reverse ^ (not k or k in Result._fields))
+
+
+ # build up our lines
+ lines = []
+
+ # header
+ header = []
+ header.append('%s%s' % (
+ ','.join(by),
+ ' (%d added, %d removed)' % (
+ sum(1 for n in table if n not in diff_table),
+ sum(1 for n in diff_table if n not in table))
+ if diff_results is not None and not percent else '')
+ if not summary else '')
+ if diff_results is None:
+ for k in fields:
+ header.append(k)
+ elif percent:
+ for k in fields:
+ header.append(k)
+ else:
+ for k in fields:
+ header.append('o'+k)
+ for k in fields:
+ header.append('n'+k)
+ for k in fields:
+ header.append('d'+k)
+ header.append('')
+ lines.append(header)
+
+ def table_entry(name, r, diff_r=None, ratios=[]):
+ entry = []
+ entry.append(name)
+ if diff_results is None:
+ for k in fields:
+ entry.append(getattr(r, k).table()
+ if getattr(r, k, None) is not None
+ else types[k].none)
+ elif percent:
+ for k in fields:
+ entry.append(getattr(r, k).diff_table()
+ if getattr(r, k, None) is not None
+ else types[k].diff_none)
+ else:
+ for k in fields:
+ entry.append(getattr(diff_r, k).diff_table()
+ if getattr(diff_r, k, None) is not None
+ else types[k].diff_none)
+ for k in fields:
+ entry.append(getattr(r, k).diff_table()
+ if getattr(r, k, None) is not None
+ else types[k].diff_none)
+ for k in fields:
+ entry.append(types[k].diff_diff(
+ getattr(r, k, None),
+ getattr(diff_r, k, None)))
+ if diff_results is None:
+ entry.append('')
+ elif percent:
+ entry.append(' (%s)' % ', '.join(
+ '+∞%' if t == +m.inf
+ else '-∞%' if t == -m.inf
+ else '%+.1f%%' % (100*t)
+ for t in ratios))
else:
- return open(path, mode)
+ entry.append(' (%s)' % ', '.join(
+ '+∞%' if t == +m.inf
+ else '-∞%' if t == -m.inf
+ else '%+.1f%%' % (100*t)
+ for t in ratios
+ if t)
+ if any(ratios) else '')
+ return entry
+
+ # entries
+ if not summary:
+ for name in names:
+ r = table.get(name)
+ if diff_results is None:
+ diff_r = None
+ ratios = None
+ else:
+ diff_r = diff_table.get(name)
+ ratios = [
+ types[k].ratio(
+ getattr(r, k, None),
+ getattr(diff_r, k, None))
+ for k in fields]
+ if not all_ and not any(ratios):
+ continue
+ lines.append(table_entry(name, r, diff_r, ratios))
+
+ # total
+ r = next(iter(fold(Result, results, by=[])), None)
+ if diff_results is None:
+ diff_r = None
+ ratios = None
+ else:
+ diff_r = next(iter(fold(Result, diff_results, by=[])), None)
+ ratios = [
+ types[k].ratio(
+ getattr(r, k, None),
+ getattr(diff_r, k, None))
+ for k in fields]
+ lines.append(table_entry('TOTAL', r, diff_r, ratios))
+
+ # find the best widths, note that column 0 contains the names and column -1
+ # the ratios, so those are handled a bit differently
+ widths = [
+ ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
+ for w, i in zip(
+ it.chain([23], it.repeat(7)),
+ range(len(lines[0])-1))]
- # find results
- results = co.defaultdict(lambda: {})
- for path in args.get('csv_paths', '-'):
+ # print our table
+ for line in lines:
+ print('%-*s %s%s' % (
+ widths[0], line[0],
+ ' '.join('%*s' % (w, x)
+ for w, x in zip(widths[1:], line[1:-1])),
+ line[-1]))
+
+
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
+
+def main(csv_paths, *,
+ by=None,
+ fields=None,
+ defines=None,
+ sort=None,
+ **args):
+ # separate out renames
+ renames = list(it.chain.from_iterable(
+ ((k, v) for v in vs)
+ for k, vs in it.chain(by or [], fields or [])))
+ if by is not None:
+ by = [k for k, _ in by]
+ if fields is not None:
+ fields = [k for k, _ in fields]
+
+ # figure out types
+ types = {}
+ for t in TYPES.keys():
+ for k in args.get(t, []):
+ if k in types:
+ print("error: conflicting type for field %r?" % k)
+ sys.exit(-1)
+ types[k] = TYPES[t]
+ # rename types?
+ if renames:
+ types_ = {}
+ for new_k, old_k in renames:
+ if old_k in types:
+ types_[new_k] = types[old_k]
+ types.update(types_)
+
+ # figure out merge operations
+ ops = {}
+ for o in OPS.keys():
+ for k in args.get(o, []):
+ if k in ops:
+ print("error: conflicting op for field %r?" % k)
+ sys.exit(-1)
+ ops[k] = OPS[o]
+ # rename ops?
+ if renames:
+ ops_ = {}
+ for new_k, old_k in renames:
+ if old_k in ops:
+ ops_[new_k] = ops[old_k]
+ ops.update(ops_)
+
+ # find CSV files
+ results = []
+ for path in csv_paths:
try:
with openio(path) as f:
- r = csv.DictReader(f)
- for result in r:
- file = result.pop('file', '')
- name = result.pop('name', '')
- prev = results[(file, name)]
- for field in FIELDS:
- try:
- r = field.parse(result)
- if field.name in prev:
- results[(file, name)][field.name] = field.acc(
- [prev[field.name], r])
- else:
- results[(file, name)][field.name] = r
- except (KeyError, ValueError):
- pass
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ # rename fields?
+ if renames:
+ # make a copy so renames can overlap
+ r_ = {}
+ for new_k, old_k in renames:
+ if old_k in r:
+ r_[new_k] = r[old_k]
+ r.update(r_)
+
+ results.append(r)
except FileNotFoundError:
pass
- # find fields
- if args.get('all_fields'):
- fields = FIELDS
- elif args.get('fields') is not None:
- fields_dict = {field.name: field for field in FIELDS}
- fields = [fields_dict[f] for f in args['fields']]
- else:
- fields = []
- for field in FIELDS:
- if any(field.name in result for result in results.values()):
- fields.append(field)
-
- # find total for every field
- total = {}
- for result in results.values():
- for field in fields:
- if field.name in result and field.name in total:
- total[field.name] = field.acc(
- [total[field.name], result[field.name]])
- elif field.name in result:
- total[field.name] = result[field.name]
+ # homogenize
+ Result = infer(results,
+ by=by,
+ fields=fields,
+ types=types,
+ ops=ops,
+ renames=renames)
+ results_ = []
+ for r in results:
+ if not any(k in r and r[k].strip()
+ for k in Result._fields):
+ continue
+ try:
+ results_.append(Result(**{
+ k: r[k] for k in Result._by + Result._fields
+ if k in r and r[k].strip()}))
+ except TypeError:
+ pass
+ results = results_
+
+ # fold
+ results = fold(Result, results, by=by, defines=defines)
+
+ # sort, note that python's sort is stable
+ results.sort()
+ if sort:
+ for k, reverse in reversed(sort):
+ results.sort(
+ key=lambda r: tuple(
+ (getattr(r, k),) if getattr(r, k) is not None else ()
+ for k in ([k] if k else Result._sort)),
+ reverse=reverse ^ (not k or k in Result._fields))
+
+ # write results to CSV
+ if args.get('output'):
+ with openio(args['output'], 'w') as f:
+ writer = csv.DictWriter(f, Result._by + Result._fields)
+ writer.writeheader()
+ for r in results:
+ # note we need to go through getattr to resolve lazy fields
+ writer.writerow({
+ k: getattr(r, k) for k in Result._by + Result._fields})
# find previous results?
if args.get('diff'):
- prev_results = co.defaultdict(lambda: {})
+ diff_results = []
try:
with openio(args['diff']) as f:
- r = csv.DictReader(f)
- for result in r:
- file = result.pop('file', '')
- name = result.pop('name', '')
- prev = prev_results[(file, name)]
- for field in FIELDS:
- try:
- r = field.parse(result)
- if field.name in prev:
- prev_results[(file, name)][field.name] = field.acc(
- [prev[field.name], r])
- else:
- prev_results[(file, name)][field.name] = r
- except (KeyError, ValueError):
- pass
+ reader = csv.DictReader(f, restval='')
+ for r in reader:
+ # rename fields?
+ if renames:
+ # make a copy so renames can overlap
+ r_ = {}
+ for new_k, old_k in renames:
+ if old_k in r:
+ r_[new_k] = r[old_k]
+ r.update(r_)
+
+ if not any(k in r and r[k].strip()
+ for k in Result._fields):
+ continue
+ try:
+ diff_results.append(Result(**{
+ k: r[k] for k in Result._by + Result._fields
+ if k in r and r[k].strip()}))
+ except TypeError:
+ pass
except FileNotFoundError:
pass
- prev_total = {}
- for result in prev_results.values():
- for field in fields:
- if field.name in result and field.name in prev_total:
- prev_total[field.name] = field.acc(
- [prev_total[field.name], result[field.name]])
- elif field.name in result:
- prev_total[field.name] = result[field.name]
-
- # print results
- def dedup_entries(results, by='name'):
- entries = co.defaultdict(lambda: {})
- for (file, func), result in results.items():
- entry = (file if by == 'file' else func)
- prev = entries[entry]
- for field in fields:
- if field.name in result and field.name in prev:
- entries[entry][field.name] = field.acc(
- [prev[field.name], result[field.name]])
- elif field.name in result:
- entries[entry][field.name] = result[field.name]
- return entries
-
- def sorted_entries(entries):
- if args.get('sort') is not None:
- field = {field.name: field for field in FIELDS}[args['sort']]
- return sorted(entries, key=lambda x: (
- -(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
- elif args.get('reverse_sort') is not None:
- field = {field.name: field for field in FIELDS}[args['reverse_sort']]
- return sorted(entries, key=lambda x: (
- +(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
- else:
- return sorted(entries)
-
- def print_header(by=''):
- if not args.get('diff'):
- print('%-36s' % by, end='')
- for field in fields:
- print((' '+field.fmt) % field.name, end='')
- print()
- else:
- print('%-36s' % by, end='')
- for field in fields:
- print((' '+field.fmt) % field.name, end='')
- print(' %-9s' % '', end='')
- print()
-
- def print_entry(name, result):
- print('%-36s' % name, end='')
- for field in fields:
- r = result.get(field.name)
- if r is not None:
- print((' '+field.fmt) % field.repr(r), end='')
- else:
- print((' '+field.fmt) % '-', end='')
- print()
-
- def print_diff_entry(name, old, new):
- print('%-36s' % name, end='')
- for field in fields:
- n = new.get(field.name)
- if n is not None:
- print((' '+field.fmt) % field.repr(n), end='')
- else:
- print((' '+field.fmt) % '-', end='')
- o = old.get(field.name)
- ratio = (
- 0.0 if m.isinf(o or 0) and m.isinf(n or 0)
- else +float('inf') if m.isinf(n or 0)
- else -float('inf') if m.isinf(o or 0)
- else 0.0 if not o and not n
- else +1.0 if not o
- else -1.0 if not n
- else field.ratio(o, n))
- print(' %-9s' % (
- '' if not ratio
- else '(+∞%)' if ratio > 0 and m.isinf(ratio)
- else '(-∞%)' if ratio < 0 and m.isinf(ratio)
- else '(%+.1f%%)' % (100*ratio)), end='')
- print()
-
- def print_entries(by='name'):
- entries = dedup_entries(results, by=by)
-
- if not args.get('diff'):
- print_header(by=by)
- for name, result in sorted_entries(entries.items()):
- print_entry(name, result)
- else:
- prev_entries = dedup_entries(prev_results, by=by)
- print_header(by='%s (%d added, %d removed)' % (by,
- sum(1 for name in entries if name not in prev_entries),
- sum(1 for name in prev_entries if name not in entries)))
- for name, result in sorted_entries(entries.items()):
- if args.get('all') or result != prev_entries.get(name, {}):
- print_diff_entry(name, prev_entries.get(name, {}), result)
-
- def print_totals():
- if not args.get('diff'):
- print_entry('TOTAL', total)
- else:
- print_diff_entry('TOTAL', prev_total, total)
-
- if args.get('summary'):
- print_header()
- print_totals()
- elif args.get('files'):
- print_entries(by='file')
- print_totals()
- else:
- print_entries(by='name')
- print_totals()
+ # fold
+ diff_results = fold(Result, diff_results, by=by, defines=defines)
+
+ # print table
+ if not args.get('quiet'):
+ table(Result, results,
+ diff_results if args.get('diff') else None,
+ by=by,
+ fields=fields,
+ sort=sort,
+ **args)
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
- description="Summarize measurements")
- parser.add_argument('csv_paths', nargs='*', default='-',
- help="Description of where to find *.csv files. May be a directory \
- or list of paths. *.csv files will be merged to show the total \
- coverage.")
- parser.add_argument('-d', '--diff',
+ description="Summarize measurements in CSV files.",
+ allow_abbrev=False)
+ parser.add_argument(
+ 'csv_paths',
+ nargs='*',
+ help="Input *.csv files.")
+ parser.add_argument(
+ '-q', '--quiet',
+ action='store_true',
+ help="Don't show anything, useful with -o.")
+ parser.add_argument(
+ '-o', '--output',
+ help="Specify CSV file to store results.")
+ parser.add_argument(
+ '-d', '--diff',
help="Specify CSV file to diff against.")
- parser.add_argument('-a', '--all', action='store_true',
- help="Show all objects, not just the ones that changed.")
- parser.add_argument('-e', '--all-fields', action='store_true',
- help="Show all fields, even those with no results.")
- parser.add_argument('-f', '--fields', type=lambda x: re.split('\s*,\s*', x),
- help="Comma separated list of fields to print, by default all fields \
- that are found in the CSV files are printed.")
- parser.add_argument('-s', '--sort',
+ parser.add_argument(
+ '-a', '--all',
+ action='store_true',
+ help="Show all, not just the ones that changed.")
+ parser.add_argument(
+ '-p', '--percent',
+ action='store_true',
+ help="Only show percentage change, not a full diff.")
+ parser.add_argument(
+ '-b', '--by',
+ action='append',
+ type=lambda x: (
+ lambda k,v=None: (k, v.split(',') if v is not None else ())
+ )(*x.split('=', 1)),
+ help="Group by this field. Can rename fields with new_name=old_name.")
+ parser.add_argument(
+ '-f', '--field',
+ dest='fields',
+ action='append',
+ type=lambda x: (
+ lambda k,v=None: (k, v.split(',') if v is not None else ())
+ )(*x.split('=', 1)),
+ help="Show this field. Can rename fields with new_name=old_name.")
+ parser.add_argument(
+ '-D', '--define',
+ dest='defines',
+ action='append',
+ type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
+ help="Only include results where this field is this value. May include "
+ "comma-separated options.")
+ class AppendSort(argparse.Action):
+ def __call__(self, parser, namespace, value, option):
+ if namespace.sort is None:
+ namespace.sort = []
+ namespace.sort.append((value, True if option == '-S' else False))
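+ # e.g. -Stime -scode accumulates sort=[('time', True), ('code', False)]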
+ parser.add_argument(
+ '-s', '--sort',
+ nargs='?',
+ action=AppendSort,
help="Sort by this field.")
- parser.add_argument('-S', '--reverse-sort',
+ parser.add_argument(
+ '-S', '--reverse-sort',
+ nargs='?',
+ action=AppendSort,
help="Sort by this field, but backwards.")
- parser.add_argument('-F', '--files', action='store_true',
- help="Show file-level calls.")
- parser.add_argument('-Y', '--summary', action='store_true',
- help="Only show the totals.")
- sys.exit(main(**vars(parser.parse_args())))
+ parser.add_argument(
+ '-Y', '--summary',
+ action='store_true',
+ help="Only show the total.")
+ parser.add_argument(
+ '--int',
+ action='append',
+ help="Treat these fields as ints.")
+ parser.add_argument(
+ '--float',
+ action='append',
+ help="Treat these fields as floats.")
+ parser.add_argument(
+ '--frac',
+ action='append',
+ help="Treat these fields as fractions.")
+ parser.add_argument(
+ '--sum',
+ action='append',
+ help="Add these fields (the default).")
+ parser.add_argument(
+ '--prod',
+ action='append',
+ help="Multiply these fields.")
+ parser.add_argument(
+ '--min',
+ action='append',
+ help="Take the minimum of these fields.")
+ parser.add_argument(
+ '--max',
+ action='append',
+ help="Take the maximum of these fields.")
+ parser.add_argument(
+ '--mean',
+ action='append',
+ help="Average these fields.")
+ parser.add_argument(
+ '--stddev',
+ action='append',
+ help="Find the standard deviation of these fields.")
+ parser.add_argument(
+ '--gmean',
+ action='append',
+ help="Find the geometric mean of these fields.")
+ parser.add_argument(
+ '--gstddev',
+ action='append',
+ help="Find the geometric standard deviation of these fields.")
+ sys.exit(main(**{k: v
+ for k, v in vars(parser.parse_intermixed_args()).items()
+ if v is not None}))
diff --git a/scripts/tailpipe.py b/scripts/tailpipe.py
new file mode 100755
index 00000000..802f74d4
--- /dev/null
+++ b/scripts/tailpipe.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python3
+#
+# Efficiently displays the last n lines of a file/pipe.
+#
+# Example:
+# ./scripts/tailpipe.py trace -n5
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import collections as co
+import io
+import os
+import select
+import shutil
+import sys
+import threading as th
+import time
+
+
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
+
+class LinesIO:
+ def __init__(self, maxlen=None):
+ self.maxlen = maxlen
+ self.lines = co.deque(maxlen=maxlen)
+ self.tail = io.StringIO()
+
+ # trigger automatic sizing
+ if maxlen == 0:
+ self.resize(0)
+
+ def write(self, s):
+ # note using split here ensures the trailing string has no newline
+ lines = s.split('\n')
+
+ if len(lines) > 1 and self.tail.getvalue():
+ self.tail.write(lines[0])
+ lines[0] = self.tail.getvalue()
+ self.tail = io.StringIO()
+
+ self.lines.extend(lines[:-1])
+
+ if lines[-1]:
+ self.tail.write(lines[-1])
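+ # e.g. write('a\nb') appends the complete line 'a' and buffers 'b'
+ # in self.tail until its newline arrives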
+
+ def resize(self, maxlen):
+ self.maxlen = maxlen
+ if maxlen == 0:
+ maxlen = shutil.get_terminal_size((80, 5))[1]
+ if maxlen != self.lines.maxlen:
+ self.lines = co.deque(self.lines, maxlen=maxlen)
+
+ canvas_lines = 1
+ def draw(self):
+ # did terminal size change?
+ if self.maxlen == 0:
+ self.resize(0)
+
+ # first things first, give ourselves a canvas
+ while LinesIO.canvas_lines < len(self.lines):
+ sys.stdout.write('\n')
+ LinesIO.canvas_lines += 1
+
+ # clear the bottom of the canvas if we shrink
+ shrink = LinesIO.canvas_lines - len(self.lines)
+ if shrink > 0:
+ for i in range(shrink):
+ sys.stdout.write('\r')
+ if shrink-1-i > 0:
+ sys.stdout.write('\x1b[%dA' % (shrink-1-i))
+ sys.stdout.write('\x1b[K')
+ if shrink-1-i > 0:
+ sys.stdout.write('\x1b[%dB' % (shrink-1-i))
+ sys.stdout.write('\x1b[%dA' % shrink)
+ LinesIO.canvas_lines = len(self.lines)
+
+ for i, line in enumerate(self.lines):
+ # move cursor, clear line, disable/reenable line wrapping
+ sys.stdout.write('\r')
+ if len(self.lines)-1-i > 0:
+ sys.stdout.write('\x1b[%dA' % (len(self.lines)-1-i))
+ sys.stdout.write('\x1b[K')
+ sys.stdout.write('\x1b[?7l')
+ sys.stdout.write(line)
+ sys.stdout.write('\x1b[?7h')
+ if len(self.lines)-1-i > 0:
+ sys.stdout.write('\x1b[%dB' % (len(self.lines)-1-i))
+ sys.stdout.flush()
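+ # (draw relies on standard ANSI escapes: \x1b[K clears a line,
+ # \x1b[nA and \x1b[nB move the cursor up/down n lines)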
+
+
+def main(path='-', *, lines=5, cat=False, sleep=None, keep_open=False):
+ if cat:
+ ring = sys.stdout
+ else:
+ ring = LinesIO(lines)
+
+ # draw in a background thread to avoid getting stuck in a blocking read
+ event = th.Event()
+ lock = th.Lock()
+ if not cat:
+ done = False
+ def background():
+ while not done:
+ event.wait()
+ event.clear()
+ with lock:
+ ring.draw()
+ time.sleep(sleep or 0.01)
+ th.Thread(target=background, daemon=True).start()
+
+ try:
+ while True:
+ with openio(path) as f:
+ for line in f:
+ with lock:
+ ring.write(line)
+ event.set()
+
+ if not keep_open:
+ break
+ # don't just flood open calls
+ time.sleep(sleep or 0.1)
+ except FileNotFoundError as e:
+ print("error: file not found %r" % path)
+ sys.exit(-1)
+ except KeyboardInterrupt:
+ pass
+
+ if not cat:
+ done = True
+ lock.acquire() # avoids https://bugs.python.org/issue42717
+ sys.stdout.write('\n')
+
+
+if __name__ == "__main__":
+ import sys
+ import argparse
+ parser = argparse.ArgumentParser(
+ description="Efficiently displays the last n lines of a file/pipe.",
+ allow_abbrev=False)
+ parser.add_argument(
+ 'path',
+ nargs='?',
+ help="Path to read from.")
+ parser.add_argument(
+ '-n', '--lines',
+ nargs='?',
+ type=lambda x: int(x, 0),
+ const=0,
+ help="Show this many lines of history. 0 uses the terminal height. "
+ "Defaults to 5.")
+ parser.add_argument(
+ '-z', '--cat',
+ action='store_true',
+ help="Pipe directly to stdout.")
+ parser.add_argument(
+ '-s', '--sleep',
+ type=float,
+ help="Seconds to sleep between reads. Defaults to 0.01.")
+ parser.add_argument(
+ '-k', '--keep-open',
+ action='store_true',
+ help="Reopen the pipe on EOF, useful when multiple "
+ "processes are writing.")
+ sys.exit(main(**{k: v
+ for k, v in vars(parser.parse_intermixed_args()).items()
+ if v is not None}))
diff --git a/scripts/teepipe.py b/scripts/teepipe.py
new file mode 100755
index 00000000..ee32e44b
--- /dev/null
+++ b/scripts/teepipe.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+#
+# tee, but for pipes
+#
+# Example:
+# ./scripts/teepipe.py in_pipe out_pipe1 out_pipe2
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import os
+import io
+import time
+import sys
+
+
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
+
+def main(in_path, out_paths, *, keep_open=False):
+ out_pipes = [openio(p, 'wb', 0) for p in out_paths]
+ try:
+ with openio(in_path, 'rb', 0) as f:
+ while True:
+ buf = f.read(io.DEFAULT_BUFFER_SIZE)
+ if not buf:
+ if not keep_open:
+ break
+ # don't just flood reads
+ time.sleep(0.1)
+ continue
+
+ for p in out_pipes:
+ try:
+ p.write(buf)
+ except BrokenPipeError:
+ pass
+ except FileNotFoundError as e:
+ print("error: file not found %r" % in_path)
+ sys.exit(-1)
+ except KeyboardInterrupt:
+ pass
+
+
+if __name__ == "__main__":
+ import sys
+ import argparse
+ parser = argparse.ArgumentParser(
+ description="tee, but for pipes.",
+ allow_abbrev=False)
+ parser.add_argument(
+ 'in_path',
+ help="Path to read from.")
+ parser.add_argument(
+ 'out_paths',
+ nargs='+',
+ help="Path to write to.")
+ parser.add_argument(
+ '-k', '--keep-open',
+ action='store_true',
+ help="Reopen the pipe on EOF, useful when multiple "
+ "processes are writing.")
+ sys.exit(main(**{k: v
+ for k, v in vars(parser.parse_intermixed_args()).items()
+ if v is not None}))
diff --git a/scripts/test.py b/scripts/test.py
index c8196b36..6e8a201c 100755
--- a/scripts/test.py
+++ b/scripts/test.py
@@ -1,860 +1,1484 @@
#!/usr/bin/env python3
-
-# This script manages littlefs tests, which are configured with
-# .toml files stored in the tests directory.
+#
+# Script to compile and run tests.
+#
+# Example:
+# ./scripts/test.py runners/test_runner -b
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
#
-import toml
+import collections as co
+import csv
+import errno
import glob
-import re
-import os
-import io
import itertools as it
-import collections.abc as abc
-import subprocess as sp
-import base64
-import sys
-import copy
-import shlex
+import math as m
+import os
import pty
-import errno
+import re
+import shlex
+import shutil
import signal
+import subprocess as sp
+import threading as th
+import time
+import toml
-TEST_PATHS = 'tests'
-RULES = """
-# add block devices to sources
-TESTSRC ?= $(SRC) $(wildcard bd/*.c)
-
-define FLATTEN
-%(path)s%%$(subst /,.,$(target)): $(target)
- ./scripts/explode_asserts.py $$< -o $$@
-endef
-$(foreach target,$(TESTSRC),$(eval $(FLATTEN)))
-
--include %(path)s*.d
-.SECONDARY:
-
-%(path)s.test: %(path)s.test.o \\
- $(foreach t,$(subst /,.,$(TESTSRC:.c=.o)),%(path)s.$t)
- $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
-
-# needed in case builddir is different
-%(path)s%%.o: %(path)s%%.c
- $(CC) -c -MMD $(CFLAGS) $< -o $@
-"""
-COVERAGE_RULES = """
-%(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage
-
-# delete lingering coverage
-%(path)s.test: | %(path)s.info.clean
-.PHONY: %(path)s.info.clean
-%(path)s.info.clean:
- rm -f %(path)s*.gcda
-
-# accumulate coverage info
-.PHONY: %(path)s.info
-%(path)s.info:
- $(strip $(LCOV) -c \\
- $(addprefix -d ,$(wildcard %(path)s*.gcda)) \\
- --rc 'geninfo_adjust_src_path=$(shell pwd)' \\
- -o $@)
- $(LCOV) -e $@ $(addprefix /,$(SRC)) -o $@
-ifdef COVERAGETARGET
- $(strip $(LCOV) -a $@ \\
- $(addprefix -a ,$(wildcard $(COVERAGETARGET))) \\
- -o $(COVERAGETARGET))
-endif
-"""
-GLOBALS = """
-//////////////// AUTOGENERATED TEST ////////////////
-#include "lfs.h"
-#include "bd/lfs_testbd.h"
-#include <stdio.h>
-extern const char *lfs_testbd_path;
-extern uint32_t lfs_testbd_cycles;
-"""
-DEFINES = {
- 'LFS_READ_SIZE': 16,
- 'LFS_PROG_SIZE': 'LFS_READ_SIZE',
- 'LFS_BLOCK_SIZE': 512,
- 'LFS_BLOCK_COUNT': 1024,
- 'LFS_BLOCK_CYCLES': -1,
- 'LFS_CACHE_SIZE': '(64 % LFS_PROG_SIZE == 0 ? 64 : LFS_PROG_SIZE)',
- 'LFS_LOOKAHEAD_SIZE': 16,
- 'LFS_ERASE_VALUE': 0xff,
- 'LFS_ERASE_CYCLES': 0,
- 'LFS_BADBLOCK_BEHAVIOR': 'LFS_TESTBD_BADBLOCK_PROGERROR',
-}
-PROLOGUE = """
- // prologue
- __attribute__((unused)) lfs_t lfs;
- __attribute__((unused)) lfs_testbd_t bd;
- __attribute__((unused)) lfs_file_t file;
- __attribute__((unused)) lfs_dir_t dir;
- __attribute__((unused)) struct lfs_info info;
- __attribute__((unused)) char path[1024];
- __attribute__((unused)) uint8_t buffer[(1024 > LFS_BLOCK_SIZE * 4) ? (1024) : (LFS_BLOCK_SIZE * 4)];
- __attribute__((unused)) lfs_size_t size;
- __attribute__((unused)) int err;
-
- __attribute__((unused)) const struct lfs_config cfg = {
- .context = &bd,
- .read = lfs_testbd_read,
- .prog = lfs_testbd_prog,
- .erase = lfs_testbd_erase,
- .sync = lfs_testbd_sync,
- .read_size = LFS_READ_SIZE,
- .prog_size = LFS_PROG_SIZE,
- .block_size = LFS_BLOCK_SIZE,
- .block_count = LFS_BLOCK_COUNT,
- .block_cycles = LFS_BLOCK_CYCLES,
- .cache_size = LFS_CACHE_SIZE,
- .lookahead_size = LFS_LOOKAHEAD_SIZE,
- };
-
- __attribute__((unused)) const struct lfs_testbd_config bdcfg = {
- .erase_value = LFS_ERASE_VALUE,
- .erase_cycles = LFS_ERASE_CYCLES,
- .badblock_behavior = LFS_BADBLOCK_BEHAVIOR,
- .power_cycles = lfs_testbd_cycles,
- };
-
- lfs_testbd_createcfg(&cfg, lfs_testbd_path, &bdcfg) => 0;
-"""
-EPILOGUE = """
- // epilogue
- lfs_testbd_destroy(&cfg) => 0;
-"""
-PASS = '\033[32m✓\033[0m'
-FAIL = '\033[31m✗\033[0m'
-
-class TestFailure(Exception):
- def __init__(self, case, returncode=None, stdout=None, assert_=None):
- self.case = case
- self.returncode = returncode
- self.stdout = stdout
- self.assert_ = assert_
-class TestCase:
- def __init__(self, config, filter=filter,
- suite=None, caseno=None, lineno=None, **_):
- self.config = config
- self.filter = filter
- self.suite = suite
- self.caseno = caseno
- self.lineno = lineno
-
- self.code = config['code']
- self.code_lineno = config['code_lineno']
- self.defines = config.get('define', {})
- self.if_ = config.get('if', None)
- self.in_ = config.get('in', None)
-
- self.result = None
-
- def __str__(self):
- if hasattr(self, 'permno'):
- if any(k not in self.case.defines for k in self.defines):
- return '%s#%d#%d (%s)' % (
- self.suite.name, self.caseno, self.permno, ', '.join(
- '%s=%s' % (k, v) for k, v in self.defines.items()
- if k not in self.case.defines))
- else:
- return '%s#%d#%d' % (
- self.suite.name, self.caseno, self.permno)
- else:
- return '%s#%d' % (
- self.suite.name, self.caseno)
-
- def permute(self, class_=None, defines={}, permno=None, **_):
- ncase = (class_ or type(self))(self.config)
- for k, v in self.__dict__.items():
- setattr(ncase, k, v)
- ncase.case = self
- ncase.perms = [ncase]
- ncase.permno = permno
- ncase.defines = defines
- return ncase
-
- def build(self, f, **_):
- # prologue
- for k, v in sorted(self.defines.items()):
- if k not in self.suite.defines:
- f.write('#define %s %s\n' % (k, v))
-
- f.write('void test_case%d(%s) {' % (self.caseno, ','.join(
- '\n'+8*' '+'__attribute__((unused)) intmax_t %s' % k
- for k in sorted(self.perms[0].defines)
- if k not in self.defines)))
-
- f.write(PROLOGUE)
- f.write('\n')
- f.write(4*' '+'// test case %d\n' % self.caseno)
- f.write(4*' '+'#line %d "%s"\n' % (self.code_lineno, self.suite.path))
-
- # test case goes here
- f.write(self.code)
-
- # epilogue
- f.write(EPILOGUE)
- f.write('}\n')
-
- for k, v in sorted(self.defines.items()):
- if k not in self.suite.defines:
- f.write('#undef %s\n' % k)
-
- def shouldtest(self, **args):
- if (self.filter is not None and
- len(self.filter) >= 1 and
- self.filter[0] != self.caseno):
- return False
- elif (self.filter is not None and
- len(self.filter) >= 2 and
- self.filter[1] != self.permno):
- return False
- elif args.get('no_internal') and self.in_ is not None:
- return False
- elif self.if_ is not None:
- if_ = self.if_
- while True:
- for k, v in sorted(self.defines.items(),
- key=lambda x: len(x[0]), reverse=True):
- if k in if_:
- if_ = if_.replace(k, '(%s)' % v)
- break
- else:
- break
- if_ = (
- re.sub('(\&\&|\?)', ' and ',
- re.sub('(\|\||:)', ' or ',
- re.sub('!(?!=)', ' not ', if_))))
- return eval(if_)
- else:
- return True
-
- def test(self, exec=[], persist=False, cycles=None,
- gdb=False, failure=None, disk=None, **args):
- # build command
- cmd = exec + ['./%s.test' % self.suite.path,
- repr(self.caseno), repr(self.permno)]
-
- # persist disk or keep in RAM for speed?
- if persist:
- if not disk:
- disk = self.suite.path + '.disk'
- if persist != 'noerase':
- try:
- with open(disk, 'w') as f:
- f.truncate(0)
- if args.get('verbose'):
- print('truncate --size=0', disk)
- except FileNotFoundError:
- pass
-
- cmd.append(disk)
-
- # simulate power-loss after n cycles?
- if cycles:
- cmd.append(str(cycles))
-
- # failed? drop into debugger?
- if gdb and failure:
- ncmd = ['gdb']
- if gdb == 'assert':
- ncmd.extend(['-ex', 'r'])
- if failure.assert_:
- ncmd.extend(['-ex', 'up 2'])
- elif gdb == 'main':
- ncmd.extend([
- '-ex', 'b %s:%d' % (self.suite.path, self.code_lineno),
- '-ex', 'r'])
- ncmd.extend(['--args'] + cmd)
-
- if args.get('verbose'):
- print(' '.join(shlex.quote(c) for c in ncmd))
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- sys.exit(sp.call(ncmd))
-
- # run test case!
- mpty, spty = pty.openpty()
- if args.get('verbose'):
- print(' '.join(shlex.quote(c) for c in cmd))
- proc = sp.Popen(cmd, stdout=spty, stderr=spty)
- os.close(spty)
- mpty = os.fdopen(mpty, 'r', 1)
- stdout = []
- assert_ = None
- try:
- while True:
- try:
- line = mpty.readline()
- except OSError as e:
- if e.errno == errno.EIO:
- break
- raise
- if not line:
- break;
- stdout.append(line)
- if args.get('verbose'):
- sys.stdout.write(line)
- # intercept asserts
- m = re.match(
- '^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$'
- .format('(?:\033\[[\d;]*.| )*', 'assert'),
- line)
- if m and assert_ is None:
- try:
- with open(m.group(1)) as f:
- lineno = int(m.group(2))
- line = (next(it.islice(f, lineno-1, None))
- .strip('\n'))
- assert_ = {
- 'path': m.group(1),
- 'line': line,
- 'lineno': lineno,
- 'message': m.group(3)}
- except:
- pass
- except KeyboardInterrupt:
- raise TestFailure(self, 1, stdout, None)
- proc.wait()
+RUNNER_PATH = './runners/test_runner'
+HEADER_PATH = 'runners/test_runner.h'
- # did we pass?
- if proc.returncode != 0:
- raise TestFailure(self, proc.returncode, stdout, assert_)
- else:
- return PASS
+GDB_PATH = ['gdb']
+VALGRIND_PATH = ['valgrind']
+PERF_SCRIPT = ['./scripts/perf.py']
-class ValgrindTestCase(TestCase):
- def __init__(self, config, **args):
- self.leaky = config.get('leaky', False)
- super().__init__(config, **args)
- def shouldtest(self, **args):
- return not self.leaky and super().shouldtest(**args)
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
- def test(self, exec=[], **args):
- verbose = args.get('verbose')
- uninit = (self.defines.get('LFS_ERASE_VALUE', None) == -1)
- exec = [
- 'valgrind',
- '--leak-check=full',
- ] + (['--undef-value-errors=no'] if uninit else []) + [
- ] + (['--track-origins=yes'] if not uninit else []) + [
- '--error-exitcode=4',
- '--error-limit=no',
- ] + (['--num-callers=1'] if not verbose else []) + [
- '-q'] + exec
- return super().test(exec=exec, **args)
-
-class ReentrantTestCase(TestCase):
- def __init__(self, config, **args):
- self.reentrant = config.get('reentrant', False)
- super().__init__(config, **args)
-
- def shouldtest(self, **args):
- return self.reentrant and super().shouldtest(**args)
-
- def test(self, persist=False, gdb=False, failure=None, **args):
- for cycles in it.count(1):
- # clear disk first?
- if cycles == 1 and persist != 'noerase':
- persist = 'erase'
+class TestCase:
+ # create a TestCase object from a config
+ def __init__(self, config, args={}):
+ self.name = config.pop('name')
+ self.path = config.pop('path')
+ self.suite = config.pop('suite')
+ self.lineno = config.pop('lineno', None)
+ self.if_ = config.pop('if', None)
+ if isinstance(self.if_, bool):
+ self.if_ = 'true' if self.if_ else 'false'
+ self.code = config.pop('code')
+ self.code_lineno = config.pop('code_lineno', None)
+ self.in_ = config.pop('in',
+ config.pop('suite_in', None))
+
+ self.reentrant = config.pop('reentrant',
+ config.pop('suite_reentrant', False))
+
+ # figure out defines and build possible permutations
+ self.defines = set()
+ self.permutations = []
+
+ # defines can be a dict or a list of dicts
+ suite_defines = config.pop('suite_defines', {})
+ if not isinstance(suite_defines, list):
+ suite_defines = [suite_defines]
+ defines = config.pop('defines', {})
+ if not isinstance(defines, list):
+ defines = [defines]
+
+ def csplit(v):
+ # split commas but only outside of parens
+ parens = 0
+ i_ = 0
+ for i in range(len(v)):
+ if v[i] == ',' and parens == 0:
+ yield v[i_:i]
+ i_ = i+1
+ elif v[i] in '([{':
+ parens += 1
+ elif v[i] in '}])':
+ parens -= 1
+ if v[i_:].strip():
+ yield v[i_:]
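+ # e.g. csplit('a,range(0,2),(b,c)') yields 'a', 'range(0,2)', '(b,c)'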
+
+ def parse_define(v):
+ # a define entry can be a list
+ if isinstance(v, list):
+ for v_ in v:
+ yield from parse_define(v_)
+ # or a string
+ elif isinstance(v, str):
+ # which can be comma-separated values, with optional
+ # range statements. This matches the runtime define parser in
+ # the runner itself.
+ for v_ in csplit(v):
+ m = re.search(r'\brange\b\s*\('
+ '(?P<start>[^,\s]*)'
+ '\s*(?:,\s*(?P<stop>[^,\s]*)'
+ '\s*(?:,\s*(?P<step>[^,\s]*)\s*)?)?\)',
+ v_)
+ if m:
+ start = (int(m.group('start'), 0)
+ if m.group('start') else 0)
+ stop = (int(m.group('stop'), 0)
+ if m.group('stop') else None)
+ step = (int(m.group('step'), 0)
+ if m.group('step') else 1)
+ if m.lastindex <= 1:
+ start, stop = 0, start
+ for x in range(start, stop, step):
+ yield from parse_define('%s(%d)%s' % (
+ v_[:m.start()], x, v_[m.end():]))
+ else:
+ yield v_
+ # or a literal value
+ elif isinstance(v, bool):
+ yield 'true' if v else 'false'
else:
- persist = 'noerase'
+ yield v
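+ # a rough sketch of the expansion: 'range(3)' yields the strings
+ # '(0)', '(1)', '(2)', while '1,2' simply yields '1' and '2'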
+
+ # build possible permutations
+ for suite_defines_ in suite_defines:
+ self.defines |= suite_defines_.keys()
+ for defines_ in defines:
+ self.defines |= defines_.keys()
+ self.permutations.extend(dict(perm) for perm in it.product(*(
+ [(k, v) for v in parse_define(vs)]
+ for k, vs in sorted((suite_defines_ | defines_).items()))))
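+ # e.g. defines={'A': '1,2'} with suite_defines={'B': 4} expands to
+ # the permutations [{'A': '1', 'B': 4}, {'A': '2', 'B': 4}]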
+
+ for k in config.keys():
+ print('%swarning:%s in %s, found unused key %r' % (
+ '\x1b[01;33m' if args['color'] else '',
+ '\x1b[m' if args['color'] else '',
+ self.name,
+ k),
+ file=sys.stderr)
- # exact cycle we should drop into debugger?
- if gdb and failure and failure.cycleno == cycles:
- return super().test(gdb=gdb, persist=persist, cycles=cycles,
- failure=failure, **args)
-
- # run tests, but kill the program after prog/erase has
- # been hit n cycles. We exit with a special return code if the
- # program has not finished, since this isn't a test failure.
- try:
- return super().test(persist=persist, cycles=cycles, **args)
- except TestFailure as nfailure:
- if nfailure.returncode == 33:
- continue
- else:
- nfailure.cycleno = cycles
- raise
class TestSuite:
- def __init__(self, path, classes=[TestCase], defines={},
- filter=None, **args):
+ # create a TestSuite object from a toml file
+ def __init__(self, path, args={}):
+ self.path = path
self.name = os.path.basename(path)
if self.name.endswith('.toml'):
self.name = self.name[:-len('.toml')]
- if args.get('build_dir'):
- self.toml = path
- self.path = args['build_dir'] + '/' + path
- else:
- self.toml = path
- self.path = path
- self.classes = classes
- self.defines = defines.copy()
- self.filter = filter
- with open(self.toml) as f:
+ # load toml file and parse test cases
+ with open(self.path) as f:
# load tests
config = toml.load(f)
# find line numbers
f.seek(0)
- linenos = []
+ case_linenos = []
code_linenos = []
for i, line in enumerate(f):
- if re.match(r'\[\[\s*case\s*\]\]', line):
- linenos.append(i+1)
- if re.match(r'code\s*=\s*(\'\'\'|""")', line):
+ match = re.match(
+ '(?P<case>\[\s*cases\s*\.\s*(?P<name>\w+)\s*\])'
+ '|' '(?P<code>code\s*=)',
+ line)
+ if match and match.group('case'):
+ case_linenos.append((i+1, match.group('name')))
+ elif match and match.group('code'):
code_linenos.append(i+2)
- code_linenos.reverse()
-
- # grab global config
- for k, v in config.get('define', {}).items():
- if k not in self.defines:
- self.defines[k] = v
- self.code = config.get('code', None)
- if self.code is not None:
- self.code_lineno = code_linenos.pop()
-
- # create initial test cases
- self.cases = []
- for i, (case, lineno) in enumerate(zip(config['case'], linenos)):
- # code lineno?
- if 'code' in case:
- case['code_lineno'] = code_linenos.pop()
- # merge conditions if necessary
- if 'if' in config and 'if' in case:
- case['if'] = '(%s) && (%s)' % (config['if'], case['if'])
- elif 'if' in config:
- case['if'] = config['if']
- # initialize test case
- self.cases.append(TestCase(case, filter=filter,
- suite=self, caseno=i+1, lineno=lineno, **args))
-
- def __str__(self):
- return self.name
-
- def __lt__(self, other):
- return self.name < other.name
-
- def permute(self, **args):
- for case in self.cases:
- # lets find all parameterized definitions, in one of [args.D,
- # suite.defines, case.defines, DEFINES]. Note that each of these
- # can be either a dict of defines, or a list of dicts, expressing
- # an initial set of permutations.
- pending = [{}]
- for inits in [self.defines, case.defines, DEFINES]:
- if not isinstance(inits, list):
- inits = [inits]
-
- npending = []
- for init, pinit in it.product(inits, pending):
- ninit = pinit.copy()
- for k, v in init.items():
- if k not in ninit:
- try:
- ninit[k] = eval(v)
- except:
- ninit[k] = v
- npending.append(ninit)
-
- pending = npending
-
- # expand permutations
- pending = list(reversed(pending))
- expanded = []
- while pending:
- perm = pending.pop()
- for k, v in sorted(perm.items()):
- if not isinstance(v, str) and isinstance(v, abc.Iterable):
- for nv in reversed(v):
- nperm = perm.copy()
- nperm[k] = nv
- pending.append(nperm)
- break
- else:
- expanded.append(perm)
-
- # generate permutations
- case.perms = []
- for i, (class_, defines) in enumerate(
- it.product(self.classes, expanded)):
- case.perms.append(case.permute(
- class_, defines, permno=i+1, **args))
-
- # also track non-unique defines
- case.defines = {}
- for k, v in case.perms[0].defines.items():
- if all(perm.defines[k] == v for perm in case.perms):
- case.defines[k] = v
-
- # track all perms and non-unique defines
- self.perms = []
- for case in self.cases:
- self.perms.extend(case.perms)
-
- self.defines = {}
- for k, v in self.perms[0].defines.items():
- if all(perm.defines.get(k, None) == v for perm in self.perms):
- self.defines[k] = v
-
- return self.perms
-
- def build(self, **args):
- # build test files
- tf = open(self.path + '.test.tc', 'w')
- tf.write(GLOBALS)
- if self.code is not None:
- tf.write('#line %d "%s"\n' % (self.code_lineno, self.path))
- tf.write(self.code)
-
- tfs = {None: tf}
- for case in self.cases:
- if case.in_ not in tfs:
- tfs[case.in_] = open(self.path+'.'+
- re.sub('(\.c)?$', '.tc', case.in_.replace('/', '.')), 'w')
- tfs[case.in_].write('#line 1 "%s"\n' % case.in_)
- with open(case.in_) as f:
- for line in f:
- tfs[case.in_].write(line)
- tfs[case.in_].write('\n')
- tfs[case.in_].write(GLOBALS)
-
- tfs[case.in_].write('\n')
- case.build(tfs[case.in_], **args)
-
- tf.write('\n')
- tf.write('const char *lfs_testbd_path;\n')
- tf.write('uint32_t lfs_testbd_cycles;\n')
- tf.write('int main(int argc, char **argv) {\n')
- tf.write(4*' '+'int case_ = (argc > 1) ? atoi(argv[1]) : 0;\n')
- tf.write(4*' '+'int perm = (argc > 2) ? atoi(argv[2]) : 0;\n')
- tf.write(4*' '+'lfs_testbd_path = (argc > 3) ? argv[3] : NULL;\n')
- tf.write(4*' '+'lfs_testbd_cycles = (argc > 4) ? atoi(argv[4]) : 0;\n')
- for perm in self.perms:
- # test declaration
- tf.write(4*' '+'extern void test_case%d(%s);\n' % (
- perm.caseno, ', '.join(
- 'intmax_t %s' % k for k in sorted(perm.defines)
- if k not in perm.case.defines)))
- # test call
- tf.write(4*' '+
- 'if (argc < 3 || (case_ == %d && perm == %d)) {'
- ' test_case%d(%s); '
- '}\n' % (perm.caseno, perm.permno, perm.caseno, ', '.join(
- str(v) for k, v in sorted(perm.defines.items())
- if k not in perm.case.defines)))
- tf.write('}\n')
-
- for tf in tfs.values():
- tf.close()
-
- # write makefiles
- with open(self.path + '.mk', 'w') as mk:
- mk.write(RULES.replace(4*' ', '\t') % dict(path=self.path))
- mk.write('\n')
-
- # add coverage hooks?
- if args.get('coverage'):
- mk.write(COVERAGE_RULES.replace(4*' ', '\t') % dict(
- path=self.path))
- mk.write('\n')
-
- # add truly global defines globally
- for k, v in sorted(self.defines.items()):
- mk.write('%s.test: override CFLAGS += -D%s=%r\n'
- % (self.path, k, v))
-
- for path in tfs:
- if path is None:
- mk.write('%s: %s | %s\n' % (
- self.path+'.test.c',
- self.toml,
- self.path+'.test.tc'))
- else:
- mk.write('%s: %s %s | %s\n' % (
- self.path+'.'+path.replace('/', '.'),
- self.toml,
- path,
- self.path+'.'+re.sub('(\.c)?$', '.tc',
- path.replace('/', '.'))))
- mk.write('\t./scripts/explode_asserts.py $| -o $@\n')
-
- self.makefile = self.path + '.mk'
- self.target = self.path + '.test'
- return self.makefile, self.target
-
- def test(self, **args):
- # run test suite!
- if not args.get('verbose', True):
- sys.stdout.write(self.name + ' ')
- sys.stdout.flush()
- for perm in self.perms:
- if not perm.shouldtest(**args):
- continue
+ # sort in case toml parsing did not retain order
+ case_linenos.sort()
+
+ cases = config.pop('cases')
+ for (lineno, name), (nlineno, _) in it.zip_longest(
+ case_linenos, case_linenos[1:],
+ fillvalue=(float('inf'), None)):
+ code_lineno = min(
+ (l for l in code_linenos if l >= lineno and l < nlineno),
+ default=None)
+ cases[name]['lineno'] = lineno
+ cases[name]['code_lineno'] = code_lineno
+
+ self.if_ = config.pop('if', None)
+ if isinstance(self.if_, bool):
+ self.if_ = 'true' if self.if_ else 'false'
+
+ self.code = config.pop('code', None)
+ self.code_lineno = min(
+ (l for l in code_linenos
+ if not case_linenos or l < case_linenos[0][0]),
+ default=None)
+
+ # a couple of these we just forward to all cases
+ defines = config.pop('defines', {})
+ in_ = config.pop('in', None)
+ reentrant = config.pop('reentrant', False)
+
+ self.cases = []
+ for name, case in sorted(cases.items(),
+ key=lambda c: c[1].get('lineno')):
+ self.cases.append(TestCase(config={
+ 'name': name,
+ 'path': path + (':%d' % case['lineno']
+ if 'lineno' in case else ''),
+ 'suite': self.name,
+ 'suite_defines': defines,
+ 'suite_in': in_,
+ 'suite_reentrant': reentrant,
+ **case},
+ args=args))
+
+ # combine per-case defines
+ self.defines = set.union(*(
+ set(case.defines) for case in self.cases))
+
+ # combine other per-case things
+ self.reentrant = any(case.reentrant for case in self.cases)
+
+ for k in config.keys():
+ print('%swarning:%s in %s, found unused key %r' % (
+ '\x1b[01;33m' if args['color'] else '',
+ '\x1b[m' if args['color'] else '',
+ self.name,
+ k),
+ file=sys.stderr)
+
+
+
+def compile(test_paths, **args):
+ # find .toml files
+ paths = []
+ for path in test_paths:
+ if os.path.isdir(path):
+ path = path + '/*.toml'
+
+ for path in glob.glob(path):
+ paths.append(path)
+
+ if not paths:
+ print('no test suites found in %r?' % test_paths)
+ sys.exit(-1)
- try:
- result = perm.test(**args)
- except TestFailure as failure:
- perm.result = failure
- if not args.get('verbose', True):
- sys.stdout.write(FAIL)
- sys.stdout.flush()
- if not args.get('keep_going'):
- if not args.get('verbose', True):
- sys.stdout.write('\n')
- raise
- else:
- perm.result = PASS
- if not args.get('verbose', True):
- sys.stdout.write(PASS)
- sys.stdout.flush()
+ # load the suites
+ suites = [TestSuite(path, args) for path in paths]
+ suites.sort(key=lambda s: s.name)
- if not args.get('verbose', True):
- sys.stdout.write('\n')
+ # check for name conflicts, these will cause ambiguity problems later
+ # when running tests
+ seen = {}
+ for suite in suites:
+ if suite.name in seen:
+ print('%swarning:%s conflicting suite %r, %s and %s' % (
+ '\x1b[01;33m' if args['color'] else '',
+ '\x1b[m' if args['color'] else '',
+ suite.name,
+ suite.path,
+ seen[suite.name].path),
+ file=sys.stderr)
+ seen[suite.name] = suite
+
+ for case in suite.cases:
+ # only allow conflicts if a case and its suite share a name
+ if case.name in seen and not (
+ isinstance(seen[case.name], TestSuite)
+ and seen[case.name].cases == [case]):
+ print('%swarning:%s conflicting case %r, %s and %s' % (
+ '\x1b[01;33m' if args['color'] else '',
+ '\x1b[m' if args['color'] else '',
+ case.name,
+ case.path,
+ seen[case.name].path),
+ file=sys.stderr)
+ seen[case.name] = case
+
+ # we can only compile one test suite at a time
+ if not args.get('source'):
+ if len(suites) > 1:
+ print('more than one test suite for compilation? (%r)' % test_paths)
+ sys.exit(-1)
-def main(**args):
- # figure out explicit defines
- defines = {}
- for define in args['D']:
- k, v, *_ = define.split('=', 2) + ['']
- defines[k] = v
-
- # and what class of TestCase to run
- classes = []
- if args.get('normal'):
- classes.append(TestCase)
- if args.get('reentrant'):
- classes.append(ReentrantTestCase)
+ suite = suites[0]
+
+ # write generated test source
+ if 'output' in args:
+ with openio(args['output'], 'w') as f:
+ _write = f.write
+ def write(s):
+ f.lineno += s.count('\n')
+ _write(s)
+ def writeln(s=''):
+ f.lineno += s.count('\n') + 1
+ _write(s)
+ _write('\n')
+ f.lineno = 1
+ f.write = write
+ f.writeln = writeln
+
+ f.writeln("// Generated by %s:" % sys.argv[0])
+ f.writeln("//")
+ f.writeln("// %s" % ' '.join(sys.argv))
+ f.writeln("//")
+ f.writeln()
+
+ # include test_runner.h in every generated file
+ f.writeln("#include \"%s\"" % args['include'])
+ f.writeln()
+
+ # write out generated functions, this can end up in different
+ # files depending on the "in" attribute
+ #
+ # note it's up to the specific generated file to declare
+ # the test defines
+ def write_case_functions(f, suite, case):
+ # create case define functions
+ if case.defines:
+ # deduplicate defines by value to try to reduce the
+ # number of functions we generate
+ define_cbs = {}
+ for i, defines in enumerate(case.permutations):
+ for k, v in sorted(defines.items()):
+ if v not in define_cbs:
+ name = ('__test__%s__%s__%d'
+ % (case.name, k, i))
+ define_cbs[v] = name
+ f.writeln('intmax_t %s('
+ '__attribute__((unused)) '
+ 'void *data) {' % name)
+ f.writeln(4*' '+'return %s;' % v)
+ f.writeln('}')
+ f.writeln()
+ f.writeln('const test_define_t '
+ '__test__%s__defines[]['
+ 'TEST_IMPLICIT_DEFINE_COUNT+%d] = {'
+ % (case.name, len(suite.defines)))
+ for defines in case.permutations:
+ f.writeln(4*' '+'{')
+ for k, v in sorted(defines.items()):
+ f.writeln(8*' '+'[%-24s] = {%s, NULL},' % (
+ k+'_i', define_cbs[v]))
+ f.writeln(4*' '+'},')
+ f.writeln('};')
+ f.writeln()
+
+ # create case filter function
+ if suite.if_ is not None or case.if_ is not None:
+ f.writeln('bool __test__%s__filter(void) {'
+ % (case.name))
+ f.writeln(4*' '+'return %s;'
+ % ' && '.join('(%s)' % if_
+ for if_ in [suite.if_, case.if_]
+ if if_ is not None))
+ f.writeln('}')
+ f.writeln()
+
+ # create case run function
+ f.writeln('void __test__%s__run('
+ '__attribute__((unused)) struct lfs_config *cfg) {'
+ % (case.name))
+ f.writeln(4*' '+'// test case %s' % case.name)
+ if case.code_lineno is not None:
+ f.writeln(4*' '+'#line %d "%s"'
+ % (case.code_lineno, suite.path))
+ f.write(case.code)
+ if case.code_lineno is not None:
+ f.writeln(4*' '+'#line %d "%s"'
+ % (f.lineno+1, args['output']))
+ f.writeln('}')
+ f.writeln()
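+ # (the #line directives above remap compile errors and assert
+ # messages to the original .toml file, then back to the
+ # generated source once the case's code ends)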
+
+ if not args.get('source'):
+ if suite.code is not None:
+ if suite.code_lineno is not None:
+ f.writeln('#line %d "%s"'
+ % (suite.code_lineno, suite.path))
+ f.write(suite.code)
+ if suite.code_lineno is not None:
+ f.writeln('#line %d "%s"'
+ % (f.lineno+1, args['output']))
+ f.writeln()
+
+ if suite.defines:
+ for i, define in enumerate(sorted(suite.defines)):
+ f.writeln('#ifndef %s' % define)
+ f.writeln('#define %-24s '
+ 'TEST_IMPLICIT_DEFINE_COUNT+%d' % (define+'_i', i))
+ f.writeln('#define %-24s '
+ 'TEST_DEFINE(%s)' % (define, define+'_i'))
+ f.writeln('#endif')
+ f.writeln()
+
+ # create case functions
+ for case in suite.cases:
+ if case.in_ is None:
+ write_case_functions(f, suite, case)
+ else:
+ if case.defines:
+ f.writeln('extern const test_define_t '
+ '__test__%s__defines[]['
+ 'TEST_IMPLICIT_DEFINE_COUNT+%d];'
+ % (case.name, len(suite.defines)))
+ if suite.if_ is not None or case.if_ is not None:
+ f.writeln('extern bool __test__%s__filter('
+ 'void);'
+ % (case.name))
+ f.writeln('extern void __test__%s__run('
+ 'struct lfs_config *cfg);'
+ % (case.name))
+ f.writeln()
+
+ # create suite struct
+ #
+ # note we place this in the custom test_suites section with
+ # minimum alignment, otherwise GCC ups the alignment to
+ # 32 bytes for some reason
+ f.writeln('__attribute__((section("_test_suites"), '
+ 'aligned(1)))')
+ f.writeln('const struct test_suite __test__%s__suite = {'
+ % suite.name)
+ f.writeln(4*' '+'.name = "%s",' % suite.name)
+ f.writeln(4*' '+'.path = "%s",' % suite.path)
+ f.writeln(4*' '+'.flags = %s,'
+ % (' | '.join(filter(None, [
+ 'TEST_REENTRANT' if suite.reentrant else None]))
+ or 0))
+ if suite.defines:
+ # create suite define names
+ f.writeln(4*' '+'.define_names = (const char *const['
+ 'TEST_IMPLICIT_DEFINE_COUNT+%d]){' % (
+ len(suite.defines)))
+ for k in sorted(suite.defines):
+ f.writeln(8*' '+'[%-24s] = "%s",' % (k+'_i', k))
+ f.writeln(4*' '+'},')
+ f.writeln(4*' '+'.define_count = '
+ 'TEST_IMPLICIT_DEFINE_COUNT+%d,' % len(suite.defines))
+ f.writeln(4*' '+'.cases = (const struct test_case[]){')
+ for case in suite.cases:
+ # create case structs
+ f.writeln(8*' '+'{')
+ f.writeln(12*' '+'.name = "%s",' % case.name)
+ f.writeln(12*' '+'.path = "%s",' % case.path)
+ f.writeln(12*' '+'.flags = %s,'
+ % (' | '.join(filter(None, [
+ 'TEST_REENTRANT' if case.reentrant else None]))
+ or 0))
+ f.writeln(12*' '+'.permutations = %d,'
+ % len(case.permutations))
+ if case.defines:
+ f.writeln(12*' '+'.defines '
+ '= (const test_define_t*)__test__%s__defines,'
+ % (case.name))
+ if suite.if_ is not None or case.if_ is not None:
+ f.writeln(12*' '+'.filter = __test__%s__filter,'
+ % (case.name))
+ f.writeln(12*' '+'.run = __test__%s__run,'
+ % (case.name))
+ f.writeln(8*' '+'},')
+ f.writeln(4*' '+'},')
+ f.writeln(4*' '+'.case_count = %d,' % len(suite.cases))
+ f.writeln('};')
+ f.writeln()
+
+ else:
+ # copy source
+ f.writeln('#line 1 "%s"' % args['source'])
+ with open(args['source']) as sf:
+ shutil.copyfileobj(sf, f)
+ f.writeln()
+
+ # write any internal tests
+ for suite in suites:
+ for case in suite.cases:
+ if (case.in_ is not None
+ and os.path.normpath(case.in_)
+ == os.path.normpath(args['source'])):
+ # write defines, but note we need to undef any
+ # new defines since we're in someone else's file
+ if suite.defines:
+ for i, define in enumerate(
+ sorted(suite.defines)):
+ f.writeln('#ifndef %s' % define)
+ f.writeln('#define %-24s '
+ 'TEST_IMPLICIT_DEFINE_COUNT+%d' % (
+ define+'_i', i))
+ f.writeln('#define %-24s '
+ 'TEST_DEFINE(%s)' % (
+ define, define+'_i'))
+ f.writeln('#define '
+ '__TEST__%s__NEEDS_UNDEF' % (
+ define))
+ f.writeln('#endif')
+ f.writeln()
+
+ write_case_functions(f, suite, case)
+
+ if suite.defines:
+ for define in sorted(suite.defines):
+ f.writeln('#ifdef __TEST__%s__NEEDS_UNDEF'
+ % define)
+ f.writeln('#undef __TEST__%s__NEEDS_UNDEF'
+ % define)
+ f.writeln('#undef %s' % define)
+ f.writeln('#undef %s' % (define+'_i'))
+ f.writeln('#endif')
+ f.writeln()
+
+def find_runner(runner, **args):
+ cmd = runner.copy()
+
+ # run under some external command?
+ if args.get('exec'):
+ cmd[:0] = args['exec']
+
+ # run under valgrind?
if args.get('valgrind'):
- classes.append(ValgrindTestCase)
- if not classes:
- classes = [TestCase]
-
- suites = []
- for testpath in args['test_paths']:
- # optionally specified test case/perm
- testpath, *filter = testpath.split('#')
- filter = [int(f) for f in filter]
-
- # figure out the suite's toml file
- if os.path.isdir(testpath):
- testpath = testpath + '/*.toml'
- elif os.path.isfile(testpath):
- testpath = testpath
- elif testpath.endswith('.toml'):
- testpath = TEST_PATHS + '/' + testpath
- else:
- testpath = TEST_PATHS + '/' + testpath + '.toml'
+ cmd[:0] = args['valgrind_path'] + [
+ '--leak-check=full',
+ '--track-origins=yes',
+ '--error-exitcode=4',
+ '-q']
+
+ # run under perf?
+ if args.get('perf'):
+ cmd[:0] = args['perf_script'] + list(filter(None, [
+ '-R',
+ '--perf-freq=%s' % args['perf_freq']
+ if args.get('perf_freq') else None,
+ '--perf-period=%s' % args['perf_period']
+ if args.get('perf_period') else None,
+ '--perf-events=%s' % args['perf_events']
+ if args.get('perf_events') else None,
+ '--perf-path=%s' % args['perf_path']
+ if args.get('perf_path') else None,
+ '-o%s' % args['perf']]))
+
+ # other context
+ if args.get('geometry'):
+ cmd.append('-G%s' % args['geometry'])
+ if args.get('powerloss'):
+ cmd.append('-P%s' % args['powerloss'])
+ if args.get('disk'):
+ cmd.append('-d%s' % args['disk'])
+ if args.get('trace'):
+ cmd.append('-t%s' % args['trace'])
+ if args.get('trace_backtrace'):
+ cmd.append('--trace-backtrace')
+ if args.get('trace_period'):
+ cmd.append('--trace-period=%s' % args['trace_period'])
+ if args.get('trace_freq'):
+ cmd.append('--trace-freq=%s' % args['trace_freq'])
+ if args.get('read_sleep'):
+ cmd.append('--read-sleep=%s' % args['read_sleep'])
+ if args.get('prog_sleep'):
+ cmd.append('--prog-sleep=%s' % args['prog_sleep'])
+ if args.get('erase_sleep'):
+ cmd.append('--erase-sleep=%s' % args['erase_sleep'])
+
+ # defines?
+ if args.get('define'):
+ for define in args.get('define'):
+ cmd.append('-D%s' % define)
+
+ return cmd
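+ # a rough sketch: with valgrind=True and geometry='emmc' this
+ # returns something like ['valgrind', '--leak-check=full', ...,
+ # './runners/test_runner', '-Gemmc']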
+
+def list_(runner, test_ids=[], **args):
+ cmd = find_runner(runner, **args) + test_ids
+ if args.get('summary'): cmd.append('--summary')
+ if args.get('list_suites'): cmd.append('--list-suites')
+ if args.get('list_cases'): cmd.append('--list-cases')
+ if args.get('list_suite_paths'): cmd.append('--list-suite-paths')
+ if args.get('list_case_paths'): cmd.append('--list-case-paths')
+ if args.get('list_defines'): cmd.append('--list-defines')
+ if args.get('list_permutation_defines'):
+ cmd.append('--list-permutation-defines')
+ if args.get('list_implicit_defines'):
+ cmd.append('--list-implicit-defines')
+ if args.get('list_geometries'): cmd.append('--list-geometries')
+ if args.get('list_powerlosses'): cmd.append('--list-powerlosses')
- # find tests
- for path in glob.glob(testpath):
- suites.append(TestSuite(path, classes, defines, filter, **args))
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ return sp.call(cmd)
- # sort for reproducibility
- suites = sorted(suites)
- # generate permutations
- for suite in suites:
- suite.permute(**args)
+def find_perms(runner_, ids=[], **args):
+ case_suites = {}
+ expected_case_perms = co.defaultdict(lambda: 0)
+ expected_perms = 0
+ total_perms = 0
- # build tests in parallel
- print('====== building ======')
- makefiles = []
- targets = []
- for suite in suites:
- makefile, target = suite.build(**args)
- makefiles.append(makefile)
- targets.append(target)
-
- cmd = (['make', '-f', 'Makefile'] +
- list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
- [target for target in targets])
- mpty, spty = pty.openpty()
+ # query cases from the runner
+ cmd = runner_ + ['--list-cases'] + ids
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
- proc = sp.Popen(cmd, stdout=spty, stderr=spty)
- os.close(spty)
- mpty = os.fdopen(mpty, 'r', 1)
- stdout = []
- while True:
- try:
- line = mpty.readline()
- except OSError as e:
- if e.errno == errno.EIO:
- break
- raise
- if not line:
- break;
- stdout.append(line)
- if args.get('verbose'):
- sys.stdout.write(line)
- # intercept warnings
- m = re.match(
- '^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$'
- .format('(?:\033\[[\d;]*.| )*', 'warning'),
- line)
- if m and not args.get('verbose'):
- try:
- with open(m.group(1)) as f:
- lineno = int(m.group(2))
- line = next(it.islice(f, lineno-1, None)).strip('\n')
- sys.stdout.write(
- "\033[01m{path}:{lineno}:\033[01;35mwarning:\033[m "
- "{message}\n{line}\n\n".format(
- path=m.group(1), line=line, lineno=lineno,
- message=m.group(3)))
- except:
- pass
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
+ pattern = re.compile(
+ '^(?P<case>[^\s]+)'
+ '\s+(?P<flags>[^\s]+)'
+ '\s+(?P<filtered>\d+)/(?P<perms>\d+)')
+ # skip the first line
+ for line in it.islice(proc.stdout, 1, None):
+ m = pattern.match(line)
+ if m:
+ filtered = int(m.group('filtered'))
+ perms = int(m.group('perms'))
+ expected_case_perms[m.group('case')] += filtered
+ expected_perms += filtered
+ total_perms += perms
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ sys.exit(-1)
+
+ # get which suite each case belongs to via paths
+ cmd = runner_ + ['--list-case-paths'] + ids
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
+ pattern = re.compile(
+ '^(?P<case>[^\s]+)'
+ '\s+(?P<path>[^:]+):(?P<lineno>\d+)')
+ # skip the first line
+ for line in it.islice(proc.stdout, 1, None):
+ m = pattern.match(line)
+ if m:
+ path = m.group('path')
+ # strip path/suffix here
+ suite = os.path.basename(path)
+ if suite.endswith('.toml'):
+ suite = suite[:-len('.toml')]
+ case_suites[m.group('case')] = suite
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
- for line in stdout:
+ for line in proc.stderr:
sys.stdout.write(line)
sys.exit(-1)
- print('built %d test suites, %d test cases, %d permutations' % (
- len(suites),
- sum(len(suite.cases) for suite in suites),
- sum(len(suite.perms) for suite in suites)))
+ # figure out expected suite perms
+ expected_suite_perms = co.defaultdict(lambda: 0)
+ for case, suite in case_suites.items():
+ expected_suite_perms[suite] += expected_case_perms[case]
+
+ return (
+ case_suites,
+ expected_suite_perms,
+ expected_case_perms,
+ expected_perms,
+ total_perms)
+
+def find_path(runner_, id, **args):
+ path = None
+ # query from runner
+ cmd = runner_ + ['--list-case-paths', id]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
+ pattern = re.compile(
+ '^(?P<case>[^\s]+)'
+ '\s+(?P<path>[^:]+):(?P<lineno>\d+)')
+ # skip the first line
+ for line in it.islice(proc.stdout, 1, None):
+ m = pattern.match(line)
+ if m and path is None:
+ path_ = m.group('path')
+ lineno = int(m.group('lineno'))
+ path = (path_, lineno)
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ sys.exit(-1)
- total = 0
- for suite in suites:
- for perm in suite.perms:
- total += perm.shouldtest(**args)
- if total != sum(len(suite.perms) for suite in suites):
- print('filtered down to %d permutations' % total)
+ return path
- # only requested to build?
- if args.get('build'):
- return 0
+def find_defines(runner_, id, **args):
+ # query permutation defines from runner
+ cmd = runner_ + ['--list-permutation-defines', id]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace',
+ close_fds=False)
+ defines = co.OrderedDict()
+ pattern = re.compile('^(?P<define>\w+)=(?P<value>.+)')
+ for line in proc.stdout:
+ m = pattern.match(line)
+ if m:
+ define = m.group('define')
+ value = m.group('value')
+ defines[define] = value
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ sys.exit(-1)
- print('====== testing ======')
- try:
- for suite in suites:
- suite.test(**args)
- except TestFailure:
- pass
+ return defines
- print('====== results ======')
- passed = 0
- failed = 0
- for suite in suites:
- for perm in suite.perms:
- if perm.result == PASS:
- passed += 1
- elif isinstance(perm.result, TestFailure):
- sys.stdout.write(
- "\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m "
- "{perm} failed\n".format(
- perm=perm, path=perm.suite.path, lineno=perm.lineno,
- returncode=perm.result.returncode or 0))
- if perm.result.stdout:
- if perm.result.assert_:
- stdout = perm.result.stdout[:-1]
- else:
- stdout = perm.result.stdout
- for line in stdout[-5:]:
- sys.stdout.write(line)
- if perm.result.assert_:
- sys.stdout.write(
- "\033[01m{path}:{lineno}:\033[01;31massert:\033[m "
- "{message}\n{line}\n".format(
- **perm.result.assert_))
- sys.stdout.write('\n')
- failed += 1
-
- if args.get('coverage'):
- # collect coverage info
- # why -j1? lcov doesn't work in parallel because of gcov limitations
- cmd = (['make', '-j1', '-f', 'Makefile'] +
- list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
- (['COVERAGETARGET=%s' % args['coverage']]
- if isinstance(args['coverage'], str) else []) +
- [suite.path + '.info' for suite in suites
- if any(perm.result == PASS for perm in suite.perms)])
+
+# Thread-safe CSV writer
+class TestOutput:
+ def __init__(self, path, head=None, tail=None):
+ self.f = openio(path, 'w+', 1)
+ self.lock = th.Lock()
+ self.head = head or []
+ self.tail = tail or []
+ self.writer = csv.DictWriter(self.f, self.head + self.tail)
+ self.rows = []
+
+ def close(self):
+ self.f.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *_):
+ self.f.close()
+
+ def writerow(self, row):
+ with self.lock:
+ self.rows.append(row)
+ if all(k in self.head or k in self.tail for k in row.keys()):
+ # can simply append
+ self.writer.writerow(row)
+ else:
+ # need to rewrite the file
+ self.head.extend(row.keys() - (self.head + self.tail))
+ self.f.seek(0)
+ self.f.truncate()
+ self.writer = csv.DictWriter(self.f, self.head + self.tail)
+ self.writer.writeheader()
+ for row in self.rows:
+ self.writer.writerow(row)
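+ # note rows may introduce new defines at any time, unknown keys
+ # just grow self.head and trigger a one-time rewrite of the file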
+
+# A test failure
+class TestFailure(Exception):
+ def __init__(self, id, returncode, stdout, assert_=None):
+ self.id = id
+ self.returncode = returncode
+ self.stdout = stdout
+ self.assert_ = assert_
+
+def run_stage(name, runner_, ids, stdout_, trace_, output_, **args):
+ # get expected suite/case/perm counts
+ (case_suites,
+ expected_suite_perms,
+ expected_case_perms,
+ expected_perms,
+ total_perms) = find_perms(runner_, ids, **args)
+
+ passed_suite_perms = co.defaultdict(lambda: 0)
+ passed_case_perms = co.defaultdict(lambda: 0)
+ passed_perms = 0
+ powerlosses = 0
+ failures = []
+ killed = False
+
+ pattern = re.compile('^(?:'
+ '(?P<op>running|finished|skipped|powerloss) '
+ '(?P<id>(?P<case>[^:]+)[^\s]*)'
+ '|' '(?P<path>[^:]+):(?P<lineno>\d+):(?P<op_>assert):'
+ ' *(?P<message>.*)'
+ ')$')
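+ # locals tracks per-thread progress, children tracks spawned runner
+ # processes so we can kill them all if a test fails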
+ locals = th.local()
+ children = set()
+
+ def run_runner(runner_, ids=[]):
+ nonlocal passed_suite_perms
+ nonlocal passed_case_perms
+ nonlocal passed_perms
+ nonlocal powerlosses
+ nonlocal locals
+
+ # run the tests!
+ cmd = runner_ + ids
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
- proc = sp.Popen(cmd,
- stdout=sp.PIPE if not args.get('verbose') else None,
- stderr=sp.STDOUT if not args.get('verbose') else None,
- universal_newlines=True)
- stdout = []
- for line in proc.stdout:
- stdout.append(line)
+
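+ # run the runner under a pty so the child sees a tty and keeps its
+ # output line-buffered, letting us parse state changes as they happen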
+ mpty, spty = pty.openpty()
+ proc = sp.Popen(cmd, stdout=spty, stderr=spty, close_fds=False)
+ os.close(spty)
+ children.add(proc)
+ mpty = os.fdopen(mpty, 'r', 1)
+
+ last_id = None
+ last_stdout = co.deque(maxlen=args.get('context', 5) + 1)
+ last_assert = None
+ try:
+ while True:
+ # parse a line for state changes
+ try:
+ line = mpty.readline()
+ except OSError as e:
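+ # on Linux, reading a pty after the child exits raises
+ # EIO, treat it as EOF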
+ if e.errno != errno.EIO:
+ raise
+ break
+ if not line:
+ break
+ last_stdout.append(line)
+ if stdout_:
+ try:
+ stdout_.write(line)
+ stdout_.flush()
+ except BrokenPipeError:
+ pass
+
+ m = pattern.match(line)
+ if m:
+ op = m.group('op') or m.group('op_')
+ if op == 'running':
+ locals.seen_perms += 1
+ last_id = m.group('id')
+ last_stdout.clear()
+ last_assert = None
+ elif op == 'powerloss':
+ last_id = m.group('id')
+ powerlosses += 1
+ elif op == 'finished':
+ case = m.group('case')
+ suite = case_suites[case]
+ passed_suite_perms[suite] += 1
+ passed_case_perms[case] += 1
+ passed_perms += 1
+ if output_:
+ # get defines and write to csv
+ defines = find_defines(
+ runner_, m.group('id'), **args)
+ output_.writerow({
+ 'suite': suite,
+ 'case': case,
+ 'test_passed': '1/1',
+ **defines})
+ elif op == 'skipped':
+ locals.seen_perms += 1
+ elif op == 'assert':
+ last_assert = (
+ m.group('path'),
+ int(m.group('lineno')),
+ m.group('message'))
+ # go ahead and kill the process, aborting takes a while
+ if args.get('keep_going'):
+ proc.kill()
+ except KeyboardInterrupt:
+ raise TestFailure(last_id, 1, list(last_stdout))
+ finally:
+ children.remove(proc)
+ mpty.close()
+
proc.wait()
if proc.returncode != 0:
- if not args.get('verbose'):
- for line in stdout:
- sys.stdout.write(line)
- sys.exit(-1)
+ raise TestFailure(
+ last_id,
+ proc.returncode,
+ list(last_stdout),
+ last_assert)
+
+ def run_job(runner_, ids=[], start=None, step=None):
+ nonlocal failures
+ nonlocal killed
+ nonlocal locals
+
+ start = start or 0
+ step = step or 1
+ while start < total_perms:
+ job_runner = runner_.copy()
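+ # -s<start>,<stop>,<step> bounds the runner to a slice of the
+ # permutations, isolate/valgrind gets one permutation per process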
+ if args.get('isolate') or args.get('valgrind'):
+ job_runner.append('-s%s,%s,%s' % (start, start+step, step))
+ else:
+ job_runner.append('-s%s,,%s' % (start, step))
+
+ try:
+ # run the tests
+ locals.seen_perms = 0
+ run_runner(job_runner, ids)
+ assert locals.seen_perms > 0
+ start += locals.seen_perms*step
+
+ except TestFailure as failure:
+ # keep track of failures
+ if output_:
+ case, _ = failure.id.split(':', 1)
+ suite = case_suites[case]
+ # get defines and write to csv
+ defines = find_defines(runner_, failure.id, **args)
+ output_.writerow({
+ 'suite': suite,
+ 'case': case,
+ 'test_passed': '0/1',
+ **defines})
+
+ # race condition for multiple failures?
+ if failures and not args.get('keep_going'):
+ break
+
+ failures.append(failure)
+
+ if args.get('keep_going') and not killed:
+ # resume after failed test
+ assert locals.seen_perms > 0
+ start += locals.seen_perms*step
+ continue
+ else:
+ # stop other tests
+ killed = True
+ for child in children.copy():
+ child.kill()
+ break
+
+
+ # parallel jobs?
+ runners = []
+ if 'jobs' in args:
+ for job in range(args['jobs']):
+ runners.append(th.Thread(
+ target=run_job, args=(runner_, ids, job, args['jobs']),
+ daemon=True))
+ else:
+ runners.append(th.Thread(
+ target=run_job, args=(runner_, ids, None, None),
+ daemon=True))
+
+ def print_update(done):
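+ # redraw a single status line in place while running, and leave a
+ # final copy with a newline when done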
+ if not args.get('verbose') and (args['color'] or done):
+ sys.stdout.write('%s%srunning %s%s:%s %s%s' % (
+ '\r\x1b[K' if args['color'] else '',
+ '\x1b[?7l' if not done else '',
+ ('\x1b[32m' if not failures else '\x1b[31m')
+ if args['color'] else '',
+ name,
+ '\x1b[m' if args['color'] else '',
+ ', '.join(filter(None, [
+ '%d/%d suites' % (
+ sum(passed_suite_perms[k] == v
+ for k, v in expected_suite_perms.items()),
+ len(expected_suite_perms))
+ if (not args.get('by_suites')
+ and not args.get('by_cases')) else None,
+ '%d/%d cases' % (
+ sum(passed_case_perms[k] == v
+ for k, v in expected_case_perms.items()),
+ len(expected_case_perms))
+ if not args.get('by_cases') else None,
+ '%d/%d perms' % (passed_perms, expected_perms),
+ '%dpls!' % powerlosses
+ if powerlosses else None,
+ '%s%d/%d failures%s' % (
+ '\x1b[31m' if args['color'] else '',
+ len(failures),
+ expected_perms,
+ '\x1b[m' if args['color'] else '')
+ if failures else None])),
+ '\x1b[?7h' if not done else '\n'))
+ sys.stdout.flush()
+
+ for r in runners:
+ r.start()
+
+ try:
+ while any(r.is_alive() for r in runners):
+ time.sleep(0.01)
+ print_update(False)
+ except KeyboardInterrupt:
+ # this is handled by the runner threads; we just
+ # need to not abort here
+ killed = True
+ finally:
+ print_update(True)
+
+ for r in runners:
+ r.join()
+
+ return (
+ expected_perms,
+ passed_perms,
+ powerlosses,
+ failures,
+ killed)
+
+
+def run(runner, test_ids=[], **args):
+ # query runner for tests
+ runner_ = find_runner(runner, **args)
+ print('using runner: %s' % ' '.join(shlex.quote(c) for c in runner_))
+ (_,
+ expected_suite_perms,
+ expected_case_perms,
+ expected_perms,
+ total_perms) = find_perms(runner_, test_ids, **args)
+ print('found %d suites, %d cases, %d/%d permutations' % (
+ len(expected_suite_perms),
+ len(expected_case_perms),
+ expected_perms,
+ total_perms))
+ print()
+
+ # automatic job detection?
+ if args.get('jobs') == 0:
+ args['jobs'] = len(os.sched_getaffinity(0))
+
+ # truncate and open logs here so they aren't disconnected between tests
+ stdout = None
+ if args.get('stdout'):
+ stdout = openio(args['stdout'], 'w', 1)
+ trace = None
+ if args.get('trace'):
+ trace = openio(args['trace'], 'w', 1)
+ output = None
+ if args.get('output'):
+ output = TestOutput(args['output'],
+ ['suite', 'case'],
+ ['test_passed'])
+
+ # measure runtime
+ start = time.time()
+
+ # spawn runners
+ expected = 0
+ passed = 0
+ powerlosses = 0
+ failures = []
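+ # step through tests suite-by-suite or case-by-case if requested,
+ # otherwise run everything as one stage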
+ for by in (test_ids if test_ids
+ else expected_case_perms.keys() if args.get('by_cases')
+ else expected_suite_perms.keys() if args.get('by_suites')
+ else [None]):
+ # spawn jobs for stage
+ (expected_,
+ passed_,
+ powerlosses_,
+ failures_,
+ killed) = run_stage(
+ by or 'tests',
+ runner_,
+ [by] if by is not None else [],
+ stdout,
+ trace,
+ output,
+ **args)
+ # collect passes/failures
+ expected += expected_
+ passed += passed_
+ powerlosses += powerlosses_
+ failures.extend(failures_)
+ if (failures and not args.get('keep_going')) or killed:
+ break
+
+ stop = time.time()
+
+ if stdout:
+ try:
+ stdout.close()
+ except BrokenPipeError:
+ pass
+ if trace:
+ try:
+ trace.close()
+ except BrokenPipeError:
+ pass
+ if output:
+ output.close()
+
+ # show summary
+ print()
+ print('%sdone:%s %s' % (
+ ('\x1b[32m' if not failures else '\x1b[31m')
+ if args['color'] else '',
+ '\x1b[m' if args['color'] else '',
+ ', '.join(filter(None, [
+ '%d/%d passed' % (passed, expected),
+ '%d/%d failed' % (len(failures), expected),
+ '%dpls!' % powerlosses if powerlosses else None,
+ 'in %.2fs' % (stop-start)]))))
+ print()
+
+ # print each failure
+ for failure in failures:
+ assert failure.id is not None, '%s broken? %r' % (
+ ' '.join(shlex.quote(c) for c in runner_),
+ failure)
+
+ # get some extra info from runner
+ path, lineno = find_path(runner_, failure.id, **args)
+ defines = find_defines(runner_, failure.id, **args)
+
+ # show summary of failure
+ print('%s%s:%d:%sfailure:%s %s%s failed' % (
+ '\x1b[01m' if args['color'] else '',
+ path, lineno,
+ '\x1b[01;31m' if args['color'] else '',
+ '\x1b[m' if args['color'] else '',
+ failure.id,
+ ' (%s)' % ', '.join('%s=%s' % (k,v) for k,v in defines.items())
+ if defines else ''))
+
+ if failure.stdout:
+ stdout = failure.stdout
+ if failure.assert_ is not None:
+ stdout = stdout[:-1]
+ for line in stdout[-args.get('context', 5):]:
+ sys.stdout.write(line)
+
+ if failure.assert_ is not None:
+ path, lineno, message = failure.assert_
+ print('%s%s:%d:%sassert:%s %s' % (
+ '\x1b[01m' if args['color'] else '',
+ path, lineno,
+ '\x1b[01;31m' if args['color'] else '',
+ '\x1b[m' if args['color'] else '',
+ message))
+ with open(path) as f:
+ line = next(it.islice(f, lineno-1, None)).strip('\n')
+ print(line)
+ print()
+
+ # drop into gdb?
+ if failures and (args.get('gdb')
+ or args.get('gdb_case')
+ or args.get('gdb_main')
+ or args.get('gdb_pl') is not None
+ or args.get('gdb_pl_before')
+ or args.get('gdb_pl_after')):
+ failure = failures[0]
+ cmd = runner_ + [failure.id]
+
+ if args.get('gdb_main'):
+ # we don't really need the case breakpoint here, but it
+ # can be helpful
+ path, lineno = find_path(runner_, failure.id, **args)
+ cmd[:0] = args['gdb_path'] + [
+ '-ex', 'break main',
+ '-ex', 'break %s:%d' % (path, lineno),
+ '-ex', 'run',
+ '--args']
+ elif args.get('gdb_case'):
+ path, lineno = find_path(runner_, failure.id, **args)
+ cmd[:0] = args['gdb_path'] + [
+ '-ex', 'break %s:%d' % (path, lineno),
+ '-ex', 'run',
+ '--args']
+ elif args.get('gdb_pl') is not None:
+ path, lineno = find_path(runner_, failure.id, **args)
+ cmd[:0] = args['gdb_path'] + [
+ '-ex', 'break %s:%d' % (path, lineno),
+ '-ex', 'ignore 1 %d' % args['gdb_pl'],
+ '-ex', 'run',
+ '--args']
+ elif args.get('gdb_pl_before'):
+ # figure out how many powerlosses there were
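+ # (these are encoded as hex digits in the third field of the id)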
+ powerlosses = (
+ sum(1 for _ in re.finditer('[0-9a-f]',
+ failure.id.split(':', 2)[-1]))
+ if failure.id.count(':') >= 2 else 0)
+ path, lineno = find_path(runner_, failure.id, **args)
+ cmd[:0] = args['gdb_path'] + [
+ '-ex', 'break %s:%d' % (path, lineno),
+ '-ex', 'ignore 1 %d' % max(powerlosses-1, 0),
+ '-ex', 'run',
+ '--args']
+ elif args.get('gdb_pl_after'):
+ # figure out how many powerlosses there were
+ powerlosses = (
+ sum(1 for _ in re.finditer('[0-9a-f]',
+ failure.id.split(':', 2)[-1]))
+ if failure.id.count(':') >= 2 else 0)
+ path, lineno = find_path(runner_, failure.id, **args)
+ cmd[:0] = args['gdb_path'] + [
+ '-ex', 'break %s:%d' % (path, lineno),
+ '-ex', 'ignore 1 %d' % powerlosses,
+ '-ex', 'run',
+ '--args']
+ elif failure.assert_ is not None:
+ cmd[:0] = args['gdb_path'] + [
+ '-ex', 'run',
+ '-ex', 'frame function raise',
+ '-ex', 'up 2',
+ '--args']
+ else:
+ cmd[:0] = args['gdb_path'] + [
+ '-ex', 'run',
+ '--args']
+
+ # exec gdb interactively
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ os.execvp(cmd[0], cmd)
+
+ return 1 if failures else 0
+
+
+def main(**args):
+ # figure out what color should be
+ if args.get('color') == 'auto':
+ args['color'] = sys.stdout.isatty()
+ elif args.get('color') == 'always':
+ args['color'] = True
+ else:
+ args['color'] = False
+
+ if args.get('compile'):
+ return compile(**args)
+ elif (args.get('summary')
+ or args.get('list_suites')
+ or args.get('list_cases')
+ or args.get('list_suite_paths')
+ or args.get('list_case_paths')
+ or args.get('list_defines')
+ or args.get('list_permutation_defines')
+ or args.get('list_implicit_defines')
+ or args.get('list_geometries')
+ or args.get('list_powerlosses')):
+ return list_(**args)
+ else:
+ return run(**args)
- if args.get('gdb'):
- failure = None
- for suite in suites:
- for perm in suite.perms:
- if isinstance(perm.result, TestFailure):
- failure = perm.result
- if failure is not None:
- print('======= gdb ======')
- # drop into gdb
- failure.case.test(failure=failure, **args)
- sys.exit(0)
-
- print('tests passed %d/%d (%.1f%%)' % (passed, total,
- 100*(passed/total if total else 1.0)))
- print('tests failed %d/%d (%.1f%%)' % (failed, total,
- 100*(failed/total if total else 1.0)))
- return 1 if failed > 0 else 0
if __name__ == "__main__":
import argparse
+ import sys
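+ # give argparse a way to ignore conflicts, both the test and compile
+ # groups define -o/--output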
+ argparse.ArgumentParser._handle_conflict_ignore = lambda *_: None
+ argparse._ArgumentGroup._handle_conflict_ignore = lambda *_: None
parser = argparse.ArgumentParser(
- description="Run parameterized tests in various configurations.")
- parser.add_argument('test_paths', nargs='*', default=[TEST_PATHS],
- help="Description of test(s) to run. By default, this is all tests \
- found in the \"{0}\" directory. Here, you can specify a different \
- directory of tests, a specific file, a suite by name, and even \
- specific test cases and permutations. For example \
- \"test_dirs#1\" or \"{0}/test_dirs.toml#1#1\".".format(TEST_PATHS))
- parser.add_argument('-D', action='append', default=[],
- help="Overriding parameter definitions.")
- parser.add_argument('-v', '--verbose', action='store_true',
- help="Output everything that is happening.")
- parser.add_argument('-k', '--keep-going', action='store_true',
- help="Run all tests instead of stopping on first error. Useful for CI.")
- parser.add_argument('-p', '--persist', choices=['erase', 'noerase'],
- nargs='?', const='erase',
- help="Store disk image in a file.")
- parser.add_argument('-b', '--build', action='store_true',
- help="Only build the tests, do not execute.")
- parser.add_argument('-g', '--gdb', choices=['init', 'main', 'assert'],
- nargs='?', const='assert',
+ description="Build and run tests.",
+ allow_abbrev=False,
+ conflict_handler='ignore')
+ parser.add_argument(
+ '-v', '--verbose',
+ action='store_true',
+ help="Output commands that run behind the scenes.")
+ parser.add_argument(
+ '--color',
+ choices=['never', 'always', 'auto'],
+ default='auto',
+ help="When to use terminal colors. Defaults to 'auto'.")
+
+ # test flags
+ test_parser = parser.add_argument_group('test options')
+ test_parser.add_argument(
+ 'runner',
+ nargs='?',
+ type=lambda x: x.split(),
+ help="Test runner to use for testing. Defaults to %r." % RUNNER_PATH)
+ test_parser.add_argument(
+ 'test_ids',
+ nargs='*',
+ help="Description of tests to run.")
+ test_parser.add_argument(
+ '-Y', '--summary',
+ action='store_true',
+ help="Show quick summary.")
+ test_parser.add_argument(
+ '-l', '--list-suites',
+ action='store_true',
+ help="List test suites.")
+ test_parser.add_argument(
+ '-L', '--list-cases',
+ action='store_true',
+ help="List test cases.")
+ test_parser.add_argument(
+ '--list-suite-paths',
+ action='store_true',
+ help="List the path for each test suite.")
+ test_parser.add_argument(
+ '--list-case-paths',
+ action='store_true',
+ help="List the path and line number for each test case.")
+ test_parser.add_argument(
+ '--list-defines',
+ action='store_true',
+ help="List all defines in this test-runner.")
+ test_parser.add_argument(
+ '--list-permutation-defines',
+ action='store_true',
+ help="List explicit defines in this test-runner.")
+ test_parser.add_argument(
+ '--list-implicit-defines',
+ action='store_true',
+ help="List implicit defines in this test-runner.")
+ test_parser.add_argument(
+ '--list-geometries',
+ action='store_true',
+ help="List the available disk geometries.")
+ test_parser.add_argument(
+ '--list-powerlosses',
+ action='store_true',
+ help="List the available power-loss scenarios.")
+ test_parser.add_argument(
+ '-D', '--define',
+ action='append',
+ help="Override a test define.")
+ test_parser.add_argument(
+ '-G', '--geometry',
+ help="Comma-separated list of disk geometries to test.")
+ test_parser.add_argument(
+ '-P', '--powerloss',
+ help="Comma-separated list of power-loss scenarios to test.")
+ test_parser.add_argument(
+ '-d', '--disk',
+ help="Direct block device operations to this file.")
+ test_parser.add_argument(
+ '-t', '--trace',
+ help="Direct trace output to this file.")
+ test_parser.add_argument(
+ '--trace-backtrace',
+ action='store_true',
+ help="Include a backtrace with every trace statement.")
+ test_parser.add_argument(
+ '--trace-period',
+ help="Sample trace output at this period in cycles.")
+ test_parser.add_argument(
+ '--trace-freq',
+ help="Sample trace output at this frequency in hz.")
+ test_parser.add_argument(
+ '-O', '--stdout',
+ help="Direct stdout to this file. Note stderr is already merged here.")
+ test_parser.add_argument(
+ '-o', '--output',
+ help="CSV file to store results.")
+ test_parser.add_argument(
+ '--read-sleep',
+ help="Artificial read delay in seconds.")
+ test_parser.add_argument(
+ '--prog-sleep',
+ help="Artificial prog delay in seconds.")
+ test_parser.add_argument(
+ '--erase-sleep',
+ help="Artificial erase delay in seconds.")
+ test_parser.add_argument(
+ '-j', '--jobs',
+ nargs='?',
+ type=lambda x: int(x, 0),
+ const=0,
+ help="Number of parallel runners to run. 0 runs one runner per core.")
+ test_parser.add_argument(
+ '-k', '--keep-going',
+ action='store_true',
+ help="Don't stop on first error.")
+ test_parser.add_argument(
+ '-i', '--isolate',
+ action='store_true',
+ help="Run each test permutation in a separate process.")
+ test_parser.add_argument(
+ '-b', '--by-suites',
+ action='store_true',
+ help="Step through tests by suite.")
+ test_parser.add_argument(
+ '-B', '--by-cases',
+ action='store_true',
+ help="Step through tests by case.")
+ test_parser.add_argument(
+ '--context',
+ type=lambda x: int(x, 0),
+ default=5,
+ help="Show this many lines of stdout on test failure. "
+ "Defaults to 5.")
+ test_parser.add_argument(
+ '--gdb',
+ action='store_true',
help="Drop into gdb on test failure.")
- parser.add_argument('--no-internal', action='store_true',
- help="Don't run tests that require internal knowledge.")
- parser.add_argument('-n', '--normal', action='store_true',
- help="Run tests normally.")
- parser.add_argument('-r', '--reentrant', action='store_true',
- help="Run reentrant tests with simulated power-loss.")
- parser.add_argument('--valgrind', action='store_true',
- help="Run non-leaky tests under valgrind to check for memory leaks.")
- parser.add_argument('--exec', default=[], type=lambda e: e.split(),
- help="Run tests with another executable prefixed on the command line.")
- parser.add_argument('--disk',
- help="Specify a file to use for persistent/reentrant tests.")
- parser.add_argument('--coverage', type=lambda x: x if x else True,
- nargs='?', const='',
- help="Collect coverage information during testing. This uses lcov/gcov \
- to accumulate coverage information into *.info files. May also \
- a path to a *.info file to accumulate coverage info into.")
- parser.add_argument('--build-dir',
- help="Build relative to the specified directory instead of the \
- current directory.")
-
- sys.exit(main(**vars(parser.parse_args())))
+ test_parser.add_argument(
+ '--gdb-case',
+ action='store_true',
+ help="Drop into gdb on test failure but stop at the beginning "
+ "of the failing test case.")
+ test_parser.add_argument(
+ '--gdb-main',
+ action='store_true',
+ help="Drop into gdb on test failure but stop at the beginning "
+ "of main.")
+ test_parser.add_argument(
+ '--gdb-pl',
+ type=lambda x: int(x, 0),
+ help="Drop into gdb on this specific powerloss.")
+ test_parser.add_argument(
+ '--gdb-pl-before',
+ action='store_true',
+ help="Drop into gdb before the powerloss that caused the failure.")
+ test_parser.add_argument(
+ '--gdb-pl-after',
+ action='store_true',
+ help="Drop into gdb after the powerloss that caused the failure.")
+ test_parser.add_argument(
+ '--gdb-path',
+ type=lambda x: x.split(),
+ default=GDB_PATH,
+ help="Path to the gdb executable, may include flags. "
+ "Defaults to %r." % GDB_PATH)
+ test_parser.add_argument(
+ '--exec',
+ type=lambda e: e.split(),
+ help="Run under another executable.")
+ test_parser.add_argument(
+ '--valgrind',
+ action='store_true',
+ help="Run under Valgrind to find memory errors. Implicitly sets "
+ "--isolate.")
+ test_parser.add_argument(
+ '--valgrind-path',
+ type=lambda x: x.split(),
+ default=VALGRIND_PATH,
+ help="Path to the Valgrind executable, may include flags. "
+ "Defaults to %r." % VALGRIND_PATH)
+ test_parser.add_argument(
+ '-p', '--perf',
+ help="Run under Linux's perf to sample performance counters, writing "
+ "samples to this file.")
+ test_parser.add_argument(
+ '--perf-freq',
+ help="perf sampling frequency. This is passed directly to the perf "
+ "script.")
+ test_parser.add_argument(
+ '--perf-period',
+ help="perf sampling period. This is passed directly to the perf "
+ "script.")
+ test_parser.add_argument(
+ '--perf-events',
+ help="perf events to record. This is passed directly to the perf "
+ "script.")
+ test_parser.add_argument(
+ '--perf-script',
+ type=lambda x: x.split(),
+ default=PERF_SCRIPT,
+ help="Path to the perf script to use. Defaults to %r." % PERF_SCRIPT)
+ test_parser.add_argument(
+ '--perf-path',
+ type=lambda x: x.split(),
+ help="Path to the perf executable, may include flags. This is passed "
+ "directly to the perf script")
+
+ # compilation flags
+ comp_parser = parser.add_argument_group('compilation options')
+ comp_parser.add_argument(
+ 'test_paths',
+ nargs='*',
+ help="Description of *.toml files to compile. May be a directory "
+ "or a list of paths.")
+ comp_parser.add_argument(
+ '-c', '--compile',
+ action='store_true',
+ help="Compile a test suite or source file.")
+ comp_parser.add_argument(
+ '-s', '--source',
+ help="Source file to compile, possibly injecting internal tests.")
+ comp_parser.add_argument(
+ '--include',
+ default=HEADER_PATH,
+ help="Inject this header file into every compiled test file. "
+ "Defaults to %r." % HEADER_PATH)
+ comp_parser.add_argument(
+ '-o', '--output',
+ help="Output file.")
+
+ # runner/test_paths overlap, so need to do some munging here
+ args = parser.parse_intermixed_args()
+ args.test_paths = [' '.join(args.runner or [])] + args.test_ids
+ args.runner = args.runner or [RUNNER_PATH]
+
+ sys.exit(main(**{k: v
+ for k, v in vars(args).items()
+ if v is not None}))
diff --git a/scripts/tracebd.py b/scripts/tracebd.py
new file mode 100755
index 00000000..ecf49a7c
--- /dev/null
+++ b/scripts/tracebd.py
@@ -0,0 +1,1002 @@
+#!/usr/bin/env python3
+#
+# Display operations on block devices based on trace output
+#
+# Example:
+# ./scripts/tracebd.py trace
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import collections as co
+import functools as ft
+import io
+import itertools as it
+import math as m
+import os
+import re
+import shutil
+import sys
+import threading as th
+import time
+
+
+CHARS = 'rpe.'
+COLORS = ['42', '45', '44', '']
+
+WEAR_CHARS = '0123456789'
+WEAR_CHARS_SUBSCRIPTS = '.₁₂₃₄₅₆₇₈₉'
+WEAR_COLORS = ['', '', '', '', '', '', '', '35', '35', '1;31']
+
+CHARS_DOTS = " .':"
+COLORS_DOTS = ['32', '35', '34', '']
+CHARS_BRAILLE = (
+ '⠀⢀⡀⣀⠠⢠⡠⣠⠄⢄⡄⣄⠤⢤⡤⣤' '⠐⢐⡐⣐⠰⢰⡰⣰⠔⢔⡔⣔⠴⢴⡴⣴'
+ '⠂⢂⡂⣂⠢⢢⡢⣢⠆⢆⡆⣆⠦⢦⡦⣦' '⠒⢒⡒⣒⠲⢲⡲⣲⠖⢖⡖⣖⠶⢶⡶⣶'
+ '⠈⢈⡈⣈⠨⢨⡨⣨⠌⢌⡌⣌⠬⢬⡬⣬' '⠘⢘⡘⣘⠸⢸⡸⣸⠜⢜⡜⣜⠼⢼⡼⣼'
+ '⠊⢊⡊⣊⠪⢪⡪⣪⠎⢎⡎⣎⠮⢮⡮⣮' '⠚⢚⡚⣚⠺⢺⡺⣺⠞⢞⡞⣞⠾⢾⡾⣾'
+ '⠁⢁⡁⣁⠡⢡⡡⣡⠅⢅⡅⣅⠥⢥⡥⣥' '⠑⢑⡑⣑⠱⢱⡱⣱⠕⢕⡕⣕⠵⢵⡵⣵'
+ '⠃⢃⡃⣃⠣⢣⡣⣣⠇⢇⡇⣇⠧⢧⡧⣧' '⠓⢓⡓⣓⠳⢳⡳⣳⠗⢗⡗⣗⠷⢷⡷⣷'
+ '⠉⢉⡉⣉⠩⢩⡩⣩⠍⢍⡍⣍⠭⢭⡭⣭' '⠙⢙⡙⣙⠹⢹⡹⣹⠝⢝⡝⣝⠽⢽⡽⣽'
+ '⠋⢋⡋⣋⠫⢫⡫⣫⠏⢏⡏⣏⠯⢯⡯⣯' '⠛⢛⡛⣛⠻⢻⡻⣻⠟⢟⡟⣟⠿⢿⡿⣿')
+
+
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
+
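+# a pseudo-file that keeps only the last n lines, redrawing them
+# in-place with ANSI escape codes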
+class LinesIO:
+ def __init__(self, maxlen=None):
+ self.maxlen = maxlen
+ self.lines = co.deque(maxlen=maxlen)
+ self.tail = io.StringIO()
+
+ # trigger automatic sizing
+ if maxlen == 0:
+ self.resize(0)
+
+ def write(self, s):
+ # note using split here ensures the trailing string has no newline
+ lines = s.split('\n')
+
+ if len(lines) > 1 and self.tail.getvalue():
+ self.tail.write(lines[0])
+ lines[0] = self.tail.getvalue()
+ self.tail = io.StringIO()
+
+ self.lines.extend(lines[:-1])
+
+ if lines[-1]:
+ self.tail.write(lines[-1])
+
+ def resize(self, maxlen):
+ self.maxlen = maxlen
+ if maxlen == 0:
+ maxlen = shutil.get_terminal_size((80, 5))[1]
+ if maxlen != self.lines.maxlen:
+ self.lines = co.deque(self.lines, maxlen=maxlen)
+
+ canvas_lines = 1
+ def draw(self):
+ # did terminal size change?
+ if self.maxlen == 0:
+ self.resize(0)
+
+ # first things first, give ourselves a canvas
+ while LinesIO.canvas_lines < len(self.lines):
+ sys.stdout.write('\n')
+ LinesIO.canvas_lines += 1
+
+ # clear the bottom of the canvas if we shrink
+ shrink = LinesIO.canvas_lines - len(self.lines)
+ if shrink > 0:
+ for i in range(shrink):
+ sys.stdout.write('\r')
+ if shrink-1-i > 0:
+ sys.stdout.write('\x1b[%dA' % (shrink-1-i))
+ sys.stdout.write('\x1b[K')
+ if shrink-1-i > 0:
+ sys.stdout.write('\x1b[%dB' % (shrink-1-i))
+ sys.stdout.write('\x1b[%dA' % shrink)
+ LinesIO.canvas_lines = len(self.lines)
+
+ for i, line in enumerate(self.lines):
+ # move cursor, clear line, disable/reenable line wrapping
+ sys.stdout.write('\r')
+ if len(self.lines)-1-i > 0:
+ sys.stdout.write('\x1b[%dA' % (len(self.lines)-1-i))
+ sys.stdout.write('\x1b[K')
+ sys.stdout.write('\x1b[?7l')
+ sys.stdout.write(line)
+ sys.stdout.write('\x1b[?7h')
+ if len(self.lines)-1-i > 0:
+ sys.stdout.write('\x1b[%dB' % (len(self.lines)-1-i))
+ sys.stdout.flush()
+
+
+# space filling Hilbert-curve
+#
+# note we memoize the last curve since this is a bit expensive
+#
+@ft.lru_cache(1)
+def hilbert_curve(width, height):
+ # based on generalized Hilbert curves:
+ # https://github.com/jakubcerveny/gilbert
+ #
+ def hilbert_(x, y, a_x, a_y, b_x, b_y):
+ w = abs(a_x+a_y)
+ h = abs(b_x+b_y)
+ a_dx = -1 if a_x < 0 else +1 if a_x > 0 else 0
+ a_dy = -1 if a_y < 0 else +1 if a_y > 0 else 0
+ b_dx = -1 if b_x < 0 else +1 if b_x > 0 else 0
+ b_dy = -1 if b_y < 0 else +1 if b_y > 0 else 0
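+ # a is the major axis, b the minor axis, the d* values are unit steps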
+
+ # trivial row
+ if h == 1:
+ for _ in range(w):
+ yield (x,y)
+ x, y = x+a_dx, y+a_dy
+ return
+
+ # trivial column
+ if w == 1:
+ for _ in range(h):
+ yield (x,y)
+ x, y = x+b_dx, y+b_dy
+ return
+
+ a_x_, a_y_ = a_x//2, a_y//2
+ b_x_, b_y_ = b_x//2, b_y//2
+ w_ = abs(a_x_+a_y_)
+ h_ = abs(b_x_+b_y_)
+
+ if 2*w > 3*h:
+ # prefer even steps
+ if w_ % 2 != 0 and w > 2:
+ a_x_, a_y_ = a_x_+a_dx, a_y_+a_dy
+
+ # split in two
+ yield from hilbert_(x, y, a_x_, a_y_, b_x, b_y)
+ yield from hilbert_(x+a_x_, y+a_y_, a_x-a_x_, a_y-a_y_, b_x, b_y)
+ else:
+ # prefer even steps
+ if h_ % 2 != 0 and h > 2:
+ b_x_, b_y_ = b_x_+b_dx, b_y_+b_dy
+
+ # split in three
+ yield from hilbert_(x, y, b_x_, b_y_, a_x_, a_y_)
+ yield from hilbert_(x+b_x_, y+b_y_, a_x, a_y, b_x-b_x_, b_y-b_y_)
+ yield from hilbert_(
+ x+(a_x-a_dx)+(b_x_-b_dx), y+(a_y-a_dy)+(b_y_-b_dy),
+ -b_x_, -b_y_, -(a_x-a_x_), -(a_y-a_y_))
+
+ if width >= height:
+ curve = hilbert_(0, 0, +width, 0, 0, +height)
+ else:
+ curve = hilbert_(0, 0, 0, +height, +width, 0)
+
+ return list(curve)
+
+# space filling Z-curve/Lebesgue-curve
+#
+# note we memoize the last curve since this is a bit expensive
+#
+@ft.lru_cache(1)
+def lebesgue_curve(width, height):
+ # we create a truncated Z-curve by simply filtering out the points
+ # that are outside our region
+ curve = []
+ for i in range(2**(2*m.ceil(m.log2(max(width, height))))):
+ # we just operate on binary strings here because it's easier
+ b = '{:0{}b}'.format(i, 2*m.ceil(m.log2(i+1)/2))
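+ # de-interleave the bits of i into x/y coordinates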
+ x = int(b[1::2], 2) if b[1::2] else 0
+ y = int(b[0::2], 2) if b[0::2] else 0
+ if x < width and y < height:
+ curve.append((x, y))
+
+ return curve
+
+
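+# block state packed into an int: bit 0 = readed, bit 1 = proged,
+# bit 2 = erased, remaining bits count erases for wear tracking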
+class Block(int):
+ __slots__ = ()
+ def __new__(cls, state=0, *,
+ wear=0,
+ readed=False,
+ proged=False,
+ erased=False):
+ return super().__new__(cls,
+ state
+ | (wear << 3)
+ | (1 if readed else 0)
+ | (2 if proged else 0)
+ | (4 if erased else 0))
+
+ @property
+ def wear(self):
+ return self >> 3
+
+ @property
+ def readed(self):
+ return (self & 1) != 0
+
+ @property
+ def proged(self):
+ return (self & 2) != 0
+
+ @property
+ def erased(self):
+ return (self & 4) != 0
+
+ def read(self):
+ return Block(int(self) | 1)
+
+ def prog(self):
+ return Block(int(self) | 2)
+
+ def erase(self):
+ return Block((int(self) | 4) + 8)
+
+ def clear(self):
+ return Block(int(self) & ~7)
+
+ def __or__(self, other):
+ return Block(
+ (int(self) | int(other)) & 7,
+ wear=max(self.wear, other.wear))
+
+ def worn(self, max_wear, *,
+ block_cycles=None,
+ wear_chars=None,
+ **_):
+ if wear_chars is None:
+ wear_chars = WEAR_CHARS
+
+ if block_cycles:
+ return self.wear / block_cycles
+ else:
+ return self.wear / max(max_wear, len(wear_chars))
+
+ def draw(self, max_wear, char=None, *,
+ read=True,
+ prog=True,
+ erase=True,
+ wear=False,
+ block_cycles=None,
+ color=True,
+ subscripts=False,
+ dots=False,
+ braille=False,
+ chars=None,
+ wear_chars=None,
+ colors=None,
+ wear_colors=None,
+ **_):
+ # fallback to default chars/colors
+ if chars is None:
+ chars = CHARS
+ if len(chars) < len(CHARS):
+ chars = chars + CHARS[len(chars):]
+
+ if colors is None:
+ if braille or dots:
+ colors = COLORS_DOTS
+ else:
+ colors = COLORS
+ if len(colors) < len(COLORS):
+ colors = colors + COLORS[len(colors):]
+
+ if wear_chars is None:
+ if subscripts:
+ wear_chars = WEAR_CHARS_SUBSCRIPTS
+ else:
+ wear_chars = WEAR_CHARS
+
+ if wear_colors is None:
+ wear_colors = WEAR_COLORS
+
+ # compute char/color
+ c = chars[3]
+ f = [colors[3]]
+
+ if wear:
+ w = min(
+ self.worn(
+ max_wear,
+ block_cycles=block_cycles,
+ wear_chars=wear_chars),
+ 1)
+
+ c = wear_chars[int(w * (len(wear_chars)-1))]
+ f.append(wear_colors[int(w * (len(wear_colors)-1))])
+
+ if erase and self.erased:
+ c = chars[2]
+ f.append(colors[2])
+ elif prog and self.proged:
+ c = chars[1]
+ f.append(colors[1])
+ elif read and self.readed:
+ c = chars[0]
+ f.append(colors[0])
+
+ # override char?
+ if char:
+ c = char
+
+ # apply colors
+ if f and color:
+ c = '%s%s\x1b[m' % (
+ ''.join('\x1b[%sm' % f_ for f_ in f),
+ c)
+
+ return c
+
+
+class Bd:
+ def __init__(self, *,
+ size=1,
+ count=1,
+ width=None,
+ height=1,
+ blocks=None):
+ if width is None:
+ width = count
+
+ if blocks is None:
+ self.blocks = [Block() for _ in range(width*height)]
+ else:
+ self.blocks = blocks
+ self.size = size
+ self.count = count
+ self.width = width
+ self.height = height
+
+ def _op(self, f, block=None, off=None, size=None):
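+ # accepts (), (block), (block, size), or (block, off, size)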
+ if block is None:
+ range_ = range(len(self.blocks))
+ else:
+ if off is None:
+ off, size = 0, self.size
+ elif size is None:
+ off, size = 0, off
+
+ # update our geometry? this will do nothing if we haven't changed
+ self.resize(
+ size=max(self.size, off+size),
+ count=max(self.count, block+1))
+
+ # map to our block space
+ start = (block*self.size + off) / (self.size*self.count)
+ stop = (block*self.size + off+size) / (self.size*self.count)
+
+ range_ = range(
+ m.floor(start*len(self.blocks)),
+ m.ceil(stop*len(self.blocks)))
+
+ # apply the op
+ for i in range_:
+ self.blocks[i] = f(self.blocks[i])
+
+ def read(self, block=None, off=None, size=None):
+ self._op(Block.read, block, off, size)
+
+ def prog(self, block=None, off=None, size=None):
+ self._op(Block.prog, block, off, size)
+
+ def erase(self, block=None, off=None, size=None):
+ self._op(Block.erase, block, off, size)
+
+ def clear(self, block=None, off=None, size=None):
+ self._op(Block.clear, block, off, size)
+
+ def copy(self):
+ return Bd(
+ blocks=self.blocks.copy(),
+ size=self.size,
+ count=self.count,
+ width=self.width,
+ height=self.height)
+
+ def resize(self, *,
+ size=None,
+ count=None,
+ width=None,
+ height=None):
+ size = size if size is not None else self.size
+ count = count if count is not None else self.count
+ width = width if width is not None else self.width
+ height = height if height is not None else self.height
+
+ if (size == self.size
+ and count == self.count
+ and width == self.width
+ and height == self.height):
+ return
+
+ # transform our blocks
+ blocks = []
+ for x in range(width*height):
+ # map from new bd space
+ start = m.floor(x * (size*count)/(width*height))
+ stop = m.ceil((x+1) * (size*count)/(width*height))
+ start_block = start // size
+ start_off = start % size
+ stop_block = stop // size
+ stop_off = stop % size
+ # map to old bd space
+ start = start_block*self.size + start_off
+ stop = stop_block*self.size + stop_off
+ start = m.floor(start * len(self.blocks)/(self.size*self.count))
+ stop = m.ceil(stop * len(self.blocks)/(self.size*self.count))
+
+ # aggregate state
+ blocks.append(ft.reduce(
+ Block.__or__,
+ self.blocks[start:stop],
+ Block()))
+
+ self.size = size
+ self.count = count
+ self.width = width
+ self.height = height
+ self.blocks = blocks
+
+ def draw(self, row, *,
+ read=False,
+ prog=False,
+ erase=False,
+ wear=False,
+ hilbert=False,
+ lebesgue=False,
+ dots=False,
+ braille=False,
+ **args):
+ # find max wear?
+ max_wear = None
+ if wear:
+ max_wear = max(b.wear for b in self.blocks)
+
+ # fold via a curve?
+ if hilbert:
+ grid = [None]*(self.width*self.height)
+ for (x,y), b in zip(
+ hilbert_curve(self.width, self.height),
+ self.blocks):
+ grid[x + y*self.width] = b
+ elif lebesgue:
+ grid = [None]*(self.width*self.height)
+ for (x,y), b in zip(
+ lebesgue_curve(self.width, self.height),
+ self.blocks):
+ grid[x + y*self.width] = b
+ else:
+ grid = self.blocks
+
+ # need to wait for more trace output before rendering
+ #
+ # this is sort of a hack that knows the output is going to a terminal
+ if (braille and self.height < 4) or (dots and self.height < 2):
+ needed_height = 4 if braille else 2
+
+ self.history = getattr(self, 'history', [])
+ self.history.append(grid)
+
+ if len(self.history)*self.height < needed_height:
+ # skip for now
+ return None
+
+ grid = list(it.chain.from_iterable(
+ # did we resize?
+ it.islice(it.chain(h, it.repeat(Block())),
+ self.width*self.height)
+ for h in self.history))
+ self.history = []
+
+ line = []
+ if braille:
+ # encode into a byte
+ for x in range(0, self.width, 2):
+ byte_b = 0
+ best_b = Block()
+ for i in range(2*4):
+ b = grid[x+(2-1-(i%2)) + ((row*4)+(4-1-(i//2)))*self.width]
+ best_b |= b
+ if ((read and b.readed)
+ or (prog and b.proged)
+ or (erase and b.erased)
+ or (not read and not prog and not erase
+ and wear and b.worn(max_wear, **args) >= 0.7)):
+ byte_b |= 1 << i
+
+ line.append(best_b.draw(
+ max_wear,
+ CHARS_BRAILLE[byte_b],
+ braille=True,
+ read=read,
+ prog=prog,
+ erase=erase,
+ wear=wear,
+ **args))
+ elif dots:
+ # encode into a byte
+ for x in range(self.width):
+ byte_b = 0
+ best_b = Block()
+ for i in range(2):
+ b = grid[x + ((row*2)+(2-1-i))*self.width]
+ best_b |= b
+ if ((read and b.readed)
+ or (prog and b.proged)
+ or (erase and b.erased)
+ or (not read and not prog and not erase
+ and wear and b.worn(max_wear, **args) >= 0.7)):
+ byte_b |= 1 << i
+
+ line.append(best_b.draw(
+ max_wear,
+ CHARS_DOTS[byte_b],
+ dots=True,
+ read=read,
+ prog=prog,
+ erase=erase,
+ wear=wear,
+ **args))
+ else:
+ for x in range(self.width):
+ line.append(grid[x + row*self.width].draw(
+ max_wear,
+ read=read,
+ prog=prog,
+ erase=erase,
+ wear=wear,
+ **args))
+
+ return ''.join(line)
+
+
+
+def main(path='-', *,
+ read=False,
+ prog=False,
+ erase=False,
+ wear=False,
+ block=(None,None),
+ off=(None,None),
+ block_size=None,
+ block_count=None,
+ block_cycles=None,
+ reset=False,
+ color='auto',
+ dots=False,
+ braille=False,
+ width=None,
+ height=None,
+ lines=None,
+ cat=False,
+ hilbert=False,
+ lebesgue=False,
+ coalesce=None,
+ sleep=None,
+ keep_open=False,
+ **args):
+ # figure out what color should be
+ if color == 'auto':
+ color = sys.stdout.isatty()
+ elif color == 'always':
+ color = True
+ else:
+ color = False
+
+ # exclusive wear or read/prog/erase by default
+ if not read and not prog and not erase and not wear:
+ read = True
+ prog = True
+ erase = True
+
+ # assume a reasonable lines/height if not specified
+ #
+ # note that we let height = None if neither hilbert or lebesgue
+ # are specified, this is a bit special as the default may be less
+ # than one character in height.
+ if height is None and (hilbert or lebesgue):
+ if lines is not None:
+ height = lines
+ else:
+ height = 5
+
+ if lines is None:
+ if height is not None:
+ lines = height
+ else:
+ lines = 5
+
+ # allow ranges for blocks/offs
+ block_start = block[0]
+ block_stop = block[1] if len(block) > 1 else block[0]+1
+ off_start = off[0]
+ off_stop = off[1] if len(off) > 1 else off[0]+1
+
+ if block_start is None:
+ block_start = 0
+ if block_stop is None and block_count is not None:
+ block_stop = block_count
+ if off_start is None:
+ off_start = 0
+ if off_stop is None and block_size is not None:
+ off_stop = block_size
+
+ # create a block device representation
+ bd = Bd()
+
+ def resize(*, size=None, count=None):
+ nonlocal bd
+
+ # size may be overriden by cli args
+ if block_size is not None:
+ size = block_size
+ elif off_stop is not None:
+ size = off_stop-off_start
+
+ if block_count is not None:
+ count = block_count
+ elif block_stop is not None:
+ count = block_stop-block_start
+
+ # figure out best width/height
+ if width is None:
+ width_ = min(80, shutil.get_terminal_size((80, 5))[0])
+ elif width:
+ width_ = width
+ else:
+ width_ = shutil.get_terminal_size((80, 5))[0]
+
+ if height is None:
+ height_ = 0
+ elif height:
+ height_ = height
+ else:
+ height_ = shutil.get_terminal_size((80, 5))[1]
+
+ bd.resize(
+ size=size,
+ count=count,
+ # scale if we're printing with dots or braille
+ width=2*width_ if braille else width_,
+ height=max(1,
+ 4*height_ if braille
+ else 2*height_ if dots
+ else height_))
+ resize()
+
+ # parse a line of trace output
+ pattern = re.compile(
+ '^(?P<file>[^:]*):(?P<line>[0-9]+):trace:.*?bd_(?:'
+ '(?P<create>create\w*)\('
+ '(?:'
+ 'block_size=(?P<block_size>\w+)'
+ '|' 'block_count=(?P<block_count>\w+)'
+ '|' '.*?' ')*' '\)'
+ '|' '(?P<read>read)\('
+ '\s*(?P<read_ctx>\w+)' '\s*,'
+ '\s*(?P<read_block>\w+)' '\s*,'
+ '\s*(?P<read_off>\w+)' '\s*,'
+ '\s*(?P<read_buffer>\w+)' '\s*,'
+ '\s*(?P<read_size>\w+)' '\s*\)'
+ '|' '(?P<prog>prog)\('
+ '\s*(?P<prog_ctx>\w+)' '\s*,'
+ '\s*(?P<prog_block>\w+)' '\s*,'
+ '\s*(?P<prog_off>\w+)' '\s*,'
+ '\s*(?P<prog_buffer>\w+)' '\s*,'
+ '\s*(?P<prog_size>\w+)' '\s*\)'
+ '|' '(?P<erase>erase)\('
+ '\s*(?P<erase_ctx>\w+)' '\s*,'
+ '\s*(?P<erase_block>\w+)'
+ '\s*\(\s*(?P<erase_size>\w+)\s*\)' '\s*\)'
+ '|' '(?P<sync>sync)\('
+ '\s*(?P<sync_ctx>\w+)' '\s*\)' ')\s*$')
+ def parse(line):
+ nonlocal bd
+
+ # string searching is much faster than the regex here, and this
+ # actually has a big impact given how much trace output comes
+ # through here
+ if 'trace' not in line or 'bd' not in line:
+ return False
+ m = pattern.match(line)
+ if not m:
+ return False
+
+ if m.group('create'):
+ # update our block size/count
+ size = int(m.group('block_size'), 0)
+ count = int(m.group('block_count'), 0)
+
+ resize(size=size, count=count)
+ if reset:
+ bd = Bd(
+ size=bd.size,
+ count=bd.count,
+ width=bd.width,
+ height=bd.height)
+ return True
+
+ elif m.group('read') and read:
+ block = int(m.group('read_block'), 0)
+ off = int(m.group('read_off'), 0)
+ size = int(m.group('read_size'), 0)
+
+ if block_stop is not None and block >= block_stop:
+ return False
+ block -= block_start
+ if off_stop is not None:
+ if off >= off_stop:
+ return False
+ size = min(size, off_stop-off)
+ off -= off_start
+
+ bd.read(block, off, size)
+ return True
+
+ elif m.group('prog') and prog:
+ block = int(m.group('prog_block'), 0)
+ off = int(m.group('prog_off'), 0)
+ size = int(m.group('prog_size'), 0)
+
+ if block_stop is not None and block >= block_stop:
+ return False
+ block -= block_start
+ if off_stop is not None:
+ if off >= off_stop:
+ return False
+ size = min(size, off_stop-off)
+ off -= off_start
+
+ bd.prog(block, off, size)
+ return True
+
+ elif m.group('erase') and (erase or wear):
+ block = int(m.group('erase_block'), 0)
+ size = int(m.group('erase_size'), 0)
+
+ if block_stop is not None and block >= block_stop:
+ return False
+ block -= block_start
+ if off_stop is not None:
+ size = min(size, off_stop)
+ off = -off_start
+
+ bd.erase(block, off, size)
+ return True
+
+ else:
+ return False
+
+ # print trace output
+ def draw(f):
+ def writeln(s=''):
+ f.write(s)
+ f.write('\n')
+ f.writeln = writeln
+
+ # don't forget we've scaled this for braille/dots!
+ for row in range(
+ m.ceil(bd.height/4) if braille
+ else m.ceil(bd.height/2) if dots
+ else bd.height):
+ line = bd.draw(row,
+ read=read,
+ prog=prog,
+ erase=erase,
+ wear=wear,
+ block_cycles=block_cycles,
+ color=color,
+ dots=dots,
+ braille=braille,
+ hilbert=hilbert,
+ lebesgue=lebesgue,
+ **args)
+ if line:
+ f.writeln(line)
+
+ bd.clear()
+ resize()
+
+
+ # read/parse/coalesce operations
+ if cat:
+ ring = sys.stdout
+ else:
+ ring = LinesIO(lines)
+
+ # if sleep print in background thread to avoid getting stuck in a read call
+ event = th.Event()
+ lock = th.Lock()
+ if sleep:
+ done = False
+ def background():
+ while not done:
+ event.wait()
+ event.clear()
+ with lock:
+ draw(ring)
+ if not cat:
+ ring.draw()
+ time.sleep(sleep or 0.01)
+ th.Thread(target=background, daemon=True).start()
+
+ try:
+ while True:
+ with openio(path) as f:
+ changed = 0
+ for line in f:
+ with lock:
+ changed += parse(line)
+
+ # need to redraw?
+ if changed and (not coalesce or changed >= coalesce):
+ if sleep:
+ event.set()
+ else:
+ draw(ring)
+ if not cat:
+ ring.draw()
+ changed = 0
+
+ if not keep_open:
+ break
+ # don't just flood open calls
+ time.sleep(sleep or 0.1)
+ except FileNotFoundError as e:
+ print("error: file not found %r" % path)
+ sys.exit(-1)
+ except KeyboardInterrupt:
+ pass
+
+ if sleep:
+ done = True
+ lock.acquire() # avoids https://bugs.python.org/issue42717
+ if not cat:
+ sys.stdout.write('\n')
+
+
+if __name__ == "__main__":
+ import sys
+ import argparse
+ parser = argparse.ArgumentParser(
+ description="Display operations on block devices based on "
+ "trace output.",
+ allow_abbrev=False)
+ parser.add_argument(
+ 'path',
+ nargs='?',
+ help="Path to read from.")
+ parser.add_argument(
+ '-r', '--read',
+ action='store_true',
+ help="Render reads.")
+ parser.add_argument(
+ '-p', '--prog',
+ action='store_true',
+ help="Render progs.")
+ parser.add_argument(
+ '-e', '--erase',
+ action='store_true',
+ help="Render erases.")
+ parser.add_argument(
+ '-w', '--wear',
+ action='store_true',
+ help="Render wear.")
+ parser.add_argument(
+ '-b', '--block',
+ type=lambda x: tuple(
+ int(x, 0) if x.strip() else None
+ for x in x.split(',')),
+ help="Show a specific block or range of blocks.")
+ parser.add_argument(
+ '-i', '--off',
+ type=lambda x: tuple(
+ int(x, 0) if x.strip() else None
+ for x in x.split(',')),
+ help="Show a specific offset or range of offsets.")
+ parser.add_argument(
+ '-B', '--block-size',
+ type=lambda x: int(x, 0),
+ help="Assume a specific block size.")
+ parser.add_argument(
+ '--block-count',
+ type=lambda x: int(x, 0),
+ help="Assume a specific block count.")
+ parser.add_argument(
+ '-C', '--block-cycles',
+ type=lambda x: int(x, 0),
+ help="Assumed maximum number of erase cycles when measuring wear.")
+ parser.add_argument(
+ '-R', '--reset',
+ action='store_true',
+ help="Reset wear on block device initialization.")
+ parser.add_argument(
+ '--color',
+ choices=['never', 'always', 'auto'],
+ default='auto',
+ help="When to use terminal colors. Defaults to 'auto'.")
+ parser.add_argument(
+ '--subscripts',
+ action='store_true',
+ help="Use unicode subscripts for showing wear.")
+ parser.add_argument(
+ '-:', '--dots',
+ action='store_true',
+ help="Use 1x2 ascii dot characters.")
+ parser.add_argument(
+ '-⣿', '--braille',
+ action='store_true',
+ help="Use 2x4 unicode braille characters. Note that braille characters "
+ "sometimes suffer from inconsistent widths.")
+ parser.add_argument(
+ '--chars',
+ help="Characters to use for read, prog, erase, noop operations.")
+ parser.add_argument(
+ '--wear-chars',
+ help="Characters to use for showing wear.")
+ parser.add_argument(
+ '--colors',
+ type=lambda x: [x.strip() for x in x.split(',')],
+ help="Colors to use for read, prog, erase, noop operations.")
+ parser.add_argument(
+ '--wear-colors',
+ type=lambda x: [x.strip() for x in x.split(',')],
+ help="Colors to use for showing wear.")
+ parser.add_argument(
+ '-W', '--width',
+ nargs='?',
+ type=lambda x: int(x, 0),
+ const=0,
+ help="Width in columns. 0 uses the terminal width. Defaults to "
+ "min(terminal, 80).")
+ parser.add_argument(
+ '-H', '--height',
+ nargs='?',
+ type=lambda x: int(x, 0),
+ const=0,
+ help="Height in rows. 0 uses the terminal height. Defaults to 1.")
+ parser.add_argument(
+ '-n', '--lines',
+ nargs='?',
+ type=lambda x: int(x, 0),
+ const=0,
+ help="Show this many lines of history. 0 uses the terminal height. "
+ "Defaults to 5.")
+ parser.add_argument(
+ '-z', '--cat',
+ action='store_true',
+ help="Pipe directly to stdout.")
+ parser.add_argument(
+ '-U', '--hilbert',
+ action='store_true',
+ help="Render as a space-filling Hilbert curve.")
+ parser.add_argument(
+ '-Z', '--lebesgue',
+ action='store_true',
+ help="Render as a space-filling Z-curve.")
+ parser.add_argument(
+ '-c', '--coalesce',
+ type=lambda x: int(x, 0),
+ help="Number of operations to coalesce together.")
+ parser.add_argument(
+ '-s', '--sleep',
+ type=float,
+ help="Time in seconds to sleep between reads, coalescing operations.")
+ parser.add_argument(
+ '-k', '--keep-open',
+ action='store_true',
+ help="Reopen the pipe on EOF, useful when multiple "
+ "processes are writing.")
+ sys.exit(main(**{k: v
+ for k, v in vars(parser.parse_intermixed_args()).items()
+ if v is not None}))
diff --git a/scripts/watch.py b/scripts/watch.py
new file mode 100755
index 00000000..dff06011
--- /dev/null
+++ b/scripts/watch.py
@@ -0,0 +1,265 @@
+#!/usr/bin/env python3
+#
+# Traditional watch command, but with higher-resolution updates and
+# slightly different options/output format
+#
+# Example:
+# ./scripts/watch.py -s0.1 date
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import collections as co
+import errno
+import fcntl
+import io
+import os
+import pty
+import re
+import shutil
+import struct
+import subprocess as sp
+import sys
+import termios
+import time
+
+try:
+ import inotify_simple
+except ModuleNotFoundError:
+ inotify_simple = None
+
+
+def openio(path, mode='r', buffering=-1):
+ # allow '-' for stdin/stdout
+ if path == '-':
+ if mode == 'r':
+ return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+ else:
+ return open(path, mode, buffering)
+
+def inotifywait(paths):
+ # wait for interesting events
+ inotify = inotify_simple.INotify()
+ flags = (inotify_simple.flags.ATTRIB
+ | inotify_simple.flags.CREATE
+ | inotify_simple.flags.DELETE
+ | inotify_simple.flags.DELETE_SELF
+ | inotify_simple.flags.MODIFY
+ | inotify_simple.flags.MOVED_FROM
+ | inotify_simple.flags.MOVED_TO
+ | inotify_simple.flags.MOVE_SELF)
+
+ # recurse into directories
+ for path in paths:
+ if os.path.isdir(path):
+ for dir, _, files in os.walk(path):
+ inotify.add_watch(dir, flags)
+ for f in files:
+ inotify.add_watch(os.path.join(dir, f), flags)
+ else:
+ inotify.add_watch(path, flags)
+
+ # wait for event
+ inotify.read()
+
+class LinesIO:
+ def __init__(self, maxlen=None):
+ self.maxlen = maxlen
+ self.lines = co.deque(maxlen=maxlen)
+ self.tail = io.StringIO()
+
+ # trigger automatic sizing
+ if maxlen == 0:
+ self.resize(0)
+
+ def write(self, s):
+ # note using split here ensures the trailing string has no newline
+ lines = s.split('\n')
+
+ if len(lines) > 1 and self.tail.getvalue():
+ self.tail.write(lines[0])
+ lines[0] = self.tail.getvalue()
+ self.tail = io.StringIO()
+
+ self.lines.extend(lines[:-1])
+
+ if lines[-1]:
+ self.tail.write(lines[-1])
+
+ def resize(self, maxlen):
+ self.maxlen = maxlen
+ if maxlen == 0:
+ maxlen = shutil.get_terminal_size((80, 5))[1]
+ if maxlen != self.lines.maxlen:
+ self.lines = co.deque(self.lines, maxlen=maxlen)
+
+ canvas_lines = 1
+ def draw(self):
+ # did terminal size change?
+ if self.maxlen == 0:
+ self.resize(0)
+
+ # first things first, give ourselves a canvas
+ while LinesIO.canvas_lines < len(self.lines):
+ sys.stdout.write('\n')
+ LinesIO.canvas_lines += 1
+
+ # clear the bottom of the canvas if we shrink
+ shrink = LinesIO.canvas_lines - len(self.lines)
+ if shrink > 0:
+ for i in range(shrink):
+ sys.stdout.write('\r')
+ if shrink-1-i > 0:
+ sys.stdout.write('\x1b[%dA' % (shrink-1-i))
+ sys.stdout.write('\x1b[K')
+ if shrink-1-i > 0:
+ sys.stdout.write('\x1b[%dB' % (shrink-1-i))
+ sys.stdout.write('\x1b[%dA' % shrink)
+ LinesIO.canvas_lines = len(self.lines)
+
+ for i, line in enumerate(self.lines):
+ # move cursor, clear line, disable/reenable line wrapping
+ sys.stdout.write('\r')
+ if len(self.lines)-1-i > 0:
+ sys.stdout.write('\x1b[%dA' % (len(self.lines)-1-i))
+ sys.stdout.write('\x1b[K')
+ sys.stdout.write('\x1b[?7l')
+ sys.stdout.write(line)
+ sys.stdout.write('\x1b[?7h')
+ if len(self.lines)-1-i > 0:
+ sys.stdout.write('\x1b[%dB' % (len(self.lines)-1-i))
+ sys.stdout.flush()
+
+
+def main(command, *,
+ lines=0,
+ cat=False,
+ sleep=None,
+ keep_open=False,
+ keep_open_paths=None,
+ exit_on_error=False):
+ returncode = 0
+ try:
+ while True:
+ # reset ring each run
+ if cat:
+ ring = sys.stdout
+ else:
+ ring = LinesIO(lines)
+
+ try:
+ # run the command under a pseudoterminal
+ mpty, spty = pty.openpty()
+
+ # forward terminal size
+ w, h = shutil.get_terminal_size((80, 5))
+ if lines:
+ h = lines
+ fcntl.ioctl(spty, termios.TIOCSWINSZ,
+ struct.pack('HHHH', h, w, 0, 0))
+
+ proc = sp.Popen(command,
+ stdout=spty,
+ stderr=spty,
+ close_fds=False)
+ os.close(spty)
+ mpty = os.fdopen(mpty, 'r', 1)
+
+ while True:
+ try:
+ line = mpty.readline()
+ except OSError as e:
+ if e.errno != errno.EIO:
+ raise
+ break
+ if not line:
+ break
+
+ ring.write(line)
+ if not cat:
+ ring.draw()
+
+ mpty.close()
+ proc.wait()
+ if exit_on_error and proc.returncode != 0:
+ returncode = proc.returncode
+ break
+ except OSError as e:
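+ # ETXTBSY just means the command's binary is being rewritten,
+ # try again on the next pass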
+ if e.errno != errno.ETXTBSY:
+ raise
+ pass
+
+ # try to inotifywait
+ if keep_open and inotify_simple is not None:
+ if keep_open_paths:
+ paths = set(keep_open_paths)
+ else:
+ # guess inotify paths from command
+ paths = set()
+ for p in command:
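+ # also try stripping option prefixes, e.g. "-fFILE" or
+ # "--flag=FILE"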
+ for p in {
+ p,
+ re.sub('^-.', '', p),
+ re.sub('^--[^=]+=', '', p)}:
+ if p and os.path.exists(p):
+ paths.add(p)
+ ptime = time.time()
+ inotifywait(paths)
+ # sleep for a minimum amount of time; this helps avoid issues
+ # with rapidly updating files
+ time.sleep(max(0, (sleep or 0.1) - (time.time()-ptime)))
+ else:
+ time.sleep(sleep or 0.1)
+ except KeyboardInterrupt:
+ pass
+
+ if not cat:
+ sys.stdout.write('\n')
+ sys.exit(returncode)
+
+
+if __name__ == "__main__":
+ import sys
+ import argparse
+ parser = argparse.ArgumentParser(
+ description="Traditional watch command, but with higher resolution "
+ "updates and a bit different options/output format.",
+ allow_abbrev=False)
+ parser.add_argument(
+ 'command',
+ nargs=argparse.REMAINDER,
+ help="Command to run.")
+ parser.add_argument(
+ '-n', '--lines',
+ nargs='?',
+ type=lambda x: int(x, 0),
+ const=0,
+ help="Show this many lines of history. 0 uses the terminal height. "
+ "Defaults to 0.")
+ parser.add_argument(
+ '-z', '--cat',
+ action='store_true',
+ help="Pipe directly to stdout.")
+ parser.add_argument(
+ '-s', '--sleep',
+ type=float,
+ help="Seconds to sleep between runs. Defaults to 0.1.")
+ parser.add_argument(
+ '-k', '--keep-open',
+ action='store_true',
+ help="Try to use inotify to wait for changes.")
+ parser.add_argument(
+ '-K', '--keep-open-path',
+ dest='keep_open_paths',
+ action='append',
+ help="Use this path for inotify. Defaults to guessing.")
+ parser.add_argument(
+ '-e', '--exit-on-error',
+ action='store_true',
+ help="Exit on error.")
+ sys.exit(main(**{k: v
+ for k, v in vars(parser.parse_args()).items()
+ if v is not None}))
diff --git a/tests/test_alloc.toml b/tests/test_alloc.toml
index fa92da51..205efbb1 100644
--- a/tests/test_alloc.toml
+++ b/tests/test_alloc.toml
@@ -1,27 +1,30 @@
# allocator tests
# note for these to work there are a number of constraints on the device geometry
-if = 'LFS_BLOCK_CYCLES == -1'
+if = 'BLOCK_CYCLES == -1'
-[[case]] # parallel allocation test
-define.FILES = 3
-define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
+# parallel allocation test
+[cases.test_alloc_parallel]
+defines.FILES = 3
+defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
code = '''
- const char *names[FILES] = {"bacon", "eggs", "pancakes"};
+ const char *names[] = {"bacon", "eggs", "pancakes"};
lfs_file_t files[FILES];
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "breakfast") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int n = 0; n < FILES; n++) {
+ char path[1024];
sprintf(path, "breakfast/%s", names[n]);
lfs_file_open(&lfs, &files[n], path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
}
for (int n = 0; n < FILES; n++) {
- size = strlen(names[n]);
+ size_t size = strlen(names[n]);
for (lfs_size_t i = 0; i < SIZE; i += size) {
lfs_file_write(&lfs, &files[n], names[n], size) => size;
}
@@ -31,12 +34,15 @@ code = '''
}
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int n = 0; n < FILES; n++) {
+ char path[1024];
sprintf(path, "breakfast/%s", names[n]);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
- size = strlen(names[n]);
+ size_t size = strlen(names[n]);
for (lfs_size_t i = 0; i < SIZE; i += size) {
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(memcmp(buffer, names[n], size) == 0);
}
@@ -45,23 +51,28 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # serial allocation test
-define.FILES = 3
-define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
+# serial allocation test
+[cases.test_alloc_serial]
+defines.FILES = 3
+defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
code = '''
- const char *names[FILES] = {"bacon", "eggs", "pancakes"};
+ const char *names[] = {"bacon", "eggs", "pancakes"};
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "breakfast") => 0;
lfs_unmount(&lfs) => 0;
for (int n = 0; n < FILES; n++) {
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ char path[1024];
sprintf(path, "breakfast/%s", names[n]);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
- size = strlen(names[n]);
+ size_t size = strlen(names[n]);
+ uint8_t buffer[1024];
memcpy(buffer, names[n], size);
for (int i = 0; i < SIZE; i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
@@ -70,12 +81,15 @@ code = '''
lfs_unmount(&lfs) => 0;
}
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int n = 0; n < FILES; n++) {
+ char path[1024];
sprintf(path, "breakfast/%s", names[n]);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
- size = strlen(names[n]);
+ size_t size = strlen(names[n]);
for (int i = 0; i < SIZE; i += size) {
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(memcmp(buffer, names[n], size) == 0);
}
@@ -84,29 +98,32 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # parallel allocation reuse test
-define.FILES = 3
-define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
-define.CYCLES = [1, 10]
+# parallel allocation reuse test
+[cases.test_alloc_parallel_reuse]
+defines.FILES = 3
+defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
+defines.CYCLES = [1, 10]
code = '''
- const char *names[FILES] = {"bacon", "eggs", "pancakes"};
+ const char *names[] = {"bacon", "eggs", "pancakes"};
lfs_file_t files[FILES];
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
for (int c = 0; c < CYCLES; c++) {
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "breakfast") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int n = 0; n < FILES; n++) {
+ char path[1024];
sprintf(path, "breakfast/%s", names[n]);
lfs_file_open(&lfs, &files[n], path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
}
for (int n = 0; n < FILES; n++) {
- size = strlen(names[n]);
+ size_t size = strlen(names[n]);
for (int i = 0; i < SIZE; i += size) {
lfs_file_write(&lfs, &files[n], names[n], size) => size;
}
@@ -116,12 +133,15 @@ code = '''
}
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int n = 0; n < FILES; n++) {
+ char path[1024];
sprintf(path, "breakfast/%s", names[n]);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
- size = strlen(names[n]);
+ size_t size = strlen(names[n]);
for (int i = 0; i < SIZE; i += size) {
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(memcmp(buffer, names[n], size) == 0);
}
@@ -129,8 +149,9 @@ code = '''
}
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int n = 0; n < FILES; n++) {
+ char path[1024];
sprintf(path, "breakfast/%s", names[n]);
lfs_remove(&lfs, path) => 0;
}
@@ -139,26 +160,31 @@ code = '''
}
'''
-[[case]] # serial allocation reuse test
-define.FILES = 3
-define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
-define.CYCLES = [1, 10]
+# serial allocation reuse test
+[cases.test_alloc_serial_reuse]
+defines.FILES = 3
+defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
+defines.CYCLES = [1, 10]
code = '''
- const char *names[FILES] = {"bacon", "eggs", "pancakes"};
+ const char *names[] = {"bacon", "eggs", "pancakes"};
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
for (int c = 0; c < CYCLES; c++) {
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "breakfast") => 0;
lfs_unmount(&lfs) => 0;
for (int n = 0; n < FILES; n++) {
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ char path[1024];
sprintf(path, "breakfast/%s", names[n]);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
- size = strlen(names[n]);
+ size_t size = strlen(names[n]);
+ uint8_t buffer[1024];
memcpy(buffer, names[n], size);
for (int i = 0; i < SIZE; i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
@@ -167,12 +193,15 @@ code = '''
lfs_unmount(&lfs) => 0;
}
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int n = 0; n < FILES; n++) {
+ char path[1024];
sprintf(path, "breakfast/%s", names[n]);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
- size = strlen(names[n]);
+ size_t size = strlen(names[n]);
for (int i = 0; i < SIZE; i += size) {
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(memcmp(buffer, names[n], size) == 0);
}
@@ -180,8 +209,9 @@ code = '''
}
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int n = 0; n < FILES; n++) {
+ char path[1024];
sprintf(path, "breakfast/%s", names[n]);
lfs_remove(&lfs, path) => 0;
}
@@ -190,12 +220,16 @@ code = '''
}
'''
-[[case]] # exhaustion test
+# exhaustion test
+[cases.test_alloc_exhaustion]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
- size = strlen("exhaustion");
+ size_t size = strlen("exhaustion");
+ uint8_t buffer[1024];
memcpy(buffer, "exhaustion", size);
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_sync(&lfs, &file) => 0;
@@ -216,7 +250,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_RDONLY);
size = strlen("exhaustion");
lfs_file_size(&lfs, &file) => size;
@@ -226,14 +260,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # exhaustion wraparound test
-define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / 3)'
+# exhaustion wraparound test
+[cases.test_alloc_exhaustion_wraparound]
+defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-4)) / 3)'
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "padding", LFS_O_WRONLY | LFS_O_CREAT);
- size = strlen("buffering");
+ size_t size = strlen("buffering");
+ uint8_t buffer[1024];
memcpy(buffer, "buffering", size);
for (int i = 0; i < SIZE; i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
@@ -263,7 +301,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_RDONLY);
size = strlen("exhaustion");
lfs_file_size(&lfs, &file) => size;
@@ -274,17 +312,22 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # dir exhaustion test
+# dir exhaustion test
+[cases.test_alloc_dir_exhaustion]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// find out max file size
lfs_mkdir(&lfs, "exhaustiondir") => 0;
- size = strlen("blahblahblahblah");
+ size_t size = strlen("blahblahblahblah");
+ uint8_t buffer[1024];
memcpy(buffer, "blahblahblahblah", size);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
int count = 0;
+ int err;
while (true) {
err = lfs_file_write(&lfs, &file, buffer, size);
if (err < 0) {
@@ -323,17 +366,21 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # what if we have a bad block during an allocation scan?
+# what if we have a bad block during an allocation scan?
+[cases.test_alloc_bad_blocks]
in = "lfs.c"
-define.LFS_ERASE_CYCLES = 0xffffffff
-define.LFS_BADBLOCK_BEHAVIOR = 'LFS_TESTBD_BADBLOCK_READERROR'
+defines.ERASE_CYCLES = 0xffffffff
+defines.BADBLOCK_BEHAVIOR = 'LFS_EMUBD_BADBLOCK_READERROR'
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// first fill to exhaustion to find available space
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ uint8_t buffer[1024];
strcpy((char*)buffer, "waka");
- size = strlen("waka");
+ size_t size = strlen("waka");
lfs_size_t filesize = 0;
while (true) {
lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
@@ -345,7 +392,7 @@ code = '''
}
lfs_file_close(&lfs, &file) => 0;
// now fill all but a couple of blocks of the filesystem with data
- filesize -= 3*LFS_BLOCK_SIZE;
+ filesize -= 3*BLOCK_SIZE;
lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "waka");
size = strlen("waka");
@@ -358,11 +405,11 @@ code = '''
lfs_unmount(&lfs) => 0;
// remount to force an alloc scan
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
 // but mark the head of our file as a "bad block", this will force our
// scan to bail early
- lfs_testbd_setwear(&cfg, fileblock, 0xffffffff) => 0;
+ lfs_emubd_setwear(cfg, fileblock, 0xffffffff) => 0;
lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "chomp");
size = strlen("chomp");
@@ -377,7 +424,7 @@ code = '''
// now reverse the "bad block" and try to write the file again until we
// run out of space
- lfs_testbd_setwear(&cfg, fileblock, 0) => 0;
+ lfs_emubd_setwear(cfg, fileblock, 0) => 0;
lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "chomp");
size = strlen("chomp");
@@ -393,7 +440,7 @@ code = '''
lfs_unmount(&lfs) => 0;
// check that the disk isn't hurt
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "pacman", LFS_O_RDONLY) => 0;
strcpy((char*)buffer, "waka");
size = strlen("waka");
@@ -411,24 +458,29 @@ code = '''
# on the geometry of the block device. But they are valuable. Eventually they
# should be removed and replaced with generalized tests.
-[[case]] # chained dir exhaustion test
-define.LFS_BLOCK_SIZE = 512
-define.LFS_BLOCK_COUNT = 1024
-if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
+# chained dir exhaustion test
+[cases.test_alloc_chained_dir_exhaustion]
+if = 'BLOCK_SIZE == 512'
+defines.BLOCK_COUNT = 1024
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// find out max file size
lfs_mkdir(&lfs, "exhaustiondir") => 0;
for (int i = 0; i < 10; i++) {
+ char path[1024];
sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
lfs_mkdir(&lfs, path) => 0;
}
- size = strlen("blahblahblahblah");
+ size_t size = strlen("blahblahblahblah");
+ uint8_t buffer[1024];
memcpy(buffer, "blahblahblahblah", size);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
int count = 0;
+ int err;
while (true) {
err = lfs_file_write(&lfs, &file, buffer, size);
if (err < 0) {
@@ -443,6 +495,7 @@ code = '''
lfs_remove(&lfs, "exhaustion") => 0;
lfs_remove(&lfs, "exhaustiondir") => 0;
for (int i = 0; i < 10; i++) {
+ char path[1024];
sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
lfs_remove(&lfs, path) => 0;
}
@@ -455,6 +508,7 @@ code = '''
lfs_file_sync(&lfs, &file) => 0;
for (int i = 0; i < 10; i++) {
+ char path[1024];
sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
lfs_mkdir(&lfs, path) => 0;
}
@@ -482,27 +536,31 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # split dir test
-define.LFS_BLOCK_SIZE = 512
-define.LFS_BLOCK_COUNT = 1024
-if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
+# split dir test
+[cases.test_alloc_split_dir]
+if = 'BLOCK_SIZE == 512'
+defines.BLOCK_COUNT = 1024
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// create one block hole for half a directory
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "bump", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ uint8_t buffer[1024];
- for (lfs_size_t i = 0; i < cfg.block_size; i += 2) {
+ for (lfs_size_t i = 0; i < cfg->block_size; i += 2) {
 memcpy(&buffer[i], "hi", 2);
 }
- lfs_file_write(&lfs, &file, buffer, cfg.block_size) => cfg.block_size;
+ lfs_file_write(&lfs, &file, buffer, cfg->block_size) => cfg->block_size;
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
- size = strlen("blahblahblahblah");
+ size_t size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
- i < (cfg.block_count-4)*(cfg.block_size-8);
+ i < (cfg->block_count-4)*(cfg->block_size-8);
i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@@ -510,7 +568,7 @@ code = '''
// remount to force reset of lookahead
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// open hole
lfs_remove(&lfs, "bump") => 0;
@@ -518,30 +576,33 @@ code = '''
lfs_mkdir(&lfs, "splitdir") => 0;
lfs_file_open(&lfs, &file, "splitdir/bump",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
- for (lfs_size_t i = 0; i < cfg.block_size; i += 2) {
+ for (lfs_size_t i = 0; i < cfg->block_size; i += 2) {
memcpy(&buffer[i], "hi", 2);
}
- lfs_file_write(&lfs, &file, buffer, 2*cfg.block_size) => LFS_ERR_NOSPC;
+ lfs_file_write(&lfs, &file, buffer, 2*cfg->block_size) => LFS_ERR_NOSPC;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
-[[case]] # outdated lookahead test
-define.LFS_BLOCK_SIZE = 512
-define.LFS_BLOCK_COUNT = 1024
-if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
+# outdated lookahead test
+[cases.test_alloc_outdated_lookahead]
+if = 'BLOCK_SIZE == 512'
+defines.BLOCK_COUNT = 1024
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// fill completely with two files
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "exhaustion1",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
- size = strlen("blahblahblahblah");
+ size_t size = strlen("blahblahblahblah");
+ uint8_t buffer[1024];
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
- i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
+ i < ((cfg->block_count-2)/2)*(cfg->block_size-8);
i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@@ -552,7 +613,7 @@ code = '''
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
- i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
+ i < ((cfg->block_count-2+1)/2)*(cfg->block_size-8);
i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@@ -560,7 +621,7 @@ code = '''
// remount to force reset of lookahead
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// rewrite one file
lfs_file_open(&lfs, &file, "exhaustion1",
@@ -569,7 +630,7 @@ code = '''
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
- i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
+ i < ((cfg->block_count-2)/2)*(cfg->block_size-8);
i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@@ -583,7 +644,7 @@ code = '''
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
- i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
+ i < ((cfg->block_count-2+1)/2)*(cfg->block_size-8);
i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@@ -592,21 +653,24 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # outdated lookahead and split dir test
-define.LFS_BLOCK_SIZE = 512
-define.LFS_BLOCK_COUNT = 1024
-if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
+# outdated lookahead and split dir test
+[cases.test_alloc_outdated_lookahead_split_dir]
+if = 'BLOCK_SIZE == 512'
+defines.BLOCK_COUNT = 1024
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// fill completely with two files
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "exhaustion1",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
- size = strlen("blahblahblahblah");
+ size_t size = strlen("blahblahblahblah");
+ uint8_t buffer[1024];
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
- i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
+ i < ((cfg->block_count-2)/2)*(cfg->block_size-8);
i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@@ -617,7 +681,7 @@ code = '''
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
- i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
+ i < ((cfg->block_count-2+1)/2)*(cfg->block_size-8);
i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@@ -625,7 +689,7 @@ code = '''
// remount to force reset of lookahead
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// rewrite one file with a hole of one block
lfs_file_open(&lfs, &file, "exhaustion1",
@@ -634,7 +698,7 @@ code = '''
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
- i < ((cfg.block_count-2)/2 - 1)*(cfg.block_size-8);
+ i < ((cfg->block_count-2)/2 - 1)*(cfg->block_size-8);
i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
diff --git a/tests/test_attrs.toml b/tests/test_attrs.toml
index db8d0c7e..3c69001c 100644
--- a/tests/test_attrs.toml
+++ b/tests/test_attrs.toml
@@ -1,14 +1,17 @@
-[[case]] # set/get attribute
+[cases.test_attrs_get_set]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ uint8_t buffer[1024];
memset(buffer, 0, sizeof(buffer));
lfs_setattr(&lfs, "hello", 'A', "aaaa", 4) => 0;
lfs_setattr(&lfs, "hello", 'B', "bbbbbb", 6) => 0;
@@ -60,7 +63,7 @@ code = '''
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
memset(buffer, 0, sizeof(buffer));
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
lfs_getattr(&lfs, "hello", 'B', buffer+4, 9) => 9;
@@ -76,17 +79,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # set/get root attribute
+[cases.test_attrs_get_set_root]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ uint8_t buffer[1024];
memset(buffer, 0, sizeof(buffer));
lfs_setattr(&lfs, "/", 'A', "aaaa", 4) => 0;
lfs_setattr(&lfs, "/", 'B', "bbbbbb", 6) => 0;
@@ -137,7 +143,7 @@ code = '''
lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
memset(buffer, 0, sizeof(buffer));
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
lfs_getattr(&lfs, "/", 'B', buffer+4, 9) => 9;
@@ -153,17 +159,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # set/get file attribute
+[cases.test_attrs_get_set_file]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ uint8_t buffer[1024];
memset(buffer, 0, sizeof(buffer));
struct lfs_attr attrs1[] = {
{'A', buffer, 4},
@@ -238,7 +247,7 @@ code = '''
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
memset(buffer, 0, sizeof(buffer));
struct lfs_attr attrs3[] = {
{'A', buffer, 4},
@@ -260,20 +269,23 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # deferred file attributes
+[cases.test_attrs_deferred_file]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_setattr(&lfs, "hello/hello", 'B', "fffffffff", 9) => 0;
lfs_setattr(&lfs, "hello/hello", 'C', "ccccc", 5) => 0;
+ uint8_t buffer[1024];
memset(buffer, 0, sizeof(buffer));
struct lfs_attr attrs1[] = {
{'B', "gggg", 4},
diff --git a/tests/test_badblocks.toml b/tests/test_badblocks.toml
index 06967a67..b50b3933 100644
--- a/tests/test_badblocks.toml
+++ b/tests/test_badblocks.toml
@@ -1,28 +1,30 @@
# bad blocks with block cycles should be tested in test_relocations
-if = 'LFS_BLOCK_CYCLES == -1'
-
-[[case]] # single bad blocks
-define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
-define.LFS_ERASE_CYCLES = 0xffffffff
-define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
-define.LFS_BADBLOCK_BEHAVIOR = [
- 'LFS_TESTBD_BADBLOCK_PROGERROR',
- 'LFS_TESTBD_BADBLOCK_ERASEERROR',
- 'LFS_TESTBD_BADBLOCK_READERROR',
- 'LFS_TESTBD_BADBLOCK_PROGNOOP',
- 'LFS_TESTBD_BADBLOCK_ERASENOOP',
+if = '(int32_t)BLOCK_CYCLES == -1'
+
+[cases.test_badblocks_single]
+defines.BLOCK_COUNT = 256 # small bd so test runs faster
+defines.ERASE_CYCLES = 0xffffffff
+defines.ERASE_VALUE = [0x00, 0xff, -1]
+defines.BADBLOCK_BEHAVIOR = [
+ 'LFS_EMUBD_BADBLOCK_PROGERROR',
+ 'LFS_EMUBD_BADBLOCK_ERASEERROR',
+ 'LFS_EMUBD_BADBLOCK_READERROR',
+ 'LFS_EMUBD_BADBLOCK_PROGNOOP',
+ 'LFS_EMUBD_BADBLOCK_ERASENOOP',
]
-define.NAMEMULT = 64
-define.FILEMULT = 1
+defines.NAMEMULT = 64
+defines.FILEMULT = 1
code = '''
- for (lfs_block_t badblock = 2; badblock < LFS_BLOCK_COUNT; badblock++) {
- lfs_testbd_setwear(&cfg, badblock-1, 0) => 0;
- lfs_testbd_setwear(&cfg, badblock, 0xffffffff) => 0;
-
- lfs_format(&lfs, &cfg) => 0;
+ for (lfs_block_t badblock = 2; badblock < BLOCK_COUNT; badblock++) {
+ lfs_emubd_setwear(cfg, badblock-1, 0) => 0;
+ lfs_emubd_setwear(cfg, badblock, 0xffffffff) => 0;
+
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 1; i < 10; i++) {
+ uint8_t buffer[1024];
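+ // build repeated-digit names by hand: NAMEMULT digits for the dir
+ // name, then NAMEMULT more digits after a separator for the file name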
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
@@ -34,10 +36,11 @@ code = '''
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
+ lfs_file_t file;
lfs_file_open(&lfs, &file, (char*)buffer,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
- size = NAMEMULT;
+ lfs_size_t size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@@ -46,12 +49,14 @@ code = '''
}
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 1; i < 10; i++) {
+ uint8_t buffer[1024];
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
buffer[NAMEMULT] = '\0';
+ struct lfs_info info;
lfs_stat(&lfs, (char*)buffer, &info) => 0;
info.type => LFS_TYPE_DIR;
@@ -60,9 +65,10 @@ code = '''
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
+ lfs_file_t file;
lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
- size = NAMEMULT;
+ lfs_size_t size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
@@ -75,28 +81,30 @@ code = '''
}
'''
-[[case]] # region corruption (causes cascading failures)
-define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
-define.LFS_ERASE_CYCLES = 0xffffffff
-define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
-define.LFS_BADBLOCK_BEHAVIOR = [
- 'LFS_TESTBD_BADBLOCK_PROGERROR',
- 'LFS_TESTBD_BADBLOCK_ERASEERROR',
- 'LFS_TESTBD_BADBLOCK_READERROR',
- 'LFS_TESTBD_BADBLOCK_PROGNOOP',
- 'LFS_TESTBD_BADBLOCK_ERASENOOP',
+[cases.test_badblocks_region_corruption] # (causes cascading failures)
+defines.BLOCK_COUNT = 256 # small bd so test runs faster
+defines.ERASE_CYCLES = 0xffffffff
+defines.ERASE_VALUE = [0x00, 0xff, -1]
+defines.BADBLOCK_BEHAVIOR = [
+ 'LFS_EMUBD_BADBLOCK_PROGERROR',
+ 'LFS_EMUBD_BADBLOCK_ERASEERROR',
+ 'LFS_EMUBD_BADBLOCK_READERROR',
+ 'LFS_EMUBD_BADBLOCK_PROGNOOP',
+ 'LFS_EMUBD_BADBLOCK_ERASENOOP',
]
-define.NAMEMULT = 64
-define.FILEMULT = 1
+defines.NAMEMULT = 64
+defines.FILEMULT = 1
code = '''
- for (lfs_block_t i = 0; i < (LFS_BLOCK_COUNT-2)/2; i++) {
- lfs_testbd_setwear(&cfg, i+2, 0xffffffff) => 0;
+ for (lfs_block_t i = 0; i < (BLOCK_COUNT-2)/2; i++) {
+ lfs_emubd_setwear(cfg, i+2, 0xffffffff) => 0;
}
-
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 1; i < 10; i++) {
+ uint8_t buffer[1024];
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
@@ -108,10 +116,11 @@ code = '''
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
+ lfs_file_t file;
lfs_file_open(&lfs, &file, (char*)buffer,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
- size = NAMEMULT;
+ lfs_size_t size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@@ -120,12 +129,14 @@ code = '''
}
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 1; i < 10; i++) {
+ uint8_t buffer[1024];
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
buffer[NAMEMULT] = '\0';
+ struct lfs_info info;
lfs_stat(&lfs, (char*)buffer, &info) => 0;
info.type => LFS_TYPE_DIR;
@@ -134,9 +145,10 @@ code = '''
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
+ lfs_file_t file;
lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
- size = NAMEMULT;
+ lfs_size_t size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
@@ -148,28 +160,30 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # alternating corruption (causes cascading failures)
-define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
-define.LFS_ERASE_CYCLES = 0xffffffff
-define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
-define.LFS_BADBLOCK_BEHAVIOR = [
- 'LFS_TESTBD_BADBLOCK_PROGERROR',
- 'LFS_TESTBD_BADBLOCK_ERASEERROR',
- 'LFS_TESTBD_BADBLOCK_READERROR',
- 'LFS_TESTBD_BADBLOCK_PROGNOOP',
- 'LFS_TESTBD_BADBLOCK_ERASENOOP',
+[cases.test_badblocks_alternating_corruption] # (causes cascading failures)
+defines.BLOCK_COUNT = 256 # small bd so test runs faster
+defines.ERASE_CYCLES = 0xffffffff
+defines.ERASE_VALUE = [0x00, 0xff, -1]
+defines.BADBLOCK_BEHAVIOR = [
+ 'LFS_EMUBD_BADBLOCK_PROGERROR',
+ 'LFS_EMUBD_BADBLOCK_ERASEERROR',
+ 'LFS_EMUBD_BADBLOCK_READERROR',
+ 'LFS_EMUBD_BADBLOCK_PROGNOOP',
+ 'LFS_EMUBD_BADBLOCK_ERASENOOP',
]
-define.NAMEMULT = 64
-define.FILEMULT = 1
+defines.NAMEMULT = 64
+defines.FILEMULT = 1
code = '''
- for (lfs_block_t i = 0; i < (LFS_BLOCK_COUNT-2)/2; i++) {
- lfs_testbd_setwear(&cfg, (2*i) + 2, 0xffffffff) => 0;
+ for (lfs_block_t i = 0; i < (BLOCK_COUNT-2)/2; i++) {
+ lfs_emubd_setwear(cfg, (2*i) + 2, 0xffffffff) => 0;
}
-
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 1; i < 10; i++) {
+ uint8_t buffer[1024];
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
@@ -181,10 +195,11 @@ code = '''
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
+ lfs_file_t file;
lfs_file_open(&lfs, &file, (char*)buffer,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
- size = NAMEMULT;
+ lfs_size_t size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@@ -193,12 +208,14 @@ code = '''
}
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 1; i < 10; i++) {
+ uint8_t buffer[1024];
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
buffer[NAMEMULT] = '\0';
+ struct lfs_info info;
lfs_stat(&lfs, (char*)buffer, &info) => 0;
info.type => LFS_TYPE_DIR;
@@ -207,9 +224,10 @@ code = '''
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
+ lfs_file_t file;
lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
- size = NAMEMULT;
+ lfs_size_t size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
@@ -222,20 +240,21 @@ code = '''
'''
# other corner cases
-[[case]] # bad superblocks (corrupt 1 or 0)
-define.LFS_ERASE_CYCLES = 0xffffffff
-define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
-define.LFS_BADBLOCK_BEHAVIOR = [
- 'LFS_TESTBD_BADBLOCK_PROGERROR',
- 'LFS_TESTBD_BADBLOCK_ERASEERROR',
- 'LFS_TESTBD_BADBLOCK_READERROR',
- 'LFS_TESTBD_BADBLOCK_PROGNOOP',
- 'LFS_TESTBD_BADBLOCK_ERASENOOP',
+[cases.test_badblocks_superblocks] # (corrupt 1 or 0)
+defines.ERASE_CYCLES = 0xffffffff
+defines.ERASE_VALUE = [0x00, 0xff, -1]
+defines.BADBLOCK_BEHAVIOR = [
+ 'LFS_EMUBD_BADBLOCK_PROGERROR',
+ 'LFS_EMUBD_BADBLOCK_ERASEERROR',
+ 'LFS_EMUBD_BADBLOCK_READERROR',
+ 'LFS_EMUBD_BADBLOCK_PROGNOOP',
+ 'LFS_EMUBD_BADBLOCK_ERASENOOP',
]
code = '''
- lfs_testbd_setwear(&cfg, 0, 0xffffffff) => 0;
- lfs_testbd_setwear(&cfg, 1, 0xffffffff) => 0;
+ lfs_emubd_setwear(cfg, 0, 0xffffffff) => 0;
+ lfs_emubd_setwear(cfg, 1, 0xffffffff) => 0;
- lfs_format(&lfs, &cfg) => LFS_ERR_NOSPC;
- lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => LFS_ERR_NOSPC;
+ lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
'''
diff --git a/tests/test_bd.toml b/tests/test_bd.toml
new file mode 100644
index 00000000..8c6510df
--- /dev/null
+++ b/tests/test_bd.toml
@@ -0,0 +1,248 @@
+# These tests don't really test littlefs at all; they are here only to make
+# sure the underlying block device is working.
+#
+# Note we use 251, a prime, in places to avoid aliasing powers of 2.
+#
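+# (A concrete illustration of the aliasing concern, as a hedged aside: the
+# fill pattern below is (block+i+j) % 251. Were the modulus a power of 2,
+# say 256, offsets differing by 256 would carry identical bytes, so a prog
+# or read that lands a power-of-2 stride away could go undetected on a
+# power-of-2 geometry. 251 stays out of phase with any such stride.)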
+
+[cases.test_bd_one_block]
+defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
+defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
+code = '''
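+ // one buffer sized for the larger of the two access granularities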
+ uint8_t buffer[lfs_max(READ, PROG)];
+
+ // write data
+ cfg->erase(cfg, 0) => 0;
+ for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
+ for (lfs_off_t j = 0; j < PROG; j++) {
+ buffer[j] = (i+j) % 251;
+ }
+ cfg->prog(cfg, 0, i, buffer, PROG) => 0;
+ }
+
+ // read data
+ for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
+ cfg->read(cfg, 0, i, buffer, READ) => 0;
+
+ for (lfs_off_t j = 0; j < READ; j++) {
+ LFS_ASSERT(buffer[j] == (i+j) % 251);
+ }
+ }
+'''
+
+[cases.test_bd_two_block]
+defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
+defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
+code = '''
+ uint8_t buffer[lfs_max(READ, PROG)];
+ lfs_block_t block;
+
+ // write block 0
+ block = 0;
+ cfg->erase(cfg, block) => 0;
+ for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
+ for (lfs_off_t j = 0; j < PROG; j++) {
+ buffer[j] = (block+i+j) % 251;
+ }
+ cfg->prog(cfg, block, i, buffer, PROG) => 0;
+ }
+
+ // read block 0
+ block = 0;
+ for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
+ cfg->read(cfg, block, i, buffer, READ) => 0;
+
+ for (lfs_off_t j = 0; j < READ; j++) {
+ LFS_ASSERT(buffer[j] == (block+i+j) % 251);
+ }
+ }
+
+ // write block 1
+ block = 1;
+ cfg->erase(cfg, block) => 0;
+ for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
+ for (lfs_off_t j = 0; j < PROG; j++) {
+ buffer[j] = (block+i+j) % 251;
+ }
+ cfg->prog(cfg, block, i, buffer, PROG) => 0;
+ }
+
+ // read block 1
+ block = 1;
+ for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
+ cfg->read(cfg, block, i, buffer, READ) => 0;
+
+ for (lfs_off_t j = 0; j < READ; j++) {
+ LFS_ASSERT(buffer[j] == (block+i+j) % 251);
+ }
+ }
+
+ // read block 0 again
+ block = 0;
+ for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
+ cfg->read(cfg, block, i, buffer, READ) => 0;
+
+ for (lfs_off_t j = 0; j < READ; j++) {
+ LFS_ASSERT(buffer[j] == (block+i+j) % 251);
+ }
+ }
+'''
+
+[cases.test_bd_last_block]
+defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
+defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
+code = '''
+ uint8_t buffer[lfs_max(READ, PROG)];
+ lfs_block_t block;
+
+ // write block 0
+ block = 0;
+ cfg->erase(cfg, block) => 0;
+ for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
+ for (lfs_off_t j = 0; j < PROG; j++) {
+ buffer[j] = (block+i+j) % 251;
+ }
+ cfg->prog(cfg, block, i, buffer, PROG) => 0;
+ }
+
+ // read block 0
+ block = 0;
+ for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
+ cfg->read(cfg, block, i, buffer, READ) => 0;
+
+ for (lfs_off_t j = 0; j < READ; j++) {
+ LFS_ASSERT(buffer[j] == (block+i+j) % 251);
+ }
+ }
+
+ // write block n-1
+ block = cfg->block_count-1;
+ cfg->erase(cfg, block) => 0;
+ for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
+ for (lfs_off_t j = 0; j < PROG; j++) {
+ buffer[j] = (block+i+j) % 251;
+ }
+ cfg->prog(cfg, block, i, buffer, PROG) => 0;
+ }
+
+ // read block n-1
+ block = cfg->block_count-1;
+ for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
+ cfg->read(cfg, block, i, buffer, READ) => 0;
+
+ for (lfs_off_t j = 0; j < READ; j++) {
+ LFS_ASSERT(buffer[j] == (block+i+j) % 251);
+ }
+ }
+
+ // read block 0 again
+ block = 0;
+ for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
+ cfg->read(cfg, block, i, buffer, READ) => 0;
+
+ for (lfs_off_t j = 0; j < READ; j++) {
+ LFS_ASSERT(buffer[j] == (block+i+j) % 251);
+ }
+ }
+'''
+
+[cases.test_bd_powers_of_two]
+defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
+defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
+code = '''
+ uint8_t buffer[lfs_max(READ, PROG)];
+
+ // write/read every power of 2
+ lfs_block_t block = 1;
+ while (block < cfg->block_count) {
+ // write
+ cfg->erase(cfg, block) => 0;
+ for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
+ for (lfs_off_t j = 0; j < PROG; j++) {
+ buffer[j] = (block+i+j) % 251;
+ }
+ cfg->prog(cfg, block, i, buffer, PROG) => 0;
+ }
+
+ // read
+ for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
+ cfg->read(cfg, block, i, buffer, READ) => 0;
+
+ for (lfs_off_t j = 0; j < READ; j++) {
+ LFS_ASSERT(buffer[j] == (block+i+j) % 251);
+ }
+ }
+
+ block *= 2;
+ }
+
+ // read every power of 2 again
+ block = 1;
+ while (block < cfg->block_count) {
+ // read
+ for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
+ cfg->read(cfg, block, i, buffer, READ) => 0;
+
+ for (lfs_off_t j = 0; j < READ; j++) {
+ LFS_ASSERT(buffer[j] == (block+i+j) % 251);
+ }
+ }
+
+ block *= 2;
+ }
+'''
+
+[cases.test_bd_fibonacci]
+defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
+defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
+code = '''
+ uint8_t buffer[lfs_max(READ, PROG)];
+
+ // write/read every fibonacci number on our device
+ lfs_block_t block = 1;
+ lfs_block_t block_ = 1;
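+ // block and block_ walk the Fibonacci sequence: each step advances
+ // block to block + block_, with block_ trailing one step behind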
+ while (block < cfg->block_count) {
+ // write
+ cfg->erase(cfg, block) => 0;
+ for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
+ for (lfs_off_t j = 0; j < PROG; j++) {
+ buffer[j] = (block+i+j) % 251;
+ }
+ cfg->prog(cfg, block, i, buffer, PROG) => 0;
+ }
+
+ // read
+ for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
+ cfg->read(cfg, block, i, buffer, READ) => 0;
+
+ for (lfs_off_t j = 0; j < READ; j++) {
+ LFS_ASSERT(buffer[j] == (block+i+j) % 251);
+ }
+ }
+
+ lfs_block_t nblock = block + block_;
+ block_ = block;
+ block = nblock;
+ }
+
+ // read every fibonacci number again
+ block = 1;
+ block_ = 1;
+ while (block < cfg->block_count) {
+ // read
+ for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
+ cfg->read(cfg, block, i, buffer, READ) => 0;
+
+ for (lfs_off_t j = 0; j < READ; j++) {
+ LFS_ASSERT(buffer[j] == (block+i+j) % 251);
+ }
+ }
+
+ lfs_block_t nblock = block + block_;
+ block_ = block;
+ block = nblock;
+ }
+'''
+
+
+
+
diff --git a/tests/test_compat.toml b/tests/test_compat.toml
new file mode 100644
index 00000000..a36c38a4
--- /dev/null
+++ b/tests/test_compat.toml
@@ -0,0 +1,1360 @@
+# Tests for compatibility between different littlefs versions
+#
+# Note, these tests are a bit special. They expect to be linked against two
+# different versions of littlefs:
+# - lfs => the new/current version of littlefs
+# - lfsp => the previous version of littlefs
+#
+# If lfsp is not linked, and LFSP is not defined, these tests will alias
+# the relevant lfs types/functions as necessary so at least the tests can
+# themselves be tested locally.
+#
+# But to get value from these tests, it's expected that the previous version
+# of littlefs be linked in during CI, with the help of scripts/changeprefix.py
+#
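+# (How that linking is done is outside this file: roughly, CI builds the
+# previous release with its lfs_ prefix rewritten to lfsp_ so both versions
+# can coexist in one binary; the exact changeprefix.py invocation is not
+# assumed here.)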
+
+# alias littlefs symbols as needed
+#
+# there may be a better way to do this, but oh well, explicit aliases work
+code = '''
+#ifdef LFSP
+#define STRINGIZE(x) STRINGIZE_(x)
+#define STRINGIZE_(x) #x
+#include STRINGIZE(LFSP)
+#else
+#define LFSP_VERSION LFS_VERSION
+#define LFSP_VERSION_MAJOR LFS_VERSION_MAJOR
+#define LFSP_VERSION_MINOR LFS_VERSION_MINOR
+#define lfsp_t lfs_t
+#define lfsp_config lfs_config
+#define lfsp_format lfs_format
+#define lfsp_mount lfs_mount
+#define lfsp_unmount lfs_unmount
+#define lfsp_dir_t lfs_dir_t
+#define lfsp_info lfs_info
+#define LFSP_TYPE_REG LFS_TYPE_REG
+#define LFSP_TYPE_DIR LFS_TYPE_DIR
+#define lfsp_mkdir lfs_mkdir
+#define lfsp_dir_open lfs_dir_open
+#define lfsp_dir_read lfs_dir_read
+#define lfsp_dir_close lfs_dir_close
+#define lfsp_file_t lfs_file_t
+#define LFSP_O_RDONLY LFS_O_RDONLY
+#define LFSP_O_WRONLY LFS_O_WRONLY
+#define LFSP_O_CREAT LFS_O_CREAT
+#define LFSP_O_EXCL LFS_O_EXCL
+#define LFSP_SEEK_SET LFS_SEEK_SET
+#define lfsp_file_open lfs_file_open
+#define lfsp_file_write lfs_file_write
+#define lfsp_file_read lfs_file_read
+#define lfsp_file_seek lfs_file_seek
+#define lfsp_file_close lfs_file_close
+#endif
+'''
+
+
+
+## forward-compatibility tests ##
+
+# test we can mount in a new version
+[cases.test_compat_forward_mount]
+if = 'LFS_VERSION_MAJOR == LFSP_VERSION_MAJOR'
+code = '''
+ // create the previous version
+ struct lfsp_config cfgp;
+ memcpy(&cfgp, cfg, sizeof(cfgp));
+ lfsp_t lfsp;
+ lfsp_format(&lfsp, &cfgp) => 0;
+
+ // confirm the previous mount works
+ lfsp_mount(&lfsp, &cfgp) => 0;
+ lfsp_unmount(&lfsp) => 0;
+
+
+ // now test the new mount
+ lfs_t lfs;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+# test we can read dirs in a new version
+[cases.test_compat_forward_read_dirs]
+defines.COUNT = 5
+if = 'LFS_VERSION_MAJOR == LFSP_VERSION_MAJOR'
+code = '''
+ // create the previous version
+ struct lfsp_config cfgp;
+ memcpy(&cfgp, cfg, sizeof(cfgp));
+ lfsp_t lfsp;
+ lfsp_format(&lfsp, &cfgp) => 0;
+
+ // write COUNT dirs
+ lfsp_mount(&lfsp, &cfgp) => 0;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ lfsp_mkdir(&lfsp, name) => 0;
+ }
+ lfsp_unmount(&lfsp) => 0;
+
+
+ // mount the new version
+ lfs_t lfs;
+ lfs_mount(&lfs, cfg) => 0;
+
+ // can we list the directories?
+ lfs_dir_t dir;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ assert(strcmp(info.name, name) == 0);
+ }
+
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+# test we can read files in a new version
+[cases.test_compat_forward_read_files]
+defines.COUNT = 5
+defines.SIZE = [4, 32, 512, 8192]
+defines.CHUNK = 4
+if = 'LFS_VERSION_MAJOR == LFSP_VERSION_MAJOR'
+code = '''
+ // create the previous version
+ struct lfsp_config cfgp;
+ memcpy(&cfgp, cfg, sizeof(cfgp));
+ lfsp_t lfsp;
+ lfsp_format(&lfsp, &cfgp) => 0;
+
+ // write COUNT files
+ lfsp_mount(&lfsp, &cfgp) => 0;
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfsp_file_t file;
+ char name[8];
+ sprintf(name, "file%03d", i);
+ lfsp_file_open(&lfsp, &file, name,
+ LFSP_O_WRONLY | LFSP_O_CREAT | LFSP_O_EXCL) => 0;
+ for (lfs_size_t j = 0; j < SIZE; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ chunk[k] = TEST_PRNG(&prng) & 0xff;
+ }
+
+ lfsp_file_write(&lfsp, &file, chunk, CHUNK) => CHUNK;
+ }
+ lfsp_file_close(&lfsp, &file) => 0;
+ }
+ lfsp_unmount(&lfsp) => 0;
+
+
+ // mount the new version
+ lfs_t lfs;
+ lfs_mount(&lfs, cfg) => 0;
+
+ // can we list the files?
+ lfs_dir_t dir;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_REG);
+ char name[8];
+ sprintf(name, "file%03d", i);
+ assert(strcmp(info.name, name) == 0);
+ assert(info.size == SIZE);
+ }
+
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ // now can we read the files?
+ prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfs_file_t file;
+ char name[8];
+ sprintf(name, "file%03d", i);
+ lfs_file_open(&lfs, &file, name, LFS_O_RDONLY) => 0;
+ for (lfs_size_t j = 0; j < SIZE; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ lfs_file_read(&lfs, &file, chunk, CHUNK) => CHUNK;
+
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ assert(chunk[k] == (TEST_PRNG(&prng) & 0xff));
+ }
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ }
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+# test we can read files in dirs in a new version
+[cases.test_compat_forward_read_files_in_dirs]
+defines.COUNT = 5
+defines.SIZE = [4, 32, 512, 8192]
+defines.CHUNK = 4
+if = 'LFS_VERSION_MAJOR == LFSP_VERSION_MAJOR'
+code = '''
+ // create the previous version
+ struct lfsp_config cfgp;
+ memcpy(&cfgp, cfg, sizeof(cfgp));
+ lfsp_t lfsp;
+ lfsp_format(&lfsp, &cfgp) => 0;
+
+ // write COUNT files+dirs
+ lfsp_mount(&lfsp, &cfgp) => 0;
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ char name[16];
+ sprintf(name, "dir%03d", i);
+ lfsp_mkdir(&lfsp, name) => 0;
+
+ lfsp_file_t file;
+ sprintf(name, "dir%03d/file%03d", i, i);
+ lfsp_file_open(&lfsp, &file, name,
+ LFSP_O_WRONLY | LFSP_O_CREAT | LFSP_O_EXCL) => 0;
+ for (lfs_size_t j = 0; j < SIZE; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ chunk[k] = TEST_PRNG(&prng) & 0xff;
+ }
+
+ lfsp_file_write(&lfsp, &file, chunk, CHUNK) => CHUNK;
+ }
+ lfsp_file_close(&lfsp, &file) => 0;
+ }
+ lfsp_unmount(&lfsp) => 0;
+
+
+ // mount the new version
+ lfs_t lfs;
+ lfs_mount(&lfs, cfg) => 0;
+
+ // can we list the directories?
+ lfs_dir_t dir;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ assert(strcmp(info.name, name) == 0);
+ }
+
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ // can we list the files?
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ lfs_dir_t dir;
+ lfs_dir_open(&lfs, &dir, name) => 0;
+ struct lfs_info info;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_REG);
+ sprintf(name, "file%03d", i);
+ assert(strcmp(info.name, name) == 0);
+ assert(info.size == SIZE);
+
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ }
+
+ // now can we read the files?
+ prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfs_file_t file;
+ char name[16];
+ sprintf(name, "dir%03d/file%03d", i, i);
+ lfs_file_open(&lfs, &file, name, LFS_O_RDONLY) => 0;
+ for (lfs_size_t j = 0; j < SIZE; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ lfs_file_read(&lfs, &file, chunk, CHUNK) => CHUNK;
+
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ assert(chunk[k] == (TEST_PRNG(&prng) & 0xff));
+ }
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ }
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+# test we can write dirs in a new version
+[cases.test_compat_forward_write_dirs]
+defines.COUNT = 10
+if = 'LFS_VERSION_MAJOR == LFSP_VERSION_MAJOR'
+code = '''
+ // create the previous version
+ struct lfsp_config cfgp;
+ memcpy(&cfgp, cfg, sizeof(cfgp));
+ lfsp_t lfsp;
+ lfsp_format(&lfsp, &cfgp) => 0;
+
+ // write COUNT/2 dirs
+ lfsp_mount(&lfsp, &cfgp) => 0;
+ for (lfs_size_t i = 0; i < COUNT/2; i++) {
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ lfsp_mkdir(&lfsp, name) => 0;
+ }
+ lfsp_unmount(&lfsp) => 0;
+
+
+ // mount the new version
+ lfs_t lfs;
+ lfs_mount(&lfs, cfg) => 0;
+
+ // write another COUNT/2 dirs
+ for (lfs_size_t i = COUNT/2; i < COUNT; i++) {
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ lfs_mkdir(&lfs, name) => 0;
+ }
+
+ // can we list the directories?
+ lfs_dir_t dir;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ assert(strcmp(info.name, name) == 0);
+ }
+
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+# test we can write files in a new version
+[cases.test_compat_forward_write_files]
+defines.COUNT = 5
+defines.SIZE = [4, 32, 512, 8192]
+defines.CHUNK = 2
+if = 'LFS_VERSION_MAJOR == LFSP_VERSION_MAJOR'
+code = '''
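+ // scheme: the previous version writes the first half of each file, the
+ // new version then seeks to SIZE/2 and writes the rest; a shared prng
+ // seed (42) lets one sequential read-back verify both halves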
+ // create the previous version
+ struct lfsp_config cfgp;
+ memcpy(&cfgp, cfg, sizeof(cfgp));
+ lfsp_t lfsp;
+ lfsp_format(&lfsp, &cfgp) => 0;
+
+ // write the first half of each of COUNT files
+ lfsp_mount(&lfsp, &cfgp) => 0;
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ // write half
+ lfsp_file_t file;
+ char name[8];
+ sprintf(name, "file%03d", i);
+ lfsp_file_open(&lfsp, &file, name,
+ LFSP_O_WRONLY | LFSP_O_CREAT | LFSP_O_EXCL) => 0;
+ for (lfs_size_t j = 0; j < SIZE/2; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ chunk[k] = TEST_PRNG(&prng) & 0xff;
+ }
+
+ lfsp_file_write(&lfsp, &file, chunk, CHUNK) => CHUNK;
+ }
+ lfsp_file_close(&lfsp, &file) => 0;
+
+ // skip the other half but keep our prng reproducible
+ for (lfs_size_t j = SIZE/2; j < SIZE; j++) {
+ TEST_PRNG(&prng);
+ }
+ }
+ lfsp_unmount(&lfsp) => 0;
+
+
+ // mount the new version
+ lfs_t lfs;
+ lfs_mount(&lfs, cfg) => 0;
+
+ // write the second half of each of COUNT files
+ prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ // skip half but keep our prng reproducible
+ for (lfs_size_t j = 0; j < SIZE/2; j++) {
+ TEST_PRNG(&prng);
+ }
+
+ // write the other half
+ lfs_file_t file;
+ char name[8];
+ sprintf(name, "file%03d", i);
+ lfs_file_open(&lfs, &file, name, LFS_O_WRONLY) => 0;
+ lfs_file_seek(&lfs, &file, SIZE/2, LFS_SEEK_SET) => SIZE/2;
+
+ for (lfs_size_t j = SIZE/2; j < SIZE; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ chunk[k] = TEST_PRNG(&prng) & 0xff;
+ }
+
+ lfs_file_write(&lfs, &file, chunk, CHUNK) => CHUNK;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ }
+
+ // can we list the files?
+ lfs_dir_t dir;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_REG);
+ char name[8];
+ sprintf(name, "file%03d", i);
+ assert(strcmp(info.name, name) == 0);
+ assert(info.size == SIZE);
+ }
+
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ // now can we read the files?
+ prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfs_file_t file;
+ char name[8];
+ sprintf(name, "file%03d", i);
+ lfs_file_open(&lfs, &file, name, LFS_O_RDONLY) => 0;
+ for (lfs_size_t j = 0; j < SIZE; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ lfs_file_read(&lfs, &file, chunk, CHUNK) => CHUNK;
+
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ assert(chunk[k] == (TEST_PRNG(&prng) & 0xff));
+ }
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ }
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+# test we can write files in dirs in a new version
+[cases.test_compat_forward_write_files_in_dirs]
+defines.COUNT = 5
+defines.SIZE = [4, 32, 512, 8192]
+defines.CHUNK = 2
+if = 'LFS_VERSION_MAJOR == LFSP_VERSION_MAJOR'
+code = '''
+ // create the previous version
+ struct lfsp_config cfgp;
+ memcpy(&cfgp, cfg, sizeof(cfgp));
+ lfsp_t lfsp;
+ lfsp_format(&lfsp, &cfgp) => 0;
+
+ // write COUNT dirs, each containing the first half of a file
+ lfsp_mount(&lfsp, &cfgp) => 0;
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ char name[16];
+ sprintf(name, "dir%03d", i);
+ lfsp_mkdir(&lfsp, name) => 0;
+
+ // write half
+ lfsp_file_t file;
+ sprintf(name, "dir%03d/file%03d", i, i);
+ lfsp_file_open(&lfsp, &file, name,
+ LFSP_O_WRONLY | LFSP_O_CREAT | LFSP_O_EXCL) => 0;
+ for (lfs_size_t j = 0; j < SIZE/2; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ chunk[k] = TEST_PRNG(&prng) & 0xff;
+ }
+
+ lfsp_file_write(&lfsp, &file, chunk, CHUNK) => CHUNK;
+ }
+ lfsp_file_close(&lfsp, &file) => 0;
+
+ // skip the other half but keep our prng reproducible
+ for (lfs_size_t j = SIZE/2; j < SIZE; j++) {
+ TEST_PRNG(&prng);
+ }
+ }
+ lfsp_unmount(&lfsp) => 0;
+
+
+ // mount the new version
+ lfs_t lfs;
+ lfs_mount(&lfs, cfg) => 0;
+
+ // write the second half of each file
+ prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ // skip half but keep our prng reproducible
+ for (lfs_size_t j = 0; j < SIZE/2; j++) {
+ TEST_PRNG(&prng);
+ }
+
+ // write the other half
+ lfs_file_t file;
+ char name[16];
+ sprintf(name, "dir%03d/file%03d", i, i);
+ lfs_file_open(&lfs, &file, name, LFS_O_WRONLY) => 0;
+ lfs_file_seek(&lfs, &file, SIZE/2, LFS_SEEK_SET) => SIZE/2;
+
+ for (lfs_size_t j = SIZE/2; j < SIZE; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ chunk[k] = TEST_PRNG(&prng) & 0xff;
+ }
+
+ lfs_file_write(&lfs, &file, chunk, CHUNK) => CHUNK;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ }
+
+ // can we list the directories?
+ lfs_dir_t dir;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ assert(strcmp(info.name, name) == 0);
+ }
+
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ // can we list the files?
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ lfs_dir_t dir;
+ lfs_dir_open(&lfs, &dir, name) => 0;
+ struct lfs_info info;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_REG);
+ sprintf(name, "file%03d", i);
+ assert(strcmp(info.name, name) == 0);
+ assert(info.size == SIZE);
+
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ }
+
+ // now can we read the files?
+ prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfs_file_t file;
+ char name[16];
+ sprintf(name, "dir%03d/file%03d", i, i);
+ lfs_file_open(&lfs, &file, name, LFS_O_RDONLY) => 0;
+ for (lfs_size_t j = 0; j < SIZE; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ lfs_file_read(&lfs, &file, chunk, CHUNK) => CHUNK;
+
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ assert(chunk[k] == (TEST_PRNG(&prng) & 0xff));
+ }
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ }
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+
+
+## backwards-compatibility tests ##
+
+# test we can mount in an old version
+[cases.test_compat_backward_mount]
+if = 'LFS_VERSION == LFSP_VERSION'
+code = '''
+ // create the new version
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ // confirm the new mount works
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // now test the previous mount
+ struct lfsp_config cfgp;
+ memcpy(&cfgp, cfg, sizeof(cfgp));
+ lfsp_t lfsp;
+ lfsp_mount(&lfsp, &cfgp) => 0;
+ lfsp_unmount(&lfsp) => 0;
+'''
+
+# test we can read dirs in an old version
+[cases.test_compat_backward_read_dirs]
+defines.COUNT = 5
+if = 'LFS_VERSION == LFSP_VERSION'
+code = '''
+ // create the new version
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ // write COUNT dirs
+ lfs_mount(&lfs, cfg) => 0;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ lfs_mkdir(&lfs, name) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+
+ // mount the previous version
+ struct lfsp_config cfgp;
+ memcpy(&cfgp, cfg, sizeof(cfgp));
+ lfsp_t lfsp;
+ lfsp_mount(&lfsp, &cfgp) => 0;
+
+ // can we list the directories?
+ lfsp_dir_t dir;
+ lfsp_dir_open(&lfsp, &dir, "/") => 0;
+ struct lfsp_info info;
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ assert(strcmp(info.name, name) == 0);
+ }
+
+ lfsp_dir_read(&lfsp, &dir, &info) => 0;
+ lfsp_dir_close(&lfsp, &dir) => 0;
+
+ lfsp_unmount(&lfsp) => 0;
+'''
+
+# test we can read files in an old version
+[cases.test_compat_backward_read_files]
+defines.COUNT = 5
+defines.SIZE = [4, 32, 512, 8192]
+defines.CHUNK = 4
+if = 'LFS_VERSION == LFSP_VERSION'
+code = '''
+ // create the new version
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ // write COUNT files
+ lfs_mount(&lfs, cfg) => 0;
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfs_file_t file;
+ char name[8];
+ sprintf(name, "file%03d", i);
+ lfs_file_open(&lfs, &file, name,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+ for (lfs_size_t j = 0; j < SIZE; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ chunk[k] = TEST_PRNG(&prng) & 0xff;
+ }
+
+ lfs_file_write(&lfs, &file, chunk, CHUNK) => CHUNK;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+
+ // mount the previous version
+ struct lfsp_config cfgp;
+ memcpy(&cfgp, cfg, sizeof(cfgp));
+ lfsp_t lfsp;
+ lfsp_mount(&lfsp, &cfgp) => 0;
+
+ // can we list the files?
+ lfsp_dir_t dir;
+ lfsp_dir_open(&lfsp, &dir, "/") => 0;
+ struct lfsp_info info;
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_REG);
+ char name[8];
+ sprintf(name, "file%03d", i);
+ assert(strcmp(info.name, name) == 0);
+ assert(info.size == SIZE);
+ }
+
+ lfsp_dir_read(&lfsp, &dir, &info) => 0;
+ lfsp_dir_close(&lfsp, &dir) => 0;
+
+ // now can we read the files?
+ prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfsp_file_t file;
+ char name[8];
+ sprintf(name, "file%03d", i);
+ lfsp_file_open(&lfsp, &file, name, LFSP_O_RDONLY) => 0;
+ for (lfs_size_t j = 0; j < SIZE; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ lfsp_file_read(&lfsp, &file, chunk, CHUNK) => CHUNK;
+
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ assert(chunk[k] == (TEST_PRNG(&prng) & 0xff));
+ }
+ }
+ lfsp_file_close(&lfsp, &file) => 0;
+ }
+
+ lfsp_unmount(&lfsp) => 0;
+'''
+
+# test we can read files in dirs in an old version
+[cases.test_compat_backward_read_files_in_dirs]
+defines.COUNT = 5
+defines.SIZE = [4, 32, 512, 8192]
+defines.CHUNK = 4
+if = 'LFS_VERSION == LFSP_VERSION'
+code = '''
+ // create the new version
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ // write COUNT files+dirs
+ lfs_mount(&lfs, cfg) => 0;
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ char name[16];
+ sprintf(name, "dir%03d", i);
+ lfs_mkdir(&lfs, name) => 0;
+
+ lfs_file_t file;
+ sprintf(name, "dir%03d/file%03d", i, i);
+ lfs_file_open(&lfs, &file, name,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+ for (lfs_size_t j = 0; j < SIZE; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ chunk[k] = TEST_PRNG(&prng) & 0xff;
+ }
+
+ lfs_file_write(&lfs, &file, chunk, CHUNK) => CHUNK;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+
+ // mount the previous version
+ struct lfsp_config cfgp;
+ memcpy(&cfgp, cfg, sizeof(cfgp));
+ lfsp_t lfsp;
+ lfsp_mount(&lfsp, &cfgp) => 0;
+
+ // can we list the directories?
+ lfsp_dir_t dir;
+ lfsp_dir_open(&lfsp, &dir, "/") => 0;
+ struct lfsp_info info;
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ assert(strcmp(info.name, name) == 0);
+ }
+
+ lfsp_dir_read(&lfsp, &dir, &info) => 0;
+ lfsp_dir_close(&lfsp, &dir) => 0;
+
+ // can we list the files?
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ lfsp_dir_t dir;
+ lfsp_dir_open(&lfsp, &dir, name) => 0;
+ struct lfsp_info info;
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_REG);
+ sprintf(name, "file%03d", i);
+ assert(strcmp(info.name, name) == 0);
+ assert(info.size == SIZE);
+
+ lfsp_dir_read(&lfsp, &dir, &info) => 0;
+ lfsp_dir_close(&lfsp, &dir) => 0;
+ }
+
+ // now can we read the files?
+ prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfsp_file_t file;
+ char name[16];
+ sprintf(name, "dir%03d/file%03d", i, i);
+ lfsp_file_open(&lfsp, &file, name, LFSP_O_RDONLY) => 0;
+ for (lfs_size_t j = 0; j < SIZE; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ lfsp_file_read(&lfsp, &file, chunk, CHUNK) => CHUNK;
+
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ assert(chunk[k] == (TEST_PRNG(&prng) & 0xff));
+ }
+ }
+ lfsp_file_close(&lfsp, &file) => 0;
+ }
+
+ lfsp_unmount(&lfsp) => 0;
+'''
+
+# test we can write dirs in an old version
+[cases.test_compat_backward_write_dirs]
+defines.COUNT = 10
+if = 'LFS_VERSION == LFSP_VERSION'
+code = '''
+ // create the new version
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ // write COUNT/2 dirs
+ lfs_mount(&lfs, cfg) => 0;
+ for (lfs_size_t i = 0; i < COUNT/2; i++) {
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ lfs_mkdir(&lfs, name) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+
+ // mount the previous version
+ struct lfsp_config cfgp;
+ memcpy(&cfgp, cfg, sizeof(cfgp));
+ lfsp_t lfsp;
+ lfsp_mount(&lfsp, &cfgp) => 0;
+
+ // write another COUNT/2 dirs
+ for (lfs_size_t i = COUNT/2; i < COUNT; i++) {
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ lfsp_mkdir(&lfsp, name) => 0;
+ }
+
+ // can we list the directories?
+ lfsp_dir_t dir;
+ lfsp_dir_open(&lfsp, &dir, "/") => 0;
+ struct lfsp_info info;
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ assert(strcmp(info.name, name) == 0);
+ }
+
+ lfsp_dir_read(&lfsp, &dir, &info) => 0;
+ lfsp_dir_close(&lfsp, &dir) => 0;
+
+ lfsp_unmount(&lfsp) => 0;
+'''
+
+# test we can write files in an old version
+[cases.test_compat_backward_write_files]
+defines.COUNT = 5
+defines.SIZE = [4, 32, 512, 8192]
+defines.CHUNK = 2
+if = 'LFS_VERSION == LFSP_VERSION'
+code = '''
+ // create the new version
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ // write the first half of COUNT files
+ lfs_mount(&lfs, cfg) => 0;
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ // write half
+ lfs_file_t file;
+ char name[8];
+ sprintf(name, "file%03d", i);
+ lfs_file_open(&lfs, &file, name,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+ for (lfs_size_t j = 0; j < SIZE/2; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ chunk[k] = TEST_PRNG(&prng) & 0xff;
+ }
+
+ lfs_file_write(&lfs, &file, chunk, CHUNK) => CHUNK;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ // skip the other half but keep our prng reproducible
+ for (lfs_size_t j = SIZE/2; j < SIZE; j++) {
+ TEST_PRNG(&prng);
+ }
+ }
+ lfs_unmount(&lfs) => 0;
+
+
+ // mount the previous version
+ struct lfsp_config cfgp;
+ memcpy(&cfgp, cfg, sizeof(cfgp));
+ lfsp_t lfsp;
+ lfsp_mount(&lfsp, &cfgp) => 0;
+
+ // write the other half of COUNT files
+ prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ // skip half but keep our prng reproducible
+ for (lfs_size_t j = 0; j < SIZE/2; j++) {
+ TEST_PRNG(&prng);
+ }
+
+ // write the other half
+ lfsp_file_t file;
+ char name[8];
+ sprintf(name, "file%03d", i);
+ lfsp_file_open(&lfsp, &file, name, LFSP_O_WRONLY) => 0;
+ lfsp_file_seek(&lfsp, &file, SIZE/2, LFSP_SEEK_SET) => SIZE/2;
+
+ for (lfs_size_t j = SIZE/2; j < SIZE; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ chunk[k] = TEST_PRNG(&prng) & 0xff;
+ }
+
+ lfsp_file_write(&lfsp, &file, chunk, CHUNK) => CHUNK;
+ }
+ lfsp_file_close(&lfsp, &file) => 0;
+ }
+
+ // can we list the files?
+ lfsp_dir_t dir;
+ lfsp_dir_open(&lfsp, &dir, "/") => 0;
+ struct lfsp_info info;
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_REG);
+ char name[8];
+ sprintf(name, "file%03d", i);
+ assert(strcmp(info.name, name) == 0);
+ assert(info.size == SIZE);
+ }
+
+ lfsp_dir_read(&lfsp, &dir, &info) => 0;
+
+ // now can we read the files?
+ prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfsp_file_t file;
+ char name[8];
+ sprintf(name, "file%03d", i);
+ lfsp_file_open(&lfsp, &file, name, LFSP_O_RDONLY) => 0;
+ for (lfs_size_t j = 0; j < SIZE; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ lfsp_file_read(&lfsp, &file, chunk, CHUNK) => CHUNK;
+
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ assert(chunk[k] == (TEST_PRNG(&prng) & 0xff));
+ }
+ }
+ lfsp_file_close(&lfsp, &file) => 0;
+ }
+
+ lfsp_unmount(&lfsp) => 0;
+'''
+
+# test we can write files in dirs in an old version
+[cases.test_compat_backward_write_files_in_dirs]
+defines.COUNT = 5
+defines.SIZE = [4, 32, 512, 8192]
+defines.CHUNK = 2
+if = 'LFS_VERSION == LFSP_VERSION'
+code = '''
+ // create the new version
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ // write COUNT dirs, and the first half of each file
+ lfs_mount(&lfs, cfg) => 0;
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ char name[16];
+ sprintf(name, "dir%03d", i);
+ lfs_mkdir(&lfs, name) => 0;
+
+ // write half
+ lfs_file_t file;
+ sprintf(name, "dir%03d/file%03d", i, i);
+ lfs_file_open(&lfs, &file, name,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+ for (lfs_size_t j = 0; j < SIZE/2; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ chunk[k] = TEST_PRNG(&prng) & 0xff;
+ }
+
+ lfs_file_write(&lfs, &file, chunk, CHUNK) => CHUNK;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ // skip the other half but keep our prng reproducible
+ for (lfs_size_t j = SIZE/2; j < SIZE; j++) {
+ TEST_PRNG(&prng);
+ }
+ }
+ lfs_unmount(&lfs) => 0;
+
+
+ // mount the previous version
+ struct lfsp_config cfgp;
+ memcpy(&cfgp, cfg, sizeof(cfgp));
+ lfsp_t lfsp;
+ lfsp_mount(&lfsp, &cfgp) => 0;
+
+ // write the other half of COUNT files
+ prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ // skip half but keep our prng reproducible
+ for (lfs_size_t j = 0; j < SIZE/2; j++) {
+ TEST_PRNG(&prng);
+ }
+
+ // write the other half
+ lfsp_file_t file;
+ char name[16];
+ sprintf(name, "dir%03d/file%03d", i, i);
+ lfsp_file_open(&lfsp, &file, name, LFSP_O_WRONLY) => 0;
+ lfsp_file_seek(&lfsp, &file, SIZE/2, LFSP_SEEK_SET) => SIZE/2;
+
+ for (lfs_size_t j = SIZE/2; j < SIZE; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ chunk[k] = TEST_PRNG(&prng) & 0xff;
+ }
+
+ lfsp_file_write(&lfsp, &file, chunk, CHUNK) => CHUNK;
+ }
+ lfsp_file_close(&lfsp, &file) => 0;
+ }
+
+ // can we list the directories?
+ lfsp_dir_t dir;
+ lfsp_dir_open(&lfsp, &dir, "/") => 0;
+ struct lfsp_info info;
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ assert(strcmp(info.name, name) == 0);
+ }
+
+ lfsp_dir_read(&lfsp, &dir, &info) => 0;
+ lfsp_dir_close(&lfsp, &dir) => 0;
+
+ // can we list the files?
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ char name[8];
+ sprintf(name, "dir%03d", i);
+ lfsp_dir_t dir;
+ lfsp_dir_open(&lfsp, &dir, name) => 0;
+ struct lfsp_info info;
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+
+ lfsp_dir_read(&lfsp, &dir, &info) => 1;
+ assert(info.type == LFSP_TYPE_REG);
+ sprintf(name, "file%03d", i);
+ assert(strcmp(info.name, name) == 0);
+ assert(info.size == SIZE);
+
+ lfsp_dir_read(&lfsp, &dir, &info) => 0;
+ lfsp_dir_close(&lfsp, &dir) => 0;
+ }
+
+ // now can we read the files?
+ prng = 42;
+ for (lfs_size_t i = 0; i < COUNT; i++) {
+ lfsp_file_t file;
+ char name[16];
+ sprintf(name, "dir%03d/file%03d", i, i);
+ lfsp_file_open(&lfsp, &file, name, LFSP_O_RDONLY) => 0;
+ for (lfs_size_t j = 0; j < SIZE; j += CHUNK) {
+ uint8_t chunk[CHUNK];
+ lfsp_file_read(&lfsp, &file, chunk, CHUNK) => CHUNK;
+
+ for (lfs_size_t k = 0; k < CHUNK; k++) {
+ assert(chunk[k] == (TEST_PRNG(&prng) & 0xff));
+ }
+ }
+ lfsp_file_close(&lfsp, &file) => 0;
+ }
+
+ lfsp_unmount(&lfsp) => 0;
+'''
+
+
+
+## incompatibility tests ##
+
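+# note: the on-disk version packs the major version in the upper 16 bits
+# and the minor version in the lower 16 bits, so these tests bump versions
+# by adding 0x00010000 (major) or 0x00000001 (minor) to LFS_DISK_VERSION
+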
+# test that we fail to mount after a major version bump
+[cases.test_compat_major_incompat]
+in = 'lfs.c'
+code = '''
+ // create a superblock
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ // bump the major version
+ //
+ // note we're messing around with internals to do this! this
+ // is not a user API
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_mdir_t mdir;
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ lfs_superblock_t superblock = {
+ .version = LFS_DISK_VERSION + 0x00010000,
+ .block_size = lfs.cfg->block_size,
+ .block_count = lfs.cfg->block_count,
+ .name_max = lfs.name_max,
+ .file_max = lfs.file_max,
+ .attr_max = lfs.attr_max,
+ };
+ lfs_superblock_tole32(&superblock);
+ lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+ &superblock})) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // mount should now fail
+ lfs_mount(&lfs, cfg) => LFS_ERR_INVAL;
+'''
+
+# test that we fail to mount after a minor version bump
+[cases.test_compat_minor_incompat]
+in = 'lfs.c'
+code = '''
+ // create a superblock
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ // bump the minor version
+ //
+ // note we're messing around with internals to do this! this
+ // is not a user API
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_mdir_t mdir;
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ lfs_superblock_t superblock = {
+ .version = LFS_DISK_VERSION + 0x00000001,
+ .block_size = lfs.cfg->block_size,
+ .block_count = lfs.cfg->block_count,
+ .name_max = lfs.name_max,
+ .file_max = lfs.file_max,
+ .attr_max = lfs.attr_max,
+ };
+ lfs_superblock_tole32(&superblock);
+ lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+ &superblock})) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // mount should now fail
+ lfs_mount(&lfs, cfg) => LFS_ERR_INVAL;
+'''
+
+# test that we correctly bump the minor version
+[cases.test_compat_minor_bump]
+in = 'lfs.c'
+if = 'LFS_DISK_VERSION_MINOR > 0'
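+# (a nonzero minor version is required here, otherwise there is no older
+# minor version of the same major version to write back)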
+code = '''
+ // create a superblock
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, "test",
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+ lfs_file_write(&lfs, &file, "testtest", 8) => 8;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // write an old minor version
+ //
+ // note we're messing around with internals to do this! this
+ // is not a user API
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_mdir_t mdir;
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ lfs_superblock_t superblock = {
+ .version = LFS_DISK_VERSION - 0x00000001,
+ .block_size = lfs.cfg->block_size,
+ .block_count = lfs.cfg->block_count,
+ .name_max = lfs.name_max,
+ .file_max = lfs.file_max,
+ .attr_max = lfs.attr_max,
+ };
+ lfs_superblock_tole32(&superblock);
+ lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+ &superblock})) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // mount should still work
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_open(&lfs, &file, "test", LFS_O_RDONLY) => 0;
+ uint8_t buffer[8];
+ lfs_file_read(&lfs, &file, buffer, 8) => 8;
+ assert(memcmp(buffer, "testtest", 8) == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // if we write, we need to bump the minor version
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_open(&lfs, &file, "test", LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_write(&lfs, &file, "teeeeest", 8) => 8;
+ lfs_file_close(&lfs, &file) => 0;
+
+ // minor version should have changed
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ lfs_dir_get(&lfs, &mdir, LFS_MKTAG(0x7ff, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+ &superblock)
+ => LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock));
+ lfs_superblock_fromle32(&superblock);
+ assert(((superblock.version >> 16) & 0xffff) == LFS_DISK_VERSION_MAJOR);
+ assert(((superblock.version >> 0) & 0xffff) == LFS_DISK_VERSION_MINOR);
+ lfs_unmount(&lfs) => 0;
+
+ // and of course mount should still work
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_open(&lfs, &file, "test", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 8) => 8;
+ assert(memcmp(buffer, "teeeeest", 8) == 0);
+ lfs_file_close(&lfs, &file) => 0;
+
+ // minor version should have persisted
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ lfs_dir_get(&lfs, &mdir, LFS_MKTAG(0x7ff, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+ &superblock)
+ => LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock));
+ lfs_superblock_fromle32(&superblock);
+ assert(((superblock.version >> 16) & 0xffff) == LFS_DISK_VERSION_MAJOR);
+ assert(((superblock.version >> 0) & 0xffff) == LFS_DISK_VERSION_MINOR);
+ lfs_unmount(&lfs) => 0;
+'''
diff --git a/tests/test_dirs.toml b/tests/test_dirs.toml
index 270f4f8e..4262a1aa 100644
--- a/tests/test_dirs.toml
+++ b/tests/test_dirs.toml
@@ -1,8 +1,11 @@
-[[case]] # root
+[cases.test_dirs_root]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@@ -14,20 +17,25 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # many directory creation
-define.N = 'range(0, 100, 3)'
+[cases.test_dirs_many_creation]
+defines.N = 'range(3, 100, 3)'
+if = 'N < BLOCK_COUNT/2'
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "dir%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@@ -35,6 +43,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "dir%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -45,20 +54,25 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # many directory removal
-define.N = 'range(3, 100, 11)'
+[cases.test_dirs_many_removal]
+defines.N = 'range(3, 100, 11)'
+if = 'N < BLOCK_COUNT/2'
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "removeme%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@@ -66,6 +80,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "removeme%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -75,14 +90,15 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "removeme%03d", i);
lfs_remove(&lfs, path) => 0;
}
lfs_unmount(&lfs);
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -95,20 +111,25 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # many directory rename
-define.N = 'range(3, 100, 11)'
+[cases.test_dirs_many_rename]
+defines.N = 'range(3, 100, 11)'
+if = 'N < BLOCK_COUNT/2'
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "test%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@@ -116,6 +137,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "test%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -125,7 +147,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
char oldpath[128];
char newpath[128];
@@ -135,7 +157,7 @@ code = '''
}
lfs_unmount(&lfs);
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -144,6 +166,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "tedd%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -154,29 +177,35 @@ code = '''
lfs_unmount(&lfs);
'''
-[[case]] # reentrant many directory creation/rename/removal
-define.N = [5, 11]
+[cases.test_dirs_many_reentrant]
+defines.N = [5, 11]
+if = 'BLOCK_COUNT >= 4*N'
reentrant = true
code = '''
- err = lfs_mount(&lfs, &cfg);
+ lfs_t lfs;
+ int err = lfs_mount(&lfs, cfg);
if (err) {
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
}
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "hi%03d", i);
err = lfs_mkdir(&lfs, path);
assert(err == 0 || err == LFS_ERR_EXIST);
}
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "hello%03d", i);
err = lfs_remove(&lfs, path);
assert(err == 0 || err == LFS_ERR_NOENT);
}
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@@ -184,6 +213,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "hi%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -209,6 +239,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "hello%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -218,6 +249,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "hello%03d", i);
lfs_remove(&lfs, path) => 0;
}
@@ -234,22 +266,28 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # file creation
-define.N = 'range(3, 100, 11)'
+[cases.test_dirs_file_creation]
+defines.N = 'range(3, 100, 11)'
+if = 'N < BLOCK_COUNT/2'
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "file%03d", i);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@@ -257,6 +295,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "file%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
@@ -267,22 +306,28 @@ code = '''
lfs_unmount(&lfs);
'''
-[[case]] # file removal
-define.N = 'range(0, 100, 3)'
+[cases.test_dirs_file_removal]
+defines.N = 'range(3, 100, 11)'
+if = 'N < BLOCK_COUNT/2'
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "removeme%03d", i);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@@ -290,6 +335,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "removeme%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
@@ -299,14 +345,15 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "removeme%03d", i);
lfs_remove(&lfs, path) => 0;
}
lfs_unmount(&lfs);
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -319,22 +366,28 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # file rename
-define.N = 'range(0, 100, 3)'
+[cases.test_dirs_file_rename]
+defines.N = 'range(3, 100, 11)'
+if = 'N < BLOCK_COUNT/2'
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "test%03d", i);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@@ -342,6 +395,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "test%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
@@ -351,7 +405,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
char oldpath[128];
char newpath[128];
@@ -361,7 +415,7 @@ code = '''
}
lfs_unmount(&lfs);
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -370,6 +424,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "tedd%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
@@ -380,29 +435,36 @@ code = '''
lfs_unmount(&lfs);
'''
-[[case]] # reentrant file creation/rename/removal
-define.N = [5, 25]
+[cases.test_dirs_file_reentrant]
+defines.N = [5, 25]
+if = 'N < BLOCK_COUNT/2'
reentrant = true
code = '''
- err = lfs_mount(&lfs, &cfg);
+ lfs_t lfs;
+ int err = lfs_mount(&lfs, cfg);
if (err) {
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
}
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "hi%03d", i);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_close(&lfs, &file) => 0;
}
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "hello%03d", i);
err = lfs_remove(&lfs, path);
assert(err == 0 || err == LFS_ERR_NOENT);
}
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@@ -410,6 +472,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "hi%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
@@ -435,6 +498,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "hello%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
@@ -444,6 +508,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "hello%03d", i);
lfs_remove(&lfs, path) => 0;
}
@@ -460,24 +525,28 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # nested directories
+[cases.test_dirs_nested]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "potato") => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "burito",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "potato/baked") => 0;
lfs_mkdir(&lfs, "potato/sweet") => 0;
lfs_mkdir(&lfs, "potato/fried") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "potato") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
info.type => LFS_TYPE_DIR;
@@ -498,21 +567,21 @@ code = '''
lfs_unmount(&lfs) => 0;
// try removing?
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_remove(&lfs, "potato") => LFS_ERR_NOTEMPTY;
lfs_unmount(&lfs) => 0;
// try renaming?
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "potato", "coldpotato") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "coldpotato", "warmpotato") => 0;
lfs_rename(&lfs, "warmpotato", "hotpotato") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_remove(&lfs, "potato") => LFS_ERR_NOENT;
lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOENT;
lfs_remove(&lfs, "warmpotato") => LFS_ERR_NOENT;
@@ -520,7 +589,7 @@ code = '''
lfs_unmount(&lfs) => 0;
// try cross-directory renaming
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "coldpotato") => 0;
lfs_rename(&lfs, "hotpotato/baked", "coldpotato/baked") => 0;
lfs_rename(&lfs, "coldpotato", "hotpotato") => LFS_ERR_NOTEMPTY;
@@ -536,7 +605,7 @@ code = '''
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "hotpotato") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -558,7 +627,7 @@ code = '''
lfs_unmount(&lfs) => 0;
// final remove
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
lfs_remove(&lfs, "hotpotato/baked") => 0;
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
@@ -568,7 +637,7 @@ code = '''
lfs_remove(&lfs, "hotpotato") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -584,17 +653,22 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # recursive remove
-define.N = [10, 100]
+[cases.test_dirs_recursive_remove]
+defines.N = [10, 100]
+if = 'N < BLOCK_COUNT/2'
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "prickly-pear") => 0;
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "prickly-pear/cactus%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "prickly-pear") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@@ -602,6 +676,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "cactus%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -611,7 +686,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOTEMPTY;
lfs_dir_open(&lfs, &dir, "prickly-pear") => 0;
@@ -622,6 +697,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
+ char path[1024];
sprintf(path, "cactus%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -636,22 +712,24 @@ code = '''
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOENT;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOENT;
lfs_unmount(&lfs) => 0;
'''
-[[case]] # other error cases
+[cases.test_dirs_other_errors]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "potato") => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "burito",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "potato") => LFS_ERR_EXIST;
lfs_mkdir(&lfs, "burito") => LFS_ERR_EXIST;
@@ -659,6 +737,7 @@ code = '''
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => LFS_ERR_EXIST;
lfs_file_open(&lfs, &file, "potato",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => LFS_ERR_EXIST;
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "tomato") => LFS_ERR_NOENT;
lfs_dir_open(&lfs, &dir, "burito") => LFS_ERR_NOTDIR;
lfs_file_open(&lfs, &file, "tomato", LFS_O_RDONLY) => LFS_ERR_NOENT;
@@ -678,6 +757,7 @@ code = '''
// check that errors did not corrupt directory
lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@@ -696,7 +776,7 @@ code = '''
lfs_unmount(&lfs) => 0;
// or on disk
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -715,21 +795,27 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # directory seek
-define.COUNT = [4, 128, 132]
+[cases.test_dirs_seek]
+defines.COUNT = [4, 128, 132]
+if = 'COUNT < BLOCK_COUNT/2'
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
for (int i = 0; i < COUNT; i++) {
+ char path[1024];
sprintf(path, "hello/kitty%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
- for (int j = 2; j < COUNT; j++) {
- lfs_mount(&lfs, &cfg) => 0;
+ // try seeking to each dir entry
+ for (int j = 0; j < COUNT; j++) {
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "hello") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
@@ -737,24 +823,25 @@ code = '''
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
- lfs_soff_t pos;
for (int i = 0; i < j; i++) {
+ char path[1024];
sprintf(path, "kitty%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
- pos = lfs_dir_tell(&lfs, &dir);
- assert(pos >= 0);
}
+ lfs_soff_t pos = lfs_dir_tell(&lfs, &dir);
+ assert(pos >= 0);
lfs_dir_seek(&lfs, &dir, pos) => 0;
+ char path[1024];
sprintf(path, "kitty%03d", j);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_rewind(&lfs, &dir) => 0;
- sprintf(path, "kitty%03d", 0);
+ sprintf(path, "kitty%03u", 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
@@ -774,22 +861,73 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
}
+
+ // try seeking to end of dir
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
+ lfs_dir_open(&lfs, &dir, "hello") => 0;
+ struct lfs_info info;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ for (int i = 0; i < COUNT; i++) {
+ char path[1024];
+ sprintf(path, "kitty%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, path) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ }
+ lfs_soff_t pos = lfs_dir_tell(&lfs, &dir);
+ assert(pos >= 0);
+
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+
+ lfs_dir_seek(&lfs, &dir, pos) => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+
+ lfs_dir_rewind(&lfs, &dir) => 0;
+ char path[1024];
+ sprintf(path, "kitty%03d", 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, path) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ lfs_dir_seek(&lfs, &dir, pos) => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
'''
-[[case]] # root seek
-define.COUNT = [4, 128, 132]
+[cases.test_dirs_root_seek]
+defines.COUNT = [4, 128, 132]
+if = 'COUNT < BLOCK_COUNT/2'
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < COUNT; i++) {
+ char path[1024];
sprintf(path, "hi%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
- for (int j = 2; j < COUNT; j++) {
- lfs_mount(&lfs, &cfg) => 0;
+ for (int j = 0; j < COUNT; j++) {
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
@@ -797,24 +935,25 @@ code = '''
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
- lfs_soff_t pos;
for (int i = 0; i < j; i++) {
+ char path[1024];
sprintf(path, "hi%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
- pos = lfs_dir_tell(&lfs, &dir);
- assert(pos >= 0);
}
+ lfs_soff_t pos = lfs_dir_tell(&lfs, &dir);
+ assert(pos >= 0);
lfs_dir_seek(&lfs, &dir, pos) => 0;
+ char path[1024];
sprintf(path, "hi%03d", j);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_rewind(&lfs, &dir) => 0;
- sprintf(path, "hi%03d", 0);
+ sprintf(path, "hi%03u", 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
@@ -834,5 +973,51 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
}
+
+ // try seeking to end of dir
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ for (int i = 0; i < COUNT; i++) {
+ char path[1024];
+ sprintf(path, "hi%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, path) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ }
+ lfs_soff_t pos = lfs_dir_tell(&lfs, &dir);
+ assert(pos >= 0);
+
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+
+ lfs_dir_seek(&lfs, &dir, pos) => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+
+ lfs_dir_rewind(&lfs, &dir) => 0;
+ char path[1024];
+ sprintf(path, "hi%03d", 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, path) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ lfs_dir_seek(&lfs, &dir, pos) => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
'''
diff --git a/tests/test_entries.toml b/tests/test_entries.toml
index 81e175f5..7aa551e0 100644
--- a/tests/test_entries.toml
+++ b/tests/test_entries.toml
@@ -2,19 +2,23 @@
# Note that these tests are intended for 512 byte inline sizes. They should
# still pass with other inline sizes but wouldn't be testing anything.
-define.LFS_CACHE_SIZE = 512
-if = 'LFS_CACHE_SIZE % LFS_PROG_SIZE == 0 && LFS_CACHE_SIZE == 512'
+defines.CACHE_SIZE = 512
+if = 'CACHE_SIZE % PROG_SIZE == 0 && CACHE_SIZE == 512'
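+# note: inline entries have to fit in the cache, so pinning CACHE_SIZE to
+# 512 is presumably what pins the 512 byte inline size mentioned above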
-[[case]] # entry grow test
+[cases.test_entries_grow]
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// write hi0 20
+ char path[1024];
+ lfs_size_t size;
sprintf(path, "hi0"); size = 20;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
@@ -94,16 +98,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # entry shrink test
+[cases.test_entries_shrink]
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// write hi0 20
+ char path[1024];
+ lfs_size_t size;
sprintf(path, "hi0"); size = 20;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
@@ -183,16 +191,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # entry spill test
+[cases.test_entries_spill]
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// write hi0 200
+ char path[1024];
+ lfs_size_t size;
sprintf(path, "hi0"); size = 200;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
@@ -256,16 +268,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # entry push spill test
+[cases.test_entries_push_spill]
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// write hi0 200
+ char path[1024];
+ lfs_size_t size;
sprintf(path, "hi0"); size = 200;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
@@ -345,16 +361,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # entry push spill two test
+[cases.test_entries_push_spill_two]
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// write hi0 200
+ char path[1024];
+ lfs_size_t size;
sprintf(path, "hi0"); size = 200;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
@@ -449,16 +469,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # entry drop test
+[cases.test_entries_drop]
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// write hi0 200
+ char path[1024];
+ lfs_size_t size;
sprintf(path, "hi0"); size = 200;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
@@ -491,6 +515,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_remove(&lfs, "hi1") => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "hi1", &info) => LFS_ERR_NOENT;
// read hi0 200
sprintf(path, "hi0"); size = 200;
@@ -547,15 +572,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # create too big
+[cases.test_entries_create_too_big]
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ char path[1024];
memset(path, 'm', 200);
path[200] = '\0';
- size = 400;
+ lfs_size_t size = 400;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
uint8_t wbuffer[1024];
@@ -572,15 +600,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # resize too big
+[cases.test_entries_resize_too_big]
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ char path[1024];
memset(path, 'm', 200);
path[200] = '\0';
- size = 40;
+ lfs_size_t size = 40;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
uint8_t wbuffer[1024];
diff --git a/tests/test_evil.toml b/tests/test_evil.toml
index 920d3a0e..4acd5ef0 100644
--- a/tests/test_evil.toml
+++ b/tests/test_evil.toml
@@ -3,16 +3,17 @@
# invalid pointer tests (outside of block_count)
-[[case]] # invalid tail-pointer test
-define.TAIL_TYPE = ['LFS_TYPE_HARDTAIL', 'LFS_TYPE_SOFTTAIL']
-define.INVALSET = [0x3, 0x1, 0x2]
+[cases.test_evil_invalid_tail_pointer]
+defines.TAIL_TYPE = ['LFS_TYPE_HARDTAIL', 'LFS_TYPE_SOFTTAIL']
+defines.INVALSET = [0x3, 0x1, 0x2]
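+# INVALSET is assumed (the consuming code is not shown in this hunk) to be
+# a bitmask selecting which blocks of the metadata pair get the invalid
+# pointer: 0x1 the first block, 0x2 the second, 0x3 both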
in = "lfs.c"
code = '''
// create littlefs
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
// change tail-pointer to invalid pointers
- lfs_init(&lfs, &cfg) => 0;
+ lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
@@ -23,25 +24,27 @@ code = '''
lfs_deinit(&lfs) => 0;
// test that mount fails gracefully
- lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
+ lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
'''
-[[case]] # invalid dir pointer test
-define.INVALSET = [0x3, 0x1, 0x2]
+[cases.test_evil_invalid_dir_pointer]
+defines.INVALSET = [0x3, 0x1, 0x2]
in = "lfs.c"
code = '''
// create littlefs
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
// make a dir
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "dir_here") => 0;
lfs_unmount(&lfs) => 0;
// change the dir pointer to be invalid
- lfs_init(&lfs, &cfg) => 0;
+ lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
// make sure id 1 == our directory
+ uint8_t buffer[1024];
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x700, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("dir_here")), buffer)
@@ -57,14 +60,17 @@ code = '''
// test that accessing our bad dir fails, note there's a number
// of ways to access the dir, some can fail, but some don't
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "dir_here", &info) => 0;
assert(strcmp(info.name, "dir_here") == 0);
assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "dir_here") => LFS_ERR_CORRUPT;
lfs_stat(&lfs, "dir_here/file_here", &info) => LFS_ERR_CORRUPT;
lfs_dir_open(&lfs, &dir, "dir_here/dir_here") => LFS_ERR_CORRUPT;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "dir_here/file_here",
LFS_O_RDONLY) => LFS_ERR_CORRUPT;
lfs_file_open(&lfs, &file, "dir_here/file_here",
@@ -72,24 +78,27 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # invalid file pointer test
+[cases.test_evil_invalid_file_pointer]
in = "lfs.c"
-define.SIZE = [10, 1000, 100000] # faked file size
+defines.SIZE = [10, 1000, 100000] # faked file size
code = '''
// create littlefs
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
// make a file
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "file_here",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// change the file pointer to be invalid
- lfs_init(&lfs, &cfg) => 0;
+ lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
// make sure id 1 == our file
+ uint8_t buffer[1024];
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x700, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("file_here")), buffer)
@@ -103,7 +112,8 @@ code = '''
// test that accessing our bad file fails, note there's a number
// of ways to access the dir, some can fail, but some don't
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "file_here", &info) => 0;
assert(strcmp(info.name, "file_here") == 0);
assert(info.type == LFS_TYPE_REG);
@@ -114,20 +124,22 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
 // any allocs that traverse CTZ must unfortunately fail
- if (SIZE > 2*LFS_BLOCK_SIZE) {
+ if (SIZE > 2*BLOCK_SIZE) {
lfs_mkdir(&lfs, "dir_here") => LFS_ERR_CORRUPT;
}
lfs_unmount(&lfs) => 0;
'''
-[[case]] # invalid pointer in CTZ skip-list test
-define.SIZE = ['2*LFS_BLOCK_SIZE', '3*LFS_BLOCK_SIZE', '4*LFS_BLOCK_SIZE']
+[cases.test_evil_invalid_ctz_pointer] # invalid pointer in CTZ skip-list test
+defines.SIZE = ['2*BLOCK_SIZE', '3*BLOCK_SIZE', '4*BLOCK_SIZE']
in = "lfs.c"
code = '''
// create littlefs
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
// make a file
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "file_here",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (int i = 0; i < SIZE; i++) {
@@ -137,10 +149,11 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// change pointer in CTZ skip-list to be invalid
- lfs_init(&lfs, &cfg) => 0;
+ lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
// make sure id 1 == our file and get our CTZ structure
+ uint8_t buffer[4*BLOCK_SIZE];
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x700, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("file_here")), buffer)
@@ -153,18 +166,19 @@ code = '''
=> LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz));
lfs_ctz_fromle32(&ctz);
// rewrite block to contain bad pointer
- uint8_t bbuffer[LFS_BLOCK_SIZE];
- cfg.read(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ uint8_t bbuffer[BLOCK_SIZE];
+ cfg->read(cfg, ctz.head, 0, bbuffer, BLOCK_SIZE) => 0;
uint32_t bad = lfs_tole32(0xcccccccc);
memcpy(&bbuffer[0], &bad, sizeof(bad));
memcpy(&bbuffer[4], &bad, sizeof(bad));
- cfg.erase(&cfg, ctz.head) => 0;
- cfg.prog(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ cfg->erase(cfg, ctz.head) => 0;
+ cfg->prog(cfg, ctz.head, 0, bbuffer, BLOCK_SIZE) => 0;
lfs_deinit(&lfs) => 0;
// test that accessing our bad file fails, note there's a number
// of ways to access the dir, some can fail, but some don't
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "file_here", &info) => 0;
assert(strcmp(info.name, "file_here") == 0);
assert(info.type == LFS_TYPE_REG);
@@ -175,22 +189,23 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
 // any allocs that traverse CTZ must unfortunately fail
- if (SIZE > 2*LFS_BLOCK_SIZE) {
+ if (SIZE > 2*BLOCK_SIZE) {
lfs_mkdir(&lfs, "dir_here") => LFS_ERR_CORRUPT;
}
lfs_unmount(&lfs) => 0;
'''
-[[case]] # invalid gstate pointer
-define.INVALSET = [0x3, 0x1, 0x2]
+[cases.test_evil_invalid_gstate_pointer]
+defines.INVALSET = [0x3, 0x1, 0x2]
in = "lfs.c"
code = '''
// create littlefs
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
// create an invalid gstate
- lfs_init(&lfs, &cfg) => 0;
+ lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_fs_prepmove(&lfs, 1, (lfs_block_t [2]){
@@ -202,21 +217,22 @@ code = '''
// test that mount fails gracefully
// mount may not fail, but our first alloc should fail when
// we try to fix the gstate
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "should_fail") => LFS_ERR_CORRUPT;
lfs_unmount(&lfs) => 0;
'''
# cycle detection/recovery tests
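+# metadata pairs are chained into a threaded linked list that mount
+# traverses, so a loop in this list must be detected and reported as
+# LFS_ERR_CORRUPT instead of looping forever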
-[[case]] # metadata-pair threaded-list loop test
+[cases.test_evil_mdir_loop] # metadata-pair threaded-list loop test
in = "lfs.c"
code = '''
// create littlefs
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
// change tail-pointer to point to ourself
- lfs_init(&lfs, &cfg) => 0;
+ lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
@@ -225,20 +241,21 @@ code = '''
lfs_deinit(&lfs) => 0;
// test that mount fails gracefully
- lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
+ lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
'''
-[[case]] # metadata-pair threaded-list 2-length loop test
+[cases.test_evil_mdir_loop2] # metadata-pair threaded-list 2-length loop test
in = "lfs.c"
code = '''
// create littlefs with child dir
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;
// find child
- lfs_init(&lfs, &cfg) => 0;
+ lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_block_t pair[2];
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
@@ -255,20 +272,21 @@ code = '''
lfs_deinit(&lfs) => 0;
// test that mount fails gracefully
- lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
+ lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
'''
-[[case]] # metadata-pair threaded-list 1-length child loop test
+[cases.test_evil_mdir_loop_child] # metadata-pair threaded-list 1-length child loop test
in = "lfs.c"
code = '''
// create littlefs with child dir
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;
// find child
- lfs_init(&lfs, &cfg) => 0;
+ lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_block_t pair[2];
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
@@ -284,5 +302,5 @@ code = '''
lfs_deinit(&lfs) => 0;
// test that mount fails gracefully
- lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
+ lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
'''
diff --git a/tests/test_exhaustion.toml b/tests/test_exhaustion.toml
index 569611c5..2cf6aed1 100644
--- a/tests/test_exhaustion.toml
+++ b/tests/test_exhaustion.toml
@@ -1,46 +1,50 @@
-[[case]] # test running a filesystem to exhaustion
-define.LFS_ERASE_CYCLES = 10
-define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
-define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
-define.LFS_BADBLOCK_BEHAVIOR = [
- 'LFS_TESTBD_BADBLOCK_PROGERROR',
- 'LFS_TESTBD_BADBLOCK_ERASEERROR',
- 'LFS_TESTBD_BADBLOCK_READERROR',
- 'LFS_TESTBD_BADBLOCK_PROGNOOP',
- 'LFS_TESTBD_BADBLOCK_ERASENOOP',
+# test running a filesystem to exhaustion
+[cases.test_exhaustion_normal]
+defines.ERASE_CYCLES = 10
+defines.BLOCK_COUNT = 256 # small bd so test runs faster
+defines.BLOCK_CYCLES = 'ERASE_CYCLES / 2'
+defines.BADBLOCK_BEHAVIOR = [
+ 'LFS_EMUBD_BADBLOCK_PROGERROR',
+ 'LFS_EMUBD_BADBLOCK_ERASEERROR',
+ 'LFS_EMUBD_BADBLOCK_READERROR',
+ 'LFS_EMUBD_BADBLOCK_PROGNOOP',
+ 'LFS_EMUBD_BADBLOCK_ERASENOOP',
]
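+# (each behavior emulates a different failure mode: blocks that report an
+# error on prog/erase/read, or that silently drop progs/erases)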
-define.FILES = 10
+defines.FILES = 10
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "roadrunner") => 0;
lfs_unmount(&lfs) => 0;
uint32_t cycle = 0;
while (true) {
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
 // choose name, roughly random seed, and random 2^n size
+ char path[1024];
sprintf(path, "roadrunner/test%d", i);
- srand(cycle * i);
- size = 1 << ((rand() % 10)+2);
+ uint32_t prng = cycle * i;
+ lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (lfs_size_t j = 0; j < size; j++) {
- char c = 'a' + (rand() % 26);
+ char c = 'a' + (TEST_PRNG(&prng) % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
- err = lfs_file_close(&lfs, &file);
+ int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
- err = lfs_file_close(&lfs, &file);
+ int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
@@ -50,13 +54,15 @@ code = '''
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
+ char path[1024];
sprintf(path, "roadrunner/test%d", i);
- srand(cycle * i);
- size = 1 << ((rand() % 10)+2);
+ uint32_t prng = cycle * i;
+ lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
- char c = 'a' + (rand() % 26);
+ char c = 'a' + (TEST_PRNG(&prng) % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
@@ -71,10 +77,12 @@ code = '''
exhausted:
// should still be readable
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
+ char path[1024];
sprintf(path, "roadrunner/test%d", i);
+ struct lfs_info info;
lfs_stat(&lfs, path, &info) => 0;
}
lfs_unmount(&lfs) => 0;
@@ -82,47 +90,51 @@ exhausted:
LFS_WARN("completed %d cycles", cycle);
'''
-[[case]] # test running a filesystem to exhaustion
- # which also requires expanding superblocks
-define.LFS_ERASE_CYCLES = 10
-define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
-define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
-define.LFS_BADBLOCK_BEHAVIOR = [
- 'LFS_TESTBD_BADBLOCK_PROGERROR',
- 'LFS_TESTBD_BADBLOCK_ERASEERROR',
- 'LFS_TESTBD_BADBLOCK_READERROR',
- 'LFS_TESTBD_BADBLOCK_PROGNOOP',
- 'LFS_TESTBD_BADBLOCK_ERASENOOP',
+# test running a filesystem to exhaustion
+# which also requires expanding superblocks
+[cases.test_exhaustion_superblocks]
+defines.ERASE_CYCLES = 10
+defines.BLOCK_COUNT = 256 # small bd so test runs faster
+defines.BLOCK_CYCLES = 'ERASE_CYCLES / 2'
+defines.BADBLOCK_BEHAVIOR = [
+ 'LFS_EMUBD_BADBLOCK_PROGERROR',
+ 'LFS_EMUBD_BADBLOCK_ERASEERROR',
+ 'LFS_EMUBD_BADBLOCK_READERROR',
+ 'LFS_EMUBD_BADBLOCK_PROGNOOP',
+ 'LFS_EMUBD_BADBLOCK_ERASENOOP',
]
-define.FILES = 10
+defines.FILES = 10
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
uint32_t cycle = 0;
while (true) {
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
 // choose name, roughly random seed, and random 2^n size
+ char path[1024];
sprintf(path, "test%d", i);
- srand(cycle * i);
- size = 1 << ((rand() % 10)+2);
+ uint32_t prng = cycle * i;
+ lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (lfs_size_t j = 0; j < size; j++) {
- char c = 'a' + (rand() % 26);
+ char c = 'a' + (TEST_PRNG(&prng) % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
- err = lfs_file_close(&lfs, &file);
+ int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
- err = lfs_file_close(&lfs, &file);
+ int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
@@ -132,13 +144,15 @@ code = '''
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
+ char path[1024];
sprintf(path, "test%d", i);
- srand(cycle * i);
- size = 1 << ((rand() % 10)+2);
+ uint32_t prng = cycle * i;
+ lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
- char c = 'a' + (rand() % 26);
+ char c = 'a' + (TEST_PRNG(&prng) % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
@@ -153,9 +167,11 @@ code = '''
exhausted:
// should still be readable
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
+ char path[1024];
+ struct lfs_info info;
sprintf(path, "test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
@@ -169,51 +185,55 @@ exhausted:
# into increasing the block device's lifetime. This is something we can actually
# check for.
-[[case]] # wear-level test running a filesystem to exhaustion
-define.LFS_ERASE_CYCLES = 20
-define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
-define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
-define.FILES = 10
+# wear-level test running a filesystem to exhaustion
+[cases.test_exhaustion_wear_leveling]
+defines.ERASE_CYCLES = 20
+defines.BLOCK_COUNT = 256 # small bd so test runs faster
+defines.BLOCK_CYCLES = 'ERASE_CYCLES / 2'
+defines.FILES = 10
code = '''
uint32_t run_cycles[2];
- const uint32_t run_block_count[2] = {LFS_BLOCK_COUNT/2, LFS_BLOCK_COUNT};
+ const uint32_t run_block_count[2] = {BLOCK_COUNT/2, BLOCK_COUNT};
for (int run = 0; run < 2; run++) {
- for (lfs_block_t b = 0; b < LFS_BLOCK_COUNT; b++) {
- lfs_testbd_setwear(&cfg, b,
- (b < run_block_count[run]) ? 0 : LFS_ERASE_CYCLES) => 0;
+ for (lfs_block_t b = 0; b < BLOCK_COUNT; b++) {
+ lfs_emubd_setwear(cfg, b,
+ (b < run_block_count[run]) ? 0 : ERASE_CYCLES) => 0;
}
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "roadrunner") => 0;
lfs_unmount(&lfs) => 0;
uint32_t cycle = 0;
while (true) {
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// choose name, roughly random seed, and random 2^n size
+ char path[1024];
sprintf(path, "roadrunner/test%d", i);
- srand(cycle * i);
- size = 1 << ((rand() % 10)+2);
+ uint32_t prng = cycle * i;
+ lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (lfs_size_t j = 0; j < size; j++) {
- char c = 'a' + (rand() % 26);
+ char c = 'a' + (TEST_PRNG(&prng) % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
- err = lfs_file_close(&lfs, &file);
+ int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
- err = lfs_file_close(&lfs, &file);
+ int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
@@ -223,13 +243,15 @@ code = '''
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
+ char path[1024];
sprintf(path, "roadrunner/test%d", i);
- srand(cycle * i);
- size = 1 << ((rand() % 10)+2);
+ uint32_t prng = cycle * i;
+ lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
- char c = 'a' + (rand() % 26);
+ char c = 'a' + (TEST_PRNG(&prng) % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
@@ -244,9 +266,11 @@ code = '''
exhausted:
// should still be readable
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
+ char path[1024];
+ struct lfs_info info;
sprintf(path, "roadrunner/test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
@@ -261,48 +285,52 @@ exhausted:
LFS_ASSERT(run_cycles[1]*110/100 > 2*run_cycles[0]);
'''
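
Two details make this wear-leveling measurement work. `lfs_emubd_setwear` pre-ages blocks: in run 0 the top half of the device starts at `ERASE_CYCLES` (already dead), so the filesystem sees half the usable blocks of run 1 without any geometry change. The closing `LFS_ASSERT` then demands that doubling the usable blocks roughly doubles the completed cycles: `run_cycles[1]*110/100 > 2*run_cycles[0]` rearranges to run 1 lasting at least about 1.82x run 0, i.e. 2x with ~10% slack for integer effects. A standalone restatement of that check (it mirrors the assert above; it is not a runner API):

```c
// restatement of the lifetime check: with working wear leveling,
// doubling usable blocks should roughly double completed write cycles;
// the 110/100 factor grants ~10% slack
#include <assert.h>
#include <stdint.h>

static void check_lifetime(uint32_t half_bd_cycles, uint32_t full_bd_cycles) {
    assert(full_bd_cycles*110/100 > 2*half_bd_cycles);
}

int main(void) {
    check_lifetime(100, 190);   // passes: 190*110/100 = 209 > 200
    //check_lifetime(100, 180); // would fail: 198 <= 200
    return 0;
}
```
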
-[[case]] # wear-level test + expanding superblock
-define.LFS_ERASE_CYCLES = 20
-define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
-define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
-define.FILES = 10
+# wear-level test + expanding superblock
+[cases.test_exhaustion_wear_leveling_superblocks]
+defines.ERASE_CYCLES = 20
+defines.BLOCK_COUNT = 256 # small bd so test runs faster
+defines.BLOCK_CYCLES = 'ERASE_CYCLES / 2'
+defines.FILES = 10
code = '''
uint32_t run_cycles[2];
- const uint32_t run_block_count[2] = {LFS_BLOCK_COUNT/2, LFS_BLOCK_COUNT};
+ const uint32_t run_block_count[2] = {BLOCK_COUNT/2, BLOCK_COUNT};
for (int run = 0; run < 2; run++) {
- for (lfs_block_t b = 0; b < LFS_BLOCK_COUNT; b++) {
- lfs_testbd_setwear(&cfg, b,
- (b < run_block_count[run]) ? 0 : LFS_ERASE_CYCLES) => 0;
+ for (lfs_block_t b = 0; b < BLOCK_COUNT; b++) {
+ lfs_emubd_setwear(cfg, b,
+ (b < run_block_count[run]) ? 0 : ERASE_CYCLES) => 0;
}
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
uint32_t cycle = 0;
while (true) {
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// choose name, roughly random seed, and random 2^n size
+ char path[1024];
sprintf(path, "test%d", i);
- srand(cycle * i);
- size = 1 << ((rand() % 10)+2);
+ uint32_t prng = cycle * i;
+ lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (lfs_size_t j = 0; j < size; j++) {
- char c = 'a' + (rand() % 26);
+ char c = 'a' + (TEST_PRNG(&prng) % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
- err = lfs_file_close(&lfs, &file);
+ int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
- err = lfs_file_close(&lfs, &file);
+ int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
@@ -312,13 +340,15 @@ code = '''
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
+ char path[1024];
sprintf(path, "test%d", i);
- srand(cycle * i);
- size = 1 << ((rand() % 10)+2);
+ uint32_t prng = cycle * i;
+ lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
- char c = 'a' + (rand() % 26);
+ char c = 'a' + (TEST_PRNG(&prng) % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
@@ -333,9 +363,11 @@ code = '''
exhausted:
// should still be readable
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
+ char path[1024];
+ struct lfs_info info;
sprintf(path, "test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
@@ -350,44 +382,48 @@ exhausted:
LFS_ASSERT(run_cycles[1]*110/100 > 2*run_cycles[0]);
'''
-[[case]] # test that we wear blocks roughly evenly
-define.LFS_ERASE_CYCLES = 0xffffffff
-define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
-define.LFS_BLOCK_CYCLES = [5, 4, 3, 2, 1]
-define.CYCLES = 100
-define.FILES = 10
-if = 'LFS_BLOCK_CYCLES < CYCLES/10'
+# test that we wear blocks roughly evenly
+[cases.test_exhaustion_wear_distribution]
+defines.ERASE_CYCLES = 0xffffffff
+defines.BLOCK_COUNT = 256 # small bd so test runs faster
+defines.BLOCK_CYCLES = [5, 4, 3, 2, 1]
+defines.CYCLES = 100
+defines.FILES = 10
+if = 'BLOCK_CYCLES < CYCLES/10'
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "roadrunner") => 0;
lfs_unmount(&lfs) => 0;
uint32_t cycle = 0;
while (cycle < CYCLES) {
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// choose name, roughly random seed, and random 2^n size
+ char path[1024];
sprintf(path, "roadrunner/test%d", i);
- srand(cycle * i);
- size = 1 << 4; //((rand() % 10)+2);
+ uint32_t prng = cycle * i;
+ lfs_size_t size = 1 << 4; //((TEST_PRNG(&prng) % 10)+2);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (lfs_size_t j = 0; j < size; j++) {
- char c = 'a' + (rand() % 26);
+ char c = 'a' + (TEST_PRNG(&prng) % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
- err = lfs_file_close(&lfs, &file);
+ int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
- err = lfs_file_close(&lfs, &file);
+ int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
@@ -397,13 +433,15 @@ code = '''
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
+ char path[1024];
sprintf(path, "roadrunner/test%d", i);
- srand(cycle * i);
- size = 1 << 4; //((rand() % 10)+2);
+ uint32_t prng = cycle * i;
+ lfs_size_t size = 1 << 4; //((TEST_PRNG(&prng) % 10)+2);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
- char c = 'a' + (rand() % 26);
+ char c = 'a' + (TEST_PRNG(&prng) % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
@@ -418,9 +456,11 @@ code = '''
exhausted:
// should still be readable
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
+ char path[1024];
+ struct lfs_info info;
sprintf(path, "roadrunner/test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
@@ -429,12 +469,12 @@ exhausted:
LFS_WARN("completed %d cycles", cycle);
// check the wear on our block device
- lfs_testbd_wear_t minwear = -1;
- lfs_testbd_wear_t totalwear = 0;
- lfs_testbd_wear_t maxwear = 0;
+ lfs_emubd_wear_t minwear = -1;
+ lfs_emubd_wear_t totalwear = 0;
+ lfs_emubd_wear_t maxwear = 0;
// skip 0 and 1 as superblock movement is intentionally avoided
- for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) {
- lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b);
+ for (lfs_block_t b = 2; b < BLOCK_COUNT; b++) {
+ lfs_emubd_wear_t wear = lfs_emubd_wear(cfg, b);
printf("%08x: wear %d\n", b, wear);
assert(wear >= 0);
if (wear < minwear) {
@@ -445,17 +485,17 @@ exhausted:
}
totalwear += wear;
}
- lfs_testbd_wear_t avgwear = totalwear / LFS_BLOCK_COUNT;
+ lfs_emubd_wear_t avgwear = totalwear / BLOCK_COUNT;
LFS_WARN("max wear: %d cycles", maxwear);
- LFS_WARN("avg wear: %d cycles", totalwear / LFS_BLOCK_COUNT);
+ LFS_WARN("avg wear: %d cycles", totalwear / (int)BLOCK_COUNT);
LFS_WARN("min wear: %d cycles", minwear);
// find standard deviation^2
- lfs_testbd_wear_t dev2 = 0;
- for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) {
- lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b);
+ lfs_emubd_wear_t dev2 = 0;
+ for (lfs_block_t b = 2; b < BLOCK_COUNT; b++) {
+ lfs_emubd_wear_t wear = lfs_emubd_wear(cfg, b);
assert(wear >= 0);
- lfs_testbd_swear_t diff = wear - avgwear;
+ lfs_emubd_swear_t diff = wear - avgwear;
dev2 += diff*diff;
}
dev2 /= totalwear;
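
The wear-distribution case above closes by reducing per-block wear to min/avg/max plus `dev2`, a sum of squared deviations normalized by total wear rather than by block count, which keeps the dispersion figure comparable as cycles accumulate. The same reduction over a plain array, under the assumption that `lfs_emubd_wear` simply reports a per-block erase counter:

```c
// the statistics pass from the test, over a plain array instead of
// lfs_emubd_wear(); note dev2 divides by total wear, not block count
#include <stdint.h>
#include <stdio.h>

int main(void) {
    int32_t wear[] = {12, 14, 13, 15, 11, 13, 14, 12};
    int n = (int)(sizeof(wear)/sizeof(wear[0]));

    int32_t minwear = INT32_MAX;
    int32_t maxwear = 0;
    int32_t totalwear = 0;
    for (int b = 0; b < n; b++) {
        if (wear[b] < minwear) { minwear = wear[b]; }
        if (wear[b] > maxwear) { maxwear = wear[b]; }
        totalwear += wear[b];
    }
    int32_t avgwear = totalwear / n;

    int32_t dev2 = 0;
    for (int b = 0; b < n; b++) {
        int32_t diff = wear[b] - avgwear;
        dev2 += diff*diff;
    }
    dev2 /= totalwear;

    printf("min %d avg %d max %d dev2 %d\n",
            (int)minwear, (int)avgwear, (int)maxwear, (int)dev2);
    return 0;
}
```
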
diff --git a/tests/test_files.toml b/tests/test_files.toml
index 565e665b..afb0811f 100644
--- a/tests/test_files.toml
+++ b/tests/test_files.toml
@@ -1,17 +1,20 @@
-[[case]] # simple file test
+[cases.test_files_simple]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "hello",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
- size = strlen("Hello World!")+1;
+ lfs_size_t size = strlen("Hello World!")+1;
+ uint8_t buffer[1024];
strcpy((char*)buffer, "Hello World!");
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "hello", LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(strcmp((char*)buffer, "Hello World!") == 0);
@@ -19,21 +22,24 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
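
A reading note for all of these case bodies: they are not plain C. The runner rewrites the `=>` shorthand into an equality assertion before compilation, so `lfs_mount(&lfs, cfg) => 0;` is an assert, not an expression statement. A plain-C approximation of the expansion (the macro name is ours; the real rewrite happens in the test build scripts):

```c
// rough desugaring of the test runner's `=>` shorthand; ASSERT_EQ is
// illustrative, not the framework's actual spelling
#include <assert.h>

#define ASSERT_EQ(expr, expected) assert((expr) == (expected))

// a case line like
//     lfs_mount(&lfs, cfg) => 0;
// compiles as roughly
//     ASSERT_EQ(lfs_mount(&lfs, cfg), 0);
```
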
-[[case]] # larger files
-define.SIZE = [32, 8192, 262144, 0, 7, 8193]
-define.CHUNKSIZE = [31, 16, 33, 1, 1023]
+[cases.test_files_large]
+defines.SIZE = [32, 8192, 262144, 0, 7, 8193]
+defines.CHUNKSIZE = [31, 16, 33, 1, 1023]
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
// write
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
- srand(1);
+ uint32_t prng = 1;
+ uint8_t buffer[1024];
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
for (lfs_size_t b = 0; b < chunk; b++) {
- buffer[b] = rand() & 0xff;
+ buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
@@ -41,15 +47,15 @@ code = '''
lfs_unmount(&lfs) => 0;
// read
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE;
- srand(1);
+ prng = 1;
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
- assert(buffer[b] == (rand() & 0xff));
+ assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
@@ -57,22 +63,25 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
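
The renamed `defines` keys keep the old matrix semantics: a key bound to a list fans the case out over every combination, so `test_files_large` above runs 6 SIZEs x 5 CHUNKSIZEs = 30 permutations. A sketch of the fan-out the runner effectively performs:

```c
// enumerating the configuration matrix implied by defines.SIZE and
// defines.CHUNKSIZE: one case instance per combination
#include <stdio.h>

int main(void) {
    int sizes[]  = {32, 8192, 262144, 0, 7, 8193};
    int chunks[] = {31, 16, 33, 1, 1023};
    int perms = 0;
    for (unsigned i = 0; i < sizeof(sizes)/sizeof(sizes[0]); i++) {
        for (unsigned j = 0; j < sizeof(chunks)/sizeof(chunks[0]); j++) {
            printf("SIZE=%d CHUNKSIZE=%d\n", sizes[i], chunks[j]);
            perms += 1;
        }
    }
    printf("%d permutations\n", perms);  // 30
    return 0;
}
```
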
-[[case]] # rewriting files
-define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
-define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
-define.CHUNKSIZE = [31, 16, 1]
+[cases.test_files_rewrite]
+defines.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
+defines.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
+defines.CHUNKSIZE = [31, 16, 1]
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
// write
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
+ uint8_t buffer[1024];
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
- srand(1);
+ uint32_t prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
for (lfs_size_t b = 0; b < chunk; b++) {
- buffer[b] = rand() & 0xff;
+ buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
@@ -80,15 +89,15 @@ code = '''
lfs_unmount(&lfs) => 0;
// read
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE1;
- srand(1);
+ prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
- assert(buffer[b] == (rand() & 0xff));
+ assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
@@ -96,13 +105,13 @@ code = '''
lfs_unmount(&lfs) => 0;
// rewrite
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY) => 0;
- srand(2);
+ prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
for (lfs_size_t b = 0; b < chunk; b++) {
- buffer[b] = rand() & 0xff;
+ buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
@@ -110,27 +119,27 @@ code = '''
lfs_unmount(&lfs) => 0;
// read
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => lfs_max(SIZE1, SIZE2);
- srand(2);
+ prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
- assert(buffer[b] == (rand() & 0xff));
+ assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
if (SIZE1 > SIZE2) {
- srand(1);
+ prng = 1;
for (lfs_size_t b = 0; b < SIZE2; b++) {
- rand();
+ TEST_PRNG(&prng);
}
for (lfs_size_t i = SIZE2; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
- assert(buffer[b] == (rand() & 0xff));
+ assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
}
@@ -139,22 +148,25 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # appending files
-define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
-define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
-define.CHUNKSIZE = [31, 16, 1]
+[cases.test_files_append]
+defines.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
+defines.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
+defines.CHUNKSIZE = [31, 16, 1]
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
// write
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
+ uint8_t buffer[1024];
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
- srand(1);
+ uint32_t prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
for (lfs_size_t b = 0; b < chunk; b++) {
- buffer[b] = rand() & 0xff;
+ buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
@@ -162,15 +174,15 @@ code = '''
lfs_unmount(&lfs) => 0;
// read
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE1;
- srand(1);
+ prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
- assert(buffer[b] == (rand() & 0xff));
+ assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
@@ -178,13 +190,13 @@ code = '''
lfs_unmount(&lfs) => 0;
// append
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_APPEND) => 0;
- srand(2);
+ prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
for (lfs_size_t b = 0; b < chunk; b++) {
- buffer[b] = rand() & 0xff;
+ buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
@@ -192,23 +204,23 @@ code = '''
lfs_unmount(&lfs) => 0;
// read
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE1 + SIZE2;
- srand(1);
+ prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
- assert(buffer[b] == (rand() & 0xff));
+ assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
- srand(2);
+ prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
- assert(buffer[b] == (rand() & 0xff));
+ assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
@@ -216,22 +228,25 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # truncating files
-define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
-define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
-define.CHUNKSIZE = [31, 16, 1]
+[cases.test_files_truncate]
+defines.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
+defines.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
+defines.CHUNKSIZE = [31, 16, 1]
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
// write
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
+ uint8_t buffer[1024];
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
- srand(1);
+ uint32_t prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
for (lfs_size_t b = 0; b < chunk; b++) {
- buffer[b] = rand() & 0xff;
+ buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
@@ -239,15 +254,15 @@ code = '''
lfs_unmount(&lfs) => 0;
// read
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE1;
- srand(1);
+ prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
- assert(buffer[b] == (rand() & 0xff));
+ assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
@@ -255,13 +270,13 @@ code = '''
lfs_unmount(&lfs) => 0;
// truncate
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_TRUNC) => 0;
- srand(2);
+ prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
for (lfs_size_t b = 0; b < chunk; b++) {
- buffer[b] = rand() & 0xff;
+ buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
@@ -269,15 +284,15 @@ code = '''
lfs_unmount(&lfs) => 0;
// read
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE2;
- srand(2);
+ prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
- assert(buffer[b] == (rand() & 0xff));
+ assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
@@ -285,33 +300,36 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # reentrant file writing
-define.SIZE = [32, 0, 7, 2049]
-define.CHUNKSIZE = [31, 16, 65]
+[cases.test_files_reentrant_write]
+defines.SIZE = [32, 0, 7, 2049]
+defines.CHUNKSIZE = [31, 16, 65]
reentrant = true
code = '''
- err = lfs_mount(&lfs, &cfg);
+ lfs_t lfs;
+ int err = lfs_mount(&lfs, cfg);
if (err) {
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
}
+ lfs_file_t file;
+ uint8_t buffer[1024];
err = lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY);
assert(err == LFS_ERR_NOENT || err == 0);
if (err == 0) {
// can only be 0 (new file) or full size
- size = lfs_file_size(&lfs, &file);
+ lfs_size_t size = lfs_file_size(&lfs, &file);
assert(size == 0 || size == SIZE);
lfs_file_close(&lfs, &file) => 0;
}
// write
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_CREAT) => 0;
- srand(1);
+ uint32_t prng = 1;
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
for (lfs_size_t b = 0; b < chunk; b++) {
- buffer[b] = rand() & 0xff;
+ buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
@@ -320,12 +338,12 @@ code = '''
// read
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE;
- srand(1);
+ prng = 1;
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
- assert(buffer[b] == (rand() & 0xff));
+ assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
@@ -333,8 +351,8 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
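
`reentrant = true` means the runner re-executes the case under simulated power loss, so no prior progress can be assumed: the filesystem may be unformatted, and a file may exist at any previously synced size. That is why every reentrant body opens with the mount-or-format preamble and then validates, rather than assumes, the on-disk state. The preamble as a standalone helper (a sketch; the cases inline it):

```c
// the mount-or-format preamble every reentrant case starts with: after
// a simulated power loss the filesystem may not exist yet, so a failed
// mount falls back to a fresh format and remount
#include <assert.h>
#include "lfs.h"

static void mount_or_format(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = lfs_mount(lfs, cfg);
    if (err) {
        // first boot, or the block device was never formatted
        assert(lfs_format(lfs, cfg) == 0);
        assert(lfs_mount(lfs, cfg) == 0);
    }
}
```
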
-[[case]] # reentrant file writing with syncs
-define = [
+[cases.test_files_reentrant_write_sync]
+defines = [
# append (O(n))
{MODE='LFS_O_APPEND', SIZE=[32, 0, 7, 2049], CHUNKSIZE=[31, 16, 65]},
# truncate (O(n^2))
@@ -344,24 +362,27 @@ define = [
]
reentrant = true
code = '''
- err = lfs_mount(&lfs, &cfg);
+ lfs_t lfs;
+ int err = lfs_mount(&lfs, cfg);
if (err) {
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
}
+ lfs_file_t file;
+ uint8_t buffer[1024];
err = lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY);
assert(err == LFS_ERR_NOENT || err == 0);
if (err == 0) {
// with syncs we could be any size, but it at least must be valid data
- size = lfs_file_size(&lfs, &file);
+ lfs_size_t size = lfs_file_size(&lfs, &file);
assert(size <= SIZE);
- srand(1);
+ uint32_t prng = 1;
for (lfs_size_t i = 0; i < size; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, size-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
- assert(buffer[b] == (rand() & 0xff));
+ assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_close(&lfs, &file) => 0;
@@ -370,17 +391,17 @@ code = '''
// write
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | MODE) => 0;
- size = lfs_file_size(&lfs, &file);
+ lfs_size_t size = lfs_file_size(&lfs, &file);
assert(size <= SIZE);
- srand(1);
+ uint32_t prng = 1;
lfs_size_t skip = (MODE == LFS_O_APPEND) ? size : 0;
for (lfs_size_t b = 0; b < skip; b++) {
- rand();
+ TEST_PRNG(&prng);
}
for (lfs_size_t i = skip; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
for (lfs_size_t b = 0; b < chunk; b++) {
- buffer[b] = rand() & 0xff;
+ buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
lfs_file_sync(&lfs, &file) => 0;
@@ -390,12 +411,12 @@ code = '''
// read
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE;
- srand(1);
+ prng = 1;
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
- assert(buffer[b] == (rand() & 0xff));
+ assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
@@ -403,19 +424,22 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # many files
-define.N = 300
+[cases.test_files_many]
+defines.N = 300
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
// create N files of 7 bytes
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
+ lfs_file_t file;
+ char path[1024];
sprintf(path, "file_%03d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
char wbuffer[1024];
- size = 7;
- snprintf(wbuffer, size, "Hi %03d", i);
+ lfs_size_t size = 7;
+ sprintf(wbuffer, "Hi %03d", i);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
@@ -428,25 +452,28 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # many files with power cycle
-define.N = 300
+[cases.test_files_many_power_cycle]
+defines.N = 300
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
// create N files of 7 bytes
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
+ lfs_file_t file;
+ char path[1024];
sprintf(path, "file_%03d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
char wbuffer[1024];
- size = 7;
- snprintf(wbuffer, size, "Hi %03d", i);
+ lfs_size_t size = 7;
+ sprintf(wbuffer, "Hi %03d", i);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
char rbuffer[1024];
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(strcmp(rbuffer, wbuffer) == 0);
@@ -455,22 +482,25 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # many files with power loss
-define.N = 300
+[cases.test_files_many_power_loss]
+defines.N = 300
reentrant = true
code = '''
- err = lfs_mount(&lfs, &cfg);
+ lfs_t lfs;
+ int err = lfs_mount(&lfs, cfg);
if (err) {
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
}
// create N files of 7 bytes
for (int i = 0; i < N; i++) {
+ lfs_file_t file;
+ char path[1024];
sprintf(path, "file_%03d", i);
err = lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT);
char wbuffer[1024];
- size = 7;
- snprintf(wbuffer, size, "Hi %03d", i);
+ lfs_size_t size = 7;
+ sprintf(wbuffer, "Hi %03d", i);
if ((lfs_size_t)lfs_file_size(&lfs, &file) != size) {
lfs_file_write(&lfs, &file, wbuffer, size) => size;
}
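
The `snprintf(wbuffer, size, ...)` to `sprintf(wbuffer, ...)` swap in the many-files cases is behavior-preserving: with `size = 7` and `i < 1000`, `"Hi %03d"` formats to exactly six characters plus the terminator, so the old bound never truncated. Our reading is that the change simply sidesteps format-truncation warnings; the diff itself states no motive. A quick equivalence check:

```c
// shows the snprintf -> sprintf change is a no-op for the values these
// tests use: "Hi %03d" with i < 1000 is always 6 chars + NUL = 7 bytes
#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    char a[16], b[16];
    for (int i = 0; i < 1000; i++) {
        snprintf(a, 7, "Hi %03d", i);
        sprintf(b, "Hi %03d", i);
        assert(strcmp(a, b) == 0);
    }
    return 0;
}
```
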
diff --git a/tests/test_interspersed.toml b/tests/test_interspersed.toml
index 87a05780..d7143f61 100644
--- a/tests/test_interspersed.toml
+++ b/tests/test_interspersed.toml
@@ -1,13 +1,15 @@
-[[case]] # interspersed file test
-define.SIZE = [10, 100]
-define.FILES = [4, 10, 26]
+[cases.test_interspersed_files]
+defines.SIZE = [10, 100]
+defines.FILES = [4, 10, 26]
code = '''
+ lfs_t lfs;
lfs_file_t files[FILES];
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int j = 0; j < FILES; j++) {
+ char path[1024];
sprintf(path, "%c", alphas[j]);
lfs_file_open(&lfs, &files[j], path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
@@ -23,7 +25,9 @@ code = '''
lfs_file_close(&lfs, &files[j]);
}
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
@@ -31,6 +35,7 @@ code = '''
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
for (int j = 0; j < FILES; j++) {
+ char path[1024];
sprintf(path, "%c", alphas[j]);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
@@ -41,12 +46,14 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
for (int j = 0; j < FILES; j++) {
+ char path[1024];
sprintf(path, "%c", alphas[j]);
lfs_file_open(&lfs, &files[j], path, LFS_O_RDONLY) => 0;
}
for (int i = 0; i < 10; i++) {
for (int j = 0; j < FILES; j++) {
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &files[j], buffer, 1) => 1;
assert(buffer[0] == alphas[j]);
}
@@ -59,15 +66,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # interspersed remove file test
-define.SIZE = [10, 100]
-define.FILES = [4, 10, 26]
+[cases.test_interspersed_remove_files]
+defines.SIZE = [10, 100]
+defines.FILES = [4, 10, 26]
code = '''
+ lfs_t lfs;
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int j = 0; j < FILES; j++) {
+ char path[1024];
sprintf(path, "%c", alphas[j]);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
for (int i = 0; i < SIZE; i++) {
@@ -77,18 +87,22 @@ code = '''
}
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "zzz", LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (int j = 0; j < FILES; j++) {
lfs_file_write(&lfs, &file, (const void*)"~", 1) => 1;
lfs_file_sync(&lfs, &file) => 0;
+ char path[1024];
sprintf(path, "%c", alphas[j]);
lfs_remove(&lfs, path) => 0;
}
lfs_file_close(&lfs, &file);
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
@@ -104,6 +118,7 @@ code = '''
lfs_file_open(&lfs, &file, "zzz", LFS_O_RDONLY) => 0;
for (int i = 0; i < FILES; i++) {
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 1) => 1;
assert(buffer[0] == '~');
}
@@ -112,11 +127,12 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # remove inconveniently test
-define.SIZE = [10, 100]
+[cases.test_interspersed_remove_inconveniently]
+defines.SIZE = [10, 100]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_t files[3];
lfs_file_open(&lfs, &files[0], "e", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_open(&lfs, &files[1], "f", LFS_O_WRONLY | LFS_O_CREAT) => 0;
@@ -140,7 +156,9 @@ code = '''
lfs_file_close(&lfs, &files[1]);
lfs_file_close(&lfs, &files[2]);
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
@@ -161,6 +179,7 @@ code = '''
lfs_file_open(&lfs, &files[0], "e", LFS_O_RDONLY) => 0;
lfs_file_open(&lfs, &files[1], "g", LFS_O_RDONLY) => 0;
for (int i = 0; i < SIZE; i++) {
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &files[0], buffer, 1) => 1;
assert(buffer[0] == 'e');
lfs_file_read(&lfs, &files[1], buffer, 1) => 1;
@@ -172,21 +191,23 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # reentrant interspersed file test
-define.SIZE = [10, 100]
-define.FILES = [4, 10, 26]
+[cases.test_interspersed_reentrant_files]
+defines.SIZE = [10, 100]
+defines.FILES = [4, 10, 26]
reentrant = true
code = '''
+ lfs_t lfs;
lfs_file_t files[FILES];
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
- err = lfs_mount(&lfs, &cfg);
+ int err = lfs_mount(&lfs, cfg);
if (err) {
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
}
for (int j = 0; j < FILES; j++) {
+ char path[1024];
sprintf(path, "%c", alphas[j]);
lfs_file_open(&lfs, &files[j], path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
@@ -194,8 +215,8 @@ code = '''
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < FILES; j++) {
- size = lfs_file_size(&lfs, &files[j]);
- assert((int)size >= 0);
+ lfs_ssize_t size = lfs_file_size(&lfs, &files[j]);
+ assert(size >= 0);
if ((int)size <= i) {
lfs_file_write(&lfs, &files[j], &alphas[j], 1) => 1;
lfs_file_sync(&lfs, &files[j]) => 0;
@@ -207,7 +228,9 @@ code = '''
lfs_file_close(&lfs, &files[j]);
}
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
@@ -215,6 +238,7 @@ code = '''
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
for (int j = 0; j < FILES; j++) {
+ char path[1024];
sprintf(path, "%c", alphas[j]);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
@@ -225,12 +249,14 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
for (int j = 0; j < FILES; j++) {
+ char path[1024];
sprintf(path, "%c", alphas[j]);
lfs_file_open(&lfs, &files[j], path, LFS_O_RDONLY) => 0;
}
for (int i = 0; i < 10; i++) {
for (int j = 0; j < FILES; j++) {
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &files[j], buffer, 1) => 1;
assert(buffer[0] == alphas[j]);
}
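
The scattered additions of `lfs_t lfs`, `lfs_file_t file`, `lfs_dir_t dir`, `struct lfs_info info`, `char path[1024]`, `uint8_t buffer[1024]`, and `int err` all stem from one framework change: the old runner injected these as shared globals (note the `-` lines using `err` and `size` with no declaration in sight), while the new runner compiles each case as an ordinary scope that declares what it uses. Only `cfg` still comes from outside, now as a `struct lfs_config *`, hence the pervasive `&cfg` to `cfg` and `cfg.read(&cfg, ...)` to `cfg->read(cfg, ...)` rewrites. A minimal case body under the new convention (a sketch in the cases' own `=>` idiom):

```c
// skeleton of a migrated case: all state is local; only cfg (now a
// struct lfs_config *) is provided by the runner
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;

lfs_file_t file;
char path[1024];
sprintf(path, "file_%03d", 0);
lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;

lfs_unmount(&lfs) => 0;
```
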
diff --git a/tests/test_move.toml b/tests/test_move.toml
index bb3b713f..0537f486 100644
--- a/tests/test_move.toml
+++ b/tests/test_move.toml
@@ -1,11 +1,13 @@
-[[case]] # move file
+[cases.test_move_file]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
lfs_mkdir(&lfs, "d") => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "a/hello", LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
@@ -13,11 +15,13 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hello", "c/hello") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -44,6 +48,7 @@ code = '''
lfs_file_open(&lfs, &file, "a/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "b/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "c/hello", LFS_O_RDONLY) => 0;
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 5) => 5;
memcmp(buffer, "hola\n", 5) => 0;
lfs_file_read(&lfs, &file, buffer, 8) => 8;
@@ -55,31 +60,35 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # noop move, yes this is legal
+[cases.test_move_nop] # yes this is legal
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "hi") => 0;
lfs_rename(&lfs, "hi", "hi") => 0;
lfs_mkdir(&lfs, "hi/hi") => 0;
lfs_rename(&lfs, "hi/hi", "hi/hi") => 0;
lfs_mkdir(&lfs, "hi/hi/hi") => 0;
lfs_rename(&lfs, "hi/hi/hi", "hi/hi/hi") => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "hi/hi/hi", &info) => 0;
assert(strcmp(info.name, "hi") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_unmount(&lfs) => 0;
'''
-[[case]] # move file corrupt source
+[cases.test_move_file_corrupt_source]
in = "lfs.c"
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
lfs_mkdir(&lfs, "d") => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "a/hello", LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
@@ -87,28 +96,30 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hello", "c/hello") => 0;
lfs_unmount(&lfs) => 0;
// corrupt the source
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
- uint8_t bbuffer[LFS_BLOCK_SIZE];
- cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- int off = LFS_BLOCK_SIZE-1;
- while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ uint8_t buffer[BLOCK_SIZE];
+ cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ int off = BLOCK_SIZE-1;
+ while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
- memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
- cfg.erase(&cfg, block) => 0;
- cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- cfg.sync(&cfg) => 0;
+ memset(&buffer[off-3], BLOCK_SIZE, 3);
+ cfg->erase(cfg, block) => 0;
+ cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ cfg->sync(cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -146,16 +157,19 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
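
The corrupt-source cases keep their original trick, restated for the pointer-based `cfg` and the lowercase `BLOCK_SIZE`/`ERASE_VALUE` defines: read the metadata block back through the raw bd hooks, scan from the end for the last programmed (non-erase-value) byte, stomp the three bytes just before it (which lands in the tail of the block's most recent commit), and write the block back. Factored into a helper for clarity (a sketch; the cases inline it, and `memset`'s fill value is truncated to one byte, so passing `BLOCK_SIZE` merely supplies an arbitrary non-erased pattern):

```c
// sketch of the block-corruption step the move tests inline: clobber
// the last few programmed bytes of a metadata block through the raw
// bd hooks, invalidating its most recent commit (the tests assert each
// hook returns 0)
#include <string.h>
#include "lfs.h"

static void corrupt_last_commit(const struct lfs_config *cfg,
        lfs_block_t block, lfs_size_t block_size, uint8_t erase_value) {
    uint8_t buffer[4096];  // assumes block_size <= sizeof(buffer)
    cfg->read(cfg, block, 0, buffer, block_size);
    int off = (int)block_size - 1;
    while (off >= 0 && buffer[off] == erase_value) {
        off -= 1;
    }
    // fill value is truncated to a byte; any non-erased pattern works
    memset(&buffer[off-3], (int)block_size, 3);
    cfg->erase(cfg, block);
    cfg->prog(cfg, block, 0, buffer, block_size);
    cfg->sync(cfg);
}
```
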
-[[case]] # move file corrupt source and dest
+# move file corrupt source and dest
+[cases.test_move_file_corrupt_source_dest]
in = "lfs.c"
-if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
+if = 'PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
lfs_mkdir(&lfs, "d") => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "a/hello", LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
@@ -163,44 +177,46 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hello", "c/hello") => 0;
lfs_unmount(&lfs) => 0;
// corrupt the source
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
- uint8_t bbuffer[LFS_BLOCK_SIZE];
- cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- int off = LFS_BLOCK_SIZE-1;
- while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ uint8_t buffer[BLOCK_SIZE];
+ cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ int off = BLOCK_SIZE-1;
+ while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
- memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
- cfg.erase(&cfg, block) => 0;
- cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- cfg.sync(&cfg) => 0;
+ memset(&buffer[off-3], BLOCK_SIZE, 3);
+ cfg->erase(cfg, block) => 0;
+ cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ cfg->sync(cfg) => 0;
// corrupt the destination
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "c") => 0;
block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
- cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- off = LFS_BLOCK_SIZE-1;
- while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ off = BLOCK_SIZE-1;
+ while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
- memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
- cfg.erase(&cfg, block) => 0;
- cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- cfg.sync(&cfg) => 0;
+ memset(&buffer[off-3], BLOCK_SIZE, 3);
+ cfg->erase(cfg, block) => 0;
+ cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ cfg->sync(cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -238,16 +254,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # move file after corrupt
+[cases.test_move_file_after_corrupt]
in = "lfs.c"
-if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
+if = 'PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
lfs_mkdir(&lfs, "d") => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "a/hello", LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
@@ -255,49 +273,51 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hello", "c/hello") => 0;
lfs_unmount(&lfs) => 0;
// corrupt the source
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
- uint8_t bbuffer[LFS_BLOCK_SIZE];
- cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- int off = LFS_BLOCK_SIZE-1;
- while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ uint8_t buffer[BLOCK_SIZE];
+ cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ int off = BLOCK_SIZE-1;
+ while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
- memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
- cfg.erase(&cfg, block) => 0;
- cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- cfg.sync(&cfg) => 0;
+ memset(&buffer[off-3], BLOCK_SIZE, 3);
+ cfg->erase(cfg, block) => 0;
+ cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ cfg->sync(cfg) => 0;
// corrupt the destination
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "c") => 0;
block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
- cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- off = LFS_BLOCK_SIZE-1;
- while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ off = BLOCK_SIZE-1;
+ while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
- memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
- cfg.erase(&cfg, block) => 0;
- cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- cfg.sync(&cfg) => 0;
+ memset(&buffer[off-3], BLOCK_SIZE, 3);
+ cfg->erase(cfg, block) => 0;
+ cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ cfg->sync(cfg) => 0;
// continue move
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hello", "c/hello") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -335,13 +355,14 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # simple reentrant move file
+[cases.test_move_reentrant_file]
reentrant = true
code = '''
- err = lfs_mount(&lfs, &cfg);
+ lfs_t lfs;
+ int err = lfs_mount(&lfs, cfg);
if (err) {
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
}
err = lfs_mkdir(&lfs, "a");
assert(!err || err == LFS_ERR_EXIST);
@@ -354,9 +375,10 @@ code = '''
lfs_unmount(&lfs) => 0;
while (true) {
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// there should never exist _2_ hello files
int count = 0;
+ struct lfs_info info;
if (lfs_stat(&lfs, "a/hello", &info) == 0) {
assert(strcmp(info.name, "hello") == 0);
assert(info.type == LFS_TYPE_REG);
@@ -384,7 +406,7 @@ code = '''
assert(count <= 1);
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
if (lfs_stat(&lfs, "a/hello", &info) == 0 && info.size > 0) {
lfs_rename(&lfs, "a/hello", "b/hello") => 0;
} else if (lfs_stat(&lfs, "b/hello", &info) == 0) {
@@ -397,6 +419,7 @@ code = '''
break;
} else {
// create file
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "a/hello",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
@@ -407,7 +430,9 @@ code = '''
lfs_unmount(&lfs) => 0;
}
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -431,10 +456,12 @@ code = '''
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "a/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "b/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "c/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "d/hello", LFS_O_RDONLY) => 0;
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 5) => 5;
memcmp(buffer, "hola\n", 5) => 0;
lfs_file_read(&lfs, &file, buffer, 8) => 8;
@@ -445,10 +472,11 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
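
The reentrant move case is effectively a power-loss proof of rename atomicity: at every interruption point the file may exist in at most one of the four directories, and a total absence is only legal before the initial create has synced. The invariant distilled (a condensed fragment in the case's own idiom, not a verbatim excerpt):

```c
// the invariant checked after every simulated power loss: a rename
// never leaves two copies visible, so at most one stat succeeds
int count = 0;
struct lfs_info info;
const char *paths[] = {"a/hello", "b/hello", "c/hello", "d/hello"};
for (unsigned i = 0; i < sizeof(paths)/sizeof(paths[0]); i++) {
    if (lfs_stat(&lfs, paths[i], &info) == 0) {
        count += 1;
    }
}
assert(count <= 1);
```
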
-[[case]] # move dir
+[cases.test_move_dir]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
@@ -459,11 +487,13 @@ code = '''
lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hi", "c/hi") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -510,11 +540,12 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # move dir corrupt source
+[cases.test_move_dir_corrupt_source]
in = "lfs.c"
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
@@ -525,28 +556,30 @@ code = '''
lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hi", "c/hi") => 0;
lfs_unmount(&lfs) => 0;
// corrupt the source
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
- uint8_t bbuffer[LFS_BLOCK_SIZE];
- cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- int off = LFS_BLOCK_SIZE-1;
- while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ uint8_t buffer[BLOCK_SIZE];
+ cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ int off = BLOCK_SIZE-1;
+ while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
- memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
- cfg.erase(&cfg, block) => 0;
- cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- cfg.sync(&cfg) => 0;
+ memset(&buffer[off-3], BLOCK_SIZE, 3);
+ cfg->erase(cfg, block) => 0;
+ cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ cfg->sync(cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -593,12 +626,13 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # move dir corrupt source and dest
+[cases.test_move_dir_corrupt_source_dest]
in = "lfs.c"
-if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
+if = 'PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
@@ -609,44 +643,46 @@ code = '''
lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hi", "c/hi") => 0;
lfs_unmount(&lfs) => 0;
// corrupt the source
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
- uint8_t bbuffer[LFS_BLOCK_SIZE];
- cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- int off = LFS_BLOCK_SIZE-1;
- while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ uint8_t buffer[BLOCK_SIZE];
+ cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ int off = BLOCK_SIZE-1;
+ while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
- memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
- cfg.erase(&cfg, block) => 0;
- cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- cfg.sync(&cfg) => 0;
+ memset(&buffer[off-3], BLOCK_SIZE, 3);
+ cfg->erase(cfg, block) => 0;
+ cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ cfg->sync(cfg) => 0;
// corrupt the destination
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "c") => 0;
block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
- cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- off = LFS_BLOCK_SIZE-1;
- while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ off = BLOCK_SIZE-1;
+ while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
- memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
- cfg.erase(&cfg, block) => 0;
- cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- cfg.sync(&cfg) => 0;
+ memset(&buffer[off-3], BLOCK_SIZE, 3);
+ cfg->erase(cfg, block) => 0;
+ cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ cfg->sync(cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -693,12 +729,13 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # move dir after corrupt
+[cases.test_move_dir_after_corrupt]
in = "lfs.c"
-if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
+if = 'PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
@@ -709,49 +746,51 @@ code = '''
lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hi", "c/hi") => 0;
lfs_unmount(&lfs) => 0;
// corrupt the source
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
- uint8_t bbuffer[LFS_BLOCK_SIZE];
- cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- int off = LFS_BLOCK_SIZE-1;
- while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ uint8_t buffer[BLOCK_SIZE];
+ cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ int off = BLOCK_SIZE-1;
+ while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
- memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
- cfg.erase(&cfg, block) => 0;
- cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- cfg.sync(&cfg) => 0;
+ memset(&buffer[off-3], BLOCK_SIZE, 3);
+ cfg->erase(cfg, block) => 0;
+ cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ cfg->sync(cfg) => 0;
// corrupt the destination
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "c") => 0;
block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
- cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- off = LFS_BLOCK_SIZE-1;
- while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ off = BLOCK_SIZE-1;
+ while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
- memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
- cfg.erase(&cfg, block) => 0;
- cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- cfg.sync(&cfg) => 0;
+ memset(&buffer[off-3], BLOCK_SIZE, 3);
+ cfg->erase(cfg, block) => 0;
+ cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ cfg->sync(cfg) => 0;
// continue move
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hi", "c/hi") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -798,13 +837,14 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # simple reentrant move dir
+[cases.test_move_reentrant_dir]
reentrant = true
code = '''
- err = lfs_mount(&lfs, &cfg);
+ lfs_t lfs;
+ int err = lfs_mount(&lfs, cfg);
if (err) {
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
}
err = lfs_mkdir(&lfs, "a");
assert(!err || err == LFS_ERR_EXIST);
@@ -817,9 +857,10 @@ code = '''
lfs_unmount(&lfs) => 0;
while (true) {
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// there should never exist _2_ hi directories
int count = 0;
+ struct lfs_info info;
if (lfs_stat(&lfs, "a/hi", &info) == 0) {
assert(strcmp(info.name, "hi") == 0);
assert(info.type == LFS_TYPE_DIR);
@@ -843,7 +884,7 @@ code = '''
assert(count <= 1);
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
if (lfs_stat(&lfs, "a/hi", &info) == 0) {
lfs_rename(&lfs, "a/hi", "b/hi") => 0;
} else if (lfs_stat(&lfs, "b/hi", &info) == 0) {
@@ -868,7 +909,9 @@ code = '''
lfs_unmount(&lfs) => 0;
}
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -915,14 +958,16 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # move state stealing
+[cases.test_move_state_stealing]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
lfs_mkdir(&lfs, "d") => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "a/hello", LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
@@ -930,21 +975,22 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hello", "b/hello") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "b/hello", "c/hello") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "c/hello", "d/hello") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "a/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "b/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "c/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "d/hello", LFS_O_RDONLY) => 0;
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 5) => 5;
memcmp(buffer, "hola\n", 5) => 0;
lfs_file_read(&lfs, &file, buffer, 8) => 8;
@@ -954,12 +1000,13 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_remove(&lfs, "b") => 0;
lfs_remove(&lfs, "c") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "a", &info) => 0;
lfs_stat(&lfs, "b", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "c", &info) => LFS_ERR_NOENT;
@@ -979,12 +1026,16 @@ code = '''
'''
# Other specific corner cases
-[[case]] # create + delete in same commit with neighbors
+
+# create + delete in same commit with neighbors
+[cases.test_move_create_delete_same]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// littlefs keeps files sorted, so we know the order these will be in
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "/1.move_me",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
@@ -1024,6 +1075,8 @@ code = '''
lfs_file_close(&lfs, &files[2]) => 0;
// check that nothing was corrupted
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -1051,6 +1104,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_file_open(&lfs, &file, "/0.before", LFS_O_RDONLY) => 0;
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 7) => 7;
assert(strcmp((char*)buffer, "test.4") == 0);
lfs_file_close(&lfs, &file) => 0;
@@ -1124,13 +1178,15 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-# Other specific corner cases
-[[case]] # create + delete + delete in same commit with neighbors
+# create + delete + delete in same commit with neighbors
+[cases.test_move_create_delete_delete_same]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// littlefs keeps files sorted, so we know the order these will be in
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "/1.move_me",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
@@ -1175,6 +1231,8 @@ code = '''
lfs_file_close(&lfs, &files[2]) => 0;
// check that nothing was corrupted
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -1202,6 +1260,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_file_open(&lfs, &file, "/0.before", LFS_O_RDONLY) => 0;
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 7) => 7;
assert(strcmp((char*)buffer, "test.4") == 0);
lfs_file_close(&lfs, &file) => 0;
@@ -1281,14 +1340,17 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # create + delete in different dirs with neighbors
+# create + delete in different dirs with neighbors
+[cases.test_move_create_delete_different]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// littlefs keeps files sorted, so we know the order these will be in
lfs_mkdir(&lfs, "/dir.1") => 0;
lfs_mkdir(&lfs, "/dir.2") => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "/dir.1/1.move_me",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
@@ -1340,6 +1402,8 @@ code = '''
lfs_file_close(&lfs, &files[3]) => 0;
// check that nothing was corrupted
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -1397,6 +1461,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_file_open(&lfs, &file, "/dir.1/0.before", LFS_O_RDONLY) => 0;
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 7) => 7;
assert(strcmp((char*)buffer, "test.5") == 0);
lfs_file_close(&lfs, &file) => 0;
@@ -1518,17 +1583,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # move fix in relocation
+# move fix in relocation
+[cases.test_move_fix_relocation]
in = "lfs.c"
-define.RELOCATIONS = 'range(0x3+1)'
-define.LFS_ERASE_CYCLES = 0xffffffff
+defines.RELOCATIONS = 'range(4)'
+defines.ERASE_CYCLES = 0xffffffff
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "/parent") => 0;
lfs_mkdir(&lfs, "/parent/child") => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "/parent/1.move_me",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "move me",
@@ -1568,15 +1636,17 @@ code = '''
// force specific directories to relocate
if (RELOCATIONS & 0x1) {
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/parent");
- lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
- lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
+ lfs_emubd_setwear(cfg, dir.m.pair[0], 0xffffffff) => 0;
+ lfs_emubd_setwear(cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_dir_close(&lfs, &dir) => 0;
}
if (RELOCATIONS & 0x2) {
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/parent/child");
- lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
- lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
+ lfs_emubd_setwear(cfg, dir.m.pair[0], 0xffffffff) => 0;
+ lfs_emubd_setwear(cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_dir_close(&lfs, &dir) => 0;
}
@@ -1593,6 +1663,8 @@ code = '''
lfs_file_close(&lfs, &files[3]) => 0;
// check that nothing was corrupted
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "/parent") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -1637,6 +1709,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_file_open(&lfs, &file, "/parent/0.before", LFS_O_RDONLY) => 0;
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 7) => 7;
assert(strcmp((char*)buffer, "test.5") == 0);
lfs_file_close(&lfs, &file) => 0;
@@ -1655,18 +1728,21 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # move fix in relocation with predecessor
+# move fix in relocation with predecessor
+[cases.test_move_fix_relocation_predecessor]
in = "lfs.c"
-define.RELOCATIONS = 'range(0x7+1)'
-define.LFS_ERASE_CYCLES = 0xffffffff
+defines.RELOCATIONS = 'range(8)'
+defines.ERASE_CYCLES = 0xffffffff
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "/parent") => 0;
lfs_mkdir(&lfs, "/parent/child") => 0;
lfs_mkdir(&lfs, "/parent/sibling") => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "/parent/sibling/1.move_me",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "move me",
@@ -1706,21 +1782,24 @@ code = '''
// force specific directories to relocate
if (RELOCATIONS & 0x1) {
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/parent");
- lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
- lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
+ lfs_emubd_setwear(cfg, dir.m.pair[0], 0xffffffff) => 0;
+ lfs_emubd_setwear(cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_dir_close(&lfs, &dir) => 0;
}
if (RELOCATIONS & 0x2) {
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/parent/sibling");
- lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
- lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
+ lfs_emubd_setwear(cfg, dir.m.pair[0], 0xffffffff) => 0;
+ lfs_emubd_setwear(cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_dir_close(&lfs, &dir) => 0;
}
if (RELOCATIONS & 0x4) {
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/parent/child");
- lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
- lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
+ lfs_emubd_setwear(cfg, dir.m.pair[0], 0xffffffff) => 0;
+ lfs_emubd_setwear(cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_dir_close(&lfs, &dir) => 0;
}
@@ -1739,6 +1818,8 @@ code = '''
lfs_file_close(&lfs, &files[3]) => 0;
// check that nothing was corrupted
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "/parent") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -1796,6 +1877,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_file_open(&lfs, &file, "/parent/sibling/0.before", LFS_O_RDONLY) => 0;
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 7) => 7;
assert(strcmp((char*)buffer, "test.5") == 0);
lfs_file_close(&lfs, &file) => 0;
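
The corrupt-a-commit maneuver used in several of these tests works because littlefs commits are append-only within a metadata block: scanning backward past erased bytes lands on the tail of the most recent commit, and clobbering its last few bytes invalidates that commit's CRC. A minimal sketch of the same maneuver as a standalone helper, assuming a 0xff erase value and a block size small enough for a stack buffer; the helper name is hypothetical:

    #include <stdint.h>
    #include <string.h>
    #include "lfs.h"

    // hypothetical helper, not part of the test suite: invalidate the
    // most recent commit in a metadata block by scribbling over its
    // trailing bytes, which hold the commit's CRC
    static int corrupt_last_commit(const struct lfs_config *cfg,
            lfs_block_t block) {
        uint8_t buffer[4096]; // assumes block_size <= 4096
        int err = cfg->read(cfg, block, 0, buffer, cfg->block_size);
        if (err) {
            return err;
        }
        // scan backward past erased (0xff) bytes to the last commit
        lfs_soff_t off = cfg->block_size - 1;
        while (off >= 0 && buffer[off] == 0xff) {
            off -= 1;
        }
        // any wrong value breaks the CRC; assumes off >= 3
        memset(&buffer[off-3], 0x42, 3);
        err = cfg->erase(cfg, block);
        if (err) {
            return err;
        }
        err = cfg->prog(cfg, block, 0, buffer, cfg->block_size);
        if (err) {
            return err;
        }
        return cfg->sync(cfg);
    }
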
diff --git a/tests/test_orphans.toml b/tests/test_orphans.toml
index 241e273e..2c8405aa 100644
--- a/tests/test_orphans.toml
+++ b/tests/test_orphans.toml
@@ -1,9 +1,10 @@
-[[case]] # orphan test
+[cases.test_orphans_normal]
in = "lfs.c"
-if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
+if = 'PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "parent") => 0;
lfs_mkdir(&lfs, "parent/orphan") => 0;
lfs_mkdir(&lfs, "parent/child") => 0;
@@ -13,29 +14,31 @@ code = '''
// corrupt the child's most recent commit, this should be the update
// to the linked-list entry, which should orphan the orphan. Note this
// makes a lot of assumptions about the remove operation.
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "parent/child") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
- uint8_t bbuffer[LFS_BLOCK_SIZE];
- cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- int off = LFS_BLOCK_SIZE-1;
- while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ uint8_t buffer[BLOCK_SIZE];
+ cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ int off = BLOCK_SIZE-1;
+ while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
- memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
- cfg.erase(&cfg, block) => 0;
- cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
- cfg.sync(&cfg) => 0;
+ memset(&buffer[off-3], BLOCK_SIZE, 3);
+ cfg->erase(cfg, block) => 0;
+ cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
+ cfg->sync(cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "parent/child", &info) => 0;
lfs_fs_size(&lfs) => 8;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "parent/child", &info) => 0;
lfs_fs_size(&lfs) => 8;
@@ -48,7 +51,7 @@ code = '''
lfs_fs_size(&lfs) => 8;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "parent/child", &info) => 0;
lfs_stat(&lfs, "parent/otherchild", &info) => 0;
@@ -56,43 +59,192 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # reentrant testing for orphans, basically just spam mkdir/remove
+# test that we only run deorphan once per power-cycle
+[cases.test_orphans_no_orphans]
+in = 'lfs.c'
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ lfs_mount(&lfs, cfg) => 0;
+ // mark the filesystem as having orphans
+ lfs_fs_preporphans(&lfs, +1) => 0;
+ lfs_mdir_t mdir;
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ lfs_dir_commit(&lfs, &mdir, NULL, 0) => 0;
+
+ // we should have orphans at this point
+ assert(lfs_gstate_hasorphans(&lfs.gstate));
+ lfs_unmount(&lfs) => 0;
+
+ // mount
+ lfs_mount(&lfs, cfg) => 0;
+ // we should detect orphans
+ assert(lfs_gstate_hasorphans(&lfs.gstate));
+ // force consistency
+ lfs_fs_forceconsistency(&lfs) => 0;
+ // we should no longer have orphans
+ assert(!lfs_gstate_hasorphans(&lfs.gstate));
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[cases.test_orphans_one_orphan]
+in = 'lfs.c'
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ lfs_mount(&lfs, cfg) => 0;
+ // create an orphan
+ lfs_mdir_t orphan;
+ lfs_alloc_ack(&lfs);
+ lfs_dir_alloc(&lfs, &orphan) => 0;
+ lfs_dir_commit(&lfs, &orphan, NULL, 0) => 0;
+
+ // append our orphan and mark the filesystem as having orphans
+ lfs_fs_preporphans(&lfs, +1) => 0;
+ lfs_mdir_t mdir;
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ lfs_pair_tole32(orphan.pair);
+ lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), orphan.pair})) => 0;
+
+ // we should have orphans at this point
+ assert(lfs_gstate_hasorphans(&lfs.gstate));
+ lfs_unmount(&lfs) => 0;
+
+ // mount
+ lfs_mount(&lfs, cfg) => 0;
+ // we should detect orphans
+ assert(lfs_gstate_hasorphans(&lfs.gstate));
+ // force consistency
+ lfs_fs_forceconsistency(&lfs) => 0;
+ // we should no longer have orphans
+ assert(!lfs_gstate_hasorphans(&lfs.gstate));
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+# test that we can persist gstate with lfs_fs_mkconsistent
+[cases.test_orphans_mkconsistent_no_orphans]
+in = 'lfs.c'
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ lfs_mount(&lfs, cfg) => 0;
+ // mark the filesystem as having orphans
+ lfs_fs_preporphans(&lfs, +1) => 0;
+ lfs_mdir_t mdir;
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ lfs_dir_commit(&lfs, &mdir, NULL, 0) => 0;
+
+ // we should have orphans at this point
+ assert(lfs_gstate_hasorphans(&lfs.gstate));
+ lfs_unmount(&lfs) => 0;
+
+ // mount
+ lfs_mount(&lfs, cfg) => 0;
+ // we should detect orphans
+ assert(lfs_gstate_hasorphans(&lfs.gstate));
+ // force consistency
+ lfs_fs_mkconsistent(&lfs) => 0;
+ // we should no longer have orphans
+ assert(!lfs_gstate_hasorphans(&lfs.gstate));
+
+ // remount
+ lfs_unmount(&lfs) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ // we should still have no orphans
+ assert(!lfs_gstate_hasorphans(&lfs.gstate));
+ lfs_unmount(&lfs) => 0;
+'''
+
+[cases.test_orphans_mkconsistent_one_orphan]
+in = 'lfs.c'
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ lfs_mount(&lfs, cfg) => 0;
+ // create an orphan
+ lfs_mdir_t orphan;
+ lfs_alloc_ack(&lfs);
+ lfs_dir_alloc(&lfs, &orphan) => 0;
+ lfs_dir_commit(&lfs, &orphan, NULL, 0) => 0;
+
+ // append our orphan and mark the filesystem as having orphans
+ lfs_fs_preporphans(&lfs, +1) => 0;
+ lfs_mdir_t mdir;
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ lfs_pair_tole32(orphan.pair);
+ lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), orphan.pair})) => 0;
+
+ // we should have orphans at this point
+ assert(lfs_gstate_hasorphans(&lfs.gstate));
+ lfs_unmount(&lfs) => 0;
+
+ // mount
+ lfs_mount(&lfs, cfg) => 0;
+ // we should detect orphans
+ assert(lfs_gstate_hasorphans(&lfs.gstate));
+ // force consistency
+ lfs_fs_mkconsistent(&lfs) => 0;
+ // we should no longer have orphans
+ assert(!lfs_gstate_hasorphans(&lfs.gstate));
+
+ // remount
+ lfs_unmount(&lfs) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ // we should still have no orphans
+ assert(!lfs_gstate_hasorphans(&lfs.gstate));
+ lfs_unmount(&lfs) => 0;
+'''
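
From the application's side, everything these mkconsistent cases set up boils down to a single public call. A usage sketch, assuming only the lfs_fs_mkconsistent entry point the tests above exercise; the wrapper name is hypothetical:

    #include "lfs.h"

    // hypothetical wrapper: pay the deorphan/move-fix cost at a time of
    // the caller's choosing and persist the result, so that later
    // mounts, even read-only ones, see an already-consistent filesystem
    static int make_consistent(lfs_t *lfs, const struct lfs_config *cfg) {
        int err = lfs_mount(lfs, cfg);
        if (err) {
            return err;
        }
        err = lfs_fs_mkconsistent(lfs);
        if (err) {
            lfs_unmount(lfs);
            return err;
        }
        return lfs_unmount(lfs);
    }
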
+
+# reentrant testing for orphans, basically just spam mkdir/remove
+[cases.test_orphans_reentrant]
reentrant = true
# TODO fix this case, caused by non-DAG trees
-if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
-define = [
+if = '!(DEPTH == 3 && CACHE_SIZE != 64)'
+defines = [
{FILES=6, DEPTH=1, CYCLES=20},
{FILES=26, DEPTH=1, CYCLES=20},
{FILES=3, DEPTH=3, CYCLES=20},
]
code = '''
- err = lfs_mount(&lfs, &cfg);
+ lfs_t lfs;
+ int err = lfs_mount(&lfs, cfg);
if (err) {
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
}
- srand(1);
+ uint32_t prng = 1;
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
- for (int i = 0; i < CYCLES; i++) {
+ for (unsigned i = 0; i < CYCLES; i++) {
// create random path
char full_path[256];
- for (int d = 0; d < DEPTH; d++) {
- sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]);
+ for (unsigned d = 0; d < DEPTH; d++) {
+ sprintf(&full_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
}
// if it does not exist, we create it, else we destroy it
+ struct lfs_info info;
int res = lfs_stat(&lfs, full_path, &info);
if (res == LFS_ERR_NOENT) {
// create each directory in turn, ignore if dir already exists
- for (int d = 0; d < DEPTH; d++) {
+ for (unsigned d = 0; d < DEPTH; d++) {
+ char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_mkdir(&lfs, path);
assert(!err || err == LFS_ERR_EXIST);
}
- for (int d = 0; d < DEPTH; d++) {
+ for (unsigned d = 0; d < DEPTH; d++) {
+ char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
lfs_stat(&lfs, path, &info) => 0;
@@ -106,6 +258,7 @@ code = '''
// try to delete path in reverse order, ignore if dir is not empty
for (int d = DEPTH-1; d >= 0; d--) {
+ char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_remove(&lfs, path);
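
One detail of the reentrant rewrite above deserves a note: srand/rand is replaced by an explicit prng state threaded through TEST_PRNG, so the pseudo-random path sequence is reproduced exactly on every power-loss replay instead of depending on the libc generator. TEST_PRNG itself is provided by the test runner; purely as an illustration of the idea, a self-contained equivalent could look like:

    #include <stdint.h>

    // illustrative stand-in for TEST_PRNG, not the runner's actual
    // implementation: xorshift32, fully determined by the caller-owned
    // state (which must start nonzero, e.g. 1 as in the tests)
    static inline uint32_t test_prng(uint32_t *state) {
        uint32_t x = *state;
        x ^= x << 13;
        x ^= x >> 17;
        x ^= x << 5;
        *state = x;
        return x;
    }
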
diff --git a/tests/test_paths.toml b/tests/test_paths.toml
index a7474c0b..97a519ea 100644
--- a/tests/test_paths.toml
+++ b/tests/test_paths.toml
@@ -1,13 +1,16 @@
-[[case]] # simple path test
+# simple path test
+[cases.test_paths_normal]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
lfs_mkdir(&lfs, "tea/coldtea") => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "/tea/hottea", &info) => 0;
@@ -21,15 +24,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # redundant slashes
+# redundant slashes
+[cases.test_paths_redundant_slashes]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
lfs_mkdir(&lfs, "tea/coldtea") => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "/tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "//tea//hottea", &info) => 0;
@@ -45,15 +51,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # dot path test
+# dot path test
+[cases.test_paths_dot]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
lfs_mkdir(&lfs, "tea/coldtea") => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "./tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "/./tea/hottea", &info) => 0;
@@ -71,10 +80,12 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # dot dot path test
+# dot dot path test
+[cases.test_paths_dot_dot]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
@@ -84,6 +95,7 @@ code = '''
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "coffee/../tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "tea/coldtea/../hottea", &info) => 0;
@@ -101,15 +113,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # trailing dot path test
+# trailing dot path test
+[cases.test_paths_trailing_dot]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
lfs_mkdir(&lfs, "tea/coldtea") => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "tea/hottea/", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "tea/hottea/.", &info) => 0;
@@ -123,11 +138,14 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # leading dot path test
+# leading dot path test
+[cases.test_paths_leading_dot]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, ".milk") => 0;
+ struct lfs_info info;
lfs_stat(&lfs, ".milk", &info) => 0;
strcmp(info.name, ".milk") => 0;
lfs_stat(&lfs, "tea/.././.milk", &info) => 0;
@@ -135,10 +153,12 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # root dot dot path test
+# root dot dot path test
+[cases.test_paths_root_dot_dot]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
@@ -148,6 +168,7 @@ code = '''
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "coffee/../../../../../../tea/hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
@@ -159,10 +180,13 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # invalid path tests
+# invalid path tests
+[cases.test_paths_invalid]
code = '''
- lfs_format(&lfs, &cfg);
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg);
+ lfs_mount(&lfs, cfg) => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "dirt", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "dirt/ground", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "dirt/ground/earth", &info) => LFS_ERR_NOENT;
@@ -172,6 +196,7 @@ code = '''
lfs_remove(&lfs, "dirt/ground/earth") => LFS_ERR_NOENT;
lfs_mkdir(&lfs, "dirt/ground") => LFS_ERR_NOENT;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "dirt/ground", LFS_O_WRONLY | LFS_O_CREAT)
=> LFS_ERR_NOENT;
lfs_mkdir(&lfs, "dirt/ground/earth") => LFS_ERR_NOENT;
@@ -180,15 +205,19 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # root operations
+# root operations
+[cases.test_paths_root]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "/", &info) => 0;
assert(strcmp(info.name, "/") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_mkdir(&lfs, "/") => LFS_ERR_EXIST;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "/", LFS_O_WRONLY | LFS_O_CREAT)
=> LFS_ERR_ISDIR;
@@ -196,10 +225,13 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # root representations
+# root representations
+[cases.test_paths_root_reprs]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "/", &info) => 0;
assert(strcmp(info.name, "/") == 0);
assert(info.type == LFS_TYPE_DIR);
@@ -221,10 +253,13 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # superblock conflict test
+# superblock conflict test
+[cases.test_paths_superblock_conflict]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "littlefs", &info) => LFS_ERR_NOENT;
lfs_remove(&lfs, "littlefs") => LFS_ERR_NOENT;
@@ -237,18 +272,22 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # max path test
+# max path test
+[cases.test_paths_max]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "coffee") => 0;
lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
+ char path[1024];
memset(path, 'w', LFS_NAME_MAX+1);
path[LFS_NAME_MAX+1] = '\0';
lfs_mkdir(&lfs, path) => LFS_ERR_NAMETOOLONG;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT)
=> LFS_ERR_NAMETOOLONG;
@@ -261,19 +300,23 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # really big path test
+# really big path test
+[cases.test_paths_really_big]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "coffee") => 0;
lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
+ char path[1024];
memset(path, 'w', LFS_NAME_MAX);
path[LFS_NAME_MAX] = '\0';
lfs_mkdir(&lfs, path) => 0;
lfs_remove(&lfs, path) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
diff --git a/tests/test_powerloss.toml b/tests/test_powerloss.toml
new file mode 100644
index 00000000..06f8661d
--- /dev/null
+++ b/tests/test_powerloss.toml
@@ -0,0 +1,182 @@
+# There are already a number of tests that test general operations under
+# power-loss (see the reentrant attribute). These tests are for explicitly
+# testing specific corner cases.
+
+# only a revision count
+[cases.test_powerloss_only_rev]
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_mkdir(&lfs, "notebook") => 0;
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, "notebook/paper",
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
+ char buffer[256];
+ strcpy(buffer, "hello");
+ lfs_size_t size = strlen("hello");
+ for (int i = 0; i < 5; i++) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ lfs_file_sync(&lfs, &file) => 0;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ char rbuffer[256];
+ lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
+ for (int i = 0; i < 5; i++) {
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ assert(memcmp(rbuffer, buffer, size) == 0);
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // get pair/rev count
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
+ lfs_dir_open(&lfs, &dir, "notebook") => 0;
+ lfs_block_t pair[2] = {dir.m.pair[0], dir.m.pair[1]};
+ uint32_t rev = dir.m.rev;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // write just the revision count
+ uint8_t bbuffer[BLOCK_SIZE];
+ cfg->read(cfg, pair[1], 0, bbuffer, BLOCK_SIZE) => 0;
+
+ memcpy(bbuffer, &(uint32_t){lfs_tole32(rev+1)}, sizeof(uint32_t));
+
+ cfg->erase(cfg, pair[1]) => 0;
+ cfg->prog(cfg, pair[1], 0, bbuffer, BLOCK_SIZE) => 0;
+
+ lfs_mount(&lfs, cfg) => 0;
+
+ // can read?
+ lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
+ for (int i = 0; i < 5; i++) {
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ assert(memcmp(rbuffer, buffer, size) == 0);
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ // can write?
+ lfs_file_open(&lfs, &file, "notebook/paper",
+ LFS_O_WRONLY | LFS_O_APPEND) => 0;
+ strcpy(buffer, "goodbye");
+ size = strlen("goodbye");
+ for (int i = 0; i < 5; i++) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ lfs_file_sync(&lfs, &file) => 0;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
+ strcpy(buffer, "hello");
+ size = strlen("hello");
+ for (int i = 0; i < 5; i++) {
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ assert(memcmp(rbuffer, buffer, size) == 0);
+ }
+ strcpy(buffer, "goodbye");
+ size = strlen("goodbye");
+ for (int i = 0; i < 5; i++) {
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ assert(memcmp(rbuffer, buffer, size) == 0);
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_unmount(&lfs) => 0;
+'''
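
The scenario test_powerloss_only_rev imitates is a power loss immediately after the first prog of a compaction: the block carries a newer revision count but no valid commit, since bumping the revision invalidates the CRCs of whatever commits were there before. The inline read/patch/erase/prog dance, restated as a standalone sketch with a hypothetical name and a stack buffer sized by assumption:

    #include <stdint.h>
    #include <string.h>
    #include "lfs.h"

    // hypothetical helper: rewrite a metadata block with only its
    // little-endian revision count (first four bytes) bumped, leaving
    // any previous commits in place but with now-stale CRCs
    static int bump_rev_only(const struct lfs_config *cfg,
            lfs_block_t block, uint32_t rev) {
        uint8_t buffer[4096]; // assumes block_size <= 4096
        int err = cfg->read(cfg, block, 0, buffer, cfg->block_size);
        if (err) {
            return err;
        }
        uint32_t rev_le = lfs_tole32(rev + 1);
        memcpy(buffer, &rev_le, sizeof(uint32_t));
        err = cfg->erase(cfg, block);
        if (err) {
            return err;
        }
        return cfg->prog(cfg, block, 0, buffer, cfg->block_size);
    }
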
+
+# partial prog, bytes may not be programmed in order!
+[cases.test_powerloss_partial_prog]
+if = "PROG_SIZE < BLOCK_SIZE"
+defines.BYTE_OFF = ["0", "PROG_SIZE-1", "PROG_SIZE/2"]
+defines.BYTE_VALUE = [0x33, 0xcc]
+in = "lfs.c"
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_mkdir(&lfs, "notebook") => 0;
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, "notebook/paper",
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
+ char buffer[256];
+ strcpy(buffer, "hello");
+ lfs_size_t size = strlen("hello");
+ for (int i = 0; i < 5; i++) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ lfs_file_sync(&lfs, &file) => 0;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ char rbuffer[256];
+ lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
+ for (int i = 0; i < 5; i++) {
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ assert(memcmp(rbuffer, buffer, size) == 0);
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // imitate a partial prog; the value should not matter, but if littlefs
+ // doesn't notice the partial prog, emubd will assert
+
+ // get offset to next prog
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
+ lfs_dir_open(&lfs, &dir, "notebook") => 0;
+ lfs_block_t block = dir.m.pair[0];
+ lfs_off_t off = dir.m.off;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // tweak byte
+ uint8_t bbuffer[BLOCK_SIZE];
+ cfg->read(cfg, block, 0, bbuffer, BLOCK_SIZE) => 0;
+
+ bbuffer[off + BYTE_OFF] = BYTE_VALUE;
+
+ cfg->erase(cfg, block) => 0;
+ cfg->prog(cfg, block, 0, bbuffer, BLOCK_SIZE) => 0;
+
+ lfs_mount(&lfs, cfg) => 0;
+
+ // can read?
+ lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
+ for (int i = 0; i < 5; i++) {
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ assert(memcmp(rbuffer, buffer, size) == 0);
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ // can write?
+ lfs_file_open(&lfs, &file, "notebook/paper",
+ LFS_O_WRONLY | LFS_O_APPEND) => 0;
+ strcpy(buffer, "goodbye");
+ size = strlen("goodbye");
+ for (int i = 0; i < 5; i++) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ lfs_file_sync(&lfs, &file) => 0;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
+ strcpy(buffer, "hello");
+ size = strlen("hello");
+ for (int i = 0; i < 5; i++) {
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ assert(memcmp(rbuffer, buffer, size) == 0);
+ }
+ strcpy(buffer, "goodbye");
+ size = strlen("goodbye");
+ for (int i = 0; i < 5; i++) {
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ assert(memcmp(rbuffer, buffer, size) == 0);
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_unmount(&lfs) => 0;
+'''
diff --git a/tests/test_relocations.toml b/tests/test_relocations.toml
index 71b10475..d20cb8cf 100644
--- a/tests/test_relocations.toml
+++ b/tests/test_relocations.toml
@@ -1,15 +1,18 @@
# specific corner cases worth explicitly testing for
-[[case]] # dangling split dir test
-define.ITERATIONS = 20
-define.COUNT = 10
-define.LFS_BLOCK_CYCLES = [8, 1]
+[cases.test_relocations_dangling_split_dir]
+defines.ITERATIONS = 20
+defines.COUNT = 10
+defines.BLOCK_CYCLES = [8, 1]
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
// fill up filesystem so only ~16 blocks are left
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0;
+ uint8_t buffer[512];
memset(buffer, 0, 512);
- while (LFS_BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
+ while (BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
lfs_file_write(&lfs, &file, buffer, 512) => 512;
}
lfs_file_close(&lfs, &file) => 0;
@@ -17,18 +20,22 @@ code = '''
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
- for (int j = 0; j < ITERATIONS; j++) {
- for (int i = 0; i < COUNT; i++) {
+ lfs_mount(&lfs, cfg) => 0;
+ for (unsigned j = 0; j < ITERATIONS; j++) {
+ for (unsigned i = 0; i < COUNT; i++) {
+ char path[1024];
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_close(&lfs, &file) => 0;
}
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "child") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
- for (int i = 0; i < COUNT; i++) {
+ for (unsigned i = 0; i < COUNT; i++) {
+ char path[1024];
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
@@ -36,46 +43,54 @@ code = '''
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
- if (j == ITERATIONS-1) {
+ if (j == (unsigned)ITERATIONS-1) {
break;
}
- for (int i = 0; i < COUNT; i++) {
+ for (unsigned i = 0; i < COUNT; i++) {
+ char path[1024];
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_remove(&lfs, path) => 0;
}
}
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "child") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
- for (int i = 0; i < COUNT; i++) {
+ for (unsigned i = 0; i < COUNT; i++) {
+ char path[1024];
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
- for (int i = 0; i < COUNT; i++) {
+ for (unsigned i = 0; i < COUNT; i++) {
+ char path[1024];
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_remove(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
'''
-[[case]] # outdated head test
-define.ITERATIONS = 20
-define.COUNT = 10
-define.LFS_BLOCK_CYCLES = [8, 1]
+[cases.test_relocations_outdated_head]
+defines.ITERATIONS = 20
+defines.COUNT = 10
+defines.BLOCK_CYCLES = [8, 1]
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
// fill up filesystem so only ~16 blocks are left
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0;
+ uint8_t buffer[512];
memset(buffer, 0, 512);
- while (LFS_BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
+ while (BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
lfs_file_write(&lfs, &file, buffer, 512) => 512;
}
lfs_file_close(&lfs, &file) => 0;
@@ -83,18 +98,22 @@ code = '''
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
- for (int j = 0; j < ITERATIONS; j++) {
- for (int i = 0; i < COUNT; i++) {
+ lfs_mount(&lfs, cfg) => 0;
+ for (unsigned j = 0; j < ITERATIONS; j++) {
+ for (unsigned i = 0; i < COUNT; i++) {
+ char path[1024];
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_close(&lfs, &file) => 0;
}
+ lfs_dir_t dir;
+ struct lfs_info info;
lfs_dir_open(&lfs, &dir, "child") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
- for (int i = 0; i < COUNT; i++) {
+ for (unsigned i = 0; i < COUNT; i++) {
+ char path[1024];
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
@@ -110,7 +129,8 @@ code = '''
lfs_dir_rewind(&lfs, &dir) => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
- for (int i = 0; i < COUNT; i++) {
+ for (unsigned i = 0; i < COUNT; i++) {
+ char path[1024];
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
@@ -126,7 +146,8 @@ code = '''
lfs_dir_rewind(&lfs, &dir) => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
- for (int i = 0; i < COUNT; i++) {
+ for (unsigned i = 0; i < COUNT; i++) {
+ char path[1024];
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
@@ -135,7 +156,8 @@ code = '''
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
- for (int i = 0; i < COUNT; i++) {
+ for (unsigned i = 0; i < COUNT; i++) {
+ char path[1024];
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_remove(&lfs, path) => 0;
}
@@ -143,45 +165,51 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # reentrant testing for relocations, this is the same as the
- # orphan testing, except here we also set block_cycles so that
- # almost every tree operation needs a relocation
+# reentrant testing for relocations, this is the same as the
+# orphan testing, except here we also set block_cycles so that
+# almost every tree operation needs a relocation
+[cases.test_relocations_reentrant]
reentrant = true
# TODO fix this case, caused by non-DAG trees
-if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
-define = [
- {FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
- {FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
- {FILES=3, DEPTH=3, CYCLES=20, LFS_BLOCK_CYCLES=1},
+# NOTE the second condition is required, otherwise these geometries run
+# out of blocks (each dir costs a 2-block metadata pair)
+if = '!(DEPTH == 3 && CACHE_SIZE != 64) && 2*FILES < BLOCK_COUNT'
+defines = [
+ {FILES=6, DEPTH=1, CYCLES=20, BLOCK_CYCLES=1},
+ {FILES=26, DEPTH=1, CYCLES=20, BLOCK_CYCLES=1},
+ {FILES=3, DEPTH=3, CYCLES=20, BLOCK_CYCLES=1},
]
code = '''
- err = lfs_mount(&lfs, &cfg);
+ lfs_t lfs;
+ int err = lfs_mount(&lfs, cfg);
if (err) {
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
}
- srand(1);
+ uint32_t prng = 1;
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
- for (int i = 0; i < CYCLES; i++) {
+ for (unsigned i = 0; i < CYCLES; i++) {
// create random path
char full_path[256];
- for (int d = 0; d < DEPTH; d++) {
- sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]);
+ for (unsigned d = 0; d < DEPTH; d++) {
+ sprintf(&full_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
}
// if it does not exist, we create it, else we destroy it
+ struct lfs_info info;
int res = lfs_stat(&lfs, full_path, &info);
if (res == LFS_ERR_NOENT) {
// create each directory in turn, ignore if dir already exists
- for (int d = 0; d < DEPTH; d++) {
+ for (unsigned d = 0; d < DEPTH; d++) {
+ char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_mkdir(&lfs, path);
assert(!err || err == LFS_ERR_EXIST);
}
- for (int d = 0; d < DEPTH; d++) {
+ for (unsigned d = 0; d < DEPTH; d++) {
+ char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
lfs_stat(&lfs, path, &info) => 0;
@@ -194,7 +222,8 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
// try to delete path in reverse order, ignore if dir is not empty
- for (int d = DEPTH-1; d >= 0; d--) {
+ for (unsigned d = DEPTH-1; d+1 > 0; d--) {
+ char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_remove(&lfs, path);
@@ -207,44 +236,50 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # reentrant testing for relocations, but now with random renames!
+# reentrant testing for relocations, but now with random renames!
+[cases.test_relocations_reentrant_renames]
reentrant = true
# TODO fix this case, caused by non-DAG trees
-if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
-define = [
- {FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
- {FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
- {FILES=3, DEPTH=3, CYCLES=20, LFS_BLOCK_CYCLES=1},
+# NOTE the second condition is required, otherwise these geometries run
+# out of blocks (each dir costs a 2-block metadata pair)
+if = '!(DEPTH == 3 && CACHE_SIZE != 64) && 2*FILES < BLOCK_COUNT'
+defines = [
+ {FILES=6, DEPTH=1, CYCLES=20, BLOCK_CYCLES=1},
+ {FILES=26, DEPTH=1, CYCLES=20, BLOCK_CYCLES=1},
+ {FILES=3, DEPTH=3, CYCLES=20, BLOCK_CYCLES=1},
]
code = '''
- err = lfs_mount(&lfs, &cfg);
+ lfs_t lfs;
+ int err = lfs_mount(&lfs, cfg);
if (err) {
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
}
- srand(1);
+ uint32_t prng = 1;
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
- for (int i = 0; i < CYCLES; i++) {
+ for (unsigned i = 0; i < CYCLES; i++) {
// create random path
char full_path[256];
- for (int d = 0; d < DEPTH; d++) {
- sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]);
+ for (unsigned d = 0; d < DEPTH; d++) {
+ sprintf(&full_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
}
// if it does not exist, we create it, else we destroy it
+ struct lfs_info info;
int res = lfs_stat(&lfs, full_path, &info);
assert(!res || res == LFS_ERR_NOENT);
if (res == LFS_ERR_NOENT) {
// create each directory in turn, ignore if dir already exists
- for (int d = 0; d < DEPTH; d++) {
+ for (unsigned d = 0; d < DEPTH; d++) {
+ char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_mkdir(&lfs, path);
assert(!err || err == LFS_ERR_EXIST);
}
- for (int d = 0; d < DEPTH; d++) {
+ for (unsigned d = 0; d < DEPTH; d++) {
+ char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
lfs_stat(&lfs, path, &info) => 0;
@@ -257,8 +292,8 @@ code = '''
// create new random path
char new_path[256];
- for (int d = 0; d < DEPTH; d++) {
- sprintf(&new_path[2*d], "/%c", alpha[rand() % FILES]);
+ for (unsigned d = 0; d < DEPTH; d++) {
+ sprintf(&new_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
}
// if new path does not exist, rename, otherwise destroy
@@ -266,7 +301,8 @@ code = '''
assert(!res || res == LFS_ERR_NOENT);
if (res == LFS_ERR_NOENT) {
// stop once some dir is renamed
- for (int d = 0; d < DEPTH; d++) {
+ for (unsigned d = 0; d < DEPTH; d++) {
+ char path[1024];
strcpy(&path[2*d], &full_path[2*d]);
path[2*d+2] = '\0';
strcpy(&path[128+2*d], &new_path[2*d]);
@@ -278,7 +314,8 @@ code = '''
}
}
- for (int d = 0; d < DEPTH; d++) {
+ for (unsigned d = 0; d < DEPTH; d++) {
+ char path[1024];
strcpy(path, new_path);
path[2*d+2] = '\0';
lfs_stat(&lfs, path, &info) => 0;
@@ -290,7 +327,8 @@ code = '''
} else {
// try to delete path in reverse order,
// ignore if dir is not empty
- for (int d = DEPTH-1; d >= 0; d--) {
+ for (unsigned d = DEPTH-1; d+1 > 0; d--) {
+ char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_remove(&lfs, path);
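
All of the relocation cases above lean on one knob: BLOCK_CYCLES, which maps to the block_cycles field of lfs_config and bounds how many erase cycles a metadata pair may see before littlefs proactively relocates it. Pinning it to 1 makes nearly every compaction relocate. A minimal config sketch with that setting; the geometry numbers are arbitrary illustrations, not values taken from the tests, and the block-device hooks still need wiring:

    #include "lfs.h"

    // illustrative configuration: block_cycles=1 forces relocation on
    // (almost) every metadata compaction, which is what these tests use
    // to hammer the relocation paths; .read/.prog/.erase/.sync are left
    // NULL here and must point at a real block device
    static const struct lfs_config cfg_reloc = {
        .read_size = 16,
        .prog_size = 16,
        .block_size = 4096,
        .block_count = 128,
        .cache_size = 64,
        .lookahead_size = 16,
        .block_cycles = 1, // relocate as aggressively as possible
    };
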
diff --git a/tests/test_seek.toml b/tests/test_seek.toml
index 79d7728a..b976057b 100644
--- a/tests/test_seek.toml
+++ b/tests/test_seek.toml
@@ -1,6 +1,7 @@
-[[case]] # simple file seek
-define = [
+# simple file seek
+[cases.test_seek_read]
+defines = [
{COUNT=132, SKIP=4},
{COUNT=132, SKIP=128},
{COUNT=200, SKIP=10},
@@ -9,11 +10,14 @@ define = [
{COUNT=4, SKIP=2},
]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "kitty",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
- size = strlen("kittycatcat");
+ size_t size = strlen("kittycatcat");
+ uint8_t buffer[1024];
memcpy(buffer, "kittycatcat", size);
for (int j = 0; j < COUNT; j++) {
lfs_file_write(&lfs, &file, buffer, size);
@@ -21,7 +25,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDONLY) => 0;
lfs_soff_t pos = -1;
@@ -68,8 +72,9 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # simple file seek and write
-define = [
+# simple file seek and write
+[cases.test_seek_write]
+defines = [
{COUNT=132, SKIP=4},
{COUNT=132, SKIP=128},
{COUNT=200, SKIP=10},
@@ -78,11 +83,14 @@ define = [
{COUNT=4, SKIP=2},
]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "kitty",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
- size = strlen("kittycatcat");
+ size_t size = strlen("kittycatcat");
+ uint8_t buffer[1024];
memcpy(buffer, "kittycatcat", size);
for (int j = 0; j < COUNT; j++) {
lfs_file_write(&lfs, &file, buffer, size);
@@ -90,7 +98,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
lfs_soff_t pos = -1;
@@ -129,15 +137,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # boundary seek and writes
-define.COUNT = 132
-define.OFFSETS = '"{512, 1020, 513, 1021, 511, 1019, 1441}"'
+# boundary seek and writes
+[cases.test_seek_boundary_write]
+defines.COUNT = 132
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "kitty",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
- size = strlen("kittycatcat");
+ size_t size = strlen("kittycatcat");
+ uint8_t buffer[1024];
memcpy(buffer, "kittycatcat", size);
for (int j = 0; j < COUNT; j++) {
lfs_file_write(&lfs, &file, buffer, size);
@@ -145,11 +156,11 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
size = strlen("hedgehoghog");
- const lfs_soff_t offsets[] = OFFSETS;
+ const lfs_soff_t offsets[] = {512, 1020, 513, 1021, 511, 1019, 1441};
for (unsigned i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
lfs_soff_t off = offsets[i];
@@ -183,8 +194,9 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # out of bounds seek
-define = [
+# out of bounds seek
+[cases.test_seek_out_of_bounds]
+defines = [
{COUNT=132, SKIP=4},
{COUNT=132, SKIP=128},
{COUNT=200, SKIP=10},
@@ -193,18 +205,21 @@ define = [
{COUNT=4, SKIP=3},
]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "kitty",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
- size = strlen("kittycatcat");
+ size_t size = strlen("kittycatcat");
+ uint8_t buffer[1024];
memcpy(buffer, "kittycatcat", size);
for (int j = 0; j < COUNT; j++) {
lfs_file_write(&lfs, &file, buffer, size);
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
size = strlen("kittycatcat");
@@ -238,16 +253,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # inline write and seek
-define.SIZE = [2, 4, 128, 132]
+# inline write and seek
+[cases.test_seek_inline_write]
+defines.SIZE = [2, 4, 128, 132]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "tinykitty",
LFS_O_RDWR | LFS_O_CREAT) => 0;
int j = 0;
int k = 0;
+ uint8_t buffer[1024];
memcpy(buffer, "abcdefghijklmnopqrstuvwxyz", 26);
for (unsigned i = 0; i < SIZE; i++) {
lfs_file_write(&lfs, &file, &buffer[j++ % 26], 1) => 1;
@@ -305,16 +324,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # file seek and write with power-loss
+# file seek and write with power-loss
+[cases.test_seek_reentrant_write]
# must be power-of-2 for quadratic probing to be exhaustive
-define.COUNT = [4, 64, 128]
+defines.COUNT = [4, 64, 128]
reentrant = true
code = '''
- err = lfs_mount(&lfs, &cfg);
+ lfs_t lfs;
+ int err = lfs_mount(&lfs, cfg);
if (err) {
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
}
+ lfs_file_t file;
+ uint8_t buffer[1024];
err = lfs_file_open(&lfs, &file, "kitty", LFS_O_RDONLY);
assert(!err || err == LFS_ERR_NOENT);
if (!err) {
@@ -334,14 +357,14 @@ code = '''
if (lfs_file_size(&lfs, &file) == 0) {
for (int j = 0; j < COUNT; j++) {
strcpy((char*)buffer, "kittycatcat");
- size = strlen((char*)buffer);
+ size_t size = strlen((char*)buffer);
lfs_file_write(&lfs, &file, buffer, size) => size;
}
}
lfs_file_close(&lfs, &file) => 0;
strcpy((char*)buffer, "doggodogdog");
- size = strlen((char*)buffer);
+ size_t size = strlen((char*)buffer);
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => COUNT*size;
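
On the power-of-2 comment attached to test_seek_reentrant_write: quadratic probing in its usual triangular-number form lands on j(j+1)/2 mod COUNT after j steps, and that sequence is a permutation of 0..COUNT-1 exactly when COUNT is a power of two, which is why COUNT is restricted to 4, 64, and 128. A self-contained check of the underlying fact (illustrative only, not part of the suite):

    #include <stdbool.h>
    #include <stdio.h>

    // triangular-number (quadratic) probing: after j steps of sizes
    // 0,1,2,... the position is j(j+1)/2 mod n; this visits every slot
    // of an n-slot table when n is a power of two, and generally
    // leaves slots unvisited otherwise
    static bool probes_exhaustively(unsigned n) {
        bool seen[4096] = {false}; // assumes n <= 4096
        unsigned pos = 0;
        for (unsigned j = 0; j < n; j++) {
            pos = (pos + j) % n; // pos is now j(j+1)/2 mod n
            seen[pos] = true;
        }
        for (unsigned i = 0; i < n; i++) {
            if (!seen[i]) {
                return false;
            }
        }
        return true;
    }

    int main(void) {
        printf("%d\n", probes_exhaustively(128)); // power of two: 1
        printf("%d\n", probes_exhaustively(100)); // otherwise: 0
        return 0;
    }
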
diff --git a/tests/test_superblocks.toml b/tests/test_superblocks.toml
index 407c8454..689bbcd2 100644
--- a/tests/test_superblocks.toml
+++ b/tests/test_superblocks.toml
@@ -1,41 +1,53 @@
-[[case]] # simple formatting test
+# simple formatting test
+[cases.test_superblocks_format]
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
'''
-[[case]] # mount/unmount
+# mount/unmount
+[cases.test_superblocks_mount]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_unmount(&lfs) => 0;
'''
-[[case]] # reentrant format
+# reentrant format
+[cases.test_superblocks_reentrant_format]
reentrant = true
code = '''
- err = lfs_mount(&lfs, &cfg);
+ lfs_t lfs;
+ int err = lfs_mount(&lfs, cfg);
if (err) {
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
}
lfs_unmount(&lfs) => 0;
'''
-[[case]] # invalid mount
+# invalid mount
+[cases.test_superblocks_invalid_mount]
code = '''
- lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
+ lfs_t lfs;
+ lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
'''
-[[case]] # expanding superblock
-define.LFS_BLOCK_CYCLES = [32, 33, 1]
-define.N = [10, 100, 1000]
+# expanding superblock
+[cases.test_superblocks_expand]
+defines.BLOCK_CYCLES = [32, 33, 1]
+defines.N = [10, 100, 1000]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "dummy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "dummy", &info) => 0;
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
@@ -44,25 +56,30 @@ code = '''
lfs_unmount(&lfs) => 0;
// one last check after power-cycle
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "dummy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "dummy", &info) => 0;
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
lfs_unmount(&lfs) => 0;
'''
-[[case]] # expanding superblock with power cycle
-define.LFS_BLOCK_CYCLES = [32, 33, 1]
-define.N = [10, 100, 1000]
+# expanding superblock with power cycle
+[cases.test_superblocks_expand_power_cycle]
+defines.BLOCK_CYCLES = [32, 33, 1]
+defines.N = [10, 100, 1000]
code = '''
- lfs_format(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
// remove lingering dummy?
- err = lfs_stat(&lfs, "dummy", &info);
+ struct lfs_info info;
+ int err = lfs_stat(&lfs, "dummy", &info);
assert(err == 0 || (err == LFS_ERR_NOENT && i == 0));
if (!err) {
assert(strcmp(info.name, "dummy") == 0);
@@ -70,6 +87,7 @@ code = '''
lfs_remove(&lfs, "dummy") => 0;
}
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "dummy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
@@ -80,26 +98,30 @@ code = '''
}
// one last check after power-cycle
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "dummy", &info) => 0;
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
lfs_unmount(&lfs) => 0;
'''
-[[case]] # reentrant expanding superblock
-define.LFS_BLOCK_CYCLES = [2, 1]
-define.N = 24
+# reentrant expanding superblock
+[cases.test_superblocks_reentrant_expand]
+defines.BLOCK_CYCLES = [2, 1]
+defines.N = 24
reentrant = true
code = '''
- err = lfs_mount(&lfs, &cfg);
+ lfs_t lfs;
+ int err = lfs_mount(&lfs, cfg);
if (err) {
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
}
for (int i = 0; i < N; i++) {
// remove lingering dummy?
+ struct lfs_info info;
err = lfs_stat(&lfs, "dummy", &info);
assert(err == 0 || (err == LFS_ERR_NOENT && i == 0));
if (!err) {
@@ -108,6 +130,7 @@ code = '''
lfs_remove(&lfs, "dummy") => 0;
}
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "dummy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
@@ -119,7 +142,8 @@ code = '''
lfs_unmount(&lfs) => 0;
// one last check after power-cycle
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ struct lfs_info info;
lfs_stat(&lfs, "dummy", &info) => 0;
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
diff --git a/tests/test_truncate.toml b/tests/test_truncate.toml
index 850d7aae..2f10e952 100644
--- a/tests/test_truncate.toml
+++ b/tests/test_truncate.toml
@@ -1,23 +1,29 @@
-[[case]] # simple truncate
-define.MEDIUMSIZE = [32, 2048]
-define.LARGESIZE = 8192
+# simple truncate
+[cases.test_truncate_simple]
+defines.MEDIUMSIZE = [31, 32, 33, 511, 512, 513, 2047, 2048, 2049]
+defines.LARGESIZE = [32, 33, 512, 513, 2048, 2049, 8192, 8193]
+if = 'MEDIUMSIZE < LARGESIZE'
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "baldynoop",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ uint8_t buffer[1024];
strcpy((char*)buffer, "hair");
- size = strlen((char*)buffer);
+ size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
- lfs_file_write(&lfs, &file, buffer, size) => size;
+ lfs_file_write(&lfs, &file, buffer, lfs_min(size, LARGESIZE-j))
+ => lfs_min(size, LARGESIZE-j);
}
lfs_file_size(&lfs, &file) => LARGESIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => LARGESIZE;
@@ -27,14 +33,15 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
size = strlen("hair");
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
- lfs_file_read(&lfs, &file, buffer, size) => size;
- memcmp(buffer, "hair", size) => 0;
+ lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
+ => lfs_min(size, MEDIUMSIZE-j);
+ memcmp(buffer, "hair", lfs_min(size, MEDIUMSIZE-j)) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
@@ -42,26 +49,32 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # truncate and read
-define.MEDIUMSIZE = [32, 2048]
-define.LARGESIZE = 8192
+# truncate and read
+[cases.test_truncate_read]
+defines.MEDIUMSIZE = [31, 32, 33, 511, 512, 513, 2047, 2048, 2049]
+defines.LARGESIZE = [32, 33, 512, 513, 2048, 2049, 8192, 8193]
+if = 'MEDIUMSIZE < LARGESIZE'
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "baldyread",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ uint8_t buffer[1024];
strcpy((char*)buffer, "hair");
- size = strlen((char*)buffer);
+ size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
- lfs_file_write(&lfs, &file, buffer, size) => size;
+ lfs_file_write(&lfs, &file, buffer, lfs_min(size, LARGESIZE-j))
+ => lfs_min(size, LARGESIZE-j);
}
lfs_file_size(&lfs, &file) => LARGESIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "baldyread", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => LARGESIZE;
@@ -70,22 +83,24 @@ code = '''
size = strlen("hair");
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
- lfs_file_read(&lfs, &file, buffer, size) => size;
- memcmp(buffer, "hair", size) => 0;
+ lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
+ => lfs_min(size, MEDIUMSIZE-j);
+ memcmp(buffer, "hair", lfs_min(size, MEDIUMSIZE-j)) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "baldyread", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
size = strlen("hair");
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
- lfs_file_read(&lfs, &file, buffer, size) => size;
- memcmp(buffer, "hair", size) => 0;
+ lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
+ => lfs_min(size, MEDIUMSIZE-j);
+ memcmp(buffer, "hair", lfs_min(size, MEDIUMSIZE-j)) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
@@ -93,14 +108,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # write, truncate, and read
+# write, truncate, and read
+[cases.test_truncate_write_read]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "sequence",
LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
- size = lfs_min(lfs.cfg->cache_size, sizeof(buffer)/2);
+ uint8_t buffer[1024];
+ size_t size = lfs_min(lfs.cfg->cache_size, sizeof(buffer)/2);
lfs_size_t qsize = size / 4;
uint8_t *wb = buffer;
uint8_t *rb = buffer + size;
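+    // buffer is split in half: the pattern is written from wb and read back
+    // into rb, and the qsize quarters presumably give truncate/seek points
+    // that land within a single cache fill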
@@ -136,7 +155,7 @@ code = '''
lfs_file_truncate(&lfs, &file, trunc) => 0;
lfs_file_tell(&lfs, &file) => qsize;
lfs_file_size(&lfs, &file) => trunc;
-
+
/* Read should produce second quarter */
lfs_file_read(&lfs, &file, rb, size) => trunc - qsize;
memcmp(rb, wb + qsize, trunc - qsize) => 0;
@@ -145,50 +164,60 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # truncate and write
-define.MEDIUMSIZE = [32, 2048]
-define.LARGESIZE = 8192
+# truncate and write
+[cases.test_truncate_write]
+defines.MEDIUMSIZE = [31, 32, 33, 511, 512, 513, 2047, 2048, 2049]
+defines.LARGESIZE = [32, 33, 512, 513, 2048, 2049, 8192, 8193]
+if = 'MEDIUMSIZE < LARGESIZE'
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "baldywrite",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ uint8_t buffer[1024];
strcpy((char*)buffer, "hair");
- size = strlen((char*)buffer);
+ size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
- lfs_file_write(&lfs, &file, buffer, size) => size;
+ lfs_file_write(&lfs, &file, buffer, lfs_min(size, LARGESIZE-j))
+ => lfs_min(size, LARGESIZE-j);
}
lfs_file_size(&lfs, &file) => LARGESIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "baldywrite", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => LARGESIZE;
+ /* truncate */
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+ /* and write */
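+    // rewriting with a different pattern ("bald" vs "hair") means any stale
+    // data surviving past the truncate would show up in the read-back below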
strcpy((char*)buffer, "bald");
size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
- lfs_file_write(&lfs, &file, buffer, size) => size;
+ lfs_file_write(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
+ => lfs_min(size, MEDIUMSIZE-j);
}
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "baldywrite", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
size = strlen("bald");
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
- lfs_file_read(&lfs, &file, buffer, size) => size;
- memcmp(buffer, "bald", size) => 0;
+ lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
+ => lfs_min(size, MEDIUMSIZE-j);
+ memcmp(buffer, "bald", lfs_min(size, MEDIUMSIZE-j)) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
@@ -196,30 +225,35 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # truncate write under powerloss
-define.SMALLSIZE = [4, 512]
-define.MEDIUMSIZE = [32, 1024]
-define.LARGESIZE = 2048
+# truncate and write under powerloss
+[cases.test_truncate_reentrant_write]
+defines.SMALLSIZE = [4, 512]
+defines.MEDIUMSIZE = [0, 3, 4, 5, 31, 32, 33, 511, 512, 513, 1023, 1024, 1025]
+defines.LARGESIZE = 2048
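+# note: MEDIUMSIZE=0 covers truncating all the way down to an empty file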
reentrant = true
code = '''
- err = lfs_mount(&lfs, &cfg);
+ lfs_t lfs;
+ int err = lfs_mount(&lfs, cfg);
if (err) {
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
}
+ lfs_file_t file;
err = lfs_file_open(&lfs, &file, "baldy", LFS_O_RDONLY);
assert(!err || err == LFS_ERR_NOENT);
if (!err) {
- size = lfs_file_size(&lfs, &file);
+ size_t size = lfs_file_size(&lfs, &file);
assert(size == 0 ||
- size == LARGESIZE ||
- size == MEDIUMSIZE ||
- size == SMALLSIZE);
+ size == (size_t)LARGESIZE ||
+ size == (size_t)MEDIUMSIZE ||
+ size == (size_t)SMALLSIZE);
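+        // a power loss may hit between any of the write/truncate phases
+        // below, so both the size and the per-chunk contents may reflect
+        // any previously committed state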
for (lfs_off_t j = 0; j < size; j += 4) {
- lfs_file_read(&lfs, &file, buffer, 4) => 4;
- assert(memcmp(buffer, "hair", 4) == 0 ||
- memcmp(buffer, "bald", 4) == 0 ||
- memcmp(buffer, "comb", 4) == 0);
+ uint8_t buffer[1024];
+ lfs_file_read(&lfs, &file, buffer, lfs_min(4, size-j))
+ => lfs_min(4, size-j);
+ assert(memcmp(buffer, "hair", lfs_min(4, size-j)) == 0 ||
+ memcmp(buffer, "bald", lfs_min(4, size-j)) == 0 ||
+ memcmp(buffer, "comb", lfs_min(4, size-j)) == 0);
}
lfs_file_close(&lfs, &file) => 0;
}
@@ -227,22 +261,27 @@ code = '''
lfs_file_open(&lfs, &file, "baldy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
lfs_file_size(&lfs, &file) => 0;
+ uint8_t buffer[1024];
strcpy((char*)buffer, "hair");
- size = strlen((char*)buffer);
+ size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
- lfs_file_write(&lfs, &file, buffer, size) => size;
+ lfs_file_write(&lfs, &file, buffer, lfs_min(size, LARGESIZE-j))
+ => lfs_min(size, LARGESIZE-j);
}
lfs_file_size(&lfs, &file) => LARGESIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, "baldy", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => LARGESIZE;
+ /* truncate */
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+ /* and write */
strcpy((char*)buffer, "bald");
size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
- lfs_file_write(&lfs, &file, buffer, size) => size;
+ lfs_file_write(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
+ => lfs_min(size, MEDIUMSIZE-j);
}
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
lfs_file_close(&lfs, &file) => 0;
@@ -254,7 +293,8 @@ code = '''
strcpy((char*)buffer, "comb");
size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < SMALLSIZE; j += size) {
- lfs_file_write(&lfs, &file, buffer, size) => size;
+ lfs_file_write(&lfs, &file, buffer, lfs_min(size, SMALLSIZE-j))
+ => lfs_min(size, SMALLSIZE-j);
}
lfs_file_size(&lfs, &file) => SMALLSIZE;
lfs_file_close(&lfs, &file) => 0;
@@ -262,12 +302,14 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # more aggressive general truncation tests
-define.CONFIG = 'range(6)'
-define.SMALLSIZE = 32
-define.MEDIUMSIZE = 2048
-define.LARGESIZE = 8192
+# more aggressive general truncation tests
+[cases.test_truncate_aggressive]
+defines.CONFIG = 'range(6)'
+defines.SMALLSIZE = 32
+defines.MEDIUMSIZE = 2048
+defines.LARGESIZE = 8192
code = '''
+ lfs_t lfs;
#define COUNT 5
const struct {
lfs_off_t startsizes[COUNT];
@@ -312,16 +354,19 @@ code = '''
const lfs_off_t *hotsizes = configs[CONFIG].hotsizes;
const lfs_off_t *coldsizes = configs[CONFIG].coldsizes;
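+    // each config lists per-file sizes: startsizes are written first,
+    // hotsizes are the sizes expected after an in-session truncate, and
+    // coldsizes the sizes expected after a final truncate and remount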
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (unsigned i = 0; i < COUNT; i++) {
+ char path[1024];
sprintf(path, "hairyhead%d", i);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ uint8_t buffer[1024];
strcpy((char*)buffer, "hair");
- size = strlen((char*)buffer);
+ size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < startsizes[i]; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@@ -340,21 +385,25 @@ code = '''
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (unsigned i = 0; i < COUNT; i++) {
+ char path[1024];
sprintf(path, "hairyhead%d", i);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => hotsizes[i];
- size = strlen("hair");
+ size_t size = strlen("hair");
lfs_off_t j = 0;
for (; j < startsizes[i] && j < hotsizes[i]; j += size) {
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
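+        // if the truncate grew the file, the extension must read back as
+        // zeros, matching POSIX-style zero-fill semantics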
for (; j < hotsizes[i]; j += size) {
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "\0\0\0\0", size) => 0;
}
@@ -367,22 +416,26 @@ code = '''
lfs_unmount(&lfs) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
for (unsigned i = 0; i < COUNT; i++) {
+ char path[1024];
sprintf(path, "hairyhead%d", i);
+ lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => coldsizes[i];
- size = strlen("hair");
+ size_t size = strlen("hair");
lfs_off_t j = 0;
for (; j < startsizes[i] && j < hotsizes[i] && j < coldsizes[i];
j += size) {
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
for (; j < coldsizes[i]; j += size) {
+ uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "\0\0\0\0", size) => 0;
}
@@ -393,21 +446,26 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
-[[case]] # noop truncate
-define.MEDIUMSIZE = [32, 2048]
+# noop truncate
+[cases.test_truncate_nop]
+defines.MEDIUMSIZE = [32, 33, 512, 513, 2048, 2049, 8192, 8193]
code = '''
- lfs_format(&lfs, &cfg) => 0;
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_file_t file;
lfs_file_open(&lfs, &file, "baldynoop",
LFS_O_RDWR | LFS_O_CREAT) => 0;
+ uint8_t buffer[1024];
strcpy((char*)buffer, "hair");
- size = strlen((char*)buffer);
+ size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
- lfs_file_write(&lfs, &file, buffer, size) => size;
+ lfs_file_write(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
+ => lfs_min(size, MEDIUMSIZE-j);
// this truncate should do nothing
- lfs_file_truncate(&lfs, &file, j+size) => 0;
+ lfs_file_truncate(&lfs, &file, j+lfs_min(size, MEDIUMSIZE-j)) => 0;
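+        // j + lfs_min(size, MEDIUMSIZE-j) is exactly the file size right
+        // after the write above, so this truncate must never shrink or grow
+        // the file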
}
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
@@ -417,8 +475,9 @@ code = '''
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
- lfs_file_read(&lfs, &file, buffer, size) => size;
- memcmp(buffer, "hair", size) => 0;
+ lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
+ => lfs_min(size, MEDIUMSIZE-j);
+ memcmp(buffer, "hair", lfs_min(size, MEDIUMSIZE-j)) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
@@ -426,12 +485,13 @@ code = '''
lfs_unmount(&lfs) => 0;
// still there after reboot?
- lfs_mount(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
- lfs_file_read(&lfs, &file, buffer, size) => size;
- memcmp(buffer, "hair", size) => 0;
+ lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
+ => lfs_min(size, MEDIUMSIZE-j);
+ memcmp(buffer, "hair", lfs_min(size, MEDIUMSIZE-j)) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;